mirror of https://github.com/SigNoz/signoz.git
synced 2026-02-24 17:23:19 +00:00

Compare commits: feat/cloud ... remove-v4 (14 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 5ad672aa63 | |
| | 5be4c5d536 | |
| | cb1a2a8a13 | |
| | 1a5d37b25a | |
| | bc4273f2f8 | |
| | 77fdd28e93 | |
| | 8e08a42617 | |
| | 2c3042304a | |
| | c9da09256e | |
| | e8ed22cafb | |
| | 4658232025 | |
| | e8add5942e | |
| | ddecf05d9f | |
| | bf13b26a37 | |
@@ -41,31 +41,23 @@ services:
interval: 30s
timeout: 5s
retries: 3
schema-migrator-sync:
image: signoz/signoz-schema-migrator:v0.142.0
container_name: schema-migrator-sync
telemetrystore-migrator:
image: signoz/signoz-otel-collector:v0.142.0
container_name: telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
command:
- sync
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:v0.142.0
container_name: schema-migrator-async
command:
- async
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
restart: on-failure
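The hunk above interleaves the removed schema-migrator-sync/schema-migrator-async services with their replacement, so the shape of the new service is easy to miss. A minimal reconstruction of the new one-shot migrator, assuming standard compose indentation (the diff rendering dropped it):

```yaml
telemetrystore-migrator:
  image: signoz/signoz-otel-collector:v0.142.0
  container_name: telemetrystore-migrator
  environment:
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
    - SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
  entrypoint:
    - /bin/sh
  command:
    - -c
    - |
      # run the three migration phases in order, then exit
      /signoz-otel-collector migrate bootstrap &&
      /signoz-otel-collector migrate sync up &&
      /signoz-otel-collector migrate async up
  depends_on:
    clickhouse:
      condition: service_healthy
  restart: on-failure
```

Because the container runs the three migrate steps and then exits, downstream services can presumably gate on it with condition: service_completed_successfully instead of depending on two separate sync/async migrator containers.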
@@ -1,14 +1,23 @@
services:
signoz-otel-collector:
image: signoz/signoz-otel-collector:v0.129.6
image: signoz/signoz-otel-collector:v0.142.0
container_name: signoz-otel-collector-dev
entrypoint:
- /bin/sh
command:
- --config=/etc/otel-collector-config.yaml
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
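Read together, the dev collector change above wraps the collector binary in a shell so startup is blocked until the schema check passes. A sketch of the resulting service, again with indentation restored by assumption:

```yaml
signoz-otel-collector:
  image: signoz/signoz-otel-collector:v0.142.0
  container_name: signoz-otel-collector-dev
  entrypoint:
    - /bin/sh
  command:
    - -c
    - |
      # only start the collector if the synchronous schema migrations are in place
      /signoz-otel-collector migrate sync check &&
      /signoz-otel-collector --config=/etc/otel-collector-config.yaml
  volumes:
    - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
  environment:
    - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    - LOW_CARDINAL_EXCEPTION_GROUPING=false
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
    - SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
    - SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
  ports:
    - "4317:4317" # OTLP gRPC receiver
    - "4318:4318" # OTLP HTTP receiver
```

The && chain means the collector process is only started if migrate sync check exits zero, so the collector can no longer come up against a ClickHouse schema that the synchronous migrations have not reached yet.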
3 .github/workflows/integrationci.yaml vendored
@@ -48,12 +48,13 @@ jobs:
- role
- ttl
- alerts
- ingestionkeys
sqlstore-provider:
- postgres
- sqlite
clickhouse-version:
- 25.5.6
- 25.10.5
- 25.12.5
schema-migrator-version:
- v0.142.0
postgres-version:
@@ -318,4 +318,5 @@ user:
# The password of the root user. Must meet password requirements.
password: ""
# The name of the organization to create or look up for the root user.
org_name: default
org:
name: default
@@ -61,7 +61,6 @@ x-db-depend: &db-depend
|
||||
- clickhouse
|
||||
- clickhouse-2
|
||||
- clickhouse-3
|
||||
- schema-migrator
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
@@ -136,12 +135,17 @@ services:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
configs:
|
||||
- source: clickhouse-config
|
||||
target: /etc/clickhouse-server/config.xml
|
||||
- source: clickhouse-users
|
||||
target: /etc/clickhouse-server/users.xml
|
||||
- source: clickhouse-custom-function
|
||||
target: /etc/clickhouse-server/custom-function.xml
|
||||
- source: clickhouse-cluster
|
||||
target: /etc/clickhouse-server/config.d/cluster.ha.xml
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
clickhouse-2:
|
||||
@@ -151,12 +155,17 @@ services:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
configs:
|
||||
- source: clickhouse-config
|
||||
target: /etc/clickhouse-server/config.xml
|
||||
- source: clickhouse-users
|
||||
target: /etc/clickhouse-server/users.xml
|
||||
- source: clickhouse-custom-function
|
||||
target: /etc/clickhouse-server/custom-function.xml
|
||||
- source: clickhouse-cluster
|
||||
target: /etc/clickhouse-server/config.d/cluster.ha.xml
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
clickhouse-3:
|
||||
@@ -166,37 +175,32 @@ services:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
configs:
|
||||
- source: clickhouse-config
|
||||
target: /etc/clickhouse-server/config.xml
|
||||
- source: clickhouse-users
|
||||
target: /etc/clickhouse-server/users.xml
|
||||
- source: clickhouse-custom-function
|
||||
target: /etc/clickhouse-server/custom-function.xml
|
||||
- source: clickhouse-cluster
|
||||
target: /etc/clickhouse-server/config.d/cluster.ha.xml
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.112.0
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
image: signoz/signoz:v0.112.1
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
|
||||
- DOT_METRICS_ENABLED=true
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -209,40 +213,48 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.142.0
|
||||
image: signoz/signoz-otel-collector:v0.142.1
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate sync check &&
|
||||
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
|
||||
configs:
|
||||
- source: otel-collector-config
|
||||
target: /etc/otel-collector-config.yaml
|
||||
- source: otel-manager-config
|
||||
target: /etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
deploy:
|
||||
replicas: 3
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.142.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
entrypoint: sh
|
||||
signoz-telemetrystore-migrator:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
environment:
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- -c
|
||||
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- |
|
||||
/signoz-otel-collector migrate bootstrap &&
|
||||
/signoz-otel-collector migrate sync up &&
|
||||
/signoz-otel-collector migrate async up
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
@@ -261,3 +273,16 @@ volumes:
|
||||
name: signoz-zookeeper-2
|
||||
zookeeper-3:
|
||||
name: signoz-zookeeper-3
|
||||
configs:
|
||||
clickhouse-config:
|
||||
file: ../common/clickhouse/config.xml
|
||||
clickhouse-users:
|
||||
file: ../common/clickhouse/users.xml
|
||||
clickhouse-custom-function:
|
||||
file: ../common/clickhouse/custom-function.xml
|
||||
clickhouse-cluster:
|
||||
file: ../common/clickhouse/cluster.ha.xml
|
||||
otel-collector-config:
|
||||
file: ./otel-collector-config.yaml
|
||||
otel-manager-config:
|
||||
file: ../common/signoz/otel-collector-opamp-config.yaml
|
||||
|
||||
@@ -58,7 +58,6 @@ x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
@@ -114,30 +113,20 @@ services:
|
||||
target: /etc/clickhouse-server/config.d/cluster.xml
|
||||
volumes:
|
||||
- clickhouse:/var/lib/clickhouse/
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.112.0
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
image: signoz/signoz:v0.112.1
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- sqlite:/var/lib/signoz/
|
||||
configs:
|
||||
- source: signoz-prometheus-config
|
||||
target: /root/config/prometheus.yml
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
- DOT_METRICS_ENABLED=true
|
||||
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -150,11 +139,14 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.142.0
|
||||
image: signoz/signoz-otel-collector:v0.142.1
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate sync check &&
|
||||
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
|
||||
configs:
|
||||
- source: otel-collector-config
|
||||
target: /etc/otel-collector-config.yaml
|
||||
@@ -163,29 +155,32 @@ services:
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
deploy:
|
||||
replicas: 3
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.142.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
entrypoint: sh
|
||||
signoz-telemetrystore-migrator:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
environment:
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- -c
|
||||
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- |
|
||||
/signoz-otel-collector migrate bootstrap &&
|
||||
/signoz-otel-collector migrate sync up &&
|
||||
/signoz-otel-collector migrate async up
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
@@ -205,14 +200,6 @@ configs:
|
||||
file: ../common/clickhouse/custom-function.xml
|
||||
clickhouse-cluster:
|
||||
file: ../common/clickhouse/cluster.xml
|
||||
signoz-prometheus-config:
|
||||
file: ../common/signoz/prometheus.yml
|
||||
# If you have multiple dashboard files, you can list them individually:
|
||||
# dashboard-foo:
|
||||
# file: ../common/dashboards/foo.json
|
||||
# dashboard-bar:
|
||||
# file: ../common/dashboards/bar.json
|
||||
|
||||
otel-collector-config:
|
||||
file: ./otel-collector-config.yaml
|
||||
otel-manager-config:
|
||||
|
||||
@@ -62,8 +62,10 @@ x-db-depend: &db-depend
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
clickhouse-2:
|
||||
condition: service_healthy
|
||||
clickhouse-3:
|
||||
condition: service_healthy
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
@@ -179,27 +181,17 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${VERSION:-v0.112.0}
|
||||
image: signoz/signoz:${VERSION:-v0.112.1}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- DOT_METRICS_ENABLED=true
|
||||
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -210,51 +202,48 @@ services:
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
|
||||
container_name: signoz-otel-collector
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate sync check &&
|
||||
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
signoz:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-async:
|
||||
signoz-telemetrystore-migrator:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: schema-migrator-async
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: signoz-telemetrystore-migrator
|
||||
environment:
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- async
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate bootstrap &&
|
||||
/signoz-otel-collector migrate sync up &&
|
||||
/signoz-otel-collector migrate async up
|
||||
restart: on-failure
|
||||
networks:
|
||||
signoz-net:
|
||||
|
||||
@@ -57,8 +57,6 @@ x-db-depend: &db-depend
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
@@ -111,27 +109,17 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${VERSION:-v0.112.0}
|
||||
image: signoz/signoz:${VERSION:-v0.112.1}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- DOT_METRICS_ENABLED=true
|
||||
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -144,45 +132,46 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
|
||||
container_name: signoz-otel-collector
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate sync check &&
|
||||
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
depends_on:
|
||||
signoz:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
schema-migrator-async:
|
||||
signoz-telemetrystore-migrator:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: schema-migrator-async
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
||||
container_name: signoz-telemetrystore-migrator
|
||||
environment:
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
|
||||
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
|
||||
entrypoint:
|
||||
- /bin/sh
|
||||
command:
|
||||
- async
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
- -c
|
||||
- |
|
||||
/signoz-otel-collector migrate bootstrap &&
|
||||
/signoz-otel-collector migrate sync up &&
|
||||
/signoz-otel-collector migrate async up
|
||||
restart: on-failure
|
||||
networks:
|
||||
signoz-net:
|
||||
|
||||
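Across the compose variants above (swarm, HA, standalone) the startup-ordering idea is the same: application services share a depends_on anchor so they only start after ClickHouse reports healthy and the one-shot migrator container has exited successfully. A sketch of that pattern, assuming the reworked anchor now gates on the renamed signoz-telemetrystore-migrator service (the hunks above show the old anchor gating on schema-migrator-sync, not the final form verbatim):

```yaml
x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    signoz-telemetrystore-migrator:    # assumed name; the old anchor referenced schema-migrator-sync
      condition: service_completed_successfully

services:
  signoz:
    <<: *db-depend    # signoz and otel-collector reuse the same gating
```

The service_completed_successfully condition only makes sense because the migrator is a run-to-completion container rather than a long-running service.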
@@ -80,6 +80,37 @@ components:
|
||||
updatedAt:
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
AuthtypesGettableObjects:
|
||||
properties:
|
||||
resource:
|
||||
$ref: '#/components/schemas/AuthtypesResource'
|
||||
selectors:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- resource
|
||||
- selectors
|
||||
type: object
|
||||
AuthtypesGettableResources:
|
||||
properties:
|
||||
relations:
|
||||
additionalProperties:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
nullable: true
|
||||
type: object
|
||||
resources:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesResource'
|
||||
type: array
|
||||
required:
|
||||
- resources
|
||||
- relations
|
||||
type: object
|
||||
AuthtypesGettableToken:
|
||||
properties:
|
||||
@@ -130,8 +161,6 @@ components:
|
||||
serviceAccountJson:
|
||||
type: string
|
||||
type: object
|
||||
AuthtypesName:
|
||||
type: object
|
||||
AuthtypesOIDCConfig:
|
||||
properties:
|
||||
claimMapping:
|
||||
@@ -154,7 +183,7 @@ components:
|
||||
resource:
|
||||
$ref: '#/components/schemas/AuthtypesResource'
|
||||
selector:
|
||||
$ref: '#/components/schemas/AuthtypesSelector'
|
||||
type: string
|
||||
required:
|
||||
- resource
|
||||
- selector
|
||||
@@ -175,6 +204,22 @@ components:
|
||||
provider:
|
||||
type: string
|
||||
type: object
|
||||
AuthtypesPatchableObjects:
|
||||
properties:
|
||||
additions:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesGettableObjects'
|
||||
nullable: true
|
||||
type: array
|
||||
deletions:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesGettableObjects'
|
||||
nullable: true
|
||||
type: array
|
||||
required:
|
||||
- additions
|
||||
- deletions
|
||||
type: object
|
||||
AuthtypesPostableAuthDomain:
|
||||
properties:
|
||||
config:
|
||||
@@ -199,7 +244,7 @@ components:
|
||||
AuthtypesResource:
|
||||
properties:
|
||||
name:
|
||||
$ref: '#/components/schemas/AuthtypesName'
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
required:
|
||||
@@ -231,8 +276,6 @@ components:
|
||||
samlIdp:
|
||||
type: string
|
||||
type: object
|
||||
AuthtypesSelector:
|
||||
type: object
|
||||
AuthtypesSessionContext:
|
||||
properties:
|
||||
exists:
|
||||
@@ -245,8 +288,6 @@ components:
|
||||
type: object
|
||||
AuthtypesTransaction:
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
object:
|
||||
$ref: '#/components/schemas/AuthtypesObject'
|
||||
relation:
|
||||
@@ -460,10 +501,10 @@ components:
|
||||
GatewaytypesLimitValue:
|
||||
properties:
|
||||
count:
|
||||
format: int64
|
||||
nullable: true
|
||||
type: integer
|
||||
size:
|
||||
format: int64
|
||||
nullable: true
|
||||
type: integer
|
||||
type: object
|
||||
GatewaytypesPagination:
|
||||
@@ -1668,40 +1709,6 @@ components:
|
||||
- status
|
||||
- error
|
||||
type: object
|
||||
RoletypesGettableResources:
|
||||
properties:
|
||||
relations:
|
||||
additionalProperties:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
nullable: true
|
||||
type: object
|
||||
resources:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesResource'
|
||||
nullable: true
|
||||
type: array
|
||||
required:
|
||||
- resources
|
||||
- relations
|
||||
type: object
|
||||
RoletypesPatchableObjects:
|
||||
properties:
|
||||
additions:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesObject'
|
||||
nullable: true
|
||||
type: array
|
||||
deletions:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesObject'
|
||||
nullable: true
|
||||
type: array
|
||||
required:
|
||||
- additions
|
||||
- deletions
|
||||
type: object
|
||||
RoletypesPatchableRole:
|
||||
properties:
|
||||
description:
|
||||
@@ -1737,6 +1744,7 @@ components:
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
- name
|
||||
- description
|
||||
- type
|
||||
@@ -1874,6 +1882,8 @@ components:
|
||||
$ref: '#/components/schemas/TypesUser'
|
||||
userId:
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesGettableGlobalConfig:
|
||||
properties:
|
||||
@@ -1886,6 +1896,8 @@ components:
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesInvite:
|
||||
properties:
|
||||
@@ -1909,6 +1921,8 @@ components:
|
||||
updatedAt:
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesOrganization:
|
||||
properties:
|
||||
@@ -1929,6 +1943,8 @@ components:
|
||||
updatedAt:
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesPostableAPIKey:
|
||||
properties:
|
||||
@@ -1992,6 +2008,8 @@ components:
|
||||
type: string
|
||||
token:
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesStorableAPIKey:
|
||||
properties:
|
||||
@@ -2017,6 +2035,8 @@ components:
|
||||
type: string
|
||||
userId:
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
TypesUser:
|
||||
properties:
|
||||
@@ -2038,6 +2058,8 @@ components:
|
||||
updatedAt:
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- id
|
||||
type: object
|
||||
ZeustypesGettableHost:
|
||||
properties:
|
||||
@@ -2170,6 +2192,35 @@ paths:
|
||||
summary: Check permissions
|
||||
tags:
|
||||
- authz
|
||||
/api/v1/authz/resources:
|
||||
get:
|
||||
deprecated: false
|
||||
description: Gets all the available resources
|
||||
operationId: AuthzResources
|
||||
responses:
|
||||
"200":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
properties:
|
||||
data:
|
||||
$ref: '#/components/schemas/AuthtypesGettableResources'
|
||||
status:
|
||||
type: string
|
||||
required:
|
||||
- status
|
||||
- data
|
||||
type: object
|
||||
description: OK
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Internal Server Error
|
||||
summary: Get resources
|
||||
tags:
|
||||
- authz
|
||||
/api/v1/changePassword/{id}:
|
||||
post:
|
||||
deprecated: false
|
||||
@@ -4342,7 +4393,7 @@ paths:
|
||||
properties:
|
||||
data:
|
||||
items:
|
||||
$ref: '#/components/schemas/AuthtypesObject'
|
||||
$ref: '#/components/schemas/AuthtypesGettableObjects'
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
@@ -4415,7 +4466,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RoletypesPatchableObjects'
|
||||
$ref: '#/components/schemas/AuthtypesPatchableObjects'
|
||||
responses:
|
||||
"204":
|
||||
content:
|
||||
@@ -4473,52 +4524,6 @@ paths:
|
||||
summary: Patch objects for a role by relation
|
||||
tags:
|
||||
- role
|
||||
/api/v1/roles/resources:
|
||||
get:
|
||||
deprecated: false
|
||||
description: Gets all the available resources for role assignment
|
||||
operationId: GetResources
|
||||
responses:
|
||||
"200":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
properties:
|
||||
data:
|
||||
$ref: '#/components/schemas/RoletypesGettableResources'
|
||||
status:
|
||||
type: string
|
||||
required:
|
||||
- status
|
||||
- data
|
||||
type: object
|
||||
description: OK
|
||||
"401":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Unauthorized
|
||||
"403":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Forbidden
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Internal Server Error
|
||||
security:
|
||||
- api_key:
|
||||
- ADMIN
|
||||
- tokenizer:
|
||||
- ADMIN
|
||||
summary: Get resources
|
||||
tags:
|
||||
- role
|
||||
/api/v1/user:
|
||||
get:
|
||||
deprecated: false
|
||||
@@ -5091,7 +5096,7 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/GatewaytypesPostableIngestionKey'
|
||||
responses:
|
||||
"200":
|
||||
"201":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
@@ -5104,7 +5109,7 @@ paths:
|
||||
- status
|
||||
- data
|
||||
type: object
|
||||
description: OK
|
||||
description: Created
|
||||
"401":
|
||||
content:
|
||||
application/json:
|
||||
@@ -5532,6 +5537,12 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Forbidden
|
||||
"404":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Not Found
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
@@ -5601,6 +5612,12 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Forbidden
|
||||
"404":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Not Found
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
@@ -5659,6 +5676,12 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Forbidden
|
||||
"404":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Not Found
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
@@ -5718,6 +5741,12 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Forbidden
|
||||
"404":
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RenderErrorResponse'
|
||||
description: Not Found
|
||||
"500":
|
||||
content:
|
||||
application/json:
|
||||
|
||||
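To make the new authorization surface above concrete, here is a purely illustrative response body for GET /api/v1/authz/resources, shaped after the AuthtypesGettableResources schema (resource names and relations are made up; only the field layout comes from the spec, and the API returns JSON, shown here as YAML for readability):

```yaml
status: success            # illustrative; the spec only says "type: string"
data:
  resources:
    - name: dashboards     # illustrative resource name
      type: resource       # illustrative type value
    - name: alerts
      type: resource
  relations:
    dashboards:
      - read
      - update
    alerts:
      - read
```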
@@ -171,8 +171,6 @@ func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource
|
||||
for _, register := range provider.registry {
|
||||
typeables = append(typeables, register.MustGetTypeables()...)
|
||||
}
|
||||
// the role module cannot register itself through the registry, so append its typeables explicitly
|
||||
typeables = append(typeables, provider.MustGetTypeables()...)
|
||||
|
||||
resources := make([]*authtypes.Resource, 0)
|
||||
for _, typeable := range typeables {
|
||||
@@ -259,7 +257,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
|
||||
}
|
||||
|
||||
role := roletypes.NewRoleFromStorableRole(storableRole)
|
||||
err = role.CanEditDelete()
|
||||
err = role.ErrIfManaged()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
@@ -11,6 +10,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/global"
|
||||
"github.com/SigNoz/signoz/pkg/http/middleware"
|
||||
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
@@ -27,12 +27,12 @@ type APIHandlerOptions struct {
|
||||
RulesManager *rules.Manager
|
||||
UsageManager *usage.Manager
|
||||
IntegrationsController *integrations.Controller
|
||||
CloudIntegrationsController *cloudintegrations.Controller
|
||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||
GatewayUrl string
|
||||
// Querier Influx Interval
|
||||
FluxInterval time.Duration
|
||||
GlobalConfig global.Config
|
||||
Logger *slog.Logger // this is present in Signoz.Instrumentation but adding for quick access
|
||||
}
|
||||
|
||||
type APIHandler struct {
|
||||
@@ -46,13 +46,13 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.
|
||||
Reader: opts.DataConnector,
|
||||
RuleManager: opts.RulesManager,
|
||||
IntegrationsController: opts.IntegrationsController,
|
||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||
FluxInterval: opts.FluxInterval,
|
||||
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
||||
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
|
||||
Signoz: signoz,
|
||||
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
||||
Logger: opts.Logger,
|
||||
}, config)
|
||||
|
||||
if err != nil {
|
||||
@@ -101,12 +101,14 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||
}
|
||||
|
||||
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||
|
||||
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
|
||||
|
||||
router.HandleFunc(
|
||||
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
|
||||
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
|
||||
).Methods(http.MethodGet)
|
||||
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -14,14 +13,20 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/http/render"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user"
|
||||
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/gorilla/mux"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// TODO: move this file with other cloud integration related code
|
||||
type CloudIntegrationConnectionParamsResponse struct {
|
||||
IngestionUrl string `json:"ingestion_url,omitempty"`
|
||||
IngestionKey string `json:"ingestion_key,omitempty"`
|
||||
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
|
||||
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
|
||||
}
|
||||
|
||||
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
@@ -36,21 +41,23 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
||||
return
|
||||
}
|
||||
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
if cloudProvider != "aws" {
|
||||
RespondError(w, basemodel.BadRequest(fmt.Errorf(
|
||||
"cloud provider not supported: %s", cloudProvider,
|
||||
)), nil)
|
||||
return
|
||||
}
|
||||
|
||||
apiKey, err := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
|
||||
if apiErr != nil {
|
||||
RespondError(w, basemodel.WrapApiError(
|
||||
apiErr, "couldn't provision PAT for cloud integration:",
|
||||
), nil)
|
||||
return
|
||||
}
|
||||
|
||||
result := integrationtypes.GettableCloudIntegrationConnectionParams{
|
||||
result := CloudIntegrationConnectionParamsResponse{
|
||||
SigNozAPIKey: apiKey,
|
||||
}
|
||||
|
||||
@@ -64,17 +71,16 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
||||
// Return the API Key (PAT) even if the rest of the params can not be deduced.
|
||||
// Params not returned from here will be requested from the user via form inputs.
|
||||
// This enables a gracefully degraded but working experience even for non-cloud deployments.
|
||||
ah.opts.Logger.InfoContext(
|
||||
r.Context(),
|
||||
"ingestion params and signoz api url can not be deduced since no license was found",
|
||||
)
|
||||
render.Success(w, http.StatusOK, result)
|
||||
zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
|
||||
ah.Respond(w, result)
|
||||
return
|
||||
}
|
||||
|
||||
signozApiUrl, err := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
|
||||
if apiErr != nil {
|
||||
RespondError(w, basemodel.WrapApiError(
|
||||
apiErr, "couldn't deduce ingestion url and signoz api url",
|
||||
), nil)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -83,41 +89,48 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
||||
|
||||
gatewayUrl := ah.opts.GatewayUrl
|
||||
if len(gatewayUrl) > 0 {
|
||||
ingestionKeyString, err := ah.getOrCreateCloudProviderIngestionKey(
|
||||
|
||||
ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
|
||||
r.Context(), gatewayUrl, license.Key, cloudProvider,
|
||||
)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
if apiErr != nil {
|
||||
RespondError(w, basemodel.WrapApiError(
|
||||
apiErr, "couldn't get or create ingestion key",
|
||||
), nil)
|
||||
return
|
||||
}
|
||||
|
||||
result.IngestionKey = ingestionKeyString
|
||||
result.IngestionKey = ingestionKey
|
||||
|
||||
} else {
|
||||
ah.opts.Logger.InfoContext(
|
||||
r.Context(),
|
||||
"ingestion key can't be deduced since no gateway url has been configured",
|
||||
)
|
||||
zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, result)
|
||||
ah.Respond(w, result)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider valuer.String) (string, error) {
|
||||
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
|
||||
string, *basemodel.ApiError,
|
||||
) {
|
||||
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
|
||||
|
||||
integrationUser, err := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
|
||||
if apiErr != nil {
|
||||
return "", apiErr
|
||||
}
|
||||
|
||||
orgIdUUID, err := valuer.NewUUID(orgId)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't parse orgId: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't list PATs: %w", err,
|
||||
))
|
||||
}
|
||||
for _, p := range allPats {
|
||||
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
|
||||
@@ -125,10 +138,9 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
|
||||
}
|
||||
}
|
||||
|
||||
ah.opts.Logger.InfoContext(
|
||||
ctx,
|
||||
zap.L().Info(
|
||||
"no PAT found for cloud integration, creating a new one",
|
||||
slog.String("cloudProvider", cloudProvider.String()),
|
||||
zap.String("cloudProvider", cloudProvider),
|
||||
)
|
||||
|
||||
newPAT, err := types.NewStorableAPIKey(
|
||||
@@ -138,48 +150,68 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't create cloud integration PAT: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't create cloud integration PAT: %w", err,
|
||||
))
|
||||
}
|
||||
return newPAT.Token, nil
|
||||
}
|
||||
|
||||
// TODO: move this function out of handler and use proper module structure
|
||||
func (ah *APIHandler) getOrCreateCloudIntegrationUser(ctx context.Context, orgId string, cloudProvider valuer.String) (*types.User, error) {
|
||||
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider.String())
|
||||
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
|
||||
ctx context.Context, orgId string, cloudProvider string,
|
||||
) (*types.User, *basemodel.ApiError) {
|
||||
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
|
||||
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
|
||||
|
||||
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
|
||||
}
|
||||
|
||||
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
|
||||
|
||||
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
|
||||
}
|
||||
|
||||
return cloudIntegrationUser, nil
|
||||
}
|
||||
|
||||
// TODO: move this function out of handler and use proper module structure
|
||||
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (string, error) {
|
||||
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
|
||||
if err != nil {
|
||||
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't query for deployment info: error")
|
||||
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
|
||||
string, *basemodel.ApiError,
|
||||
) {
|
||||
// TODO: remove this struct from here
|
||||
type deploymentResponse struct {
|
||||
Name string `json:"name"`
|
||||
ClusterInfo struct {
|
||||
Region struct {
|
||||
DNS string `json:"dns"`
|
||||
} `json:"region"`
|
||||
} `json:"cluster"`
|
||||
}
|
||||
|
||||
resp := new(integrationtypes.GettableDeployment)
|
||||
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
|
||||
if err != nil {
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't query for deployment info: error: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
resp := new(deploymentResponse)
|
||||
|
||||
err = json.Unmarshal(respBytes, resp)
|
||||
if err != nil {
|
||||
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal deployment info response")
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't unmarshal deployment info response: error: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
regionDns := resp.ClusterInfo.Region.DNS
|
||||
@@ -187,10 +219,9 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
|
||||
|
||||
if len(regionDns) < 1 || len(deploymentName) < 1 {
|
||||
// Fail early if actual response structure and expectation here ever diverge
|
||||
return "", errors.NewInternalf(
|
||||
errors.CodeInternal,
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
|
||||
@@ -198,85 +229,102 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
|
||||
return signozApiUrl, nil
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getOrCreateCloudProviderIngestionKey(
|
||||
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider valuer.String,
|
||||
) (string, error) {
|
||||
type ingestionKey struct {
|
||||
Name string `json:"name"`
|
||||
Value string `json:"value"`
|
||||
// other attributes from gateway response not included here since they are not being used.
|
||||
}
|
||||
|
||||
type ingestionKeysSearchResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data []ingestionKey `json:"data"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
type createIngestionKeyResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data ingestionKey `json:"data"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
func getOrCreateCloudProviderIngestionKey(
|
||||
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
|
||||
) (string, *basemodel.ApiError) {
|
||||
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
|
||||
|
||||
// see if the key already exists
|
||||
searchResult, err := requestGateway[integrationtypes.GettableIngestionKeysSearch](
|
||||
searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
|
||||
ctx,
|
||||
gatewayUrl,
|
||||
licenseKey,
|
||||
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
|
||||
nil,
|
||||
ah.opts.Logger,
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
if apiErr != nil {
|
||||
return "", basemodel.WrapApiError(
|
||||
apiErr, "couldn't search for cloudprovider ingestion key",
|
||||
)
|
||||
}
|
||||
|
||||
if searchResult.Status != "success" {
|
||||
return "", errors.NewInternalf(
|
||||
errors.CodeInternal,
|
||||
"couldn't search for cloud provider ingestion key: status: %s, error: %s",
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
|
||||
searchResult.Status, searchResult.Error,
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
for _, k := range searchResult.Data {
|
||||
if k.Name != cloudProviderKeyName {
|
||||
continue
|
||||
}
|
||||
if k.Name == cloudProviderKeyName {
|
||||
if len(k.Value) < 1 {
|
||||
// Fail early if actual response structure and expectation here ever diverge
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"ingestion keys search response not as expected",
|
||||
))
|
||||
}
|
||||
|
||||
if len(k.Value) < 1 {
|
||||
// Fail early if actual response structure and expectation here ever diverge
|
||||
return "", errors.NewInternalf(errors.CodeInternal, "ingestion keys search response not as expected")
|
||||
return k.Value, nil
|
||||
}
|
||||
|
||||
return k.Value, nil
|
||||
}
|
||||
|
||||
ah.opts.Logger.InfoContext(
|
||||
ctx,
|
||||
zap.L().Info(
|
||||
"no existing ingestion key found for cloud integration, creating a new one",
|
||||
slog.String("cloudProvider", cloudProvider.String()),
|
||||
zap.String("cloudProvider", cloudProvider),
|
||||
)
|
||||
|
||||
createKeyResult, err := requestGateway[integrationtypes.GettableCreateIngestionKey](
|
||||
createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
|
||||
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
|
||||
map[string]any{
|
||||
"name": cloudProviderKeyName,
|
||||
"tags": []string{"integration", cloudProvider.String()},
|
||||
"tags": []string{"integration", cloudProvider},
|
||||
},
|
||||
ah.opts.Logger,
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
if apiErr != nil {
|
||||
return "", basemodel.WrapApiError(
|
||||
apiErr, "couldn't create cloudprovider ingestion key",
|
||||
)
|
||||
}
|
||||
|
||||
if createKeyResult.Status != "success" {
|
||||
return "", errors.NewInternalf(
|
||||
errors.CodeInternal,
|
||||
"couldn't create cloud provider ingestion key: status: %s, error: %s",
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't create cloudprovider ingestion key: status: %s, error: %s",
|
||||
createKeyResult.Status, createKeyResult.Error,
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
ingestionKeyString := createKeyResult.Data.Value
|
||||
if len(ingestionKeyString) < 1 {
|
||||
ingestionKey := createKeyResult.Data.Value
|
||||
if len(ingestionKey) < 1 {
|
||||
// Fail early if actual response structure and expectation here ever diverge
|
||||
return "", errors.NewInternalf(errors.CodeInternal,
|
||||
return "", basemodel.InternalError(fmt.Errorf(
|
||||
"ingestion key creation response not as expected",
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
return ingestionKeyString, nil
|
||||
return ingestionKey, nil
|
||||
}
|
||||
|
||||
func requestGateway[ResponseType any](
|
||||
ctx context.Context, gatewayUrl, licenseKey, path string, payload any, logger *slog.Logger,
|
||||
) (*ResponseType, error) {
|
||||
ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
|
||||
) (*ResponseType, *basemodel.ApiError) {
|
||||
|
||||
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
|
||||
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
|
||||
@@ -287,12 +335,13 @@ func requestGateway[ResponseType any](
|
||||
"X-Consumer-Groups": "ns:default",
|
||||
}
|
||||
|
||||
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload, logger)
|
||||
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
|
||||
}
|
||||
|
||||
func requestAndParseResponse[ResponseType any](
|
||||
ctx context.Context, url string, headers map[string]string, payload any, logger *slog.Logger,
|
||||
) (*ResponseType, error) {
|
||||
ctx context.Context, url string, headers map[string]string, payload any,
|
||||
) (*ResponseType, *basemodel.ApiError) {
|
||||
|
||||
reqMethod := http.MethodGet
|
||||
var reqBody io.Reader
|
||||
if payload != nil {
|
||||
@@ -300,14 +349,18 @@ func requestAndParseResponse[ResponseType any](
|
||||
|
||||
bodyJson, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal payload")
|
||||
return nil, basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't serialize request payload to JSON: %w", err,
|
||||
))
|
||||
}
|
||||
reqBody = bytes.NewBuffer(bodyJson)
|
||||
reqBody = bytes.NewBuffer([]byte(bodyJson))
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't create req")
|
||||
return nil, basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't prepare request: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
for k, v := range headers {
|
||||
@@ -320,26 +373,23 @@ func requestAndParseResponse[ResponseType any](
|
||||
|
||||
response, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't make req")
|
||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = response.Body.Close()
|
||||
if err != nil {
|
||||
logger.ErrorContext(ctx, "couldn't close response body", "error", err)
|
||||
}
|
||||
}()
|
||||
defer response.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read response body")
|
||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
|
||||
}
|
||||
|
||||
var resp ResponseType
|
||||
|
||||
err = json.Unmarshal(respBody, &resp)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal response body")
|
||||
return nil, basemodel.InternalError(fmt.Errorf(
|
||||
"couldn't unmarshal gateway response into %T", resp,
|
||||
))
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
@@ -120,6 +121,13 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
)
}

cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
)
}

// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
signoz.SQLStore,
@@ -153,11 +161,11 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
RulesManager: rm,
UsageManager: usageManager,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
FluxInterval: config.Querier.FluxInterval,
GatewayUrl: config.Gateway.URL.String(),
GlobalConfig: config.Global,
Logger: signoz.Instrumentation.Logger(),
}

apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)

@@ -308,3 +308,15 @@ export const PublicDashboardPage = Loadable(
|
||||
/* webpackChunkName: "Public Dashboard Page" */ 'pages/PublicDashboard'
|
||||
),
|
||||
);
|
||||
|
||||
export const AlertTypeSelectionPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "Alert Type Selection Page" */ 'pages/AlertTypeSelection'
|
||||
),
|
||||
);
|
||||
|
||||
export const MeterExplorerPage = Loadable(
|
||||
() =>
|
||||
import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
|
||||
);
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
import { RouteProps } from 'react-router-dom';
|
||||
import ROUTES from 'constants/routes';
|
||||
import AlertTypeSelectionPage from 'pages/AlertTypeSelection';
|
||||
import MessagingQueues from 'pages/MessagingQueues';
|
||||
import MeterExplorer from 'pages/MeterExplorer';
|
||||
|
||||
import {
|
||||
AlertHistory,
|
||||
AlertOverview,
|
||||
AlertTypeSelectionPage,
|
||||
AllAlertChannels,
|
||||
AllErrors,
|
||||
ApiMonitoring,
|
||||
@@ -29,6 +27,8 @@ import {
|
||||
LogsExplorer,
|
||||
LogsIndexToFields,
|
||||
LogsSaveViews,
|
||||
MessagingQueuesMainPage,
|
||||
MeterExplorerPage,
|
||||
MetricsExplorer,
|
||||
OldLogsExplorer,
|
||||
Onboarding,
|
||||
@@ -399,28 +399,28 @@ const routes: AppRoutes[] = [
|
||||
{
|
||||
path: ROUTES.MESSAGING_QUEUES_KAFKA,
|
||||
exact: true,
|
||||
component: MessagingQueues,
|
||||
component: MessagingQueuesMainPage,
|
||||
key: 'MESSAGING_QUEUES_KAFKA',
|
||||
isPrivate: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
|
||||
exact: true,
|
||||
component: MessagingQueues,
|
||||
component: MessagingQueuesMainPage,
|
||||
key: 'MESSAGING_QUEUES_CELERY_TASK',
|
||||
isPrivate: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
|
||||
exact: true,
|
||||
component: MessagingQueues,
|
||||
component: MessagingQueuesMainPage,
|
||||
key: 'MESSAGING_QUEUES_OVERVIEW',
|
||||
isPrivate: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
|
||||
exact: true,
|
||||
component: MessagingQueues,
|
||||
component: MessagingQueuesMainPage,
|
||||
key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
|
||||
isPrivate: true,
|
||||
},
|
||||
@@ -463,21 +463,21 @@ const routes: AppRoutes[] = [
|
||||
{
|
||||
path: ROUTES.METER,
|
||||
exact: true,
|
||||
component: MeterExplorer,
|
||||
component: MeterExplorerPage,
|
||||
key: 'METER',
|
||||
isPrivate: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.METER_EXPLORER,
|
||||
exact: true,
|
||||
component: MeterExplorer,
|
||||
component: MeterExplorerPage,
|
||||
key: 'METER_EXPLORER',
|
||||
isPrivate: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.METER_EXPLORER_VIEWS,
|
||||
exact: true,
|
||||
component: MeterExplorer,
|
||||
component: MeterExplorerPage,
|
||||
key: 'METER_EXPLORER_VIEWS',
|
||||
isPrivate: true,
|
||||
},
|
||||
|
||||
@@ -5,17 +5,24 @@
|
||||
* SigNoz
|
||||
*/
|
||||
import type {
|
||||
InvalidateOptions,
|
||||
MutationFunction,
|
||||
QueryClient,
|
||||
QueryFunction,
|
||||
QueryKey,
|
||||
UseMutationOptions,
|
||||
UseMutationResult,
|
||||
UseQueryOptions,
|
||||
UseQueryResult,
|
||||
} from 'react-query';
|
||||
import { useMutation } from 'react-query';
|
||||
import { useMutation, useQuery } from 'react-query';
|
||||
|
||||
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
|
||||
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
|
||||
import type {
|
||||
AuthtypesTransactionDTO,
|
||||
AuthzCheck200,
|
||||
AuthzResources200,
|
||||
RenderErrorResponseDTO,
|
||||
} from '../sigNoz.schemas';
|
||||
|
||||
@@ -106,3 +113,88 @@ export const useAuthzCheck = <
|
||||
|
||||
return useMutation(mutationOptions);
|
||||
};
|
||||
/**
|
||||
* Gets all the available resources
|
||||
* @summary Get resources
|
||||
*/
|
||||
export const authzResources = (signal?: AbortSignal) => {
|
||||
return GeneratedAPIInstance<AuthzResources200>({
|
||||
url: `/api/v1/authz/resources`,
|
||||
method: 'GET',
|
||||
signal,
|
||||
});
|
||||
};
|
||||
|
||||
export const getAuthzResourcesQueryKey = () => {
|
||||
return [`/api/v1/authz/resources`] as const;
|
||||
};
|
||||
|
||||
export const getAuthzResourcesQueryOptions = <
|
||||
TData = Awaited<ReturnType<typeof authzResources>>,
|
||||
TError = ErrorType<RenderErrorResponseDTO>
|
||||
>(options?: {
|
||||
query?: UseQueryOptions<
|
||||
Awaited<ReturnType<typeof authzResources>>,
|
||||
TError,
|
||||
TData
|
||||
>;
|
||||
}) => {
|
||||
const { query: queryOptions } = options ?? {};
|
||||
|
||||
const queryKey = queryOptions?.queryKey ?? getAuthzResourcesQueryKey();
|
||||
|
||||
const queryFn: QueryFunction<Awaited<ReturnType<typeof authzResources>>> = ({
|
||||
signal,
|
||||
}) => authzResources(signal);
|
||||
|
||||
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
|
||||
Awaited<ReturnType<typeof authzResources>>,
|
||||
TError,
|
||||
TData
|
||||
> & { queryKey: QueryKey };
|
||||
};
|
||||
|
||||
export type AuthzResourcesQueryResult = NonNullable<
|
||||
Awaited<ReturnType<typeof authzResources>>
|
||||
>;
|
||||
export type AuthzResourcesQueryError = ErrorType<RenderErrorResponseDTO>;
|
||||
|
||||
/**
|
||||
* @summary Get resources
|
||||
*/
|
||||
|
||||
export function useAuthzResources<
|
||||
TData = Awaited<ReturnType<typeof authzResources>>,
|
||||
TError = ErrorType<RenderErrorResponseDTO>
|
||||
>(options?: {
|
||||
query?: UseQueryOptions<
|
||||
Awaited<ReturnType<typeof authzResources>>,
|
||||
TError,
|
||||
TData
|
||||
>;
|
||||
}): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
|
||||
const queryOptions = getAuthzResourcesQueryOptions(options);
|
||||
|
||||
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
|
||||
queryKey: QueryKey;
|
||||
};
|
||||
|
||||
query.queryKey = queryOptions.queryKey;
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
/**
|
||||
* @summary Get resources
|
||||
*/
|
||||
export const invalidateAuthzResources = async (
|
||||
queryClient: QueryClient,
|
||||
options?: InvalidateOptions,
|
||||
): Promise<QueryClient> => {
|
||||
await queryClient.invalidateQueries(
|
||||
{ queryKey: getAuthzResourcesQueryKey() },
|
||||
options,
|
||||
);
|
||||
|
||||
return queryClient;
|
||||
};
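
The generated hook above is a thin react-query wrapper around the authz resources endpoint. A minimal usage sketch follows; the component and the import path are illustrative, only the hook, its default data type, and the response envelope come from the generated file above.

// Sketch only: consuming the generated useAuthzResources hook in a component.
// The import path is an assumption; hook and response types are from the
// generated client above.
import { useAuthzResources } from 'api/generated/services/authz';

function ResourceList(): JSX.Element {
	const { data, isLoading, error } = useAuthzResources();

	if (isLoading) return <div>Loading resources…</div>;
	if (error) return <div>Failed to load resources</div>;

	// data is the AuthzResources200 envelope:
	// { data: AuthtypesGettableResourcesDTO; status: string }
	return (
		<ul>
			{data?.data.resources.map((resource) => (
				<li key={resource.name}>{resource.name}</li>
			))}
		</ul>
	);
}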
|
||||
|
||||
@@ -20,7 +20,7 @@ import { useMutation, useQuery } from 'react-query';
|
||||
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
|
||||
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
|
||||
import type {
|
||||
CreateIngestionKey200,
|
||||
CreateIngestionKey201,
|
||||
CreateIngestionKeyLimit201,
|
||||
CreateIngestionKeyLimitPathParameters,
|
||||
DeleteIngestionKeyLimitPathParameters,
|
||||
@@ -151,7 +151,7 @@ export const createIngestionKey = (
|
||||
gatewaytypesPostableIngestionKeyDTO: BodyType<GatewaytypesPostableIngestionKeyDTO>,
|
||||
signal?: AbortSignal,
|
||||
) => {
|
||||
return GeneratedAPIInstance<CreateIngestionKey200>({
|
||||
return GeneratedAPIInstance<CreateIngestionKey201>({
|
||||
url: `/api/v2/gateway/ingestion_keys`,
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
|
||||
@@ -20,18 +20,17 @@ import { useMutation, useQuery } from 'react-query';
|
||||
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
|
||||
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
|
||||
import type {
|
||||
AuthtypesPatchableObjectsDTO,
|
||||
CreateRole201,
|
||||
DeleteRolePathParameters,
|
||||
GetObjects200,
|
||||
GetObjectsPathParameters,
|
||||
GetResources200,
|
||||
GetRole200,
|
||||
GetRolePathParameters,
|
||||
ListRoles200,
|
||||
PatchObjectsPathParameters,
|
||||
PatchRolePathParameters,
|
||||
RenderErrorResponseDTO,
|
||||
RoletypesPatchableObjectsDTO,
|
||||
RoletypesPatchableRoleDTO,
|
||||
RoletypesPostableRoleDTO,
|
||||
} from '../sigNoz.schemas';
|
||||
@@ -575,13 +574,13 @@ export const invalidateGetObjects = async (
|
||||
*/
|
||||
export const patchObjects = (
|
||||
{ id, relation }: PatchObjectsPathParameters,
|
||||
roletypesPatchableObjectsDTO: BodyType<RoletypesPatchableObjectsDTO>,
|
||||
authtypesPatchableObjectsDTO: BodyType<AuthtypesPatchableObjectsDTO>,
|
||||
) => {
|
||||
return GeneratedAPIInstance<string>({
|
||||
url: `/api/v1/roles/${id}/relation/${relation}/objects`,
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
data: roletypesPatchableObjectsDTO,
|
||||
data: authtypesPatchableObjectsDTO,
|
||||
});
|
||||
};
|
||||
|
||||
@@ -594,7 +593,7 @@ export const getPatchObjectsMutationOptions = <
|
||||
TError,
|
||||
{
|
||||
pathParams: PatchObjectsPathParameters;
|
||||
data: BodyType<RoletypesPatchableObjectsDTO>;
|
||||
data: BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
},
|
||||
TContext
|
||||
>;
|
||||
@@ -603,7 +602,7 @@ export const getPatchObjectsMutationOptions = <
|
||||
TError,
|
||||
{
|
||||
pathParams: PatchObjectsPathParameters;
|
||||
data: BodyType<RoletypesPatchableObjectsDTO>;
|
||||
data: BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
},
|
||||
TContext
|
||||
> => {
|
||||
@@ -620,7 +619,7 @@ export const getPatchObjectsMutationOptions = <
|
||||
Awaited<ReturnType<typeof patchObjects>>,
|
||||
{
|
||||
pathParams: PatchObjectsPathParameters;
|
||||
data: BodyType<RoletypesPatchableObjectsDTO>;
|
||||
data: BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
}
|
||||
> = (props) => {
|
||||
const { pathParams, data } = props ?? {};
|
||||
@@ -634,7 +633,7 @@ export const getPatchObjectsMutationOptions = <
|
||||
export type PatchObjectsMutationResult = NonNullable<
|
||||
Awaited<ReturnType<typeof patchObjects>>
|
||||
>;
|
||||
export type PatchObjectsMutationBody = BodyType<RoletypesPatchableObjectsDTO>;
|
||||
export type PatchObjectsMutationBody = BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
export type PatchObjectsMutationError = ErrorType<RenderErrorResponseDTO>;
|
||||
|
||||
/**
|
||||
@@ -649,7 +648,7 @@ export const usePatchObjects = <
|
||||
TError,
|
||||
{
|
||||
pathParams: PatchObjectsPathParameters;
|
||||
data: BodyType<RoletypesPatchableObjectsDTO>;
|
||||
data: BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
},
|
||||
TContext
|
||||
>;
|
||||
@@ -658,7 +657,7 @@ export const usePatchObjects = <
|
||||
TError,
|
||||
{
|
||||
pathParams: PatchObjectsPathParameters;
|
||||
data: BodyType<RoletypesPatchableObjectsDTO>;
|
||||
data: BodyType<AuthtypesPatchableObjectsDTO>;
|
||||
},
|
||||
TContext
|
||||
> => {
|
||||
@@ -666,88 +665,3 @@ export const usePatchObjects = <
|
||||
|
||||
return useMutation(mutationOptions);
|
||||
};
|
||||
/**
|
||||
* Gets all the available resources for role assignment
|
||||
* @summary Get resources
|
||||
*/
|
||||
export const getResources = (signal?: AbortSignal) => {
|
||||
return GeneratedAPIInstance<GetResources200>({
|
||||
url: `/api/v1/roles/resources`,
|
||||
method: 'GET',
|
||||
signal,
|
||||
});
|
||||
};
|
||||
|
||||
export const getGetResourcesQueryKey = () => {
|
||||
return [`/api/v1/roles/resources`] as const;
|
||||
};
|
||||
|
||||
export const getGetResourcesQueryOptions = <
|
||||
TData = Awaited<ReturnType<typeof getResources>>,
|
||||
TError = ErrorType<RenderErrorResponseDTO>
|
||||
>(options?: {
|
||||
query?: UseQueryOptions<
|
||||
Awaited<ReturnType<typeof getResources>>,
|
||||
TError,
|
||||
TData
|
||||
>;
|
||||
}) => {
|
||||
const { query: queryOptions } = options ?? {};
|
||||
|
||||
const queryKey = queryOptions?.queryKey ?? getGetResourcesQueryKey();
|
||||
|
||||
const queryFn: QueryFunction<Awaited<ReturnType<typeof getResources>>> = ({
|
||||
signal,
|
||||
}) => getResources(signal);
|
||||
|
||||
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
|
||||
Awaited<ReturnType<typeof getResources>>,
|
||||
TError,
|
||||
TData
|
||||
> & { queryKey: QueryKey };
|
||||
};
|
||||
|
||||
export type GetResourcesQueryResult = NonNullable<
|
||||
Awaited<ReturnType<typeof getResources>>
|
||||
>;
|
||||
export type GetResourcesQueryError = ErrorType<RenderErrorResponseDTO>;
|
||||
|
||||
/**
|
||||
* @summary Get resources
|
||||
*/
|
||||
|
||||
export function useGetResources<
|
||||
TData = Awaited<ReturnType<typeof getResources>>,
|
||||
TError = ErrorType<RenderErrorResponseDTO>
|
||||
>(options?: {
|
||||
query?: UseQueryOptions<
|
||||
Awaited<ReturnType<typeof getResources>>,
|
||||
TError,
|
||||
TData
|
||||
>;
|
||||
}): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
|
||||
const queryOptions = getGetResourcesQueryOptions(options);
|
||||
|
||||
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
|
||||
queryKey: QueryKey;
|
||||
};
|
||||
|
||||
query.queryKey = queryOptions.queryKey;
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
/**
|
||||
* @summary Get resources
|
||||
*/
|
||||
export const invalidateGetResources = async (
|
||||
queryClient: QueryClient,
|
||||
options?: InvalidateOptions,
|
||||
): Promise<QueryClient> => {
|
||||
await queryClient.invalidateQueries(
|
||||
{ queryKey: getGetResourcesQueryKey() },
|
||||
options,
|
||||
);
|
||||
|
||||
return queryClient;
|
||||
};
|
||||
|
||||
@@ -81,7 +81,7 @@ export interface AuthtypesGettableAuthDomainDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -108,6 +108,33 @@ export interface AuthtypesGettableAuthDomainDTO {
|
||||
updatedAt?: Date;
|
||||
}
|
||||
|
||||
export interface AuthtypesGettableObjectsDTO {
|
||||
resource: AuthtypesResourceDTO;
|
||||
/**
|
||||
* @type array
|
||||
*/
|
||||
selectors: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* @nullable
|
||||
*/
|
||||
export type AuthtypesGettableResourcesDTORelations = {
|
||||
[key: string]: string[];
|
||||
} | null;
|
||||
|
||||
export interface AuthtypesGettableResourcesDTO {
|
||||
/**
|
||||
* @type object
|
||||
* @nullable true
|
||||
*/
|
||||
relations: AuthtypesGettableResourcesDTORelations;
|
||||
/**
|
||||
* @type array
|
||||
*/
|
||||
resources: AuthtypesResourceDTO[];
|
||||
}
|
||||
|
||||
export interface AuthtypesGettableTokenDTO {
|
||||
/**
|
||||
* @type string
|
||||
@@ -182,10 +209,6 @@ export interface AuthtypesGoogleConfigDTO {
|
||||
serviceAccountJson?: string;
|
||||
}
|
||||
|
||||
export interface AuthtypesNameDTO {
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface AuthtypesOIDCConfigDTO {
|
||||
claimMapping?: AuthtypesAttributeMappingDTO;
|
||||
/**
|
||||
@@ -216,7 +239,10 @@ export interface AuthtypesOIDCConfigDTO {
|
||||
|
||||
export interface AuthtypesObjectDTO {
|
||||
resource: AuthtypesResourceDTO;
|
||||
selector: AuthtypesSelectorDTO;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
selector: string;
|
||||
}
|
||||
|
||||
export interface AuthtypesOrgSessionContextDTO {
|
||||
@@ -239,6 +265,19 @@ export interface AuthtypesPasswordAuthNSupportDTO {
|
||||
provider?: string;
|
||||
}
|
||||
|
||||
export interface AuthtypesPatchableObjectsDTO {
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
additions: AuthtypesGettableObjectsDTO[] | null;
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
deletions: AuthtypesGettableObjectsDTO[] | null;
|
||||
}
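
With the patch endpoints now taking AuthtypesPatchableObjectsDTO, a patch request carries additions and deletions of resource/selectors pairs. A rough sketch of building such a payload and handing it to the regenerated usePatchObjects mutation; import paths, the roleId, and the resource value are placeholders, not real SigNoz identifiers.

// Sketch only: constructing an AuthtypesPatchableObjectsDTO for usePatchObjects.
// Import paths are assumptions; the resource value is a placeholder.
import { usePatchObjects } from 'api/generated/services/roles';
import type {
	AuthtypesPatchableObjectsDTO,
	AuthtypesResourceDTO,
} from 'api/generated/sigNoz.schemas';

// A resource obtained elsewhere (e.g. from the authz resources endpoint).
declare const dashboardsResource: AuthtypesResourceDTO;

const patch: AuthtypesPatchableObjectsDTO = {
	additions: [
		{
			resource: dashboardsResource,
			selectors: ['*'],
		},
	],
	deletions: null,
};

// Inside a component:
// const { mutate } = usePatchObjects();
// mutate({ pathParams: { id: roleId, relation: 'read' }, data: patch });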
|
||||
|
||||
export interface AuthtypesPostableAuthDomainDTO {
|
||||
config?: AuthtypesAuthDomainConfigDTO;
|
||||
/**
|
||||
@@ -270,7 +309,10 @@ export interface AuthtypesPostableRotateTokenDTO {
|
||||
}
|
||||
|
||||
export interface AuthtypesResourceDTO {
|
||||
name: AuthtypesNameDTO;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
name: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -320,10 +362,6 @@ export interface AuthtypesSamlConfigDTO {
|
||||
samlIdp?: string;
|
||||
}
|
||||
|
||||
export interface AuthtypesSelectorDTO {
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface AuthtypesSessionContextDTO {
|
||||
/**
|
||||
* @type boolean
|
||||
@@ -337,10 +375,6 @@ export interface AuthtypesSessionContextDTO {
|
||||
}
|
||||
|
||||
export interface AuthtypesTransactionDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
object: AuthtypesObjectDTO;
|
||||
/**
|
||||
* @type string
|
||||
@@ -623,14 +657,14 @@ export interface GatewaytypesLimitMetricValueDTO {
|
||||
export interface GatewaytypesLimitValueDTO {
|
||||
/**
|
||||
* @type integer
|
||||
* @format int64
|
||||
* @nullable true
|
||||
*/
|
||||
count?: number;
|
||||
count?: number | null;
|
||||
/**
|
||||
* @type integer
|
||||
* @format int64
|
||||
* @nullable true
|
||||
*/
|
||||
size?: number;
|
||||
size?: number | null;
|
||||
}
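
Since count and size are now explicitly nullable, callers have to distinguish "no limit" from zero. A small sketch of the kind of guard this implies; the helper name and wording are illustrative, only the DTO shape comes from the schema above.

// Sketch only: formatting a GatewaytypesLimitValueDTO where null/undefined now
// means "no limit" rather than 0. Helper name is illustrative.
import type { GatewaytypesLimitValueDTO } from 'api/generated/sigNoz.schemas';

function describeLimit(limit?: GatewaytypesLimitValueDTO): string {
	const count = limit?.count ?? null;
	const size = limit?.size ?? null;

	if (count === null && size === null) return 'No limit';

	const parts: string[] = [];
	if (count !== null) parts.push(`${count} signals`);
	if (size !== null) parts.push(`${size} bytes`);
	return parts.join(', ');
}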
|
||||
|
||||
export interface GatewaytypesPaginationDTO {
|
||||
@@ -1992,39 +2026,6 @@ export interface RenderErrorResponseDTO {
|
||||
status: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* @nullable
|
||||
*/
|
||||
export type RoletypesGettableResourcesDTORelations = {
|
||||
[key: string]: string[];
|
||||
} | null;
|
||||
|
||||
export interface RoletypesGettableResourcesDTO {
|
||||
/**
|
||||
* @type object
|
||||
* @nullable true
|
||||
*/
|
||||
relations: RoletypesGettableResourcesDTORelations;
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
resources: AuthtypesResourceDTO[] | null;
|
||||
}
|
||||
|
||||
export interface RoletypesPatchableObjectsDTO {
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
additions: AuthtypesObjectDTO[] | null;
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
deletions: AuthtypesObjectDTO[] | null;
|
||||
}
|
||||
|
||||
export interface RoletypesPatchableRoleDTO {
|
||||
/**
|
||||
* @type string
|
||||
@@ -2056,7 +2057,7 @@ export interface RoletypesRoleDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -2197,7 +2198,7 @@ export interface TypesGettableAPIKeyDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type integer
|
||||
* @format int64
|
||||
@@ -2250,7 +2251,7 @@ export interface TypesIdentifiableDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
}
|
||||
|
||||
export interface TypesInviteDTO {
|
||||
@@ -2266,7 +2267,7 @@ export interface TypesInviteDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -2311,7 +2312,7 @@ export interface TypesOrganizationDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type integer
|
||||
* @minimum 0
|
||||
@@ -2417,7 +2418,7 @@ export interface TypesResetPasswordTokenDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -2441,7 +2442,7 @@ export interface TypesStorableAPIKeyDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -2490,7 +2491,7 @@ export interface TypesUserDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
id?: string;
|
||||
id: string;
|
||||
/**
|
||||
* @type boolean
|
||||
*/
|
||||
@@ -2606,6 +2607,14 @@ export type AuthzCheck200 = {
|
||||
status: string;
|
||||
};
|
||||
|
||||
export type AuthzResources200 = {
|
||||
data: AuthtypesGettableResourcesDTO;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
status: string;
|
||||
};
|
||||
|
||||
export type ChangePasswordPathParameters = {
|
||||
id: string;
|
||||
};
|
||||
@@ -3017,7 +3026,7 @@ export type GetObjects200 = {
|
||||
/**
|
||||
* @type array
|
||||
*/
|
||||
data: AuthtypesObjectDTO[];
|
||||
data: AuthtypesGettableObjectsDTO[];
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -3028,14 +3037,6 @@ export type PatchObjectsPathParameters = {
|
||||
id: string;
|
||||
relation: string;
|
||||
};
|
||||
export type GetResources200 = {
|
||||
data: RoletypesGettableResourcesDTO;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
status: string;
|
||||
};
|
||||
|
||||
export type ListUsers200 = {
|
||||
/**
|
||||
* @type array
|
||||
@@ -3137,7 +3138,7 @@ export type GetIngestionKeys200 = {
|
||||
status: string;
|
||||
};
|
||||
|
||||
export type CreateIngestionKey200 = {
|
||||
export type CreateIngestionKey201 = {
|
||||
data: GatewaytypesGettableCreatedIngestionKeyDTO;
|
||||
/**
|
||||
* @type string
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { UniversalYAxisUnit } from '../types';
|
||||
import { YAxisCategoryNames } from '../constants';
|
||||
import { UniversalYAxisUnit, YAxisCategory } from '../types';
|
||||
import {
|
||||
getUniversalNameFromMetricUnit,
|
||||
mapMetricUnitToUniversalUnit,
|
||||
@@ -41,29 +42,29 @@ describe('YAxisUnitSelector utils', () => {
|
||||
|
||||
describe('mergeCategories', () => {
|
||||
it('merges categories correctly', () => {
|
||||
const categories1 = [
|
||||
const categories1: YAxisCategory[] = [
|
||||
{
|
||||
name: 'Data',
|
||||
name: YAxisCategoryNames.Data,
|
||||
units: [
|
||||
{ name: 'bytes', id: UniversalYAxisUnit.BYTES },
|
||||
{ name: 'kilobytes', id: UniversalYAxisUnit.KILOBYTES },
|
||||
],
|
||||
},
|
||||
];
|
||||
const categories2 = [
|
||||
const categories2: YAxisCategory[] = [
|
||||
{
|
||||
name: 'Data',
|
||||
name: YAxisCategoryNames.Data,
|
||||
units: [{ name: 'bits', id: UniversalYAxisUnit.BITS }],
|
||||
},
|
||||
{
|
||||
name: 'Time',
|
||||
name: YAxisCategoryNames.Time,
|
||||
units: [{ name: 'seconds', id: UniversalYAxisUnit.SECONDS }],
|
||||
},
|
||||
];
|
||||
const mergedCategories = mergeCategories(categories1, categories2);
|
||||
expect(mergedCategories).toEqual([
|
||||
{
|
||||
name: 'Data',
|
||||
name: YAxisCategoryNames.Data,
|
||||
units: [
|
||||
{ name: 'bytes', id: UniversalYAxisUnit.BYTES },
|
||||
{ name: 'kilobytes', id: UniversalYAxisUnit.KILOBYTES },
|
||||
@@ -71,7 +72,7 @@ describe('YAxisUnitSelector utils', () => {
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Time',
|
||||
name: YAxisCategoryNames.Time,
|
||||
units: [{ name: 'seconds', id: UniversalYAxisUnit.SECONDS }],
|
||||
},
|
||||
]);
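
The expectation above pins down the merge semantics: categories are matched by name, unit lists from the second argument are appended to an existing category, and categories present only in the second argument are added at the end. A sketch of an implementation that satisfies this test; the shipped mergeCategories in the component's utils may differ in details such as de-duplication.

// Sketch only: one way to implement the merge semantics the test above asserts.
import { YAxisCategory } from 'components/YAxisUnitSelector/types';

export function mergeCategoriesSketch(
	first: YAxisCategory[],
	second: YAxisCategory[],
): YAxisCategory[] {
	// Copy the first list so the inputs are not mutated.
	const merged = first.map((category) => ({
		...category,
		units: [...category.units],
	}));

	second.forEach((category) => {
		const existing = merged.find((c) => c.name === category.name);
		if (existing) {
			existing.units.push(...category.units);
		} else {
			merged.push({ ...category, units: [...category.units] });
		}
	});

	return merged;
}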
|
||||
|
||||
@@ -1,5 +1,36 @@
|
||||
import { UnitFamilyConfig, UniversalYAxisUnit, YAxisUnit } from './types';
|
||||
|
||||
export enum YAxisCategoryNames {
|
||||
Time = 'Time',
|
||||
Data = 'Data',
|
||||
DataRate = 'Data Rate',
|
||||
Count = 'Count',
|
||||
Operations = 'Operations',
|
||||
Percentage = 'Percentage',
|
||||
Boolean = 'Boolean',
|
||||
None = 'None',
|
||||
HashRate = 'Hash Rate',
|
||||
Miscellaneous = 'Miscellaneous',
|
||||
Acceleration = 'Acceleration',
|
||||
Angular = 'Angular',
|
||||
Area = 'Area',
|
||||
Flops = 'FLOPs',
|
||||
Concentration = 'Concentration',
|
||||
Currency = 'Currency',
|
||||
Datetime = 'Datetime',
|
||||
PowerElectrical = 'Power/Electrical',
|
||||
Flow = 'Flow',
|
||||
Force = 'Force',
|
||||
Mass = 'Mass',
|
||||
Length = 'Length',
|
||||
Pressure = 'Pressure',
|
||||
Radiation = 'Radiation',
|
||||
RotationSpeed = 'Rotation Speed',
|
||||
Temperature = 'Temperature',
|
||||
Velocity = 'Velocity',
|
||||
Volume = 'Volume',
|
||||
}
|
||||
|
||||
// Mapping of universal y-axis units to their AWS, UCUM, and OpenMetrics equivalents (if available)
|
||||
export const UniversalYAxisUnitMappings: Partial<
|
||||
Record<UniversalYAxisUnit, Set<YAxisUnit> | null>
|
||||
|
||||
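
With the enum in place, category names are referenced through YAxisCategoryNames instead of raw string literals, which is what the constants, categories, and tests in this change switch to. A minimal sketch of a category defined against the enum; unit naming follows the Y_AXIS_UNIT_NAMES lookup already used in the categories file.

// Sketch only: a YAxisCategory keyed by the enum instead of a string literal.
import {
	Y_AXIS_UNIT_NAMES,
	YAxisCategoryNames,
} from 'components/YAxisUnitSelector/constants';
import {
	UniversalYAxisUnit,
	YAxisCategory,
} from 'components/YAxisUnitSelector/types';

const percentageCategory: YAxisCategory = {
	name: YAxisCategoryNames.Percentage,
	units: [
		{
			name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.PERCENT],
			id: UniversalYAxisUnit.PERCENT,
		},
	],
};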
@@ -1,10 +1,11 @@
|
||||
import { Y_AXIS_UNIT_NAMES } from './constants';
|
||||
import { YAxisCategoryNames } from './constants';
|
||||
import { UniversalYAxisUnit, YAxisCategory } from './types';
|
||||
|
||||
// Base categories for the universal y-axis units
|
||||
export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
{
|
||||
name: 'Time',
|
||||
name: YAxisCategoryNames.Time,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.SECONDS],
|
||||
@@ -37,7 +38,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Data',
|
||||
name: YAxisCategoryNames.Data,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.BYTES],
|
||||
@@ -154,7 +155,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Data Rate',
|
||||
name: YAxisCategoryNames.DataRate,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.BYTES_SECOND],
|
||||
@@ -295,7 +296,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Count',
|
||||
name: YAxisCategoryNames.Count,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.COUNT],
|
||||
@@ -312,7 +313,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Operations',
|
||||
name: YAxisCategoryNames.Operations,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.OPS_SECOND],
|
||||
@@ -353,7 +354,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Percentage',
|
||||
name: YAxisCategoryNames.Percentage,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.PERCENT],
|
||||
@@ -366,7 +367,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Boolean',
|
||||
name: YAxisCategoryNames.Boolean,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.TRUE_FALSE],
|
||||
@@ -382,7 +383,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
|
||||
export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
{
|
||||
name: 'Time',
|
||||
name: YAxisCategoryNames.Time,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DURATION_MS],
|
||||
@@ -419,7 +420,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Data Rate',
|
||||
name: YAxisCategoryNames.DataRate,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DATA_RATE_PACKETS_PER_SECOND],
|
||||
@@ -428,7 +429,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Boolean',
|
||||
name: YAxisCategoryNames.Boolean,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.ON_OFF],
|
||||
@@ -437,7 +438,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'None',
|
||||
name: YAxisCategoryNames.None,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.NONE],
|
||||
@@ -446,7 +447,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Hash Rate',
|
||||
name: YAxisCategoryNames.HashRate,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.HASH_RATE_HASHES_PER_SECOND],
|
||||
@@ -479,7 +480,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Miscellaneous',
|
||||
name: YAxisCategoryNames.Miscellaneous,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.MISC_STRING],
|
||||
@@ -520,7 +521,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Acceleration',
|
||||
name: YAxisCategoryNames.Acceleration,
|
||||
units: [
|
||||
{
|
||||
name:
|
||||
@@ -541,7 +542,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Angular',
|
||||
name: YAxisCategoryNames.Angular,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.ANGULAR_DEGREE],
|
||||
@@ -566,7 +567,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Area',
|
||||
name: YAxisCategoryNames.Area,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.AREA_SQUARE_METERS],
|
||||
@@ -583,7 +584,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'FLOPs',
|
||||
name: YAxisCategoryNames.Flops,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FLOPS_FLOPS],
|
||||
@@ -620,7 +621,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Concentration',
|
||||
name: YAxisCategoryNames.Concentration,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.CONCENTRATION_PPM],
|
||||
@@ -677,7 +678,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Currency',
|
||||
name: YAxisCategoryNames.Currency,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.CURRENCY_USD],
|
||||
@@ -774,7 +775,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Datetime',
|
||||
name: YAxisCategoryNames.Datetime,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DATETIME_ISO],
|
||||
@@ -811,7 +812,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Power/Electrical',
|
||||
name: YAxisCategoryNames.PowerElectrical,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.POWER_WATT],
|
||||
@@ -968,7 +969,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Flow',
|
||||
name: YAxisCategoryNames.Flow,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FLOW_GALLONS_PER_MINUTE],
|
||||
@@ -1005,7 +1006,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Force',
|
||||
name: YAxisCategoryNames.Force,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FORCE_NEWTON_METERS],
|
||||
@@ -1026,7 +1027,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Mass',
|
||||
name: YAxisCategoryNames.Mass,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.MASS_MILLIGRAM],
|
||||
@@ -1051,7 +1052,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Length',
|
||||
name: YAxisCategoryNames.Length,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.LENGTH_MILLIMETER],
|
||||
@@ -1080,7 +1081,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Pressure',
|
||||
name: YAxisCategoryNames.Pressure,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.PRESSURE_MILLIBAR],
|
||||
@@ -1117,7 +1118,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Radiation',
|
||||
name: YAxisCategoryNames.Radiation,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.RADIATION_BECQUEREL],
|
||||
@@ -1174,7 +1175,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Rotation Speed',
|
||||
name: YAxisCategoryNames.RotationSpeed,
|
||||
units: [
|
||||
{
|
||||
name:
|
||||
@@ -1200,7 +1201,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Temperature',
|
||||
name: YAxisCategoryNames.Temperature,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.TEMPERATURE_CELSIUS],
|
||||
@@ -1217,7 +1218,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Velocity',
|
||||
name: YAxisCategoryNames.Velocity,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.VELOCITY_METERS_PER_SECOND],
|
||||
@@ -1238,7 +1239,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'Volume',
|
||||
name: YAxisCategoryNames.Volume,
|
||||
units: [
|
||||
{
|
||||
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.VOLUME_MILLILITER],
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import { YAxisCategoryNames } from './constants';
|
||||
|
||||
export interface YAxisUnitSelectorProps {
|
||||
value: string | undefined;
|
||||
onChange: (value: UniversalYAxisUnit) => void;
|
||||
@@ -669,7 +671,7 @@ export interface UnitFamilyConfig {
|
||||
}
|
||||
|
||||
export interface YAxisCategory {
|
||||
name: string;
|
||||
name: YAxisCategoryNames;
|
||||
units: {
|
||||
name: string;
|
||||
id: UniversalYAxisUnit;
|
||||
|
||||
@@ -172,23 +172,51 @@ function ExplorerOptions({
const { user } = useAppContext();

const handleConditionalQueryModification = useCallback(
// eslint-disable-next-line sonarjs/cognitive-complexity
(defaultQuery: Query | null): string => {
const queryToUse = defaultQuery || query;
if (!queryToUse) {
throw new Error('No query provided');
}
if (
queryToUse?.builder?.queryData?.[0]?.aggregateOperator !==
StringOperators.NOOP
StringOperators.NOOP &&
sourcepage !== DataSource.LOGS
) {
return JSON.stringify(queryToUse);
}

// Modify aggregateOperator to count, as noop is not supported in alerts
// Convert NOOP to COUNT for alerts and strip orderBy for logs
const modifiedQuery = cloneDeep(queryToUse);
if (modifiedQuery && modifiedQuery.builder?.queryData) {
modifiedQuery.builder.queryData = modifiedQuery.builder.queryData.map(
(item) => {
const updatedItem = { ...item };

modifiedQuery.builder.queryData[0].aggregateOperator = StringOperators.COUNT;
if (updatedItem.aggregateOperator === StringOperators.NOOP) {
updatedItem.aggregateOperator = StringOperators.COUNT;
}

return JSON.stringify(modifiedQuery);
// Alerts do not support order by on logs explorer queries
if (sourcepage === DataSource.LOGS && panelType === PANEL_TYPES.LIST) {
updatedItem.orderBy = [];
}

return updatedItem;
},
);
}

try {
return JSON.stringify(modifiedQuery);
} catch (err) {
throw new Error(
'Failed to stringify modified query: ' +
(err instanceof Error ? err.message : String(err)),
);
}
},
[query],
[panelType, query, sourcepage],
);

const onCreateAlertsHandler = useCallback(
@@ -757,9 +785,9 @@ function ExplorerOptions({
);
}, [
disabled,
query,
isOneChartPerQuery,
onCreateAlertsHandler,
query,
splitedQueries,
]);


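The net effect of the reworked callback is a per-query transform applied to every builder query, rather than a single mutation of the first query. A standalone sketch of that transform follows; the Query, StringOperators, DataSource, and PANEL_TYPES names are the same ones ExplorerOptions already imports, and the function name here is illustrative, not the component code itself.

// Sketch only: the per-query transform the callback above applies before the
// query is serialized into the alert URL. Query, StringOperators, and
// DataSource are assumed to be imported from the same modules ExplorerOptions
// uses.
import { cloneDeep } from 'lodash-es';
import { PANEL_TYPES } from 'constants/queryBuilder';

function prepareQueryForAlert(
	queryToUse: Query,
	sourcepage: DataSource,
	panelType: PANEL_TYPES,
): string {
	const modifiedQuery = cloneDeep(queryToUse);

	modifiedQuery.builder.queryData = modifiedQuery.builder.queryData.map(
		(item) => {
			const updatedItem = { ...item };

			// NOOP is not a valid aggregation for alerts; fall back to COUNT.
			if (updatedItem.aggregateOperator === StringOperators.NOOP) {
				updatedItem.aggregateOperator = StringOperators.COUNT;
			}

			// Alerts reject orderBy on list-panel log queries, so drop it.
			if (sourcepage === DataSource.LOGS && panelType === PANEL_TYPES.LIST) {
				updatedItem.orderBy = [];
			}

			return updatedItem;
		},
	);

	return JSON.stringify(modifiedQuery);
}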
@@ -1,3 +1,4 @@
|
||||
import { useHistory } from 'react-router-dom';
|
||||
import { PANEL_TYPES } from 'constants/queryBuilder';
|
||||
import { MOCK_QUERY } from 'container/QueryTable/Drilldown/__tests__/mockTableData';
|
||||
import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
|
||||
@@ -15,6 +16,11 @@ import { getExplorerToolBarVisibility } from '../utils';
|
||||
// Mock dependencies
|
||||
jest.mock('hooks/dashboard/useUpdateDashboard');
|
||||
|
||||
jest.mock('react-router-dom', () => ({
|
||||
...jest.requireActual('react-router-dom'),
|
||||
useHistory: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('../utils', () => ({
|
||||
getExplorerToolBarVisibility: jest.fn(),
|
||||
generateRGBAFromHex: jest.fn(() => 'rgba(0, 0, 0, 0.08)'),
|
||||
@@ -29,6 +35,7 @@ const mockGetExplorerToolBarVisibility = jest.mocked(
|
||||
);
|
||||
|
||||
const mockUseUpdateDashboard = jest.mocked(useUpdateDashboard);
|
||||
const mockUseHistory = jest.mocked(useHistory);
|
||||
|
||||
// Mock data
|
||||
const TEST_QUERY_ID = 'test-query-id';
|
||||
@@ -103,7 +110,6 @@ describe('ExplorerOptionWrapper', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockGetExplorerToolBarVisibility.mockReturnValue(true);
|
||||
|
||||
// Mock useUpdateDashboard to return a mutation object
|
||||
mockUseUpdateDashboard.mockReturnValue(({
|
||||
mutate: jest.fn(),
|
||||
@@ -117,6 +123,28 @@ describe('ExplorerOptionWrapper', () => {
|
||||
} as unknown) as ReturnType<typeof useUpdateDashboard>);
|
||||
});
|
||||
|
||||
it('should navigate to alert creation page when "Create an Alert" is clicked in logs-explorer', async () => {
|
||||
const user = userEvent.setup({ pointerEventsCheck: 0 });
|
||||
const mockPush = jest.fn();
|
||||
mockUseHistory.mockReturnValue(({
|
||||
push: mockPush,
|
||||
} as unknown) as ReturnType<typeof useHistory>);
|
||||
|
||||
renderExplorerOptionWrapper({ sourcepage: DataSource.LOGS });
|
||||
|
||||
const createAlertButton = screen.getByRole('button', {
|
||||
name: 'Create an Alert',
|
||||
});
|
||||
await user.click(createAlertButton);
|
||||
|
||||
expect(mockPush).toHaveBeenCalledTimes(1);
|
||||
const calledWith = mockPush.mock.calls[0][0] as string;
|
||||
const [path, search = ''] = calledWith.split('?');
|
||||
expect(path).toBe('/alerts/new');
|
||||
const params = new URLSearchParams(search);
|
||||
expect(params.has('compositeQuery')).toBe(true);
|
||||
});
|
||||
|
||||
describe('onExport functionality', () => {
|
||||
it('should call onExport when New Dashboard button is clicked in export modal', async () => {
|
||||
const user = userEvent.setup({ pointerEventsCheck: 0 });
|
||||
|
||||
@@ -18,8 +18,8 @@ jest.mock('lib/query/createTableColumnsFromQuery', () => ({
|
||||
jest.mock('container/NewWidget/utils', () => ({
|
||||
unitOptions: jest.fn(() => [
|
||||
{ value: 'none', label: 'None' },
|
||||
{ value: 'percent', label: 'Percent' },
|
||||
{ value: 'ms', label: 'Milliseconds' },
|
||||
{ value: '%', label: 'Percent (0 - 100)' },
|
||||
{ value: 'ms', label: 'Milliseconds (ms)' },
|
||||
]),
|
||||
}));
|
||||
|
||||
@@ -39,7 +39,7 @@ const defaultProps = {
|
||||
],
|
||||
thresholdTableOptions: 'cpu_usage',
|
||||
columnUnits: { cpu_usage: 'percent', memory_usage: 'bytes' },
|
||||
yAxisUnit: 'percent',
|
||||
yAxisUnit: '%',
|
||||
moveThreshold: jest.fn(),
|
||||
};
|
||||
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
import { Dispatch, SetStateAction, useEffect, useState } from 'react';
|
||||
import { AutoComplete, Input, Typography } from 'antd';
|
||||
import { find } from 'lodash-es';
|
||||
|
||||
import { flattenedCategories } from './dataFormatCategories';
|
||||
|
||||
const findCategoryById = (
|
||||
searchValue: string,
|
||||
): Record<string, string> | undefined =>
|
||||
find(flattenedCategories, (option) => option.id === searchValue);
|
||||
const findCategoryByName = (
|
||||
searchValue: string,
|
||||
): Record<string, string> | undefined =>
|
||||
find(flattenedCategories, (option) => option.name === searchValue);
|
||||
|
||||
type OnSelectType = Dispatch<SetStateAction<string>> | ((val: string) => void);
|
||||
|
||||
/**
|
||||
* @deprecated Use DashboardYAxisUnitSelectorWrapper instead.
|
||||
*/
|
||||
function YAxisUnitSelector({
|
||||
value,
|
||||
onSelect,
|
||||
fieldLabel,
|
||||
handleClear,
|
||||
}: {
|
||||
value: string;
|
||||
onSelect: OnSelectType;
|
||||
fieldLabel: string;
|
||||
handleClear?: () => void;
|
||||
}): JSX.Element {
|
||||
const [inputValue, setInputValue] = useState('');
|
||||
|
||||
// Sync input value with the actual value prop
|
||||
useEffect(() => {
|
||||
const category = findCategoryById(value);
|
||||
setInputValue(category?.name || '');
|
||||
}, [value]);
|
||||
|
||||
const onSelectHandler = (selectedValue: string): void => {
|
||||
const category = findCategoryByName(selectedValue);
|
||||
if (category) {
|
||||
onSelect(category.id);
|
||||
setInputValue(selectedValue);
|
||||
}
|
||||
};
|
||||
|
||||
const onChangeHandler = (inputValue: string): void => {
|
||||
setInputValue(inputValue);
|
||||
// Clear the yAxisUnit if input is empty or doesn't match any option
|
||||
if (!inputValue) {
|
||||
onSelect('');
|
||||
}
|
||||
};
|
||||
|
||||
const onClearHandler = (): void => {
|
||||
setInputValue('');
|
||||
onSelect('');
|
||||
if (handleClear) {
|
||||
handleClear();
|
||||
}
|
||||
};
|
||||
|
||||
const options = flattenedCategories.map((options) => ({
|
||||
value: options.name,
|
||||
}));
|
||||
|
||||
return (
|
||||
<div className="y-axis-unit-selector">
|
||||
<Typography.Text className="heading">{fieldLabel}</Typography.Text>
|
||||
<AutoComplete
|
||||
style={{ width: '100%' }}
|
||||
rootClassName="y-axis-root-popover"
|
||||
options={options}
|
||||
allowClear
|
||||
value={inputValue}
|
||||
onChange={onChangeHandler}
|
||||
onClear={onClearHandler}
|
||||
onSelect={onSelectHandler}
|
||||
filterOption={(inputValue, option): boolean => {
|
||||
if (option) {
|
||||
return (
|
||||
option.value.toUpperCase().indexOf(inputValue.toUpperCase()) !== -1
|
||||
);
|
||||
}
|
||||
return false;
|
||||
}}
|
||||
>
|
||||
<Input placeholder="Unit" rootClassName="input" />
|
||||
</AutoComplete>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default YAxisUnitSelector;
|
||||
|
||||
YAxisUnitSelector.defaultProps = {
|
||||
handleClear: (): void => {},
|
||||
};
|
||||
@@ -1,240 +0,0 @@
|
||||
/* eslint-disable sonarjs/no-duplicate-string */
|
||||
import { act } from 'react-dom/test-utils';
|
||||
import { render, screen, waitFor } from '@testing-library/react';
|
||||
import userEvent from '@testing-library/user-event';
|
||||
|
||||
import YAxisUnitSelector from '../YAxisUnitSelector';
|
||||
|
||||
// Mock the dataFormatCategories to have predictable test data
|
||||
jest.mock('../dataFormatCategories', () => ({
|
||||
flattenedCategories: [
|
||||
{ id: 'seconds', name: 'seconds (s)' },
|
||||
{ id: 'milliseconds', name: 'milliseconds (ms)' },
|
||||
{ id: 'hours', name: 'hours (h)' },
|
||||
{ id: 'minutes', name: 'minutes (m)' },
|
||||
],
|
||||
}));
|
||||
|
||||
const MOCK_SECONDS = 'seconds';
|
||||
const MOCK_MILLISECONDS = 'milliseconds';
|
||||
|
||||
describe('YAxisUnitSelector', () => {
|
||||
const defaultProps = {
|
||||
value: MOCK_SECONDS,
|
||||
onSelect: jest.fn(),
|
||||
fieldLabel: 'Y Axis Unit',
|
||||
handleClear: jest.fn(),
|
||||
};
|
||||
|
||||
let user: ReturnType<typeof userEvent.setup>;
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
user = userEvent.setup();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe('Rendering (Read) & (write)', () => {
|
||||
it('renders with correct field label', () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
expect(screen.getByText('Y Axis Unit')).toBeInTheDocument();
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
expect(input).toHaveValue('seconds (s)');
|
||||
});
|
||||
|
||||
it('renders with custom field label', () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel="Custom Unit Label"
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
expect(screen.getByText('Custom Unit Label')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('displays empty input when value prop is empty', () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value=""
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
expect(screen.getByDisplayValue('')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('shows placeholder text', () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
expect(screen.getByPlaceholderText('Unit')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('handles numeric input', async () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
await user.clear(input);
|
||||
await user.type(input, '12345');
|
||||
expect(input).toHaveValue('12345');
|
||||
});
|
||||
|
||||
it('handles mixed content input', async () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
await user.clear(input);
|
||||
await user.type(input, 'Test123!@#');
|
||||
expect(input).toHaveValue('Test123!@#');
|
||||
});
|
||||
});
|
||||
|
||||
describe('State Management', () => {
|
||||
it('syncs input value with value prop changes', async () => {
|
||||
const { rerender } = render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
// Initial value
|
||||
expect(input).toHaveValue('seconds (s)');
|
||||
|
||||
// Change value prop
|
||||
rerender(
|
||||
<YAxisUnitSelector
|
||||
value={MOCK_MILLISECONDS}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(input).toHaveValue('milliseconds (ms)');
|
||||
});
|
||||
});
|
||||
|
||||
it('handles empty value prop correctly', async () => {
|
||||
const { rerender } = render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
// Change to empty value
|
||||
rerender(
|
||||
<YAxisUnitSelector
|
||||
value=""
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(input).toHaveValue('');
|
||||
});
|
||||
});
|
||||
|
||||
it('handles invalid value prop gracefully', async () => {
|
||||
const { rerender } = render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
// Change to invalid value
|
||||
rerender(
|
||||
<YAxisUnitSelector
|
||||
value="invalid_id"
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(input).toHaveValue('');
|
||||
});
|
||||
});
|
||||
|
||||
it('maintains local state during typing', async () => {
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value={defaultProps.value}
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
const input = screen.getByRole('combobox');
|
||||
|
||||
// first clear then type
|
||||
await user.clear(input);
|
||||
await user.type(input, 'test');
|
||||
expect(input).toHaveValue('test');
|
||||
|
||||
// Value prop change should not override local typing
|
||||
await act(async () => {
|
||||
// Simulate prop change
|
||||
render(
|
||||
<YAxisUnitSelector
|
||||
value="bytes"
|
||||
onSelect={defaultProps.onSelect}
|
||||
fieldLabel={defaultProps.fieldLabel}
|
||||
handleClear={defaultProps.handleClear}
|
||||
/>,
|
||||
);
|
||||
});
|
||||
|
||||
// Local typing should be preserved
|
||||
expect(input).toHaveValue('test');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,613 +1,53 @@
|
||||
import { flattenDeep } from 'lodash-es';
|
||||
|
||||
import {
|
||||
AccelerationFormats,
|
||||
AngularFormats,
|
||||
AreaFormats,
|
||||
BooleanFormats,
|
||||
CategoryNames,
|
||||
ConcentrationFormats,
|
||||
CurrencyFormats,
|
||||
DataFormats,
|
||||
DataRateFormats,
|
||||
DataTypeCategories,
|
||||
DatetimeFormats,
|
||||
FlopsFormats,
|
||||
FlowFormats,
|
||||
ForceFormats,
|
||||
HashRateFormats,
|
||||
LengthFormats,
|
||||
MassFormats,
|
||||
MiscellaneousFormats,
|
||||
PowerElectricalFormats,
|
||||
PressureFormats,
|
||||
RadiationFormats,
|
||||
RotationSpeedFormats,
|
||||
TemperatureFormats,
|
||||
ThroughputFormats,
|
||||
TimeFormats,
|
||||
VelocityFormats,
|
||||
VolumeFormats,
|
||||
} from './types';
|
||||
UniversalUnitToGrafanaUnit,
|
||||
YAxisCategoryNames,
|
||||
} from 'components/YAxisUnitSelector/constants';
|
||||
import { YAxisSource } from 'components/YAxisUnitSelector/types';
|
||||
import { getYAxisCategories } from 'components/YAxisUnitSelector/utils';
|
||||
import { convertValue } from 'lib/getConvertedValue';
|
||||
|
||||
export const dataTypeCategories: DataTypeCategories = [
|
||||
{
|
||||
name: CategoryNames.Time,
|
||||
formats: [
|
||||
{ name: 'Hertz (1/s)', id: TimeFormats.Hertz },
|
||||
{ name: 'nanoseconds (ns)', id: TimeFormats.Nanoseconds },
|
||||
{ name: 'microseconds (µs)', id: TimeFormats.Microseconds },
|
||||
{ name: 'milliseconds (ms)', id: TimeFormats.Milliseconds },
|
||||
{ name: 'seconds (s)', id: TimeFormats.Seconds },
|
||||
{ name: 'minutes (m)', id: TimeFormats.Minutes },
|
||||
{ name: 'hours (h)', id: TimeFormats.Hours },
|
||||
{ name: 'days (d)', id: TimeFormats.Days },
|
||||
{ name: 'duration in ms (dtdurationms)', id: TimeFormats.DurationMs },
|
||||
{ name: 'duration in s (dtdurations)', id: TimeFormats.DurationS },
|
||||
{ name: 'duration in h:m:s (dthms)', id: TimeFormats.DurationHms },
|
||||
{ name: 'duration in d:h:m:s (dtdhms)', id: TimeFormats.DurationDhms },
|
||||
{ name: 'timeticks (timeticks)', id: TimeFormats.Timeticks },
|
||||
{ name: 'clock in ms (clockms)', id: TimeFormats.ClockMs },
|
||||
{ name: 'clock in s (clocks)', id: TimeFormats.ClockS },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Throughput,
|
||||
formats: [
|
||||
{ name: 'counts/sec (cps)', id: ThroughputFormats.CountsPerSec },
|
||||
{ name: 'ops/sec (ops)', id: ThroughputFormats.OpsPerSec },
|
||||
{ name: 'requests/sec (reqps)', id: ThroughputFormats.RequestsPerSec },
|
||||
{ name: 'reads/sec (rps)', id: ThroughputFormats.ReadsPerSec },
|
||||
{ name: 'writes/sec (wps)', id: ThroughputFormats.WritesPerSec },
|
||||
{ name: 'I/O operations/sec (iops)', id: ThroughputFormats.IOOpsPerSec },
|
||||
{ name: 'counts/min (cpm)', id: ThroughputFormats.CountsPerMin },
|
||||
{ name: 'ops/min (opm)', id: ThroughputFormats.OpsPerMin },
|
||||
{ name: 'reads/min (rpm)', id: ThroughputFormats.ReadsPerMin },
|
||||
{ name: 'writes/min (wpm)', id: ThroughputFormats.WritesPerMin },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Data,
|
||||
formats: [
|
||||
{ name: 'bytes(IEC)', id: DataFormats.BytesIEC },
|
||||
{ name: 'bytes(SI)', id: DataFormats.BytesSI },
|
||||
{ name: 'bits(IEC)', id: DataFormats.BitsIEC },
|
||||
{ name: 'bits(SI)', id: DataFormats.BitsSI },
|
||||
{ name: 'kibibytes', id: DataFormats.KibiBytes },
|
||||
{ name: 'kilobytes', id: DataFormats.KiloBytes },
|
||||
{ name: 'mebibytes', id: DataFormats.MebiBytes },
|
||||
{ name: 'megabytes', id: DataFormats.MegaBytes },
|
||||
{ name: 'gibibytes', id: DataFormats.GibiBytes },
|
||||
{ name: 'gigabytes', id: DataFormats.GigaBytes },
|
||||
{ name: 'tebibytes', id: DataFormats.TebiBytes },
|
||||
{ name: 'terabytes', id: DataFormats.TeraBytes },
|
||||
{ name: 'pebibytes', id: DataFormats.PebiBytes },
|
||||
{ name: 'petabytes', id: DataFormats.PetaBytes },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.DataRate,
|
||||
formats: [
|
||||
{ name: 'packets/sec', id: DataRateFormats.PacketsPerSec },
|
||||
{ name: 'bytes/sec(IEC)', id: DataRateFormats.BytesPerSecIEC },
|
||||
{ name: 'bytes/sec(SI)', id: DataRateFormats.BytesPerSecSI },
|
||||
{ name: 'bits/sec(IEC)', id: DataRateFormats.BitsPerSecIEC },
|
||||
{ name: 'bits/sec(SI)', id: DataRateFormats.BitsPerSecSI },
|
||||
{ name: 'kibibytes/sec', id: DataRateFormats.KibiBytesPerSec },
|
||||
{ name: 'kibibits/sec', id: DataRateFormats.KibiBitsPerSec },
|
||||
{ name: 'kilobytes/sec', id: DataRateFormats.KiloBytesPerSec },
|
||||
{ name: 'kilobits/sec', id: DataRateFormats.KiloBitsPerSec },
|
||||
{ name: 'mebibytes/sec', id: DataRateFormats.MebiBytesPerSec },
|
||||
{ name: 'mebibits/sec', id: DataRateFormats.MebiBitsPerSec },
|
||||
{ name: 'megabytes/sec', id: DataRateFormats.MegaBytesPerSec },
|
||||
{ name: 'megabits/sec', id: DataRateFormats.MegaBitsPerSec },
|
||||
{ name: 'gibibytes/sec', id: DataRateFormats.GibiBytesPerSec },
|
||||
{ name: 'gibibits/sec', id: DataRateFormats.GibiBitsPerSec },
|
||||
{ name: 'gigabytes/sec', id: DataRateFormats.GigaBytesPerSec },
|
||||
{ name: 'gigabits/sec', id: DataRateFormats.GigaBitsPerSec },
|
||||
{ name: 'tebibytes/sec', id: DataRateFormats.TebiBytesPerSec },
|
||||
{ name: 'tebibits/sec', id: DataRateFormats.TebiBitsPerSec },
|
||||
{ name: 'terabytes/sec', id: DataRateFormats.TeraBytesPerSec },
|
||||
{ name: 'terabits/sec', id: DataRateFormats.TeraBitsPerSec },
|
||||
{ name: 'pebibytes/sec', id: DataRateFormats.PebiBytesPerSec },
|
||||
{ name: 'pebibits/sec', id: DataRateFormats.PebiBitsPerSec },
|
||||
{ name: 'petabytes/sec', id: DataRateFormats.PetaBytesPerSec },
|
||||
{ name: 'petabits/sec', id: DataRateFormats.PetaBitsPerSec },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.HashRate,
|
||||
formats: [
|
||||
{ name: 'hashes/sec', id: HashRateFormats.HashesPerSec },
|
||||
{ name: 'kilohashes/sec', id: HashRateFormats.KiloHashesPerSec },
|
||||
{ name: 'megahashes/sec', id: HashRateFormats.MegaHashesPerSec },
|
||||
{ name: 'gigahashes/sec', id: HashRateFormats.GigaHashesPerSec },
|
||||
{ name: 'terahashes/sec', id: HashRateFormats.TeraHashesPerSec },
|
||||
{ name: 'petahashes/sec', id: HashRateFormats.PetaHashesPerSec },
|
||||
{ name: 'exahashes/sec', id: HashRateFormats.ExaHashesPerSec },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Miscellaneous,
|
||||
formats: [
|
||||
{ name: 'none', id: MiscellaneousFormats.None },
|
||||
{ name: 'String', id: MiscellaneousFormats.String },
|
||||
{ name: 'short', id: MiscellaneousFormats.Short },
|
||||
{ name: 'Percent (0-100)', id: MiscellaneousFormats.Percent },
|
||||
{ name: 'Percent (0.0-1.0)', id: MiscellaneousFormats.PercentUnit },
|
||||
{ name: 'Humidity (%H)', id: MiscellaneousFormats.Humidity },
|
||||
{ name: 'Decibel', id: MiscellaneousFormats.Decibel },
|
||||
{ name: 'Hexadecimal (0x)', id: MiscellaneousFormats.Hexadecimal0x },
|
||||
{ name: 'Hexadecimal', id: MiscellaneousFormats.Hexadecimal },
|
||||
{ name: 'Scientific notation', id: MiscellaneousFormats.ScientificNotation },
|
||||
{ name: 'Locale format', id: MiscellaneousFormats.LocaleFormat },
|
||||
{ name: 'Pixels', id: MiscellaneousFormats.Pixels },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Acceleration,
|
||||
formats: [
|
||||
{ name: 'Meters/sec²', id: AccelerationFormats.MetersPerSecondSquared },
|
||||
{ name: 'Feet/sec²', id: AccelerationFormats.FeetPerSecondSquared },
|
||||
{ name: 'G unit', id: AccelerationFormats.GUnit },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Angle,
|
||||
formats: [
|
||||
{ name: 'Degrees (°)', id: AngularFormats.Degree },
|
||||
{ name: 'Radians', id: AngularFormats.Radian },
|
||||
{ name: 'Gradian', id: AngularFormats.Gradian },
|
||||
{ name: 'Arc Minutes', id: AngularFormats.ArcMinute },
|
||||
{ name: 'Arc Seconds', id: AngularFormats.ArcSecond },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Area,
|
||||
formats: [
|
||||
{ name: 'Square Meters (m²)', id: AreaFormats.SquareMeters },
|
||||
{ name: 'Square Feet (ft²)', id: AreaFormats.SquareFeet },
|
||||
{ name: 'Square Miles (mi²)', id: AreaFormats.SquareMiles },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Computation,
|
||||
formats: [
|
||||
{ name: 'FLOP/s', id: FlopsFormats.FLOPs },
|
||||
{ name: 'MFLOP/s', id: FlopsFormats.MFLOPs },
|
||||
{ name: 'GFLOP/s', id: FlopsFormats.GFLOPs },
|
||||
{ name: 'TFLOP/s', id: FlopsFormats.TFLOPs },
|
||||
{ name: 'PFLOP/s', id: FlopsFormats.PFLOPs },
|
||||
{ name: 'EFLOP/s', id: FlopsFormats.EFLOPs },
|
||||
{ name: 'ZFLOP/s', id: FlopsFormats.ZFLOPs },
|
||||
{ name: 'YFLOP/s', id: FlopsFormats.YFLOPs },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Concentration,
|
||||
formats: [
|
||||
{ name: 'parts-per-million (ppm)', id: ConcentrationFormats.PPM },
|
||||
{ name: 'parts-per-billion (ppb)', id: ConcentrationFormats.PPB },
|
||||
{ name: 'nanogram per cubic meter (ng/m³)', id: ConcentrationFormats.NgM3 },
|
||||
{
|
||||
name: 'nanogram per normal cubic meter (ng/Nm³)',
|
||||
id: ConcentrationFormats.NgNM3,
|
||||
},
|
||||
{ name: 'microgram per cubic meter (μg/m³)', id: ConcentrationFormats.UgM3 },
|
||||
{
|
||||
name: 'microgram per normal cubic meter (μg/Nm³)',
|
||||
id: ConcentrationFormats.UgNM3,
|
||||
},
|
||||
{ name: 'milligram per cubic meter (mg/m³)', id: ConcentrationFormats.MgM3 },
|
||||
{
|
||||
name: 'milligram per normal cubic meter (mg/Nm³)',
|
||||
id: ConcentrationFormats.MgNM3,
|
||||
},
|
||||
{ name: 'gram per cubic meter (g/m³)', id: ConcentrationFormats.GM3 },
|
||||
{
|
||||
name: 'gram per normal cubic meter (g/Nm³)',
|
||||
id: ConcentrationFormats.GNM3,
|
||||
},
|
||||
{ name: 'milligrams per decilitre (mg/dL)', id: ConcentrationFormats.MgDL },
|
||||
{ name: 'millimoles per litre (mmol/L)', id: ConcentrationFormats.MmolL },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Currency,
|
||||
formats: [
|
||||
{ name: 'Dollars ($)', id: CurrencyFormats.USD },
|
||||
{ name: 'Pounds (£)', id: CurrencyFormats.GBP },
|
||||
{ name: 'Euro (€)', id: CurrencyFormats.EUR },
|
||||
{ name: 'Yen (¥)', id: CurrencyFormats.JPY },
|
||||
{ name: 'Rubles (₽)', id: CurrencyFormats.RUB },
|
||||
{ name: 'Hryvnias (₴)', id: CurrencyFormats.UAH },
|
||||
{ name: 'Real (R$)', id: CurrencyFormats.BRL },
|
||||
{ name: 'Danish Krone (kr)', id: CurrencyFormats.DKK },
|
||||
{ name: 'Icelandic Króna (kr)', id: CurrencyFormats.ISK },
|
||||
{ name: 'Norwegian Krone (kr)', id: CurrencyFormats.NOK },
|
||||
{ name: 'Swedish Krona (kr)', id: CurrencyFormats.SEK },
|
||||
{ name: 'Czech koruna (czk)', id: CurrencyFormats.CZK },
|
||||
{ name: 'Swiss franc (CHF)', id: CurrencyFormats.CHF },
|
||||
{ name: 'Polish Złoty (PLN)', id: CurrencyFormats.PLN },
|
||||
{ name: 'Bitcoin (฿)', id: CurrencyFormats.BTC },
|
||||
{ name: 'Milli Bitcoin (฿)', id: CurrencyFormats.MBTC },
|
||||
{ name: 'Micro Bitcoin (฿)', id: CurrencyFormats.UBTC },
|
||||
{ name: 'South African Rand (R)', id: CurrencyFormats.ZAR },
|
||||
{ name: 'Indian Rupee (₹)', id: CurrencyFormats.INR },
|
||||
{ name: 'South Korean Won (₩)', id: CurrencyFormats.KRW },
|
||||
{ name: 'Indonesian Rupiah (Rp)', id: CurrencyFormats.IDR },
|
||||
{ name: 'Philippine Peso (PHP)', id: CurrencyFormats.PHP },
|
||||
{ name: 'Vietnamese Dong (VND)', id: CurrencyFormats.VND },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Datetime,
|
||||
formats: [
|
||||
{ name: 'Datetime ISO', id: DatetimeFormats.ISO },
|
||||
{
|
||||
name: 'Datetime ISO (No date if today)',
|
||||
id: DatetimeFormats.ISONoDateIfToday,
|
||||
},
|
||||
{ name: 'Datetime US', id: DatetimeFormats.US },
|
||||
{
|
||||
name: 'Datetime US (No date if today)',
|
||||
id: DatetimeFormats.USNoDateIfToday,
|
||||
},
|
||||
{ name: 'Datetime local', id: DatetimeFormats.Local },
|
||||
{
|
||||
name: 'Datetime local (No date if today)',
|
||||
id: DatetimeFormats.LocalNoDateIfToday,
|
||||
},
|
||||
{ name: 'Datetime default', id: DatetimeFormats.System },
|
||||
{ name: 'From Now', id: DatetimeFormats.FromNow },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Energy,
|
||||
formats: [
|
||||
{ name: 'Watt (W)', id: PowerElectricalFormats.WATT },
|
||||
{ name: 'Kilowatt (kW)', id: PowerElectricalFormats.KWATT },
|
||||
{ name: 'Megawatt (MW)', id: PowerElectricalFormats.MEGWATT },
|
||||
{ name: 'Gigawatt (GW)', id: PowerElectricalFormats.GWATT },
|
||||
{ name: 'Milliwatt (mW)', id: PowerElectricalFormats.MWATT },
|
||||
{ name: 'Watt per square meter (W/m²)', id: PowerElectricalFormats.WM2 },
|
||||
{ name: 'Volt-Ampere (VA)', id: PowerElectricalFormats.VOLTAMP },
|
||||
{ name: 'Kilovolt-Ampere (kVA)', id: PowerElectricalFormats.KVOLTAMP },
|
||||
{
|
||||
name: 'Volt-Ampere reactive (VAr)',
|
||||
id: PowerElectricalFormats.VOLTAMPREACT,
|
||||
},
|
||||
{
|
||||
name: 'Kilovolt-Ampere reactive (kVAr)',
|
||||
id: PowerElectricalFormats.KVOLTAMPREACT,
|
||||
},
|
||||
{ name: 'Watt-hour (Wh)', id: PowerElectricalFormats.WATTH },
|
||||
{
|
||||
name: 'Watt-hour per Kilogram (Wh/kg)',
|
||||
id: PowerElectricalFormats.WATTHPERKG,
|
||||
},
|
||||
{ name: 'Kilowatt-hour (kWh)', id: PowerElectricalFormats.KWATTH },
|
||||
{ name: 'Kilowatt-min (kWm)', id: PowerElectricalFormats.KWATTM },
|
||||
{ name: 'Ampere-hour (Ah)', id: PowerElectricalFormats.AMPH },
|
||||
{ name: 'Kiloampere-hour (kAh)', id: PowerElectricalFormats.KAMPH },
|
||||
{ name: 'Milliampere-hour (mAh)', id: PowerElectricalFormats.MAMPH },
|
||||
{ name: 'Joule (J)', id: PowerElectricalFormats.JOULE },
|
||||
{ name: 'Electron volt (eV)', id: PowerElectricalFormats.EV },
|
||||
{ name: 'Ampere (A)', id: PowerElectricalFormats.AMP },
|
||||
{ name: 'Kiloampere (kA)', id: PowerElectricalFormats.KAMP },
|
||||
{ name: 'Milliampere (mA)', id: PowerElectricalFormats.MAMP },
|
||||
{ name: 'Volt (V)', id: PowerElectricalFormats.VOLT },
|
||||
{ name: 'Kilovolt (kV)', id: PowerElectricalFormats.KVOLT },
|
||||
{ name: 'Millivolt (mV)', id: PowerElectricalFormats.MVOLT },
|
||||
{ name: 'Decibel-milliwatt (dBm)', id: PowerElectricalFormats.DBM },
|
||||
{ name: 'Ohm (Ω)', id: PowerElectricalFormats.OHM },
|
||||
{ name: 'Kiloohm (kΩ)', id: PowerElectricalFormats.KOHM },
|
||||
{ name: 'Megaohm (MΩ)', id: PowerElectricalFormats.MOHM },
|
||||
{ name: 'Farad (F)', id: PowerElectricalFormats.FARAD },
|
||||
{ name: 'Microfarad (µF)', id: PowerElectricalFormats.µFARAD },
|
||||
{ name: 'Nanofarad (nF)', id: PowerElectricalFormats.NFARAD },
|
||||
{ name: 'Picofarad (pF)', id: PowerElectricalFormats.PFARAD },
|
||||
{ name: 'Femtofarad (fF)', id: PowerElectricalFormats.FFARAD },
|
||||
{ name: 'Henry (H)', id: PowerElectricalFormats.HENRY },
|
||||
{ name: 'Millihenry (mH)', id: PowerElectricalFormats.MHENRY },
|
||||
{ name: 'Microhenry (µH)', id: PowerElectricalFormats.µHENRY },
|
||||
{ name: 'Lumens (Lm)', id: PowerElectricalFormats.LUMENS },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Flow,
|
||||
formats: [
|
||||
{ name: 'Gallons/min (gpm)', id: FlowFormats.FLOWGPM },
|
||||
{ name: 'Cubic meters/sec (cms)', id: FlowFormats.FLOWCMS },
|
||||
{ name: 'Cubic feet/sec (cfs)', id: FlowFormats.FLOWCFS },
|
||||
{ name: 'Cubic feet/min (cfm)', id: FlowFormats.FLOWCFM },
|
||||
{ name: 'Litre/hour', id: FlowFormats.LITREH },
|
||||
{ name: 'Litre/min (L/min)', id: FlowFormats.FLOWLPM },
|
||||
{ name: 'milliLitre/min (mL/min)', id: FlowFormats.FLOWMLPM },
|
||||
{ name: 'Lux (lx)', id: FlowFormats.LUX },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Force,
|
||||
formats: [
|
||||
{ name: 'Newton-meters (Nm)', id: ForceFormats.FORCENM },
|
||||
{ name: 'Kilonewton-meters (kNm)', id: ForceFormats.FORCEKNM },
|
||||
{ name: 'Newtons (N)', id: ForceFormats.FORCEN },
|
||||
{ name: 'Kilonewtons (kN)', id: ForceFormats.FORCEKN },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Mass,
|
||||
formats: [
|
||||
{ name: 'milligram (mg)', id: MassFormats.MASSMG },
|
||||
{ name: 'gram (g)', id: MassFormats.MASSG },
|
||||
{ name: 'pound (lb)', id: MassFormats.MASSLB },
|
||||
{ name: 'kilogram (kg)', id: MassFormats.MASSKG },
|
||||
{ name: 'metric ton (t)', id: MassFormats.MASST },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Length,
|
||||
formats: [
|
||||
{ name: 'millimeter (mm)', id: LengthFormats.LENGTHMM },
|
||||
{ name: 'inch (in)', id: LengthFormats.LENGTHIN },
|
||||
{ name: 'feet (ft)', id: LengthFormats.LENGTHFT },
|
||||
{ name: 'meter (m)', id: LengthFormats.LENGTHM },
|
||||
{ name: 'kilometer (km)', id: LengthFormats.LENGTHKM },
|
||||
{ name: 'mile (mi)', id: LengthFormats.LENGTHMI },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Pressure,
|
||||
formats: [
|
||||
{ name: 'Millibars', id: PressureFormats.PRESSUREMBAR },
|
||||
{ name: 'Bars', id: PressureFormats.PRESSUREBAR },
|
||||
{ name: 'Kilobars', id: PressureFormats.PRESSUREKBAR },
|
||||
{ name: 'Pascals', id: PressureFormats.PRESSUREPA },
|
||||
{ name: 'Hectopascals', id: PressureFormats.PRESSUREHPA },
|
||||
{ name: 'Kilopascals', id: PressureFormats.PRESSUREKPA },
|
||||
{ name: 'Inches of mercury', id: PressureFormats.PRESSUREHG },
|
||||
{ name: 'PSI', id: PressureFormats.PRESSUREPSI },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Radiation,
|
||||
formats: [
|
||||
{ name: 'Becquerel (Bq)', id: RadiationFormats.RADBQ },
|
||||
{ name: 'curie (Ci)', id: RadiationFormats.RADCI },
|
||||
{ name: 'Gray (Gy)', id: RadiationFormats.RADGY },
|
||||
{ name: 'rad', id: RadiationFormats.RADRAD },
|
||||
{ name: 'Sievert (Sv)', id: RadiationFormats.RADSV },
|
||||
{ name: 'milliSievert (mSv)', id: RadiationFormats.RADMSV },
|
||||
{ name: 'microSievert (µSv)', id: RadiationFormats.RADUSV },
|
||||
{ name: 'rem', id: RadiationFormats.RADREM },
|
||||
{ name: 'Exposure (C/kg)', id: RadiationFormats.RADEXPCKG },
|
||||
{ name: 'roentgen (R)', id: RadiationFormats.RADR },
|
||||
{ name: 'Sievert/hour (Sv/h)', id: RadiationFormats.RADSVH },
|
||||
{ name: 'milliSievert/hour (mSv/h)', id: RadiationFormats.RADMSVH },
|
||||
{ name: 'microSievert/hour (µSv/h)', id: RadiationFormats.RADUSVH },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.RotationSpeed,
|
||||
formats: [
|
||||
{ name: 'Revolutions per minute (rpm)', id: RotationSpeedFormats.ROTRPM },
|
||||
{ name: 'Hertz (Hz)', id: RotationSpeedFormats.ROTHZ },
|
||||
{ name: 'Radians per second (rad/s)', id: RotationSpeedFormats.ROTRADS },
|
||||
{ name: 'Degrees per second (°/s)', id: RotationSpeedFormats.ROTDEGS },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Temperature,
|
||||
formats: [
|
||||
{ name: 'Celsius (°C)', id: TemperatureFormats.CELSIUS },
|
||||
{ name: 'Fahrenheit (°F)', id: TemperatureFormats.FAHRENHEIT },
|
||||
{ name: 'Kelvin (K)', id: TemperatureFormats.KELVIN },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Velocity,
|
||||
formats: [
|
||||
{ name: 'meters/second (m/s)', id: VelocityFormats.METERS_PER_SECOND },
|
||||
{ name: 'kilometers/hour (km/h)', id: VelocityFormats.KILOMETERS_PER_HOUR },
|
||||
{ name: 'miles/hour (mph)', id: VelocityFormats.MILES_PER_HOUR },
|
||||
{ name: 'knot (kn)', id: VelocityFormats.KNOT },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Volume,
|
||||
formats: [
|
||||
{ name: 'millilitre (mL)', id: VolumeFormats.MILLILITRE },
|
||||
{ name: 'litre (L)', id: VolumeFormats.LITRE },
|
||||
{ name: 'cubic meter', id: VolumeFormats.CUBIC_METER },
|
||||
{ name: 'Normal cubic meter', id: VolumeFormats.NORMAL_CUBIC_METER },
|
||||
{ name: 'cubic decimeter', id: VolumeFormats.CUBIC_DECIMETER },
|
||||
{ name: 'gallons', id: VolumeFormats.GALLONS },
|
||||
],
|
||||
},
|
||||
{
|
||||
name: CategoryNames.Boolean,
|
||||
formats: [
|
||||
{ name: 'True / False', id: BooleanFormats.TRUE_FALSE },
|
||||
{ name: 'Yes / No', id: BooleanFormats.YES_NO },
|
||||
{ name: 'On / Off', id: BooleanFormats.ON_OFF },
|
||||
],
|
||||
},
|
||||
];
|
||||
// Function to get the category name for a given unit ID (Grafana or universal)
|
||||
export const getCategoryName = (unitId: string): YAxisCategoryNames | null => {
|
||||
const categories = getYAxisCategories(YAxisSource.DASHBOARDS);
|
||||
|
||||
export const flattenedCategories = flattenDeep(
|
||||
dataTypeCategories.map((category) => category.formats),
|
||||
);
|
||||
const foundCategory = categories.find((category) =>
|
||||
category.units.some((unit) => {
|
||||
// Units in Y-axis categories use universal unit IDs.
|
||||
// Thresholds / column units often use Grafana-style IDs.
|
||||
// Treat a unit as matching if either:
|
||||
// - it is already the universal ID, or
|
||||
// - it matches the mapped Grafana ID for that universal unit.
|
||||
if (unit.id === unitId) {
|
||||
return true;
|
||||
}
|
||||
|
||||
type ConversionFactors = {
|
||||
[key: string]: {
|
||||
[key: string]: number | null;
|
||||
};
|
||||
const grafanaId = UniversalUnitToGrafanaUnit[unit.id];
|
||||
return grafanaId === unitId;
|
||||
}),
|
||||
);
|
||||
|
||||
return foundCategory ? foundCategory.name : null;
|
||||
};
|
||||
|
||||
// Object containing conversion factors for various categories and formats
|
||||
const conversionFactors: ConversionFactors = {
|
||||
[CategoryNames.Time]: {
|
||||
[TimeFormats.Hertz]: 1,
|
||||
[TimeFormats.Nanoseconds]: 1e-9,
|
||||
[TimeFormats.Microseconds]: 1e-6,
|
||||
[TimeFormats.Milliseconds]: 1e-3,
|
||||
[TimeFormats.Seconds]: 1,
|
||||
[TimeFormats.Minutes]: 60,
|
||||
[TimeFormats.Hours]: 3600,
|
||||
[TimeFormats.Days]: 86400,
|
||||
[TimeFormats.DurationMs]: 1e-3,
|
||||
[TimeFormats.DurationS]: 1,
|
||||
[TimeFormats.DurationHms]: null, // Requires special handling
|
||||
[TimeFormats.DurationDhms]: null, // Requires special handling
|
||||
[TimeFormats.Timeticks]: null, // Requires special handling
|
||||
[TimeFormats.ClockMs]: 1e-3,
|
||||
[TimeFormats.ClockS]: 1,
|
||||
},
|
||||
[CategoryNames.Throughput]: {
|
||||
[ThroughputFormats.CountsPerSec]: 1,
|
||||
[ThroughputFormats.OpsPerSec]: 1,
|
||||
[ThroughputFormats.RequestsPerSec]: 1,
|
||||
[ThroughputFormats.ReadsPerSec]: 1,
|
||||
[ThroughputFormats.WritesPerSec]: 1,
|
||||
[ThroughputFormats.IOOpsPerSec]: 1,
|
||||
[ThroughputFormats.CountsPerMin]: 1 / 60,
|
||||
[ThroughputFormats.OpsPerMin]: 1 / 60,
|
||||
[ThroughputFormats.ReadsPerMin]: 1 / 60,
|
||||
[ThroughputFormats.WritesPerMin]: 1 / 60,
|
||||
},
|
||||
[CategoryNames.Data]: {
|
||||
[DataFormats.BytesIEC]: 1,
|
||||
[DataFormats.BytesSI]: 1,
|
||||
[DataFormats.BitsIEC]: 0.125,
|
||||
[DataFormats.BitsSI]: 0.125,
|
||||
[DataFormats.KibiBytes]: 1024,
|
||||
[DataFormats.KiloBytes]: 1000,
|
||||
[DataFormats.MebiBytes]: 1048576,
|
||||
[DataFormats.MegaBytes]: 1000000,
|
||||
[DataFormats.GibiBytes]: 1073741824,
|
||||
[DataFormats.GigaBytes]: 1000000000,
|
||||
[DataFormats.TebiBytes]: 1099511627776,
|
||||
[DataFormats.TeraBytes]: 1000000000000,
|
||||
[DataFormats.PebiBytes]: 1125899906842624,
|
||||
[DataFormats.PetaBytes]: 1000000000000000,
|
||||
},
|
||||
[CategoryNames.DataRate]: {
|
||||
[DataRateFormats.PacketsPerSec]: null, // Cannot convert directly to other data rates
|
||||
[DataRateFormats.BytesPerSecIEC]: 1,
|
||||
[DataRateFormats.BytesPerSecSI]: 1,
|
||||
[DataRateFormats.BitsPerSecIEC]: 0.125,
|
||||
[DataRateFormats.BitsPerSecSI]: 0.125,
|
||||
[DataRateFormats.KibiBytesPerSec]: 1024,
|
||||
[DataRateFormats.KibiBitsPerSec]: 128,
|
||||
[DataRateFormats.KiloBytesPerSec]: 1000,
|
||||
[DataRateFormats.KiloBitsPerSec]: 125,
|
||||
[DataRateFormats.MebiBytesPerSec]: 1048576,
|
||||
[DataRateFormats.MebiBitsPerSec]: 131072,
|
||||
[DataRateFormats.MegaBytesPerSec]: 1000000,
|
||||
[DataRateFormats.MegaBitsPerSec]: 125000,
|
||||
[DataRateFormats.GibiBytesPerSec]: 1073741824,
|
||||
[DataRateFormats.GibiBitsPerSec]: 134217728,
|
||||
[DataRateFormats.GigaBytesPerSec]: 1000000000,
|
||||
[DataRateFormats.GigaBitsPerSec]: 125000000,
|
||||
[DataRateFormats.TebiBytesPerSec]: 1099511627776,
|
||||
[DataRateFormats.TebiBitsPerSec]: 137438953472,
|
||||
[DataRateFormats.TeraBytesPerSec]: 1000000000000,
|
||||
[DataRateFormats.TeraBitsPerSec]: 125000000000,
|
||||
[DataRateFormats.PebiBytesPerSec]: 1125899906842624,
|
||||
[DataRateFormats.PebiBitsPerSec]: 140737488355328,
|
||||
[DataRateFormats.PetaBytesPerSec]: 1000000000000000,
|
||||
[DataRateFormats.PetaBitsPerSec]: 125000000000000,
|
||||
},
|
||||
[CategoryNames.Miscellaneous]: {
|
||||
[MiscellaneousFormats.None]: null,
|
||||
[MiscellaneousFormats.String]: null,
|
||||
[MiscellaneousFormats.Short]: null,
|
||||
[MiscellaneousFormats.Percent]: 1,
|
||||
[MiscellaneousFormats.PercentUnit]: 100,
|
||||
[MiscellaneousFormats.Humidity]: 1,
|
||||
[MiscellaneousFormats.Decibel]: null,
|
||||
[MiscellaneousFormats.Hexadecimal0x]: null,
|
||||
[MiscellaneousFormats.Hexadecimal]: null,
|
||||
[MiscellaneousFormats.ScientificNotation]: null,
|
||||
[MiscellaneousFormats.LocaleFormat]: null,
|
||||
[MiscellaneousFormats.Pixels]: null,
|
||||
},
|
||||
[CategoryNames.Boolean]: {
|
||||
[BooleanFormats.TRUE_FALSE]: null, // Not convertible
|
||||
[BooleanFormats.YES_NO]: null, // Not convertible
|
||||
[BooleanFormats.ON_OFF]: null, // Not convertible
|
||||
},
|
||||
};
|
||||
|
||||
// Function to get the conversion factor between two units in a specific category
|
||||
function getConversionFactor(
|
||||
fromUnit: string,
|
||||
toUnit: string,
|
||||
category: CategoryNames,
|
||||
): number | null {
|
||||
// Retrieves the conversion factors for the specified category
|
||||
const categoryFactors = conversionFactors[category];
|
||||
if (!categoryFactors) {
|
||||
return null; // Returns null if the category does not exist
|
||||
}
|
||||
const fromFactor = categoryFactors[fromUnit];
|
||||
const toFactor = categoryFactors[toUnit];
|
||||
if (
|
||||
fromFactor === undefined ||
|
||||
toFactor === undefined ||
|
||||
fromFactor === null ||
|
||||
toFactor === null
|
||||
) {
|
||||
return null; // Returns null if either unit does not exist or is not convertible
|
||||
}
|
||||
return fromFactor / toFactor; // Returns the conversion factor ratio
|
||||
}
|
||||
|
||||
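A quick worked example of how the factor table above feeds convertUnit: the returned factor is fromFactor / toFactor (both expressed against the category's base unit), and the converted value is value * factor. Converting 5 mebibytes to kilobytes inside the Data category, using the factors above (MebiBytes = 1048576, KiloBytes = 1000):

$$5 \times \frac{1048576}{1000} = 5242.88 \ \text{kilobytes}$$

Formats with a null factor (hms/dhms durations, timeticks, packets/sec, the boolean formats) deliberately fall out of this path and make getConversionFactor return null.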
// Function to convert a value from one unit to another
|
||||
export function convertUnit(
|
||||
value: number,
|
||||
fromUnitId?: string,
|
||||
toUnitId?: string,
|
||||
): number | null {
|
||||
let fromUnit: string | undefined;
|
||||
let toUnit: string | undefined;
|
||||
|
||||
// Finds the category that contains the specified units and extracts fromUnit and toUnit using array methods
|
||||
const category = dataTypeCategories.find((category) =>
|
||||
category.formats.some((format) => {
|
||||
if (format.id === fromUnitId) {
|
||||
fromUnit = format.id;
|
||||
}
|
||||
if (format.id === toUnitId) {
|
||||
toUnit = format.id;
|
||||
}
|
||||
return fromUnit && toUnit; // Break out early if both units are found
|
||||
}),
|
||||
);
|
||||
|
||||
if (!category || !fromUnit || !toUnit) {
|
||||
if (!fromUnitId || !toUnitId) {
|
||||
return null;
|
||||
} // Return null if category or units are not found
|
||||
}
|
||||
|
||||
// Gets the conversion factor for the specified units
|
||||
const conversionFactor = getConversionFactor(
|
||||
fromUnit,
|
||||
toUnit,
|
||||
category.name as any,
|
||||
);
|
||||
if (conversionFactor === null) {
|
||||
const fromCategory = getCategoryName(fromUnitId);
|
||||
const toCategory = getCategoryName(toUnitId);
|
||||
|
||||
// If either unit is unknown or the categories don't match, the conversion is invalid
|
||||
if (!fromCategory || !toCategory || fromCategory !== toCategory) {
|
||||
return null;
|
||||
} // Return null if conversion is not possible
|
||||
}
|
||||
|
||||
return value * conversionFactor;
|
||||
// Delegate the actual numeric conversion (or identity) to the shared helper,
|
||||
// which understands both Grafana-style and universal unit IDs.
|
||||
return convertValue(value, fromUnitId, toUnitId);
|
||||
}
|
||||
|
||||
// Function to get the category name for a given unit ID
|
||||
export const getCategoryName = (unitId: string): CategoryNames | null => {
|
||||
// Finds the category that contains the specified unit ID
|
||||
const foundCategory = dataTypeCategories.find((category) =>
|
||||
category.formats.some((format) => format.id === unitId),
|
||||
);
|
||||
return foundCategory ? (foundCategory.name as CategoryNames) : null;
|
||||
};
|
||||
|
||||
@@ -2,6 +2,9 @@ import { Layout } from 'react-grid-layout';
|
||||
import { DefaultOptionType } from 'antd/es/select';
|
||||
import { omitIdFromQuery } from 'components/ExplorerCard/utils';
|
||||
import { PrecisionOptionsEnum } from 'components/Graph/types';
|
||||
import { YAxisCategoryNames } from 'components/YAxisUnitSelector/constants';
|
||||
import { YAxisSource } from 'components/YAxisUnitSelector/types';
|
||||
import { getYAxisCategories } from 'components/YAxisUnitSelector/utils';
|
||||
import {
|
||||
initialQueryBuilderFormValuesMap,
|
||||
PANEL_TYPES,
|
||||
@@ -21,11 +24,7 @@ import { IBuilderQuery, Query } from 'types/api/queryBuilder/queryBuilderData';
|
||||
import { EQueryType } from 'types/common/dashboard';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
|
||||
import {
|
||||
dataTypeCategories,
|
||||
getCategoryName,
|
||||
} from './RightContainer/dataFormatCategories';
|
||||
import { CategoryNames } from './RightContainer/types';
|
||||
import { getCategoryName } from './RightContainer/dataFormatCategories';
|
||||
|
||||
export const getIsQueryModified = (
|
||||
currentQuery: Query,
|
||||
@@ -606,14 +605,21 @@ export const PANEL_TYPE_TO_QUERY_TYPES: Record<PANEL_TYPES, EQueryType[]> = {
|
||||
* the label and value for each format.
|
||||
*/
|
||||
export const getCategorySelectOptionByName = (
|
||||
name?: CategoryNames | string,
|
||||
): DefaultOptionType[] =>
|
||||
dataTypeCategories
|
||||
.find((category) => category.name === name)
|
||||
?.formats.map((format) => ({
|
||||
label: format.name,
|
||||
value: format.id,
|
||||
})) || [];
|
||||
name?: YAxisCategoryNames,
|
||||
): DefaultOptionType[] => {
|
||||
const categories = getYAxisCategories(YAxisSource.DASHBOARDS);
|
||||
if (!categories.length) {
|
||||
return [];
|
||||
}
|
||||
return (
|
||||
categories
|
||||
.find((category) => category.name === name)
|
||||
?.units.map((unit) => ({
|
||||
label: unit.name,
|
||||
value: unit.id,
|
||||
})) || []
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Generates unit options based on the provided column unit.
|
||||
|
||||
@@ -116,7 +116,7 @@ describe('SSOEnforcementToggle', () => {
		render(
			<SSOEnforcementToggle
				isDefaultChecked={true}
				record={{ ...mockGoogleAuthDomain, id: undefined }}
				record={{ ...mockGoogleAuthDomain, id: '' }}
			/>,
		);

@@ -1,10 +1,13 @@
import { CategoryNames } from 'container/NewWidget/RightContainer/types';
import { YAxisCategoryNames } from 'components/YAxisUnitSelector/constants';

export const categoryToSupport = [
	CategoryNames.Data,
	CategoryNames.DataRate,
	CategoryNames.Time,
	CategoryNames.Throughput,
	CategoryNames.Miscellaneous,
	CategoryNames.Boolean,
export const categoryToSupport: YAxisCategoryNames[] = [
	YAxisCategoryNames.None,
	YAxisCategoryNames.Data,
	YAxisCategoryNames.DataRate,
	YAxisCategoryNames.Time,
	YAxisCategoryNames.Count,
	YAxisCategoryNames.Operations,
	YAxisCategoryNames.Percentage,
	YAxisCategoryNames.Miscellaneous,
	YAxisCategoryNames.Boolean,
];

@@ -26,5 +26,22 @@ func (provider *provider) addAuthzRoutes(router *mux.Router) error {
		return err
	}

	if err := router.Handle("/api/v1/authz/resources", handler.New(provider.authZ.OpenAccess(provider.authzHandler.GetResources), handler.OpenAPIDef{
		ID:                  "AuthzResources",
		Tags:                []string{"authz"},
		Summary:             "Get resources",
		Description:         "Gets all the available resources",
		Request:             nil,
		RequestContentType:  "",
		Response:            new(authtypes.GettableResources),
		ResponseContentType: "application/json",
		SuccessStatusCode:   http.StatusOK,
		ErrorStatusCodes:    []int{},
		Deprecated:          false,
		SecuritySchemes:     nil,
	})).Methods(http.MethodGet).GetError(); err != nil {
		return err
	}

	return nil
}

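A minimal client-side sketch of exercising the new endpoint registered above; only the path, method, and JSON response type come from the route definition, while the host and the decoded shape are assumptions (authtypes.GettableResources is not shown in this excerpt).

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// GET /api/v1/authz/resources is wired with OpenAccess above, so no
	// credentials are attached in this sketch. The host is a placeholder.
	resp, err := http.Get("http://localhost:8080/api/v1/authz/resources")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode generically since the concrete response type is not part of
	// this excerpt.
	var body map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, body)
}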
@@ -55,7 +55,7 @@ func (provider *provider) addGatewayRoutes(router *mux.Router) error {
		RequestContentType:  "application/json",
		Response:            new(gatewaytypes.GettableCreatedIngestionKey),
		ResponseContentType: "application/json",
		SuccessStatusCode:   http.StatusOK,
		SuccessStatusCode:   http.StatusCreated,
		ErrorStatusCodes:    []int{},
		Deprecated:          false,
		SecuritySchemes:     newSecuritySchemes(types.RoleAdmin),

@@ -81,7 +81,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
|
||||
Response: new(metricsexplorertypes.MetricAttributesResponse),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
|
||||
})).Methods(http.MethodGet).GetError(); err != nil {
|
||||
@@ -138,7 +138,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
|
||||
Response: new(metricsexplorertypes.MetricHighlightsResponse),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
|
||||
})).Methods(http.MethodGet).GetError(); err != nil {
|
||||
@@ -157,7 +157,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
|
||||
Response: new(metricsexplorertypes.MetricAlertsResponse),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
|
||||
})).Methods(http.MethodGet).GetError(); err != nil {
|
||||
@@ -176,7 +176,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
|
||||
Response: new(metricsexplorertypes.MetricDashboardsResponse),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
|
||||
})).Methods(http.MethodGet).GetError(); err != nil {
|
||||
|
||||
@@ -45,23 +45,6 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := router.Handle("/api/v1/roles/resources", handler.New(provider.authZ.AdminAccess(provider.authzHandler.GetResources), handler.OpenAPIDef{
|
||||
ID: "GetResources",
|
||||
Tags: []string{"role"},
|
||||
Summary: "Get resources",
|
||||
Description: "Gets all the available resources for role assignment",
|
||||
Request: nil,
|
||||
RequestContentType: "",
|
||||
Response: new(roletypes.GettableResources),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
|
||||
})).Methods(http.MethodGet).GetError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := router.Handle("/api/v1/roles/{id}", handler.New(provider.authZ.AdminAccess(provider.authzHandler.Get), handler.OpenAPIDef{
|
||||
ID: "GetRole",
|
||||
Tags: []string{"role"},
|
||||
@@ -86,7 +69,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
|
||||
Description: "Gets all objects connected to the specified role via a given relation type",
|
||||
Request: nil,
|
||||
RequestContentType: "",
|
||||
Response: make([]*authtypes.Object, 0),
|
||||
Response: make([]*authtypes.GettableObjects, 0),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusNotFound, http.StatusNotImplemented, http.StatusUnavailableForLegalReasons},
|
||||
@@ -118,7 +101,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
|
||||
Tags: []string{"role"},
|
||||
Summary: "Patch objects for a role by relation",
|
||||
Description: "Patches the objects connected to the specified role via a given relation type",
|
||||
Request: new(roletypes.PatchableObjects),
|
||||
Request: new(authtypes.PatchableObjects),
|
||||
RequestContentType: "",
|
||||
Response: nil,
|
||||
ResponseContentType: "application/json",
|
||||
|
||||
@@ -190,7 +190,7 @@ func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *rolet
}

func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource {
	return nil
	return []*authtypes.Resource{}
}

func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id valuer.UUID, relation authtypes.Relation) ([]*authtypes.Object, error) {

@@ -110,13 +110,13 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusOK, objects)
|
||||
render.Success(rw, http.StatusOK, authtypes.NewGettableObjects(objects))
|
||||
}
|
||||
|
||||
func (handler *handler) GetResources(rw http.ResponseWriter, r *http.Request) {
|
||||
resources := handler.authz.GetResources(r.Context())
|
||||
|
||||
render.Success(rw, http.StatusOK, roletypes.NewGettableResources(resources))
|
||||
render.Success(rw, http.StatusOK, authtypes.NewGettableResources(resources))
|
||||
}
|
||||
|
||||
func (handler *handler) List(rw http.ResponseWriter, r *http.Request) {
|
||||
@@ -197,25 +197,30 @@ func (handler *handler) PatchObjects(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
req := new(roletypes.PatchableObjects)
|
||||
if err := binding.JSON.BindBody(r.Body, req); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
role, err := handler.authz.Get(ctx, valuer.MustNewUUID(claims.OrgID), id)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
patchableObjects, err := role.NewPatchableObjects(req.Additions, req.Deletions, relation)
|
||||
if err := role.ErrIfManaged(); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
req := new(authtypes.PatchableObjects)
|
||||
if err := binding.JSON.BindBody(r.Body, req); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
additions, deletions, err := authtypes.NewPatchableObjects(req.Additions, req.Deletions, relation)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = handler.authz.PatchObjects(ctx, valuer.MustNewUUID(claims.OrgID), role.Name, relation, patchableObjects.Additions, patchableObjects.Deletions)
|
||||
err = handler.authz.PatchObjects(ctx, valuer.MustNewUUID(claims.OrgID), role.Name, relation, additions, deletions)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
|
||||
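For context on the new request type, a hedged sketch of what a caller of the patch-objects flow above might send; the JSON field names mirror req.Additions and req.Deletions, but the exact tags of authtypes.PatchableObjects are an assumption, as are the object strings.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical request body for the PatchObjects handler above.
	payload := map[string]any{
		"additions": []string{"dashboards:read"},
		"deletions": []string{"alerts:write"},
	}

	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(payload); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}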
@@ -3,6 +3,7 @@ package configflagger
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/types/featuretypes"
|
||||
@@ -32,6 +33,10 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
|
||||
for name, value := range c.Config.Boolean {
|
||||
feature, _, err := registry.GetByString(name)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "boolean")
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -46,6 +51,10 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
|
||||
for name, value := range c.Config.String {
|
||||
feature, _, err := registry.GetByString(name)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "string")
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -60,6 +69,10 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
|
||||
for name, value := range c.Config.Float {
|
||||
feature, _, err := registry.GetByString(name)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "float")
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -74,6 +87,10 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
|
||||
for name, value := range c.Config.Integer {
|
||||
feature, _, err := registry.GetByString(name)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "integer")
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -88,6 +105,10 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
|
||||
for name, value := range c.Config.Object {
|
||||
feature, _, err := registry.GetByString(name)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "object")
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
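The five loops above (boolean, string, float, integer, object) repeat the same lookup-and-skip pattern: resolve the configured flag name in the registry, log a warning and continue when it is unknown, and fail construction on any other error. A self-contained sketch of that pattern with stand-in types (the real registry and logger come from the flagger provider, not shown here):

package main

import (
	"errors"
	"log/slog"
)

// errNotFound stands in for errors.TypeNotFound in the real provider.
var errNotFound = errors.New("feature not found")

// lookup stands in for registry.GetByString: only known names resolve.
func lookup(known map[string]bool, name string) error {
	if !known[name] {
		return errNotFound
	}
	return nil
}

func main() {
	known := map[string]bool{"use_span_metrics": true}
	configured := map[string]bool{"use_span_metrics": true, "mistyped_flag": false}

	for name := range configured {
		if err := lookup(known, name); err != nil {
			if errors.Is(err, errNotFound) {
				// Unknown flags are skipped with a warning instead of
				// aborting provider construction, matching the diff above.
				slog.Warn("skipping unknown feature flag", "name", name)
				continue
			}
			panic(err)
		}
		// apply the configured value to the resolved feature here
	}
}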
@@ -5,6 +5,7 @@ import "github.com/SigNoz/signoz/pkg/types/featuretypes"
var (
	FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
	FeatureKafkaSpanEval  = featuretypes.MustNewName("kafka_span_eval")
	FeatureHideRootUser   = featuretypes.MustNewName("hide_root_user")
)

func MustNewRegistry() featuretypes.Registry {
@@ -25,6 +26,14 @@ func MustNewRegistry() featuretypes.Registry {
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
		&featuretypes.Feature{
			Name:           FeatureHideRootUser,
			Kind:           featuretypes.KindBoolean,
			Stage:          featuretypes.StageStable,
			Description:    "Controls whether root admin user is hidden or not",
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
	)
	if err != nil {
		panic(err)

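The new hide_root_user entry follows the same shape as the existing boolean features: boolean kind, stable stage, disabled by default. Purely as an illustration of that shape, a hypothetical flag declared the same way (FeatureExample and its description are invented; only the field names come from the struct literal above):

package registryexample

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

// FeatureExample is illustrative only and not part of this change.
var FeatureExample = featuretypes.MustNewName("example_flag")

var exampleFeature = &featuretypes.Feature{
	Name:           FeatureExample,
	Kind:           featuretypes.KindBoolean,
	Stage:          featuretypes.StageStable,
	Description:    "Illustrative boolean flag, disabled by default",
	DefaultVariant: featuretypes.MustNewName("disabled"),
	Variants:       featuretypes.NewBooleanVariants(),
}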
@@ -122,7 +122,7 @@ func (handler *handler) CreateIngestionKey(rw http.ResponseWriter, r *http.Reque
		return
	}

	render.Success(rw, http.StatusOK, response)
	render.Success(rw, http.StatusCreated, response)
}

func (handler *handler) UpdateIngestionKey(rw http.ResponseWriter, r *http.Request) {

@@ -1,44 +0,0 @@
package middleware

import (
	"log/slog"
	"net/http"
	"runtime/debug"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
)

// Recovery is a middleware that recovers from panics, logs the panic,
// and returns a 500 Internal Server Error.
type Recovery struct {
	logger *slog.Logger
}

// NewRecovery creates a new Recovery middleware.
func NewRecovery(logger *slog.Logger) Wrapper {
	return &Recovery{
		logger: logger.With("pkg", "http-middleware-recovery"),
	}
}

// Wrap is the middleware handler.
func (m *Recovery) Wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				m.logger.ErrorContext(
					r.Context(),
					"panic recovered",
					"err", err, "stack", string(debug.Stack()),
				)

				render.Error(w, errors.NewInternalf(
					errors.CodeInternal, "internal server error",
				))
			}
		}()

		next.ServeHTTP(w, r)
	})
}
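Since the whole recovery middleware file is deleted above, here is a compact, self-contained sketch of the wrap pattern it implemented, in case an equivalent layer is reintroduced elsewhere; the mux wiring and the plain-text error body are illustrative, not taken from this change.

package main

import (
	"fmt"
	"log/slog"
	"net/http"
)

// recoverWrap mirrors what Recovery.Wrap did: recover from panics in the
// wrapped handler, log them, and answer with a 500 instead of crashing.
func recoverWrap(logger *slog.Logger, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				logger.ErrorContext(r.Context(), "panic recovered", "err", err)
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func main() {
	panicky := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
		panic("boom")
	})
	http.Handle("/panic", recoverWrap(slog.Default(), panicky))
	fmt.Println(http.ListenAndServe(":8080", nil))
}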
@@ -1,6 +1,7 @@
|
||||
package implmetricsexplorer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
@@ -187,6 +188,12 @@ func (h *handler) GetMetricAlerts(rw http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
orgID := valuer.MustNewUUID(claims.OrgID)
|
||||
|
||||
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
out, err := h.module.GetMetricAlerts(req.Context(), orgID, metricName)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
@@ -209,6 +216,12 @@ func (h *handler) GetMetricDashboards(rw http.ResponseWriter, req *http.Request)
|
||||
}
|
||||
|
||||
orgID := valuer.MustNewUUID(claims.OrgID)
|
||||
|
||||
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
out, err := h.module.GetMetricDashboards(req.Context(), orgID, metricName)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
@@ -231,6 +244,12 @@ func (h *handler) GetMetricHighlights(rw http.ResponseWriter, req *http.Request)
|
||||
}
|
||||
|
||||
orgID := valuer.MustNewUUID(claims.OrgID)
|
||||
|
||||
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
highlights, err := h.module.GetMetricHighlights(req.Context(), orgID, metricName)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
@@ -266,6 +285,12 @@ func (h *handler) GetMetricAttributes(rw http.ResponseWriter, req *http.Request)
|
||||
}
|
||||
|
||||
orgID := valuer.MustNewUUID(claims.OrgID)
|
||||
|
||||
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
out, err := h.module.GetMetricAttributes(req.Context(), orgID, &in)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
@@ -274,3 +299,14 @@ func (h *handler) GetMetricAttributes(rw http.ResponseWriter, req *http.Request)
|
||||
|
||||
render.Success(rw, http.StatusOK, out)
|
||||
}
|
||||
|
||||
func (h *handler) checkMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) error {
|
||||
exists, err := h.module.CheckMetricExists(ctx, orgID, metricName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return errors.NewNotFoundf(errors.CodeNotFound, "metric not found: %q", metricName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -404,6 +404,26 @@ func (m *module) GetMetricAttributes(ctx context.Context, orgID valuer.UUID, req
	}, nil
}

func (m *module) CheckMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) (bool, error) {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("count(*) > 0 as metricExists")
	sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
	sb.Where(sb.E("metric_name", metricName))

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	db := m.telemetryStore.ClickhouseDB()
	var exists bool
	valueCtx := ctxtypes.SetClickhouseMaxThreads(ctx, m.config.TelemetryStore.Threads)

	err := db.QueryRow(valueCtx, query, args...).Scan(&exists)
	if err != nil {
		return false, errors.WrapInternalf(err, errors.CodeInternal, "failed to check if metric exists")
	}

	return exists, nil
}

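Roughly what the builder in CheckMetricExists produces, reproduced with literal names; the sqlbuilder import path and the database/table names below are assumptions (the real code takes them from telemetrymetrics.DBName and AttributesMetadataTableName), only the builder calls themselves come from the code above.

package main

import (
	"fmt"

	sqlbuilder "github.com/huandu/go-sqlbuilder" // assumed import for the sqlbuilder used above
)

func main() {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("count(*) > 0 as metricExists")
	sb.From("signoz_metrics.attributes_metadata") // placeholder db.table
	sb.Where(sb.E("metric_name", "http_server_duration"))

	// Prints the generated ClickHouse-flavored SQL and its single bound argument.
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(query)
	fmt.Println(args)
}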
func (m *module) fetchMetadataFromCache(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]*metricsexplorertypes.MetricMetadata, []string) {
|
||||
hits := make(map[string]*metricsexplorertypes.MetricMetadata)
|
||||
misses := make([]string, 0)
|
||||
|
||||
@@ -23,6 +23,7 @@ type Handler interface {

// Module represents the metrics module interface.
type Module interface {
	CheckMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) (bool, error)
	ListMetrics(ctx context.Context, orgID valuer.UUID, params *metricsexplorertypes.ListMetricsParams) (*metricsexplorertypes.ListMetricsResponse, error)
	GetStats(ctx context.Context, orgID valuer.UUID, req *metricsexplorertypes.StatsRequest) (*metricsexplorertypes.StatsResponse, error)
	GetTreemap(ctx context.Context, orgID valuer.UUID, req *metricsexplorertypes.TreemapRequest) (*metricsexplorertypes.TreemapResponse, error)

@@ -120,6 +120,8 @@ func FilterResponse(results []*qbtypes.QueryRangeResponse) []*qbtypes.QueryRange
			}
		}
		resultData.Rows = filteredRows
	case *qbtypes.ScalarData:
		resultData.Data = filterScalarDataIPs(resultData.Columns, resultData.Data)
	}

	filteredData = append(filteredData, result)
@@ -145,6 +147,39 @@ func shouldIncludeSeries(series *qbtypes.TimeSeries) bool {
	return true
}

func filterScalarDataIPs(columns []*qbtypes.ColumnDescriptor, data [][]any) [][]any {
	// Find column indices for server address fields
	serverColIndices := make([]int, 0)
	for i, col := range columns {
		if col.Name == derivedKeyHTTPHost {
			serverColIndices = append(serverColIndices, i)
		}
	}

	if len(serverColIndices) == 0 {
		return data
	}

	filtered := make([][]any, 0, len(data))
	for _, row := range data {
		includeRow := true
		for _, colIdx := range serverColIndices {
			if colIdx < len(row) {
				if strVal, ok := row[colIdx].(string); ok {
					if net.ParseIP(strVal) != nil {
						includeRow = false
						break
					}
				}
			}
		}
		if includeRow {
			filtered = append(filtered, row)
		}
	}
	return filtered
}

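filterScalarDataIPs keeps a row only when none of its server-address columns parses as an IP literal. Note that net.ParseIP accepts bare IPv4 and IPv6 addresses but not host:port strings, which is why only plain IPs are dropped; a small standalone check:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, v := range []string{"192.168.1.1", "2001:db8::1", "example.com", "10.0.0.1:443"} {
		// Only bare IP literals report true here and would be filtered out;
		// hostnames and host:port values pass through.
		fmt.Printf("%-16s filtered=%v\n", v, net.ParseIP(v) != nil)
	}
}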
func shouldIncludeRow(row *qbtypes.RawRow) bool {
|
||||
if row.Data != nil {
|
||||
if domainVal, ok := row.Data[derivedKeyHTTPHost]; ok {
|
||||
|
||||
@@ -117,6 +117,59 @@ func TestFilterResponse(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "should filter out IP addresses from scalar data",
|
||||
input: []*qbtypes.QueryRangeResponse{
|
||||
{
|
||||
Data: qbtypes.QueryData{
|
||||
Results: []any{
|
||||
&qbtypes.ScalarData{
|
||||
QueryName: "endpoints",
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"192.168.1.1", 10},
|
||||
{"example.com", 20},
|
||||
{"10.0.0.1", 5},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*qbtypes.QueryRangeResponse{
|
||||
{
|
||||
Data: qbtypes.QueryData{
|
||||
Results: []any{
|
||||
&qbtypes.ScalarData{
|
||||
QueryName: "endpoints",
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"example.com", 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -2,18 +2,22 @@ package impluser

import (
	"context"
	"slices"

	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/modules/user"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type getter struct {
	store types.UserStore
	store   types.UserStore
	flagger flagger.Flagger
}

func NewGetter(store types.UserStore) user.Getter {
	return &getter{store: store}
func NewGetter(store types.UserStore, flagger flagger.Flagger) user.Getter {
	return &getter{store: store, flagger: flagger}
}

func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
@@ -26,6 +30,14 @@ func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*ty
		return nil, err
	}

	// filter root users if feature flag `hide_root_user` is true
	evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)
	hideRootUsers := module.flagger.BooleanOrEmpty(ctx, flagger.FeatureHideRootUser, evalCtx)

	if hideRootUsers {
		users = slices.DeleteFunc(users, func(user *types.User) bool { return user.IsRoot })
	}

	return users, nil
}

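slices.DeleteFunc removes the matching elements and returns the shortened slice, which is why ListByOrgID reassigns the result to users above. A minimal illustration with stand-in data:

package main

import (
	"fmt"
	"slices"
)

type account struct {
	Email  string
	IsRoot bool
}

func main() {
	users := []account{
		{Email: "root@example.com", IsRoot: true},
		{Email: "dev@example.com"},
	}
	// Same shape as the hide_root_user filtering above: drop root users
	// when the flag evaluates to true for the org.
	users = slices.DeleteFunc(users, func(u account) bool { return u.IsRoot })
	fmt.Println(users) // [{dev@example.com false}]
}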
@@ -13,7 +13,6 @@ import (
|
||||
root "github.com/SigNoz/signoz/pkg/modules/user"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
@@ -463,7 +462,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if slices.Contains(integrationtypes.IntegrationUserEmails, createdByUser.Email) {
|
||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
|
||||
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
||||
return
|
||||
}
|
||||
@@ -508,7 +507,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if slices.Contains(integrationtypes.IntegrationUserEmails, createdByUser.Email) {
|
||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
|
||||
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/emailtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/roletypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/dustin/go-humanize"
|
||||
@@ -174,7 +173,7 @@ func (m *Module) DeleteInvite(ctx context.Context, orgID string, id valuer.UUID)
|
||||
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
|
||||
createUserOpts := root.NewCreateUserOptions(opts...)
|
||||
|
||||
// since assign is idempotent multiple calls to assign won't cause issues in case of retries.
|
||||
// since assign is idempotant multiple calls to assign won't cause issues in case of retries.
|
||||
err := module.authz.Grant(ctx, input.OrgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role), authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -280,7 +279,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
|
||||
return errors.WithAdditionalf(err, "cannot delete root user")
|
||||
}
|
||||
|
||||
if slices.Contains(integrationtypes.IntegrationUserEmails, user.Email) {
|
||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
|
||||
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
|
||||
}
|
||||
|
||||
@@ -294,7 +293,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
|
||||
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
|
||||
}
|
||||
|
||||
// since revoke is idempotent multiple calls to revoke won't cause issues in case of retries
|
||||
// since revoke is idempotant multiple calls to revoke won't cause issues in case of retries
|
||||
err = module.authz.Revoke(ctx, orgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role), authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -1,57 +1,55 @@
|
||||
package store
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
var (
|
||||
CodeCloudIntegrationAccountNotFound errors.Code = errors.MustNewCode("cloud_integration_account_not_found")
|
||||
)
|
||||
type cloudProviderAccountsRepository interface {
|
||||
listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)
|
||||
|
||||
type CloudProviderAccountsRepository interface {
|
||||
ListConnected(ctx context.Context, orgId string, provider string) ([]integrationtypes.CloudIntegration, error)
|
||||
get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)
|
||||
|
||||
Get(ctx context.Context, orgId string, provider string, id string) (*integrationtypes.CloudIntegration, error)
|
||||
|
||||
GetConnectedCloudAccount(ctx context.Context, orgId, provider string, accountID string) (*integrationtypes.CloudIntegration, error)
|
||||
getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)
|
||||
|
||||
// Insert an account or update it by (cloudProvider, id)
|
||||
// for specified non-empty fields
|
||||
Upsert(
|
||||
upsert(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
provider string,
|
||||
id *string,
|
||||
config []byte,
|
||||
config *types.AccountConfig,
|
||||
accountId *string,
|
||||
agentReport *integrationtypes.AgentReport,
|
||||
agentReport *types.AgentReport,
|
||||
removedAt *time.Time,
|
||||
) (*integrationtypes.CloudIntegration, error)
|
||||
) (*types.CloudIntegration, *model.ApiError)
|
||||
}
|
||||
|
||||
func NewCloudProviderAccountsRepository(store sqlstore.SQLStore) CloudProviderAccountsRepository {
|
||||
return &cloudProviderAccountsSQLRepository{store: store}
|
||||
func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
|
||||
*cloudProviderAccountsSQLRepository, error,
|
||||
) {
|
||||
return &cloudProviderAccountsSQLRepository{
|
||||
store: store,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type cloudProviderAccountsSQLRepository struct {
|
||||
store sqlstore.SQLStore
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) ListConnected(
|
||||
func (r *cloudProviderAccountsSQLRepository) listConnected(
|
||||
ctx context.Context, orgId string, cloudProvider string,
|
||||
) ([]integrationtypes.CloudIntegration, error) {
|
||||
accounts := []integrationtypes.CloudIntegration{}
|
||||
) ([]types.CloudIntegration, *model.ApiError) {
|
||||
accounts := []types.CloudIntegration{}
|
||||
|
||||
err := r.store.BunDB().NewSelect().
|
||||
Model(&accounts).
|
||||
@@ -64,17 +62,18 @@ func (r *cloudProviderAccountsSQLRepository) ListConnected(
|
||||
Scan(ctx)
|
||||
|
||||
if err != nil {
|
||||
slog.ErrorContext(ctx, "error querying connected cloud accounts", "error", err)
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not query connected cloud accounts")
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"could not query connected cloud accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) Get(
|
||||
func (r *cloudProviderAccountsSQLRepository) get(
|
||||
ctx context.Context, orgId string, provider string, id string,
|
||||
) (*integrationtypes.CloudIntegration, error) {
|
||||
var result integrationtypes.CloudIntegration
|
||||
) (*types.CloudIntegration, *model.ApiError) {
|
||||
var result types.CloudIntegration
|
||||
|
||||
err := r.store.BunDB().NewSelect().
|
||||
Model(&result).
|
||||
@@ -83,25 +82,23 @@ func (r *cloudProviderAccountsSQLRepository) Get(
|
||||
Where("id = ?", id).
|
||||
Scan(ctx)
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, errors.WrapNotFoundf(
|
||||
err,
|
||||
CodeCloudIntegrationAccountNotFound,
|
||||
"couldn't find account with Id %s", id,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, model.NotFoundError(fmt.Errorf(
|
||||
"couldn't find account with Id %s", id,
|
||||
))
|
||||
} else if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't query cloud provider accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) GetConnectedCloudAccount(
|
||||
func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
|
||||
ctx context.Context, orgId string, provider string, accountId string,
|
||||
) (*integrationtypes.CloudIntegration, error) {
|
||||
var result integrationtypes.CloudIntegration
|
||||
) (*types.CloudIntegration, *model.ApiError) {
|
||||
var result types.CloudIntegration
|
||||
|
||||
err := r.store.BunDB().NewSelect().
|
||||
Model(&result).
|
||||
@@ -112,25 +109,29 @@ func (r *cloudProviderAccountsSQLRepository) GetConnectedCloudAccount(
|
||||
Where("removed_at is NULL").
|
||||
Scan(ctx)
|
||||
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, errors.WrapNotFoundf(err, CodeCloudIntegrationAccountNotFound, "couldn't find connected cloud account %s", accountId)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, model.NotFoundError(fmt.Errorf(
|
||||
"couldn't find connected cloud account %s", accountId,
|
||||
))
|
||||
} else if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't query cloud provider accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) Upsert(
func (r *cloudProviderAccountsSQLRepository) upsert(
ctx context.Context,
orgId string,
provider string,
id *string,
config []byte,
config *types.AccountConfig,
accountId *string,
agentReport *integrationtypes.AgentReport,
agentReport *types.AgentReport,
removedAt *time.Time,
) (*integrationtypes.CloudIntegration, error) {
) (*types.CloudIntegration, *model.ApiError) {
// Insert
if id == nil {
temp := valuer.GenerateUUID().StringValue()
@@ -180,7 +181,7 @@ func (r *cloudProviderAccountsSQLRepository) Upsert(
)
}

integration := integrationtypes.CloudIntegration{
integration := types.CloudIntegration{
OrgID: orgId,
Provider: provider,
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
@@ -188,25 +189,28 @@ func (r *cloudProviderAccountsSQLRepository) Upsert(
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Config: string(config),
Config: config,
AccountID: accountId,
LastAgentReport: agentReport,
RemovedAt: removedAt,
}

_, err := r.store.BunDB().NewInsert().
_, dbErr := r.store.BunDB().NewInsert().
Model(&integration).
On(onConflictClause).
Exec(ctx)

if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud integration account")
if dbErr != nil {
return nil, model.InternalError(fmt.Errorf(
"could not upsert cloud account record: %w", dbErr,
))
}

upsertedAccount, err := r.Get(ctx, orgId, provider, *id)
if err != nil {
slog.ErrorContext(ctx, "error upserting cloud integration account", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't get upserted cloud integration account")
upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
if apiErr != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't fetch upserted account by id: %w", apiErr.ToError(),
))
}

return upsertedAccount, nil
@@ -1,4 +1,4 @@
package integrationtypes
package cloudintegrations

import (
"github.com/SigNoz/signoz/pkg/errors"
624
pkg/query-service/app/cloudintegrations/controller.go
Normal file
@@ -0,0 +1,624 @@
package cloudintegrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var SupportedCloudProviders = []string{
|
||||
"aws",
|
||||
}
|
||||
|
||||
func validateCloudProviderName(name string) *model.ApiError {
|
||||
if !slices.Contains(SupportedCloudProviders, name) {
|
||||
return model.BadRequest(fmt.Errorf("invalid cloud provider: %s", name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Controller struct {
|
||||
accountsRepo cloudProviderAccountsRepository
|
||||
serviceConfigRepo ServiceConfigDatabase
|
||||
}
|
||||
|
||||
func NewController(sqlStore sqlstore.SQLStore) (*Controller, error) {
|
||||
accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
|
||||
}
|
||||
|
||||
serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
|
||||
}
|
||||
|
||||
return &Controller{
|
||||
accountsRepo: accountsRepo,
|
||||
serviceConfigRepo: serviceConfigRepo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type ConnectedAccountsListResponse struct {
|
||||
Accounts []types.Account `json:"accounts"`
|
||||
}
|
||||
|
||||
func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cloudProvider string) (
|
||||
*ConnectedAccountsListResponse, *model.ApiError,
|
||||
) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
|
||||
}
|
||||
|
||||
connectedAccounts := []types.Account{}
|
||||
for _, a := range accountRecords {
|
||||
connectedAccounts = append(connectedAccounts, a.Account())
|
||||
}
|
||||
|
||||
return &ConnectedAccountsListResponse{
|
||||
Accounts: connectedAccounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type GenerateConnectionUrlRequest struct {
|
||||
// Optional. To be specified for updates.
|
||||
AccountId *string `json:"account_id,omitempty"`
|
||||
|
||||
AccountConfig types.AccountConfig `json:"account_config"`
|
||||
|
||||
AgentConfig SigNozAgentConfig `json:"agent_config"`
|
||||
}
|
||||
|
||||
type SigNozAgentConfig struct {
|
||||
// The region in which SigNoz agent should be installed.
|
||||
Region string `json:"region"`
|
||||
|
||||
IngestionUrl string `json:"ingestion_url"`
|
||||
IngestionKey string `json:"ingestion_key"`
|
||||
SigNozAPIUrl string `json:"signoz_api_url"`
|
||||
SigNozAPIKey string `json:"signoz_api_key"`
|
||||
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type GenerateConnectionUrlResponse struct {
|
||||
AccountId string `json:"account_id"`
|
||||
ConnectionUrl string `json:"connection_url"`
|
||||
}
|
||||
|
||||
func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest) (*GenerateConnectionUrlResponse, *model.ApiError) {
|
||||
// Account connection with a simple connection URL may not be available for all providers.
|
||||
if cloudProvider != "aws" {
|
||||
return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
|
||||
}
|
||||
|
||||
account, apiErr := c.accountsRepo.upsert(
|
||||
ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
agentVersion := "v0.0.8"
|
||||
if req.AgentConfig.Version != "" {
|
||||
agentVersion = req.AgentConfig.Version
|
||||
}
|
||||
|
||||
connectionUrl := fmt.Sprintf(
|
||||
"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?",
|
||||
req.AgentConfig.Region, req.AgentConfig.Region,
|
||||
)
|
||||
|
||||
for qp, value := range map[string]string{
|
||||
"param_SigNozIntegrationAgentVersion": agentVersion,
|
||||
"param_SigNozApiUrl": req.AgentConfig.SigNozAPIUrl,
|
||||
"param_SigNozApiKey": req.AgentConfig.SigNozAPIKey,
|
||||
"param_SigNozAccountId": account.ID.StringValue(),
|
||||
"param_IngestionUrl": req.AgentConfig.IngestionUrl,
|
||||
"param_IngestionKey": req.AgentConfig.IngestionKey,
|
||||
"stackName": "signoz-integration",
|
||||
"templateURL": fmt.Sprintf(
|
||||
"https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json",
|
||||
agentVersion,
|
||||
),
|
||||
} {
|
||||
connectionUrl += fmt.Sprintf("&%s=%s", qp, url.QueryEscape(value))
|
||||
}
|
||||
|
||||
return &GenerateConnectionUrlResponse{
|
||||
AccountId: account.ID.StringValue(),
|
||||
ConnectionUrl: connectionUrl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type AccountStatusResponse struct {
|
||||
Id string `json:"id"`
|
||||
CloudAccountId *string `json:"cloud_account_id,omitempty"`
|
||||
Status types.AccountStatus `json:"status"`
|
||||
}
|
||||
|
||||
func (c *Controller) GetAccountStatus(ctx context.Context, orgId string, cloudProvider string, accountId string) (
|
||||
*AccountStatusResponse, *model.ApiError,
|
||||
) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
resp := AccountStatusResponse{
|
||||
Id: account.ID.StringValue(),
|
||||
CloudAccountId: account.AccountID,
|
||||
Status: account.Status(),
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
type AgentCheckInRequest struct {
|
||||
ID string `json:"account_id"`
|
||||
AccountID string `json:"cloud_account_id"`
|
||||
// Arbitrary cloud-specific agent data
|
||||
Data map[string]any `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type AgentCheckInResponse struct {
|
||||
AccountId string `json:"account_id"`
|
||||
CloudAccountId string `json:"cloud_account_id"`
|
||||
RemovedAt *time.Time `json:"removed_at"`
|
||||
|
||||
IntegrationConfig IntegrationConfigForAgent `json:"integration_config"`
|
||||
}
|
||||
|
||||
type IntegrationConfigForAgent struct {
|
||||
EnabledRegions []string `json:"enabled_regions"`
|
||||
|
||||
TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
|
||||
}
|
||||
|
||||
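For illustration only (not part of this diff), a check-in payload matching the json tags on AgentCheckInRequest above could be built like this in Go; both ids below are placeholders:

req := AgentCheckInRequest{
	ID:        "0f2e6f3c-0000-0000-0000-000000000000", // placeholder SigNoz-side account id
	AccountID: "123456789012",                         // placeholder cloud account id (e.g. an AWS account number)
	Data:      map[string]any{"agent_version": "v0.0.8"},
}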
func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest) (*AgentCheckInResponse, error) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
|
||||
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
|
||||
return nil, model.BadRequest(fmt.Errorf(
|
||||
"can't check in with new %s account id %s for account %s with existing %s id %s",
|
||||
cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
|
||||
))
|
||||
}
|
||||
|
||||
existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
|
||||
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
|
||||
return nil, model.BadRequest(fmt.Errorf(
|
||||
"can't check in to %s account %s with id %s. already connected with id %s",
|
||||
cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
|
||||
))
|
||||
}
|
||||
|
||||
agentReport := types.AgentReport{
|
||||
TimestampMillis: time.Now().UnixMilli(),
|
||||
Data: req.Data,
|
||||
}
|
||||
|
||||
account, apiErr := c.accountsRepo.upsert(
|
||||
ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
// prepare and return integration config to be consumed by agent
|
||||
compiledStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't init telemetry collection strategy: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
agentConfig := IntegrationConfigForAgent{
|
||||
EnabledRegions: []string{},
|
||||
TelemetryCollectionStrategy: compiledStrategy,
|
||||
}
|
||||
|
||||
if account.Config != nil && account.Config.EnabledRegions != nil {
|
||||
agentConfig.EnabledRegions = account.Config.EnabledRegions
|
||||
}
|
||||
|
||||
services, err := services.Map(cloudProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
|
||||
ctx, orgId, account.ID.StringValue(),
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(
|
||||
apiErr, "couldn't get service configs for cloud account",
|
||||
)
|
||||
}
|
||||
|
||||
// accumulate config in a fixed order to ensure the same config is generated across runs
|
||||
configuredServices := maps.Keys(svcConfigs)
|
||||
slices.Sort(configuredServices)
|
||||
|
||||
for _, svcType := range configuredServices {
|
||||
definition, ok := services[svcType]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
config := svcConfigs[svcType]
|
||||
|
||||
err := AddServiceStrategy(svcType, compiledStrategy, definition.Strategy, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &AgentCheckInResponse{
|
||||
AccountId: account.ID.StringValue(),
|
||||
CloudAccountId: *account.AccountID,
|
||||
RemovedAt: account.RemovedAt,
|
||||
IntegrationConfig: agentConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type UpdateAccountConfigRequest struct {
|
||||
Config types.AccountConfig `json:"config"`
|
||||
}
|
||||
|
||||
func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*types.Account, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
accountRecord, apiErr := c.accountsRepo.upsert(
|
||||
ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
account := accountRecord.Account()
|
||||
|
||||
return &account, nil
|
||||
}
|
||||
|
||||
func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*types.CloudIntegration, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
||||
}
|
||||
|
||||
tsNow := time.Now()
|
||||
account, apiErr = c.accountsRepo.upsert(
|
||||
ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
type ListServicesResponse struct {
|
||||
Services []ServiceSummary `json:"services"`
|
||||
}
|
||||
|
||||
func (c *Controller) ListServices(
|
||||
ctx context.Context,
|
||||
orgID string,
|
||||
cloudProvider string,
|
||||
cloudAccountId *string,
|
||||
) (*ListServicesResponse, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
definitions, apiErr := services.List(cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
|
||||
}
|
||||
|
||||
svcConfigs := map[string]*types.CloudServiceConfig{}
|
||||
if cloudAccountId != nil {
|
||||
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
|
||||
ctx, orgID, cloudProvider, *cloudAccountId,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't get active account")
|
||||
}
|
||||
svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
|
||||
ctx, orgID, activeAccount.ID.StringValue(),
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(
|
||||
apiErr, "couldn't get service configs for cloud account",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
summaries := []ServiceSummary{}
|
||||
for _, def := range definitions {
|
||||
summary := ServiceSummary{
|
||||
Metadata: def.Metadata,
|
||||
}
|
||||
summary.Config = svcConfigs[summary.Id]
|
||||
|
||||
summaries = append(summaries, summary)
|
||||
}
|
||||
|
||||
return &ListServicesResponse{
|
||||
Services: summaries,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Controller) GetServiceDetails(
|
||||
ctx context.Context,
|
||||
orgID string,
|
||||
cloudProvider string,
|
||||
serviceId string,
|
||||
cloudAccountId *string,
|
||||
) (*ServiceDetails, error) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
definition, err := services.GetServiceDefinition(cloudProvider, serviceId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
details := ServiceDetails{
|
||||
Definition: *definition,
|
||||
}
|
||||
|
||||
if cloudAccountId != nil {
|
||||
|
||||
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
|
||||
ctx, orgID, cloudProvider, *cloudAccountId,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't get active account")
|
||||
}
|
||||
|
||||
config, apiErr := c.serviceConfigRepo.get(
|
||||
ctx, orgID, activeAccount.ID.StringValue(), serviceId,
|
||||
)
|
||||
if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
|
||||
}
|
||||
|
||||
if config != nil {
|
||||
details.Config = config
|
||||
|
||||
enabled := false
|
||||
if config.Metrics != nil && config.Metrics.Enabled {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
// add links to service dashboards, making them clickable.
|
||||
for i, d := range definition.Assets.Dashboards {
|
||||
dashboardUuid := c.dashboardUuid(
|
||||
cloudProvider, serviceId, d.Id,
|
||||
)
|
||||
if enabled {
|
||||
definition.Assets.Dashboards[i].Url = fmt.Sprintf("/dashboard/%s", dashboardUuid)
|
||||
} else {
|
||||
definition.Assets.Dashboards[i].Url = "" // unset the in-memory URL if the service was enabled once and later disabled
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &details, nil
|
||||
}
|
||||
|
||||
type UpdateServiceConfigRequest struct {
|
||||
CloudAccountId string `json:"cloud_account_id"`
|
||||
Config types.CloudServiceConfig `json:"config"`
|
||||
}
|
||||
|
||||
func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
|
||||
if def.Id != services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", services.S3Sync)
|
||||
} else if def.Id == services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
|
||||
for region := range u.Config.Logs.S3Buckets {
|
||||
if _, found := ValidAWSRegions[region]; !found {
|
||||
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type UpdateServiceConfigResponse struct {
|
||||
Id string `json:"id"`
|
||||
Config types.CloudServiceConfig `json:"config"`
|
||||
}
|
||||
|
||||
func (c *Controller) UpdateServiceConfig(
|
||||
ctx context.Context,
|
||||
orgID string,
|
||||
cloudProvider string,
|
||||
serviceType string,
|
||||
req *UpdateServiceConfigRequest,
|
||||
) (*UpdateServiceConfigResponse, error) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
// can only update config for a valid service.
|
||||
definition, err := services.GetServiceDefinition(cloudProvider, serviceType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := req.Validate(definition); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// can only update config for a connected cloud account id
|
||||
_, apiErr := c.accountsRepo.getConnectedCloudAccount(
|
||||
ctx, orgID, cloudProvider, req.CloudAccountId,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
|
||||
}
|
||||
|
||||
updatedConfig, apiErr := c.serviceConfigRepo.upsert(
|
||||
ctx, orgID, cloudProvider, req.CloudAccountId, serviceType, req.Config,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't update service config")
|
||||
}
|
||||
|
||||
return &UpdateServiceConfigResponse{
|
||||
Id: serviceType,
|
||||
Config: *updatedConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// All dashboards that are available based on cloud integrations configuration
|
||||
// across all cloud providers
|
||||
func (c *Controller) AvailableDashboards(ctx context.Context, orgId valuer.UUID) ([]*dashboardtypes.Dashboard, *model.ApiError) {
|
||||
allDashboards := []*dashboardtypes.Dashboard{}
|
||||
|
||||
for _, provider := range []string{"aws"} {
|
||||
providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(
|
||||
apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
|
||||
)
|
||||
}
|
||||
|
||||
allDashboards = append(allDashboards, providerDashboards...)
|
||||
}
|
||||
|
||||
return allDashboards, nil
|
||||
}
|
||||
|
||||
func (c *Controller) AvailableDashboardsForCloudProvider(ctx context.Context, orgID valuer.UUID, cloudProvider string) ([]*dashboardtypes.Dashboard, *model.ApiError) {
|
||||
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID.StringValue(), cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
|
||||
}
|
||||
|
||||
// for v0, service dashboards are only available when metrics are enabled.
|
||||
servicesWithAvailableMetrics := map[string]*time.Time{}
|
||||
|
||||
for _, ar := range accountRecords {
|
||||
if ar.AccountID != nil {
|
||||
configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
|
||||
ctx, orgID.StringValue(), ar.ID.StringValue(),
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
for svcId, config := range configsBySvcId {
|
||||
if config.Metrics != nil && config.Metrics.Enabled {
|
||||
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
allServices, apiErr := services.List(cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
svcDashboards := []*dashboardtypes.Dashboard{}
|
||||
for _, svc := range allServices {
|
||||
serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
|
||||
if serviceDashboardsCreatedAt != nil {
|
||||
for _, d := range svc.Assets.Dashboards {
|
||||
author := fmt.Sprintf("%s-integration", cloudProvider)
|
||||
svcDashboards = append(svcDashboards, &dashboardtypes.Dashboard{
|
||||
ID: c.dashboardUuid(cloudProvider, svc.Id, d.Id),
|
||||
Locked: true,
|
||||
Data: *d.Definition,
|
||||
TimeAuditable: types.TimeAuditable{
|
||||
CreatedAt: *serviceDashboardsCreatedAt,
|
||||
UpdatedAt: *serviceDashboardsCreatedAt,
|
||||
},
|
||||
UserAuditable: types.UserAuditable{
|
||||
CreatedBy: author,
|
||||
UpdatedBy: author,
|
||||
},
|
||||
OrgID: orgID,
|
||||
})
|
||||
}
|
||||
servicesWithAvailableMetrics[svc.Id] = nil
|
||||
}
|
||||
}
|
||||
|
||||
return svcDashboards, nil
|
||||
}
|
||||
func (c *Controller) GetDashboardById(ctx context.Context, orgId valuer.UUID, dashboardUuid string) (*dashboardtypes.Dashboard, *model.ApiError) {
|
||||
cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list available dashboards")
|
||||
}
|
||||
|
||||
for _, d := range allDashboards {
|
||||
if d.ID == dashboardUuid {
|
||||
return d, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, model.NotFoundError(fmt.Errorf("couldn't find dashboard with uuid: %s", dashboardUuid))
|
||||
}
|
||||
|
||||
func (c *Controller) dashboardUuid(
|
||||
cloudProvider string, svcId string, dashboardId string,
|
||||
) string {
|
||||
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
|
||||
}
|
||||
|
||||
func (c *Controller) parseDashboardUuid(dashboardUuid string) (cloudProvider string, svcId string, dashboardId string, apiErr *model.ApiError) {
|
||||
parts := strings.SplitN(dashboardUuid, "--", 4)
|
||||
if len(parts) != 4 || parts[0] != "cloud-integration" {
|
||||
return "", "", "", model.BadRequest(fmt.Errorf("invalid cloud integration dashboard id"))
|
||||
}
|
||||
|
||||
return parts[1], parts[2], parts[3], nil
|
||||
}
|
||||
|
||||
func (c *Controller) IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
|
||||
_, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
|
||||
return apiErr == nil
|
||||
}
|
||||
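A minimal sketch (not part of this diff) of how the controller's dashboard ids compose and parse with the two helpers above; "rds" and "overview" are placeholder service and dashboard ids:

uuid := c.dashboardUuid("aws", "rds", "overview")
// uuid == "cloud-integration--aws--rds--overview"
provider, svcId, dashId, apiErr := c.parseDashboardUuid(uuid)
// provider == "aws", svcId == "rds", dashId == "overview", apiErr == nil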
@@ -1,814 +0,0 @@
|
||||
package implawsprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"slices"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/querier"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var (
|
||||
CodeInvalidAWSRegion = errors.MustNewCode("invalid_aws_region")
|
||||
CodeDashboardNotFound = errors.MustNewCode("dashboard_not_found")
|
||||
)
|
||||
|
||||
type awsProvider struct {
|
||||
logger *slog.Logger
|
||||
querier querier.Querier
|
||||
accountsRepo integrationstore.CloudProviderAccountsRepository
|
||||
serviceConfigRepo integrationstore.ServiceConfigDatabase
|
||||
awsServiceDefinitions *services.AWSServicesProvider
|
||||
}
|
||||
|
||||
func NewAWSCloudProvider(
|
||||
logger *slog.Logger,
|
||||
accountsRepo integrationstore.CloudProviderAccountsRepository,
|
||||
serviceConfigRepo integrationstore.ServiceConfigDatabase,
|
||||
querier querier.Querier,
|
||||
) integrationtypes.CloudProvider {
|
||||
awsServiceDefinitions, err := services.NewAWSCloudProviderServices()
|
||||
if err != nil {
|
||||
panic("failed to initialize AWS service definitions: " + err.Error())
|
||||
}
|
||||
|
||||
return &awsProvider{
|
||||
logger: logger,
|
||||
querier: querier,
|
||||
accountsRepo: accountsRepo,
|
||||
serviceConfigRepo: serviceConfigRepo,
|
||||
awsServiceDefinitions: awsServiceDefinitions,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *awsProvider) GetAccountStatus(ctx context.Context, orgID, accountID string) (*integrationtypes.GettableAccountStatus, error) {
|
||||
accountRecord, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &integrationtypes.GettableAccountStatus{
|
||||
Id: accountRecord.ID.String(),
|
||||
CloudAccountId: accountRecord.AccountID,
|
||||
Status: accountRecord.Status(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) ListConnectedAccounts(ctx context.Context, orgID string) (*integrationtypes.GettableConnectedAccountsList, error) {
|
||||
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID, a.GetName().String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectedAccounts := make([]*integrationtypes.Account, 0, len(accountRecords))
|
||||
for _, r := range accountRecords {
|
||||
connectedAccounts = append(connectedAccounts, r.Account(a.GetName()))
|
||||
}
|
||||
|
||||
return &integrationtypes.GettableConnectedAccountsList{
|
||||
Accounts: connectedAccounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) AgentCheckIn(ctx context.Context, req *integrationtypes.PostableAgentCheckInPayload) (any, error) {
|
||||
// agent can't check in unless the account is already created
|
||||
existingAccount, err := a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
|
||||
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "can't check in with new %s account id %s for account %s with existing %s id %s",
|
||||
a.GetName().String(), req.AccountID, existingAccount.ID.StringValue(), a.GetName().String(),
|
||||
*existingAccount.AccountID)
|
||||
}
|
||||
|
||||
existingAccount, err = a.accountsRepo.GetConnectedCloudAccount(ctx, req.OrgID, a.GetName().String(), req.AccountID)
|
||||
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
|
||||
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput,
|
||||
"can't check in to %s account %s with id %s. already connected with id %s",
|
||||
a.GetName().String(), req.AccountID, req.ID, existingAccount.ID.StringValue())
|
||||
}
|
||||
|
||||
agentReport := integrationtypes.AgentReport{
|
||||
TimestampMillis: time.Now().UnixMilli(),
|
||||
Data: req.Data,
|
||||
}
|
||||
|
||||
account, err := a.accountsRepo.Upsert(
|
||||
ctx, req.OrgID, a.GetName().String(), &req.ID, nil, &req.AccountID, &agentReport, nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agentConfig, err := a.getAWSAgentConfig(ctx, account)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &integrationtypes.GettableAWSAgentCheckIn{
|
||||
AccountId: account.ID.StringValue(),
|
||||
CloudAccountId: *account.AccountID,
|
||||
RemovedAt: account.RemovedAt,
|
||||
IntegrationConfig: *agentConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) getAWSAgentConfig(ctx context.Context, account *integrationtypes.CloudIntegration) (*integrationtypes.AWSAgentIntegrationConfig, error) {
|
||||
// prepare and return integration config to be consumed by agent
|
||||
agentConfig := &integrationtypes.AWSAgentIntegrationConfig{
|
||||
EnabledRegions: []string{},
|
||||
TelemetryCollectionStrategy: &integrationtypes.AWSCollectionStrategy{
|
||||
Metrics: &integrationtypes.AWSMetricsStrategy{},
|
||||
Logs: &integrationtypes.AWSLogsStrategy{},
|
||||
S3Buckets: map[string][]string{},
|
||||
},
|
||||
}
|
||||
|
||||
accountConfig := new(integrationtypes.AWSAccountConfig)
|
||||
err := integrationtypes.UnmarshalJSON([]byte(account.Config), accountConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if accountConfig != nil && accountConfig.EnabledRegions != nil {
|
||||
agentConfig.EnabledRegions = accountConfig.EnabledRegions
|
||||
}
|
||||
|
||||
svcConfigs, err := a.serviceConfigRepo.GetAllForAccount(
|
||||
ctx, account.OrgID, account.ID.StringValue(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// accumulate config in a fixed order to ensure the same config is generated across runs
|
||||
configuredServices := maps.Keys(svcConfigs)
|
||||
slices.Sort(configuredServices)
|
||||
|
||||
for _, svcType := range configuredServices {
|
||||
definition, err := a.awsServiceDefinitions.GetServiceDefinition(ctx, svcType)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
config := svcConfigs[svcType]
|
||||
|
||||
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
|
||||
err = integrationtypes.UnmarshalJSON([]byte(config), serviceConfig)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if serviceConfig.Logs != nil && serviceConfig.Logs.Enabled {
|
||||
if svcType == integrationtypes.S3Sync {
|
||||
// S3 bucket sync; No cloudwatch logs are appended for this service type;
|
||||
// Though definition is populated with a custom cloudwatch group that helps in calculating logs connection status
|
||||
agentConfig.TelemetryCollectionStrategy.S3Buckets = serviceConfig.Logs.S3Buckets
|
||||
} else if definition.Strategy.Logs != nil { // services that includes a logs subscription
|
||||
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions = append(
|
||||
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions,
|
||||
definition.Strategy.Logs.Subscriptions...,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled && definition.Strategy.Metrics != nil {
|
||||
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters = append(
|
||||
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters,
|
||||
definition.Strategy.Metrics.StreamFilters...,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return agentConfig, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) GetName() integrationtypes.CloudProviderType {
|
||||
return integrationtypes.CloudProviderAWS
|
||||
}
|
||||
|
||||
func (a *awsProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
|
||||
svcConfigs := make(map[string]*integrationtypes.AWSCloudServiceConfig)
|
||||
if cloudAccountID != nil {
|
||||
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceConfigs, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for svcType, config := range serviceConfigs {
|
||||
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
|
||||
err = integrationtypes.UnmarshalJSON([]byte(config), serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svcConfigs[svcType] = serviceConfig
|
||||
}
|
||||
}
|
||||
|
||||
summaries := make([]integrationtypes.AWSServiceSummary, 0)
|
||||
|
||||
definitions, err := a.awsServiceDefinitions.ListServiceDefinitions(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, def := range definitions {
|
||||
summary := integrationtypes.AWSServiceSummary{
|
||||
DefinitionMetadata: def.DefinitionMetadata,
|
||||
Config: nil,
|
||||
}
|
||||
|
||||
summary.Config = svcConfigs[summary.Id]
|
||||
|
||||
summaries = append(summaries, summary)
|
||||
}
|
||||
|
||||
sort.Slice(summaries, func(i, j int) bool {
|
||||
return summaries[i].DefinitionMetadata.Title < summaries[j].DefinitionMetadata.Title
|
||||
})
|
||||
|
||||
return &integrationtypes.GettableAWSServices{
|
||||
Services: summaries,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) GetServiceDetails(ctx context.Context, req *integrationtypes.GetServiceDetailsReq) (any, error) {
|
||||
details := new(integrationtypes.GettableAWSServiceDetails)
|
||||
|
||||
awsDefinition, err := a.awsServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
details.AWSDefinition = *awsDefinition
|
||||
if req.CloudAccountID == nil {
|
||||
return details, nil
|
||||
}
|
||||
|
||||
config, err := a.getServiceConfig(ctx, &details.AWSDefinition, req.OrgID, a.GetName().String(), req.ServiceId, *req.CloudAccountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
return details, nil
|
||||
}
|
||||
|
||||
details.Config = config
|
||||
|
||||
connectionStatus, err := a.getServiceConnectionStatus(
|
||||
ctx,
|
||||
*req.CloudAccountID,
|
||||
req.OrgID,
|
||||
&details.AWSDefinition,
|
||||
config,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
details.ConnectionStatus = connectionStatus
|
||||
|
||||
return details, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) getServiceConnectionStatus(
|
||||
ctx context.Context,
|
||||
cloudAccountID string,
|
||||
orgID valuer.UUID,
|
||||
def *integrationtypes.AWSDefinition,
|
||||
serviceConfig *integrationtypes.AWSCloudServiceConfig,
|
||||
) (*integrationtypes.ServiceConnectionStatus, error) {
|
||||
if def.Strategy == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
resp := new(integrationtypes.ServiceConnectionStatus)
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
if def.Strategy.Metrics != nil && serviceConfig.Metrics.Enabled {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer utils.RecoverPanic(func(err interface{}, stack []byte) {
|
||||
a.logger.ErrorContext(
|
||||
ctx, "panic while getting service metrics connection status",
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
"error", err,
|
||||
"stack", string(stack),
|
||||
)
|
||||
})
|
||||
defer wg.Done()
|
||||
status, _ := a.getServiceMetricsConnectionStatus(ctx, cloudAccountID, orgID, def)
|
||||
resp.Metrics = status
|
||||
}()
|
||||
}
|
||||
|
||||
if def.Strategy.Logs != nil && serviceConfig.Logs.Enabled {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer utils.RecoverPanic(func(err interface{}, stack []byte) {
|
||||
a.logger.ErrorContext(
|
||||
ctx, "panic while getting service logs connection status",
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
"error", err,
|
||||
"stack", string(stack),
|
||||
)
|
||||
})
|
||||
defer wg.Done()
|
||||
status, _ := a.getServiceLogsConnectionStatus(ctx, cloudAccountID, orgID, def)
|
||||
resp.Logs = status
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) getServiceMetricsConnectionStatus(
|
||||
ctx context.Context,
|
||||
cloudAccountID string,
|
||||
orgID valuer.UUID,
|
||||
def *integrationtypes.AWSDefinition,
|
||||
) ([]*integrationtypes.SignalConnectionStatus, error) {
|
||||
if def.Strategy == nil ||
|
||||
len(def.Strategy.Metrics.StreamFilters) < 1 ||
|
||||
len(def.DataCollected.Metrics) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
|
||||
|
||||
for _, metric := range def.IngestionStatusCheck.Metrics {
|
||||
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
|
||||
CategoryID: metric.Category,
|
||||
CategoryDisplayName: metric.DisplayName,
|
||||
})
|
||||
}
|
||||
|
||||
for index, category := range def.IngestionStatusCheck.Metrics {
|
||||
queries := make([]qbtypes.QueryEnvelope, 0)
|
||||
|
||||
for _, check := range category.Checks {
|
||||
filterExpression := fmt.Sprintf(`cloud.provider="aws" AND cloud.account.id="%s"`, cloudAccountID)
|
||||
f := ""
|
||||
for _, attribute := range check.Attributes {
|
||||
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
|
||||
if attribute.Value != "" {
|
||||
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
|
||||
}
|
||||
|
||||
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
|
||||
}
|
||||
|
||||
queries = append(queries, qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: valuer.GenerateUUID().String(),
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []qbtypes.MetricAggregation{{
|
||||
MetricName: check.Key,
|
||||
TimeAggregation: metrictypes.TimeAggregationAvg,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationAvg,
|
||||
}},
|
||||
Filter: &qbtypes.Filter{
|
||||
Expression: filterExpression,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
resp, err := a.querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
|
||||
SchemaVersion: "v5",
|
||||
Start: uint64(time.Now().Add(-time.Hour).UnixMilli()),
|
||||
End: uint64(time.Now().UnixMilli()),
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: queries,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
a.logger.DebugContext(ctx,
|
||||
"error querying for service metrics connection status",
|
||||
"error", err,
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if resp != nil && len(resp.Data.Results) < 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
a.logger.ErrorContext(ctx, "unexpected query response type for service metrics connection status",
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
)
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "unexpected query response type: %T", resp.Data.Results[0])
|
||||
}
|
||||
|
||||
if queryResponse == nil ||
|
||||
len(queryResponse.Aggregations) < 1 ||
|
||||
len(queryResponse.Aggregations[0].Series) < 1 ||
|
||||
len(queryResponse.Aggregations[0].Series[0].Values) < 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
statusResp[index] = &integrationtypes.SignalConnectionStatus{
|
||||
CategoryID: category.Category,
|
||||
CategoryDisplayName: category.DisplayName,
|
||||
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
|
||||
LastReceivedFrom: "signoz-aws-integration",
|
||||
}
|
||||
}
|
||||
|
||||
return statusResp, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) getServiceLogsConnectionStatus(
|
||||
ctx context.Context,
|
||||
cloudAccountID string,
|
||||
orgID valuer.UUID,
|
||||
def *integrationtypes.AWSDefinition,
|
||||
) ([]*integrationtypes.SignalConnectionStatus, error) {
|
||||
if def.Strategy == nil ||
|
||||
len(def.Strategy.Logs.Subscriptions) < 1 ||
|
||||
len(def.DataCollected.Logs) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
|
||||
|
||||
for _, log := range def.IngestionStatusCheck.Logs {
|
||||
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
|
||||
CategoryID: log.Category,
|
||||
CategoryDisplayName: log.DisplayName,
|
||||
})
|
||||
}
|
||||
|
||||
for index, category := range def.IngestionStatusCheck.Logs {
|
||||
queries := make([]qbtypes.QueryEnvelope, 0)
|
||||
|
||||
for _, check := range category.Checks {
|
||||
filterExpression := fmt.Sprintf(`cloud.account.id="%s"`, cloudAccountID)
|
||||
f := ""
|
||||
for _, attribute := range check.Attributes {
|
||||
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
|
||||
if attribute.Value != "" {
|
||||
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
|
||||
}
|
||||
|
||||
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
|
||||
}
|
||||
|
||||
queries = append(queries, qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Name: valuer.GenerateUUID().String(),
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
Aggregations: []qbtypes.LogAggregation{{
|
||||
Expression: "count()",
|
||||
}},
|
||||
Filter: &qbtypes.Filter{
|
||||
Expression: filterExpression,
|
||||
},
|
||||
Limit: 10,
|
||||
Offset: 0,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
resp, err := a.querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
|
||||
SchemaVersion: "v1",
|
||||
Start: uint64(time.Now().Add(-time.Hour * 1).UnixMilli()),
|
||||
End: uint64(time.Now().UnixMilli()),
|
||||
RequestType: qbtypes.RequestTypeTimeSeries,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: queries,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
a.logger.DebugContext(ctx,
|
||||
"error querying for service logs connection status",
|
||||
"error", err,
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if resp != nil && len(resp.Data.Results) < 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
a.logger.ErrorContext(ctx, "unexpected query response type for service logs connection status",
|
||||
"service", def.DefinitionMetadata.Id,
|
||||
)
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "unexpected query response type: %T", resp.Data.Results[0])
|
||||
}
|
||||
|
||||
if queryResponse == nil ||
|
||||
len(queryResponse.Aggregations) < 1 ||
|
||||
len(queryResponse.Aggregations[0].Series) < 1 ||
|
||||
len(queryResponse.Aggregations[0].Series[0].Values) < 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
statusResp[index] = &integrationtypes.SignalConnectionStatus{
|
||||
CategoryID: category.Category,
|
||||
CategoryDisplayName: category.DisplayName,
|
||||
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
|
||||
LastReceivedFrom: "signoz-aws-integration",
|
||||
}
|
||||
}
|
||||
|
||||
return statusResp, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) getServiceConfig(ctx context.Context,
|
||||
def *integrationtypes.AWSDefinition, orgID valuer.UUID, cloudProvider, serviceId, cloudAccountId string,
|
||||
) (*integrationtypes.AWSCloudServiceConfig, error) {
|
||||
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID.String(), cloudProvider, cloudAccountId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := a.serviceConfigRepo.Get(ctx, orgID.String(), activeAccount.ID.StringValue(), serviceId)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
|
||||
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config != nil && serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled {
|
||||
def.PopulateDashboardURLs(a.GetName(), serviceId)
|
||||
}
|
||||
|
||||
return serviceConfig, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
|
||||
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID.StringValue(), a.GetName().String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// for now service dashboards are only available when metrics are enabled.
|
||||
servicesWithAvailableMetrics := map[string]*time.Time{}
|
||||
|
||||
for _, ar := range accountRecords {
|
||||
if ar.AccountID != nil {
|
||||
configsBySvcId, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID.StringValue(), ar.ID.StringValue())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for svcId, config := range configsBySvcId {
|
||||
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
|
||||
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled {
|
||||
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
svcDashboards := make([]*dashboardtypes.Dashboard, 0)
|
||||
|
||||
allServices, err := a.awsServiceDefinitions.ListServiceDefinitions(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list aws service definitions")
|
||||
}
|
||||
|
||||
for _, svc := range allServices {
|
||||
serviceDashboardsCreatedAt, ok := servicesWithAvailableMetrics[svc.Id]
|
||||
if ok {
|
||||
svcDashboards = append(
|
||||
svcDashboards,
|
||||
integrationtypes.GetDashboardsFromAssets(svc.Id, orgID, a.GetName(), serviceDashboardsCreatedAt, svc.Assets)...,
|
||||
)
|
||||
servicesWithAvailableMetrics[svc.Id] = nil
|
||||
}
|
||||
}
|
||||
|
||||
return svcDashboards, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) GetDashboard(ctx context.Context, req *integrationtypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
|
||||
allDashboards, err := a.GetAvailableDashboards(ctx, req.OrgID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, d := range allDashboards {
|
||||
if d.ID == req.ID {
|
||||
return d, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.NewNotFoundf(CodeDashboardNotFound, "dashboard with id %s not found", req.ID)
|
||||
}
|
||||
|
||||
func (a *awsProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationtypes.PostableConnectionArtifact) (any, error) {
|
||||
connection := new(integrationtypes.PostableAWSConnectionUrl)
|
||||
|
||||
err := integrationtypes.UnmarshalJSON(req.Data, connection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if connection.AccountConfig != nil {
|
||||
for _, region := range connection.AccountConfig.EnabledRegions {
|
||||
if integrationtypes.ValidAWSRegions[region] {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
|
||||
}
|
||||
}
|
||||
|
||||
config, err := integrationtypes.MarshalJSON(connection.AccountConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
account, err := a.accountsRepo.Upsert(
|
||||
ctx, req.OrgID, integrationtypes.CloudProviderAWS.String(), nil, config,
|
||||
nil, nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agentVersion := "v0.0.8"
|
||||
if connection.AgentConfig.Version != "" {
|
||||
agentVersion = connection.AgentConfig.Version
|
||||
}
|
||||
|
||||
baseURL := fmt.Sprintf("https://%s.console.aws.amazon.com/cloudformation/home",
|
||||
connection.AgentConfig.Region)
|
||||
u, _ := url.Parse(baseURL)
|
||||
|
||||
q := u.Query()
|
||||
q.Set("region", connection.AgentConfig.Region)
|
||||
u.Fragment = "/stacks/quickcreate"
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
q = u.Query()
|
||||
q.Set("stackName", "signoz-integration")
|
||||
q.Set("templateURL", fmt.Sprintf("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json", agentVersion))
|
||||
q.Set("param_SigNozIntegrationAgentVersion", agentVersion)
|
||||
q.Set("param_SigNozApiUrl", connection.AgentConfig.SigNozAPIUrl)
|
||||
q.Set("param_SigNozApiKey", connection.AgentConfig.SigNozAPIKey)
|
||||
q.Set("param_SigNozAccountId", account.ID.StringValue())
|
||||
q.Set("param_IngestionUrl", connection.AgentConfig.IngestionUrl)
|
||||
q.Set("param_IngestionKey", connection.AgentConfig.IngestionKey)
|
||||
|
||||
return &integrationtypes.GettableAWSConnectionUrl{
|
||||
AccountId: account.ID.StringValue(),
|
||||
ConnectionUrl: u.String() + "?&" + q.Encode(), // this format is required by AWS
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) UpdateServiceConfig(ctx context.Context, req *integrationtypes.PatchableServiceConfig) (any, error) {
|
||||
definition, err := a.awsServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceConfig := new(integrationtypes.UpdatableAWSCloudServiceConfig)
|
||||
err = integrationtypes.UnmarshalJSON(req.Config, serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = serviceConfig.Config.Validate(definition); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// can only update config for a connected cloud account id
|
||||
_, err = a.accountsRepo.GetConnectedCloudAccount(
|
||||
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceConfigBytes, err := integrationtypes.MarshalJSON(serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updatedConfig, err := a.serviceConfigRepo.Upsert(
|
||||
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId, req.ServiceId, serviceConfigBytes,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = integrationtypes.UnmarshalJSON(updatedConfig, serviceConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &integrationtypes.PatchServiceConfigResponse{
|
||||
ServiceId: req.ServiceId,
|
||||
Config: serviceConfig.Config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) UpdateAccountConfig(ctx context.Context, req *integrationtypes.PatchableAccountConfig) (any, error) {
|
||||
config := new(integrationtypes.PatchableAWSAccountConfig)
|
||||
|
||||
err := integrationtypes.UnmarshalJSON(req.Data, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.Config == nil {
|
||||
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "account config can't be null")
|
||||
}
|
||||
|
||||
for _, region := range config.Config.EnabledRegions {
|
||||
if integrationtypes.ValidAWSRegions[region] {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
|
||||
}
|
||||
|
||||
configBytes, err := integrationtypes.MarshalJSON(config.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// account must exist to update config, but it doesn't need to be connected
|
||||
_, err = a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.AccountId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
accountRecord, err := a.accountsRepo.Upsert(
|
||||
ctx, req.OrgID, a.GetName().String(), &req.AccountId, configBytes, nil, nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return accountRecord.Account(a.GetName()), nil
|
||||
}
|
||||
|
||||
func (a *awsProvider) DisconnectAccount(ctx context.Context, orgID, accountID string) (*integrationtypes.CloudIntegration, error) {
|
||||
account, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tsNow := time.Now()
|
||||
account, err = a.accountsRepo.Upsert(
|
||||
ctx, orgID, a.GetName().String(), &accountID, nil, nil, nil, &tsNow,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
@@ -1 +1,94 @@
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
)
|
||||
|
||||
type ServiceSummary struct {
|
||||
services.Metadata
|
||||
|
||||
Config *types.CloudServiceConfig `json:"config"`
|
||||
}
|
||||
|
||||
type ServiceDetails struct {
|
||||
services.Definition
|
||||
|
||||
Config *types.CloudServiceConfig `json:"config"`
|
||||
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
type AccountStatus struct {
|
||||
Integration AccountIntegrationStatus `json:"integration"`
|
||||
}
|
||||
|
||||
type AccountIntegrationStatus struct {
|
||||
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
|
||||
}
|
||||
|
||||
type LogsConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
|
||||
}
|
||||
|
||||
type MetricsConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
type ServiceConnectionStatus struct {
|
||||
Logs *SignalConnectionStatus `json:"logs"`
|
||||
Metrics *SignalConnectionStatus `json:"metrics"`
|
||||
}
|
||||
|
||||
type SignalConnectionStatus struct {
|
||||
LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
|
||||
LastReceivedFrom string `json:"last_received_from"` // resource identifier
|
||||
}
|
||||
|
||||
type CompiledCollectionStrategy = services.CollectionStrategy
|
||||
|
||||
func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
|
||||
if provider == "aws" {
|
||||
return &CompiledCollectionStrategy{
|
||||
Provider: "aws",
|
||||
AWSMetrics: &services.AWSMetricsStrategy{},
|
||||
AWSLogs: &services.AWSLogsStrategy{},
|
||||
}, nil
|
||||
}
|
||||
return nil, errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", provider)
|
||||
}
|
||||
|
||||
// Helper for accumulating strategies for enabled services.
|
||||
func AddServiceStrategy(serviceType string, cs *CompiledCollectionStrategy,
|
||||
definitionStrat *services.CollectionStrategy, config *types.CloudServiceConfig) error {
|
||||
if definitionStrat.Provider != cs.Provider {
|
||||
return errors.NewInternalf(CodeMismatchCloudProvider, "can't add %s service strategy to compiled strategy for %s",
|
||||
definitionStrat.Provider, cs.Provider)
|
||||
}
|
||||
|
||||
if cs.Provider == "aws" {
|
||||
if config.Logs != nil && config.Logs.Enabled {
|
||||
if serviceType == services.S3Sync {
|
||||
// S3 bucket sync: no CloudWatch logs are appended for this service type,
// though the definition is populated with a custom CloudWatch group that helps in calculating the logs connection status
|
||||
cs.S3Buckets = config.Logs.S3Buckets
|
||||
} else if definitionStrat.AWSLogs != nil { // services that include a logs subscription
|
||||
cs.AWSLogs.Subscriptions = append(
|
||||
cs.AWSLogs.Subscriptions,
|
||||
definitionStrat.AWSLogs.Subscriptions...,
|
||||
)
|
||||
}
|
||||
}
|
||||
if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
|
||||
cs.AWSMetrics.StreamFilters = append(
|
||||
cs.AWSMetrics.StreamFilters,
|
||||
definitionStrat.AWSMetrics.StreamFilters...,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cs.Provider)
|
||||
}
|
||||
|
||||
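For orientation, here is a minimal sketch (not part of this diff) of how the two helpers above are intended to compose: a caller builds one compiled strategy per connected account and folds in the strategy of every enabled service. The helper name compileFor, and the assumption that the caller already holds the service definition and its per-service config, are illustrative only; the imports match the file above.

// compileFor is a hypothetical helper showing the intended call pattern for
// NewCompiledCollectionStrategy and AddServiceStrategy above.
func compileFor(def *services.Definition, cfg *types.CloudServiceConfig) (*CompiledCollectionStrategy, error) {
	// Start from an empty compiled strategy for the provider.
	compiled, err := NewCompiledCollectionStrategy("aws")
	if err != nil {
		return nil, err
	}

	// Append this service's CloudWatch metric stream filters and log
	// subscriptions (or S3 buckets for the s3sync service type) to `compiled`.
	if err := AddServiceStrategy(def.Id, compiled, def.Strategy, cfg); err != nil {
		return nil, err
	}

	return compiled, nil
}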
@@ -1,27 +0,0 @@
package cloudintegrations

import (
	"log/slog"

	"github.com/SigNoz/signoz/pkg/querier"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implawsprovider"
	integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
)

func NewCloudProviderRegistry(
	logger *slog.Logger,
	store sqlstore.SQLStore,
	querier querier.Querier,
) map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider {
	registry := make(map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider)

	accountsRepo := integrationstore.NewCloudProviderAccountsRepository(store)
	serviceConfigRepo := integrationstore.NewServiceConfigRepository(store)

	awsProviderImpl := implawsprovider.NewAWSCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
	registry[integrationtypes.CloudProviderAWS] = awsProviderImpl

	return registry
}
@@ -1,63 +1,64 @@
package store
package cloudintegrations

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

var (
	CodeServiceConfigNotFound = errors.MustNewCode("service_config_not_found")
)

type ServiceConfigDatabase interface {
	Get(
	get(
		ctx context.Context,
		orgID string,
		cloudAccountId string,
		serviceType string,
	) ([]byte, error)
	) (*types.CloudServiceConfig, *model.ApiError)

	Upsert(
	upsert(
		ctx context.Context,
		orgID string,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
		config []byte,
	) ([]byte, error)
		config types.CloudServiceConfig,
	) (*types.CloudServiceConfig, *model.ApiError)

	GetAllForAccount(
	getAllForAccount(
		ctx context.Context,
		orgID string,
		cloudAccountId string,
	) (
		map[string][]byte,
		error,
		configsBySvcId map[string]*types.CloudServiceConfig,
		apiErr *model.ApiError,
	)
}

func NewServiceConfigRepository(store sqlstore.SQLStore) ServiceConfigDatabase {
	return &serviceConfigSQLRepository{store: store}
func newServiceConfigRepository(store sqlstore.SQLStore) (
	*serviceConfigSQLRepository, error,
) {
	return &serviceConfigSQLRepository{
		store: store,
	}, nil
}

type serviceConfigSQLRepository struct {
	store sqlstore.SQLStore
}

func (r *serviceConfigSQLRepository) Get(
func (r *serviceConfigSQLRepository) get(
	ctx context.Context,
	orgID string,
	cloudAccountId string,
	serviceType string,
) ([]byte, error) {
	var result integrationtypes.CloudIntegrationService
) (*types.CloudServiceConfig, *model.ApiError) {

	var result types.CloudIntegrationService

	err := r.store.BunDB().NewSelect().
		Model(&result).
@@ -66,30 +67,36 @@ func (r *serviceConfigSQLRepository) Get(
		Where("ci.id = ?", cloudAccountId).
		Where("cis.type = ?", serviceType).
		Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, errors.WrapNotFoundf(err, CodeServiceConfigNotFound, "couldn't find config for cloud account %s", cloudAccountId)
		}

		return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud service config")
	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
			"couldn't find config for cloud account %s",
			cloudAccountId,
		))
	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query cloud service config: %w", err,
		))
	}

	return []byte(result.Config), nil
	return &result.Config, nil

}

func (r *serviceConfigSQLRepository) Upsert(
func (r *serviceConfigSQLRepository) upsert(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
	config []byte,
) ([]byte, error) {
	config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError) {

	// get cloud integration id from account id
	// if the account is not connected, we don't need to upsert the config
	var cloudIntegrationId string
	err := r.store.BunDB().NewSelect().
		Model((*integrationtypes.CloudIntegration)(nil)).
		Model((*types.CloudIntegration)(nil)).
		Column("id").
		Where("provider = ?", cloudProvider).
		Where("account_id = ?", cloudAccountId).
@@ -97,24 +104,20 @@ func (r *serviceConfigSQLRepository) Upsert(
		Where("removed_at is NULL").
		Where("last_agent_report is not NULL").
		Scan(ctx, &cloudIntegrationId)

	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, errors.WrapNotFoundf(
				err,
				CodeCloudIntegrationAccountNotFound,
				"couldn't find active cloud integration account",
			)
		}
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud integration id")
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query cloud integration id: %w", err,
		))
	}

	serviceConfig := integrationtypes.CloudIntegrationService{
	serviceConfig := types.CloudIntegrationService{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		Config: string(config),
		Config: config,
		Type: serviceId,
		CloudIntegrationID: cloudIntegrationId,
	}
@@ -123,18 +126,21 @@ func (r *serviceConfigSQLRepository) Upsert(
		On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
		Exec(ctx)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud service config")
		return nil, model.InternalError(fmt.Errorf(
			"could not upsert cloud service config: %w", err,
		))
	}

	return config, nil
	return &serviceConfig.Config, nil

}

func (r *serviceConfigSQLRepository) GetAllForAccount(
func (r *serviceConfigSQLRepository) getAllForAccount(
	ctx context.Context,
	orgID string,
	cloudAccountId string,
) (map[string][]byte, error) {
	var serviceConfigs []integrationtypes.CloudIntegrationService
) (map[string]*types.CloudServiceConfig, *model.ApiError) {
	serviceConfigs := []types.CloudIntegrationService{}

	err := r.store.BunDB().NewSelect().
		Model(&serviceConfigs).
@@ -143,13 +149,15 @@ func (r *serviceConfigSQLRepository) GetAllForAccount(
		Where("ci.org_id = ?", orgID).
		Scan(ctx)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query service configs from db")
		return nil, model.InternalError(fmt.Errorf(
			"could not query service configs from db: %w", err,
		))
	}

	result := make(map[string][]byte)
	result := map[string]*types.CloudServiceConfig{}

	for _, r := range serviceConfigs {
		result[r.Type] = []byte(r.Config)
		result[r.Type] = &r.Config
	}

	return result, nil
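A rough usage sketch of the exported shape of this repository (the Get/Upsert/GetAllForAccount variant visible above, which works with raw []byte configs): the account id, service type, and JSON payload below are placeholders for illustration and are not taken from this change; the imports match the file above.

// storeExampleConfig is a hypothetical helper showing how a caller might use
// the exported ServiceConfigDatabase interface.
func storeExampleConfig(ctx context.Context, db sqlstore.SQLStore, orgID, cloudAccountID string) error {
	repo := NewServiceConfigRepository(db)

	// Persist a raw JSON config blob for one service of a connected account.
	if _, err := repo.Upsert(ctx, orgID, "aws", cloudAccountID, "rds",
		[]byte(`{"metrics":{"enabled":true}}`)); err != nil {
		return err
	}

	// Read back every service config stored for the account, keyed by service type.
	configs, err := repo.GetAllForAccount(ctx, orgID, cloudAccountID)
	if err != nil {
		return err
	}
	for svcType, raw := range configs {
		fmt.Printf("%s: %s\n", svcType, raw)
	}
	return nil
}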
@@ -7,24 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ApplicationELB_ConsumedLCUs_count",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_ApplicationELB_ProcessedBytes_sum",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
|
||||
@@ -7,75 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": true
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "rest_api",
|
||||
"display_name": "REST API Metrics",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ApiGateway_Count_count",
|
||||
"attributes": [
|
||||
{
|
||||
"name": "ApiName",
|
||||
"operator": "EXISTS",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"category": "http_api",
|
||||
"display_name": "HTTP API Metrics",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ApiGateway_Count_count",
|
||||
"attributes": [
|
||||
{
|
||||
"name": "ApiId",
|
||||
"operator": "EXISTS",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"category": "websocket_api",
|
||||
"display_name": "Websocket API Metrics",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ApiGateway_Count_count",
|
||||
"attributes": [
|
||||
{
|
||||
"name": "ApiId",
|
||||
"operator": "EXISTS",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"logs": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"attributes": [
|
||||
{
|
||||
"name": "aws.cloudwatch.log_group_name",
|
||||
"operator": "ILIKE",
|
||||
"value": "API-Gateway%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -217,146 +148,6 @@
|
||||
"name": "aws_ApiGateway_Latency_sum",
|
||||
"unit": "Milliseconds",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_4xx_sum",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_4xx_max",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_4xx_min",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_4xx_count",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_5xx_sum",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_5xx_max",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_5xx_min",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_5xx_count",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_DataProcessed_sum",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_DataProcessed_max",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_DataProcessed_min",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_DataProcessed_count",
|
||||
"unit": "Bytes",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ExecutionError_sum",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ExecutionError_max",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ExecutionError_min",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ExecutionError_count",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ClientError_sum",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ClientError_max",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ClientError_min",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ClientError_count",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_IntegrationError_sum",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_IntegrationError_max",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_IntegrationError_min",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_IntegrationError_count",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ConnectCount_sum",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ConnectCount_max",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ConnectCount_min",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
},
|
||||
{
|
||||
"name": "aws_ApiGateway_ConnectCount_count",
|
||||
"unit": "Count",
|
||||
"type": "Gauge"
|
||||
}
|
||||
],
|
||||
"logs": [
|
||||
|
||||
@@ -7,24 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_DynamoDB_AccountMaxReads_max",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_DynamoDB_AccountProvisionedReadCapacityUtilization_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -409,4 +391,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,24 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_EC2_CPUUtilization_max",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_EC2_NetworkIn_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -533,4 +515,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,81 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": true
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "overview",
|
||||
"display_name": "Overview",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ECS_CPUUtilization_max",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_ECS_MemoryUtilization_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"category": "containerinsights",
|
||||
"display_name": "Container Insights",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ECS_ContainerInsights_NetworkRxBytes_max",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_ECS_ContainerInsights_StorageReadBytes_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"category": "enhanced_containerinsights",
|
||||
"display_name": "Enhanced Container Insights",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ECS_ContainerInsights_ContainerCpuUtilization_max",
|
||||
"attributes": [
|
||||
{
|
||||
"name": "TaskId",
|
||||
"operator": "EXISTS",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"key": "aws_ECS_ContainerInsights_TaskMemoryUtilization_max",
|
||||
"attributes": [
|
||||
{
|
||||
"name": "TaskId",
|
||||
"operator": "EXISTS",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"logs": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"attributes": [
|
||||
{
|
||||
"name": "aws.cloudwatch.log_group_name",
|
||||
"operator": "ILIKE",
|
||||
"value": "%/ecs/%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
|
||||
File diff suppressed because it is too large
@@ -7,20 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_ElastiCache_CacheHitRate_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics":[
|
||||
{
|
||||
@@ -1942,7 +1928,7 @@
|
||||
"unit": "Percent",
|
||||
"type": "Gauge",
|
||||
"description": ""
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"telemetry_collection_strategy": {
|
||||
@@ -1965,4 +1951,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,37 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": true
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_Lambda_Invocations_sum",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"logs": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"attributes": [
|
||||
{
|
||||
"name": "aws.cloudwatch.log_group_name",
|
||||
"operator": "ILIKE",
|
||||
"value": "/aws/lambda%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
|
||||
@@ -7,20 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_Kafka_KafkaDataLogsDiskUsed_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -1102,3 +1088,4 @@
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,37 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": true
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_RDS_CPUUtilization_max",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"logs": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"attributes": [
|
||||
{
|
||||
"name": "resources.aws.cloudwatch.log_group_name",
|
||||
"operator": "ILIKE",
|
||||
"value": "/aws/rds%"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -831,4 +800,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,20 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_SNS_NumberOfMessagesPublished_sum",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -141,4 +127,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,24 +7,6 @@
|
||||
"metrics": true,
|
||||
"logs": false
|
||||
},
|
||||
"ingestion_status_check": {
|
||||
"metrics": [
|
||||
{
|
||||
"category": "$default",
|
||||
"display_name": "Default",
|
||||
"checks": [
|
||||
{
|
||||
"key": "aws_SQS_SentMessageSize_max",
|
||||
"attributes": []
|
||||
},
|
||||
{
|
||||
"key": "aws_SQS_NumberOfMessagesSent_sum",
|
||||
"attributes": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_collected": {
|
||||
"metrics": [
|
||||
{
|
||||
@@ -265,4 +247,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
pkg/query-service/app/cloudintegrations/services/models.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package services

import (
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
)

type Metadata struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`
}

type Definition struct {
	Metadata

	Overview string `json:"overview"` // markdown

	Assets Assets `json:"assets"`

	SupportedSignals SupportedSignals `json:"supported_signals"`

	DataCollected DataCollected `json:"data_collected"`

	Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
}

type Assets struct {
	Dashboards []Dashboard `json:"dashboards"`
}

type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

type DataCollected struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

type CollectionStrategy struct {
	Provider string `json:"provider"`

	AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
	AWSLogs    *AWSLogsStrategy    `json:"aws_logs,omitempty"`
	S3Buckets  map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type
}

type AWSMetricsStrategy struct {
	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	StreamFilters []struct {
		// json tags here are in the shape expected by AWS API as detailed at
		// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
		Namespace   string   `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatch_metric_stream_filters"`
}

type AWSLogsStrategy struct {
	Subscriptions []struct {
		// subscribe to all log groups with the specified prefix.
		// eg: `/aws/rds/`
		LogGroupNamePrefix string `json:"log_group_name_prefix"`

		// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
		// "" implies no filtering is required.
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

type Dashboard struct {
	Id          string                                `json:"id"`
	Url         string                                `json:"url"`
	Title       string                                `json:"title"`
	Description string                                `json:"description"`
	Image       string                                `json:"image"`
	Definition  *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
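To make the JSON shape implied by these struct tags concrete, here is a small standalone sketch that decodes a telemetry_collection_strategy fragment into services.CollectionStrategy; the namespace, log group prefix, and filter values are invented for illustration and are not taken from any definition in this change.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
)

func main() {
	// Field names follow the json tags on services.CollectionStrategy above;
	// the values are placeholders.
	raw := []byte(`{
		"provider": "aws",
		"aws_metrics": {
			"cloudwatch_metric_stream_filters": [
				{"Namespace": "AWS/RDS"}
			]
		},
		"aws_logs": {
			"cloudwatch_logs_subscriptions": [
				{"log_group_name_prefix": "/aws/rds/", "filter_pattern": ""}
			]
		}
	}`)

	var strategy services.CollectionStrategy
	if err := json.Unmarshal(raw, &strategy); err != nil {
		panic(err)
	}
	fmt.Println(strategy.Provider, len(strategy.AWSMetrics.StreamFilters), len(strategy.AWSLogs.Subscriptions))
}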
@@ -2,90 +2,128 @@ package services
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
koanfJson "github.com/knadh/koanf/parsers/json"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
const (
|
||||
S3Sync = "s3sync"
|
||||
)
|
||||
|
||||
var (
|
||||
	CodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_found")
|
||||
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
|
||||
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
|
||||
)
|
||||
|
||||
type (
|
||||
AWSServicesProvider struct {
|
||||
definitions map[string]*integrationtypes.AWSDefinition
|
||||
func List(cloudProvider string) ([]Definition, *model.ApiError) {
|
||||
cloudServices, found := supportedServices[cloudProvider]
|
||||
if !found || cloudServices == nil {
|
||||
return nil, model.NotFoundError(fmt.Errorf(
|
||||
"unsupported cloud provider: %s", cloudProvider,
|
||||
))
|
||||
}
|
||||
)
|
||||
|
||||
func (a *AWSServicesProvider) ListServiceDefinitions(ctx context.Context) (map[string]*integrationtypes.AWSDefinition, error) {
|
||||
return a.definitions, nil
|
||||
services := maps.Values(cloudServices)
|
||||
sort.Slice(services, func(i, j int) bool {
|
||||
return services[i].Id < services[j].Id
|
||||
})
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func (a *AWSServicesProvider) GetServiceDefinition(ctx context.Context, serviceName string) (*integrationtypes.AWSDefinition, error) {
|
||||
def, ok := a.definitions[serviceName]
|
||||
if !ok {
|
||||
return nil, errors.NewNotFoundf(CodeServiceDefinitionNotFound, "aws service definition not found: %s", serviceName)
|
||||
}
|
||||
|
||||
return def, nil
|
||||
}
|
||||
|
||||
func NewAWSCloudProviderServices() (*AWSServicesProvider, error) {
|
||||
definitions, err := readAllServiceDefinitions(integrationtypes.CloudProviderAWS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceDefinitions := make(map[string]*integrationtypes.AWSDefinition)
|
||||
for id, def := range definitions {
|
||||
typedDef, ok := def.(*integrationtypes.AWSDefinition)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid type for AWS service definition %s", id)
|
||||
}
|
||||
serviceDefinitions[id] = typedDef
|
||||
}
|
||||
|
||||
return &AWSServicesProvider{
|
||||
definitions: serviceDefinitions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//go:embed definitions/*
|
||||
var definitionFiles embed.FS
|
||||
|
||||
func readAllServiceDefinitions(cloudProvider valuer.String) (map[string]any, error) {
|
||||
rootDirName := "definitions"
|
||||
|
||||
cloudProviderDirPath := path.Join(rootDirName, cloudProvider.String())
|
||||
|
||||
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(cloudServices) < 1 {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "no service definitions found in %s", cloudProviderDirPath)
|
||||
func Map(cloudProvider string) (map[string]Definition, error) {
|
||||
cloudServices, found := supportedServices[cloudProvider]
|
||||
if !found || cloudServices == nil {
|
||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
|
||||
}
|
||||
|
||||
return cloudServices, nil
|
||||
}
|
||||
|
||||
func readServiceDefinitionsFromDir(cloudProvider valuer.String, cloudProviderDirPath string) (map[string]any, error) {
|
||||
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't list integrations dirs")
|
||||
func GetServiceDefinition(cloudProvider, serviceType string) (*Definition, error) {
|
||||
cloudServices := supportedServices[cloudProvider]
|
||||
if cloudServices == nil {
|
||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
|
||||
}
|
||||
|
||||
svcDefs := make(map[string]any)
|
||||
svc, exists := cloudServices[serviceType]
|
||||
if !exists {
|
||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedServiceType, "%s service not found: %s", cloudProvider, serviceType)
|
||||
}
|
||||
|
||||
return &svc, nil
|
||||
}
|
||||
|
||||
// End of API. Logic for reading service definition files follows
|
||||
|
||||
// Service details read from ./serviceDefinitions
|
||||
// { "providerName": { "service_id": {...}} }
|
||||
var supportedServices map[string]map[string]Definition
|
||||
|
||||
func init() {
|
||||
err := readAllServiceDefinitions()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf(
|
||||
"couldn't read cloud service definitions: %w", err,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
//go:embed definitions/*
|
||||
var definitionFiles embed.FS
|
||||
|
||||
func readAllServiceDefinitions() error {
|
||||
supportedServices = map[string]map[string]Definition{}
|
||||
|
||||
rootDirName := "definitions"
|
||||
|
||||
cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
|
||||
}
|
||||
|
||||
for _, d := range cloudProviderDirs {
|
||||
if !d.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
cloudProvider := d.Name()
|
||||
|
||||
cloudProviderDirPath := path.Join(rootDirName, cloudProvider)
|
||||
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't read %s service definitions: %w", cloudProvider, err)
|
||||
}
|
||||
|
||||
if len(cloudServices) < 1 {
|
||||
return fmt.Errorf("no %s services could be read", cloudProvider)
|
||||
}
|
||||
|
||||
supportedServices[cloudProvider] = cloudServices
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
|
||||
map[string]Definition, error,
|
||||
) {
|
||||
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
|
||||
}
|
||||
|
||||
svcDefs := map[string]Definition{}
|
||||
|
||||
for _, d := range svcDefDirs {
|
||||
if !d.IsDir() {
|
||||
@@ -95,71 +133,103 @@ func readServiceDefinitionsFromDir(cloudProvider valuer.String, cloudProviderDir
|
||||
svcDirPath := path.Join(cloudProviderDirPath, d.Name())
|
||||
s, err := readServiceDefinition(cloudProvider, svcDirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("couldn't read svc definition for %s: %w", d.Name(), err)
|
||||
}
|
||||
|
||||
_, exists := svcDefs[s.GetId()]
|
||||
_, exists := svcDefs[s.Id]
|
||||
if exists {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "duplicate service definition for id %s at %s", s.GetId(), d.Name())
|
||||
return nil, fmt.Errorf(
|
||||
"duplicate service definition for id %s at %s", s.Id, d.Name(),
|
||||
)
|
||||
}
|
||||
svcDefs[s.GetId()] = s
|
||||
svcDefs[s.Id] = *s
|
||||
}
|
||||
|
||||
return svcDefs, nil
|
||||
}
|
||||
|
||||
func readServiceDefinition(cloudProvider valuer.String, svcDirpath string) (integrationtypes.Definition, error) {
|
||||
func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
|
||||
integrationJsonPath := path.Join(svcDirpath, "integration.json")
|
||||
|
||||
serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read integration definition in %s", svcDirpath)
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't find integration.json in %s: %w",
|
||||
svcDirpath, err,
|
||||
)
|
||||
}
|
||||
|
||||
integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse integration definition in %s", svcDirpath)
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't parse integration.json from %s: %w",
|
||||
integrationJsonPath, err,
|
||||
)
|
||||
}
|
||||
|
||||
hydrated, err := integrations.HydrateFileUris(integrationSpec, definitionFiles, svcDirpath)
|
||||
hydrated, err := integrations.HydrateFileUris(
|
||||
integrationSpec, definitionFiles, svcDirpath,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't hydrate integration definition in %s", svcDirpath)
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't hydrate files referenced in service definition %s: %w",
|
||||
integrationJsonPath, err,
|
||||
)
|
||||
}
|
||||
hydratedSpec := hydrated.(map[string]any)
|
||||
|
||||
var serviceDef integrationtypes.Definition
|
||||
|
||||
switch cloudProvider {
|
||||
case integrationtypes.CloudProviderAWS:
|
||||
serviceDef = &integrationtypes.AWSDefinition{}
|
||||
default:
|
||||
// ideally this shouldn't happen hence throwing internal error
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "unsupported cloud provider: %s", cloudProvider)
|
||||
serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't parse hydrated JSON spec read from %s: %w",
|
||||
integrationJsonPath, err,
|
||||
)
|
||||
}
|
||||
|
||||
err = parseStructWithJsonTagsFromMap(hydratedSpec, serviceDef)
|
||||
err = validateServiceDefinition(serviceDef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = serviceDef.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
|
||||
}
|
||||
|
||||
serviceDef.Strategy.Provider = cloudProvider
|
||||
|
||||
return serviceDef, nil
|
||||
|
||||
}
|
||||
|
||||
func parseStructWithJsonTagsFromMap(data map[string]any, target interface{}) error {
|
||||
mapJson, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal service definition json data")
|
||||
func validateServiceDefinition(s *Definition) error {
|
||||
// Validate dashboard data
|
||||
seenDashboardIds := map[string]interface{}{}
|
||||
for _, dd := range s.Assets.Dashboards {
|
||||
if _, seen := seenDashboardIds[dd.Id]; seen {
|
||||
return fmt.Errorf("multiple dashboards found with id %s", dd.Id)
|
||||
}
|
||||
seenDashboardIds[dd.Id] = nil
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(bytes.NewReader(mapJson))
|
||||
decoder.DisallowUnknownFields()
|
||||
err = decoder.Decode(target)
|
||||
if err != nil {
|
||||
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal service definition json data")
|
||||
if s.Strategy == nil {
|
||||
return fmt.Errorf("telemetry_collection_strategy is required")
|
||||
}
|
||||
|
||||
// potentially more to follow
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ParseStructWithJsonTagsFromMap[StructType any](data map[string]any) (
|
||||
*StructType, error,
|
||||
) {
|
||||
mapJson, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't marshal map to json: %w", err)
|
||||
}
|
||||
|
||||
var res StructType
|
||||
decoder := json.NewDecoder(bytes.NewReader(mapJson))
|
||||
decoder.DisallowUnknownFields()
|
||||
err = decoder.Decode(&res)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't unmarshal json back to struct: %w", err)
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -1,3 +1,35 @@
package services

// TODO: add more tests for services package
import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/stretchr/testify/require"
)

func TestAvailableServices(t *testing.T) {
	require := require.New(t)

	// should be able to list available services.
	_, apiErr := List("bad-cloud-provider")
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	awsSvcs, apiErr := List("aws")
	require.Nil(apiErr)
	require.Greater(len(awsSvcs), 0)

	// should be able to get details of a service
	_, err := GetServiceDefinition(
		"aws", "bad-service-id",
	)
	require.NotNil(err)
	require.True(errors.Ast(err, errors.TypeNotFound))

	svc, err := GetServiceDefinition(
		"aws", awsSvcs[0].Id,
	)
	require.Nil(err)
	require.Equal(*svc, awsSvcs[0])
}
@@ -6,7 +6,11 @@ import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
|
||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||
|
||||
"io"
|
||||
"math"
|
||||
@@ -21,19 +25,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager"
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/http/middleware"
|
||||
"github.com/SigNoz/signoz/pkg/http/render"
|
||||
"github.com/SigNoz/signoz/pkg/licensing"
|
||||
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
|
||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||
"github.com/SigNoz/signoz/pkg/signoz"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
|
||||
@@ -45,6 +44,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/contextlinks"
|
||||
traceFunnelsModule "github.com/SigNoz/signoz/pkg/modules/tracefunnel"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
|
||||
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
|
||||
@@ -111,7 +111,7 @@ type APIHandler struct {
|
||||
|
||||
IntegrationsController *integrations.Controller
|
||||
|
||||
cloudIntegrationsRegistry map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider
|
||||
CloudIntegrationsController *cloudintegrations.Controller
|
||||
|
||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||
|
||||
@@ -158,6 +158,9 @@ type APIHandlerOpts struct {
|
||||
// Integrations
|
||||
IntegrationsController *integrations.Controller
|
||||
|
||||
// Cloud Provider Integrations
|
||||
CloudIntegrationsController *cloudintegrations.Controller
|
||||
|
||||
// Log parsing pipelines
|
||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||
|
||||
@@ -171,8 +174,6 @@ type APIHandlerOpts struct {
|
||||
QueryParserAPI *queryparser.API
|
||||
|
||||
Signoz *signoz.SigNoz
|
||||
|
||||
Logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewAPIHandler returns an APIHandler
|
||||
@@ -208,18 +209,12 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
|
||||
summaryService := metricsexplorer.NewSummaryService(opts.Reader, opts.RuleManager, opts.Signoz.Modules.Dashboard)
|
||||
//quickFilterModule := quickfilter.NewAPI(opts.QuickFilterModule)
|
||||
|
||||
cloudIntegrationsRegistry := cloudintegrations.NewCloudProviderRegistry(
|
||||
opts.Logger,
|
||||
opts.Signoz.SQLStore,
|
||||
opts.Signoz.Querier,
|
||||
)
|
||||
|
||||
aH := &APIHandler{
|
||||
reader: opts.Reader,
|
||||
temporalityMap: make(map[string]map[v3.Temporality]bool),
|
||||
ruleManager: opts.RuleManager,
|
||||
IntegrationsController: opts.IntegrationsController,
|
||||
cloudIntegrationsRegistry: cloudIntegrationsRegistry,
|
||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||
querier: querier,
|
||||
querierV2: querierv2,
|
||||
@@ -1214,22 +1209,13 @@ func (aH *APIHandler) Get(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
dashboard := new(dashboardtypes.Dashboard)
|
||||
if integrationtypes.IsCloudIntegrationDashboardUuid(id) {
|
||||
cloudProvider, err := integrationtypes.GetCloudProviderFromDashboardID(id)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(id) {
|
||||
cloudIntegrationDashboard, apiErr := aH.CloudIntegrationsController.GetDashboardById(ctx, orgID, id)
|
||||
if apiErr != nil {
|
||||
render.Error(rw, errorsV2.Wrapf(apiErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to get dashboard"))
|
||||
return
|
||||
}
|
||||
|
||||
integrationDashboard, err := aH.cloudIntegrationsRegistry[cloudProvider].GetDashboard(ctx, &integrationtypes.GettableDashboard{
|
||||
ID: id,
|
||||
OrgID: orgID,
|
||||
})
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
dashboard = integrationDashboard
|
||||
dashboard = cloudIntegrationDashboard
|
||||
} else if aH.IntegrationsController.IsInstalledIntegrationDashboardID(id) {
|
||||
integrationDashboard, apiErr := aH.IntegrationsController.GetInstalledIntegrationDashboardById(ctx, orgID, id)
|
||||
if apiErr != nil {
|
||||
@@ -1293,13 +1279,11 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
|
||||
dashboards = append(dashboards, installedIntegrationDashboards...)
|
||||
}
|
||||
|
||||
for _, provider := range aH.cloudIntegrationsRegistry {
|
||||
cloudIntegrationDashboards, err := provider.GetAvailableDashboards(ctx, orgID)
|
||||
if err != nil {
|
||||
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
|
||||
} else {
|
||||
dashboards = append(dashboards, cloudIntegrationDashboards...)
|
||||
}
|
||||
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
|
||||
if apiErr != nil {
|
||||
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
|
||||
} else {
|
||||
dashboards = append(dashboards, cloudIntegrationDashboards...)
|
||||
}
|
||||
|
||||
gettableDashboards, err := dashboardtypes.NewGettableDashboardsFromDashboards(dashboards)
|
||||
@@ -3275,15 +3259,15 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(w http.ResponseWriter, r *h
|
||||
lookbackSeconds = 15 * 60
|
||||
}
|
||||
|
||||
connectionStatus, err := aH.calculateConnectionStatus(
|
||||
connectionStatus, apiErr := aH.calculateConnectionStatus(
|
||||
r.Context(), orgID, connectionTests, lookbackSeconds,
|
||||
)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, "Failed to calculate integration connection status")
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, connectionStatus)
|
||||
aH.Respond(w, connectionStatus)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) calculateConnectionStatus(
|
||||
@@ -3291,11 +3275,10 @@ func (aH *APIHandler) calculateConnectionStatus(
|
||||
orgID valuer.UUID,
|
||||
connectionTests *integrations.IntegrationConnectionTests,
|
||||
lookbackSeconds int64,
|
||||
) (*integrations.IntegrationConnectionStatus, error) {
|
||||
) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
|
||||
// Calculate connection status for signals in parallel
|
||||
|
||||
result := &integrations.IntegrationConnectionStatus{}
|
||||
// TODO: migrate to errors package
|
||||
errors := []*model.ApiError{}
|
||||
var resultLock sync.Mutex
|
||||
|
||||
@@ -3493,14 +3476,12 @@ func (aH *APIHandler) UninstallIntegration(w http.ResponseWriter, r *http.Reques
|
||||
aH.Respond(w, map[string]interface{}{})
|
||||
}
|
||||
|
||||
// RegisterCloudIntegrationsRoutes register routes for cloud provider integrations
|
||||
// cloud provider integrations
|
||||
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
|
||||
|
||||
subRouter.Use(middleware.NewRecovery(aH.Signoz.Instrumentation.Logger()).Wrap)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionArtifact),
|
||||
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionUrl),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
@@ -3534,199 +3515,170 @@ func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGenerateConnectionArtifact(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
|
||||
r.Context(), claims.OrgID, cloudProvider,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
reqBody, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GenerateConnectionArtifact(r.Context(), &integrationtypes.PostableConnectionArtifact{
|
||||
OrgID: claims.OrgID,
|
||||
Data: reqBody,
|
||||
})
|
||||
if err != nil {
|
||||
aH.Signoz.Instrumentation.Logger().ErrorContext(r.Context(),
|
||||
"failed to generate connection artifact for cloud integration",
|
||||
slog.String("cloudProvider", cloudProviderString),
|
||||
slog.String("orgID", claims.OrgID),
|
||||
)
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
aH.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
req := cloudintegrations.GenerateConnectionUrlRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListConnectedAccounts(r.Context(), claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
|
||||
r.Context(), claims.OrgID, cloudProvider, req,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetAccountStatus(r.Context(), claims.OrgID, accountId)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
|
||||
r.Context(), claims.OrgID, cloudProvider, accountId,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
aH.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
req := cloudintegrations.AgentCheckInRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := aH.CloudIntegrationsController.CheckInAsAgent(
|
||||
r.Context(), claims.OrgID, cloudProvider, req,
|
||||
)
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
req := new(integrationtypes.PostableAgentCheckInPayload)
|
||||
if err = json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||
render.Error(w, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid request body"))
|
||||
return
|
||||
}
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
req.OrgID = claims.OrgID
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].AgentCheckIn(r.Context(), req)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
reqBody, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
|
||||
req := cloudintegrations.UpdateAccountConfigRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateAccountConfig(r.Context(), &integrationtypes.PatchableAccountConfig{
|
||||
OrgID: claims.OrgID,
|
||||
AccountId: accountId,
|
||||
Data: reqBody,
|
||||
})
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
return
|
||||
result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
|
||||
r.Context(), claims.OrgID, cloudProvider, accountId, req,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := aH.cloudIntegrationsRegistry[cloudProvider].DisconnectAccount(r.Context(), claims.OrgID, accountId)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
|
||||
r.Context(), claims.OrgID, cloudProvider, accountId,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, result)
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsListServices(w http.ResponseWriter, r *http.Request) {
|
||||
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
func (aH *APIHandler) CloudIntegrationsListServices(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
var cloudAccountId *string
|
||||
|
||||
@@ -3735,22 +3687,26 @@ func (aH *APIHandler) CloudIntegrationsListServices(w http.ResponseWriter, r *ht
|
||||
cloudAccountId = &cloudAccountIdQP
|
||||
}
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
||||
if errv2 != nil {
|
||||
render.Error(w, errv2)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListServices(r.Context(), claims.OrgID, cloudAccountId)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
resp, apiErr := aH.CloudIntegrationsController.ListServices(
|
||||
r.Context(), claims.OrgID, cloudProvider, cloudAccountId,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
aH.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
w http.ResponseWriter, r *http.Request,
) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
@@ -3762,14 +3718,7 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter,
return
}

cloudProviderString := mux.Vars(r)["cloudProvider"]

cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}

cloudProvider := mux.Vars(r)["cloudProvider"]
serviceId := mux.Vars(r)["serviceId"]

var cloudAccountId *string
@@ -3779,59 +3728,270 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter,
cloudAccountId = &cloudAccountIdQP
}

resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetServiceDetails(r.Context(), &integrationtypes.GetServiceDetailsReq{
OrgID: orgID,
ServiceId: serviceId,
CloudAccountID: cloudAccountId,
})
if err != nil {
render.Error(w, err)
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}

render.Success(w, http.StatusOK, resp)
return
}
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]

cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}

serviceId := mux.Vars(r)["serviceId"]

claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}

reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err,
errors.CodeInternal,
"failed to read update service config request body",
))
return
}

result, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateServiceConfig(
r.Context(), &integrationtypes.PatchableServiceConfig{
OrgID: claims.OrgID,
ServiceId: serviceId,
Config: reqBody,
},
resp, err := aH.CloudIntegrationsController.GetServiceDetails(
r.Context(), claims.OrgID, cloudProvider, serviceId, cloudAccountId,
)
if err != nil {
render.Error(w, err)
return
}

render.Success(w, http.StatusOK, result)
// Add connection status for the 2 signals.
if cloudAccountId != nil {
connStatus, apiErr := aH.calculateCloudIntegrationServiceConnectionStatus(
r.Context(), orgID, cloudProvider, *cloudAccountId, resp,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
resp.ConnectionStatus = connStatus
}

aH.Respond(w, resp)
}
|
||||
func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
|
||||
ctx context.Context,
|
||||
orgID valuer.UUID,
|
||||
cloudProvider string,
|
||||
cloudAccountId string,
|
||||
svcDetails *cloudintegrations.ServiceDetails,
|
||||
) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
|
||||
if cloudProvider != "aws" {
|
||||
// TODO(Raj): Make connection check generic for all providers in a follow up change
|
||||
return nil, model.BadRequest(
|
||||
fmt.Errorf("unsupported cloud provider: %s", cloudProvider),
|
||||
)
|
||||
}
|
||||
|
||||
telemetryCollectionStrategy := svcDetails.Strategy
|
||||
if telemetryCollectionStrategy == nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
|
||||
))
|
||||
}
|
||||
|
||||
result := &cloudintegrations.ServiceConnectionStatus{}
|
||||
errors := []*model.ApiError{}
|
||||
var resultLock sync.Mutex
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Calculate metrics connection status
|
||||
if telemetryCollectionStrategy.AWSMetrics != nil {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
metricsConnStatus, apiErr := aH.calculateAWSIntegrationSvcMetricsConnectionStatus(
|
||||
ctx, cloudAccountId, telemetryCollectionStrategy.AWSMetrics, svcDetails.DataCollected.Metrics,
|
||||
)
|
||||
|
||||
resultLock.Lock()
|
||||
defer resultLock.Unlock()
|
||||
|
||||
if apiErr != nil {
|
||||
errors = append(errors, apiErr)
|
||||
} else {
|
||||
result.Metrics = metricsConnStatus
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Calculate logs connection status
|
||||
if telemetryCollectionStrategy.AWSLogs != nil {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
logsConnStatus, apiErr := aH.calculateAWSIntegrationSvcLogsConnectionStatus(
|
||||
ctx, orgID, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
|
||||
)
|
||||
|
||||
resultLock.Lock()
|
||||
defer resultLock.Unlock()
|
||||
|
||||
if apiErr != nil {
|
||||
errors = append(errors, apiErr)
|
||||
} else {
|
||||
result.Logs = logsConnStatus
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(errors) > 0 {
|
||||
return nil, errors[0]
|
||||
}
|
||||
|
||||
return result, nil
|
||||
|
||||
}
|
||||
func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
|
||||
ctx context.Context,
|
||||
cloudAccountId string,
|
||||
strategy *services.AWSMetricsStrategy,
|
||||
metricsCollectedBySvc []services.CollectedMetric,
|
||||
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
|
||||
if strategy == nil || len(strategy.StreamFilters) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
expectedLabelValues := map[string]string{
|
||||
"cloud_provider": "aws",
|
||||
"cloud_account_id": cloudAccountId,
|
||||
}
|
||||
|
||||
metricsNamespace := strategy.StreamFilters[0].Namespace
|
||||
metricsNamespaceParts := strings.Split(metricsNamespace, "/")
|
||||
|
||||
if len(metricsNamespaceParts) >= 2 {
|
||||
expectedLabelValues["service_namespace"] = metricsNamespaceParts[0]
|
||||
expectedLabelValues["service_name"] = metricsNamespaceParts[1]
|
||||
} else {
|
||||
// metrics for single word namespaces like "CWAgent" do not
|
||||
// have the service_namespace label populated
|
||||
expectedLabelValues["service_name"] = metricsNamespaceParts[0]
|
||||
}
|
||||
|
||||
metricNamesCollectedBySvc := []string{}
|
||||
for _, cm := range metricsCollectedBySvc {
|
||||
metricNamesCollectedBySvc = append(metricNamesCollectedBySvc, cm.Name)
|
||||
}
|
||||
|
||||
statusForLastReceivedMetric, apiErr := aH.reader.GetLatestReceivedMetric(
|
||||
ctx, metricNamesCollectedBySvc, expectedLabelValues,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
if statusForLastReceivedMetric != nil {
|
||||
return &cloudintegrations.SignalConnectionStatus{
|
||||
LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
|
||||
LastReceivedFrom: "signoz-aws-integration",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
|
||||
ctx context.Context,
|
||||
orgID valuer.UUID,
|
||||
cloudAccountId string,
|
||||
strategy *services.AWSLogsStrategy,
|
||||
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
|
||||
if strategy == nil || len(strategy.Subscriptions) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
|
||||
if len(logGroupNamePrefix) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logsConnTestFilter := &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "cloud.account.id",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: "=",
|
||||
Value: cloudAccountId,
|
||||
},
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "aws.cloudwatch.log_group_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: "like",
|
||||
Value: logGroupNamePrefix + "%",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(Raj): Receive this as a param from UI in the future.
|
||||
lookbackSeconds := int64(30 * 60)
|
||||
|
||||
qrParams := &v3.QueryRangeParamsV3{
|
||||
Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
|
||||
End: time.Now().UnixMilli(),
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
PanelType: v3.PanelTypeList,
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
PageSize: 1,
|
||||
Filters: logsConnTestFilter,
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceLogs,
|
||||
Expression: "A",
|
||||
AggregateOperator: v3.AggregateOperatorNoOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
queryRes, _, err := aH.querier.QueryRange(
|
||||
ctx, orgID, qrParams,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"could not query for integration connection status: %w", err,
|
||||
))
|
||||
}
|
||||
if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
|
||||
lastLog := queryRes[0].List[0]
|
||||
|
||||
return &cloudintegrations.SignalConnectionStatus{
|
||||
LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
|
||||
LastReceivedFrom: "signoz-aws-integration",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
serviceId := mux.Vars(r)["serviceId"]

req := cloudintegrations.UpdateServiceConfigRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}

claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}

result, err := aH.CloudIntegrationsController.UpdateServiceConfig(
r.Context(), claims.OrgID, cloudProvider, serviceId, &req,
)

if err != nil {
render.Error(w, err)
return
}

aH.Respond(w, result)
}
// logs
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
|
||||
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
@@ -108,7 +107,7 @@ type IntegrationsListItem struct {
|
||||
|
||||
type Integration struct {
|
||||
IntegrationDetails
|
||||
Installation *integrationtypes.InstalledIntegration `json:"installation"`
|
||||
Installation *types.InstalledIntegration `json:"installation"`
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
@@ -224,7 +223,7 @@ func (m *Manager) InstallIntegration(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
integrationId string,
|
||||
config integrationtypes.InstalledIntegrationConfig,
|
||||
config types.InstalledIntegrationConfig,
|
||||
) (*IntegrationsListItem, *model.ApiError) {
|
||||
integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
|
||||
if apiErr != nil {
|
||||
@@ -430,7 +429,7 @@ func (m *Manager) getInstalledIntegration(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
integrationId string,
|
||||
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||
) (*types.InstalledIntegration, *model.ApiError) {
|
||||
iis, apiErr := m.installedIntegrationsRepo.get(
|
||||
ctx, orgId, []string{integrationId},
|
||||
)
|
||||
@@ -458,7 +457,7 @@ func (m *Manager) getInstalledIntegrations(
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
installedTypes := utils.MapSlice(installations, func(i integrationtypes.InstalledIntegration) string {
|
||||
installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
|
||||
return i.Type
|
||||
})
|
||||
integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)
|
||||
|
||||
@@ -4,22 +4,22 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
)
|
||||
|
||||
type InstalledIntegrationsRepo interface {
|
||||
list(ctx context.Context, orgId string) ([]integrationtypes.InstalledIntegration, *model.ApiError)
|
||||
list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)
|
||||
|
||||
get(
|
||||
ctx context.Context, orgId string, integrationTypes []string,
|
||||
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError)
|
||||
) (map[string]types.InstalledIntegration, *model.ApiError)
|
||||
|
||||
upsert(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
integrationType string,
|
||||
config integrationtypes.InstalledIntegrationConfig,
|
||||
) (*integrationtypes.InstalledIntegration, *model.ApiError)
|
||||
config types.InstalledIntegrationConfig,
|
||||
) (*types.InstalledIntegration, *model.ApiError)
|
||||
|
||||
delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
@@ -27,8 +26,8 @@ func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
|
||||
func (r *InstalledIntegrationsSqliteRepo) list(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
) ([]integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||
integrations := []integrationtypes.InstalledIntegration{}
|
||||
) ([]types.InstalledIntegration, *model.ApiError) {
|
||||
integrations := []types.InstalledIntegration{}
|
||||
|
||||
err := r.store.BunDB().NewSelect().
|
||||
Model(&integrations).
|
||||
@@ -45,8 +44,8 @@ func (r *InstalledIntegrationsSqliteRepo) list(
|
||||
|
||||
func (r *InstalledIntegrationsSqliteRepo) get(
|
||||
ctx context.Context, orgId string, integrationTypes []string,
|
||||
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||
integrations := []integrationtypes.InstalledIntegration{}
|
||||
) (map[string]types.InstalledIntegration, *model.ApiError) {
|
||||
integrations := []types.InstalledIntegration{}
|
||||
|
||||
typeValues := []interface{}{}
|
||||
for _, integrationType := range integrationTypes {
|
||||
@@ -63,7 +62,7 @@ func (r *InstalledIntegrationsSqliteRepo) get(
|
||||
))
|
||||
}
|
||||
|
||||
result := map[string]integrationtypes.InstalledIntegration{}
|
||||
result := map[string]types.InstalledIntegration{}
|
||||
for _, ii := range integrations {
|
||||
result[ii.Type] = ii
|
||||
}
|
||||
@@ -75,10 +74,10 @@ func (r *InstalledIntegrationsSqliteRepo) upsert(
|
||||
ctx context.Context,
|
||||
orgId string,
|
||||
integrationType string,
|
||||
config integrationtypes.InstalledIntegrationConfig,
|
||||
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||
config types.InstalledIntegrationConfig,
|
||||
) (*types.InstalledIntegration, *model.ApiError) {
|
||||
|
||||
integration := integrationtypes.InstalledIntegration{
|
||||
integration := types.InstalledIntegration{
|
||||
Identifiable: types.Identifiable{
|
||||
ID: valuer.GenerateUUID(),
|
||||
},
|
||||
@@ -115,7 +114,7 @@ func (r *InstalledIntegrationsSqliteRepo) delete(
|
||||
ctx context.Context, orgId string, integrationType string,
|
||||
) *model.ApiError {
|
||||
_, dbErr := r.store.BunDB().NewDelete().
|
||||
Model(&integrationtypes.InstalledIntegration{}).
|
||||
Model(&types.InstalledIntegration{}).
|
||||
Where("type = ?", integrationType).
|
||||
Where("org_id = ?", orgId).
|
||||
Exec(ctx)
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/querier"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
|
||||
@@ -69,6 +70,11 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
|
||||
Provider: "memory",
|
||||
Memory: cache.Memory{
|
||||
@@ -120,13 +126,13 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
||||
Reader: reader,
|
||||
RuleManager: rm,
|
||||
IntegrationsController: integrationsController,
|
||||
CloudIntegrationsController: cloudIntegrationsController,
|
||||
LogsParsingPipelineController: logParsingPipelineController,
|
||||
FluxInterval: config.Querier.FluxInterval,
|
||||
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
||||
LicensingAPI: nooplicensing.NewLicenseAPI(),
|
||||
Signoz: signoz,
|
||||
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
||||
Logger: signoz.Instrumentation.Logger(),
|
||||
}, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -1,13 +0,0 @@
package utils

import (
"runtime/debug"
)

func RecoverPanic(callback func(err interface{}, stack []byte)) {
if r := recover(); r != nil {
if callback != nil {
callback(r, debug.Stack())
}
}
}
@@ -11,8 +11,11 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager/signozalertmanager"
|
||||
"github.com/SigNoz/signoz/pkg/emailing/emailingtest"
|
||||
"github.com/SigNoz/signoz/pkg/factory/factorytest"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
"github.com/SigNoz/signoz/pkg/querier"
|
||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||
"github.com/SigNoz/signoz/pkg/sharder"
|
||||
@@ -41,7 +44,13 @@ func TestNewHandlers(t *testing.T) {
|
||||
queryParser := queryparser.New(providerSettings)
|
||||
require.NoError(t, err)
|
||||
dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule)
|
||||
|
||||
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
|
||||
require.NoError(t, err)
|
||||
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
|
||||
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
|
||||
|
||||
querierHandler := querier.NewHandler(providerSettings, nil, nil)
|
||||
handlers := NewHandlers(modules, providerSettings, nil, querierHandler, nil, nil, nil, nil, nil, nil, nil)
|
||||
|
||||
@@ -85,11 +85,11 @@ func NewModules(
|
||||
queryParser queryparser.QueryParser,
|
||||
config Config,
|
||||
dashboard dashboard.Module,
|
||||
userGetter user.Getter,
|
||||
) Modules {
|
||||
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
|
||||
orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter)
|
||||
user := impluser.NewModule(impluser.NewStore(sqlstore, providerSettings), tokenizer, emailing, providerSettings, orgSetter, authz, analytics, config.User)
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings))
|
||||
ruleStore := sqlrulestore.NewRuleStore(sqlstore, queryParser, providerSettings)
|
||||
|
||||
return Modules{
|
||||
|
||||
@@ -11,8 +11,11 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager/signozalertmanager"
|
||||
"github.com/SigNoz/signoz/pkg/emailing/emailingtest"
|
||||
"github.com/SigNoz/signoz/pkg/factory/factorytest"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||
"github.com/SigNoz/signoz/pkg/sharder"
|
||||
"github.com/SigNoz/signoz/pkg/sharder/noopsharder"
|
||||
@@ -40,7 +43,13 @@ func TestNewModules(t *testing.T) {
|
||||
queryParser := queryparser.New(providerSettings)
|
||||
require.NoError(t, err)
|
||||
dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule)
|
||||
|
||||
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
|
||||
require.NoError(t, err)
|
||||
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
|
||||
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
|
||||
|
||||
reflectVal := reflect.ValueOf(modules)
|
||||
for i := 0; i < reflectVal.NumField(); i++ {
|
||||
|
||||
@@ -169,6 +169,7 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
)
}
@@ -1,11 +1,13 @@
|
||||
package signoz
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager/nfmanagertest"
|
||||
"github.com/SigNoz/signoz/pkg/analytics"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
@@ -75,7 +77,12 @@ func TestNewProviderFactories(t *testing.T) {
|
||||
})
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), instrumentationtest.New().ToProviderSettings()))
|
||||
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), instrumentationtest.New().ToProviderSettings()), flagger)
|
||||
orgGetter := implorganization.NewGetter(implorganization.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual)), nil)
|
||||
telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherEqual)
|
||||
NewStatsReporterProviderFactories(telemetryStore, []statsreporter.StatsCollector{}, orgGetter, userGetter, tokenizertest.NewMockTokenizer(t), version.Build{}, analytics.Config{Enabled: true})
|
||||
|
||||
@@ -280,7 +280,7 @@ func New(
|
||||
}
|
||||
|
||||
// Initialize user getter
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings))
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
|
||||
|
||||
licensingProviderFactory := licenseProviderFactory(sqlstore, zeus, orgGetter, analytics)
|
||||
licensing, err := licensingProviderFactory.New(
|
||||
@@ -388,7 +388,7 @@ func New(
|
||||
}
|
||||
|
||||
// Initialize all modules
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard)
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter)
|
||||
|
||||
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)
|
||||
|
||||
|
||||
209
pkg/sqlmigration/066_migrate_rules_v4_to_v5_post_deprecation.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package sqlmigration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"log/slog"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/transition"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/migrate"
|
||||
)
|
||||
|
||||
type migrateRulesV4ToV5 struct {
|
||||
store sqlstore.SQLStore
|
||||
telemetryStore telemetrystore.TelemetryStore
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
func NewMigrateRulesV4ToV5Factory(
|
||||
store sqlstore.SQLStore,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
) factory.ProviderFactory[SQLMigration, Config] {
|
||||
return factory.NewProviderFactory(
|
||||
factory.MustNewName("migrate_rules_post_deprecation"),
|
||||
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
|
||||
return &migrateRulesV4ToV5{
|
||||
store: store,
|
||||
telemetryStore: telemetryStore,
|
||||
logger: ps.Logger,
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
|
||||
if err := migrations.Register(migration.Up, migration.Down); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||
query := `
|
||||
SELECT name
|
||||
FROM (
|
||||
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
|
||||
INTERSECT
|
||||
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
|
||||
)
|
||||
ORDER BY name
|
||||
`
|
||||
|
||||
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||
if err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var keys []string
|
||||
for rows.Next() {
|
||||
var key string
|
||||
if err := rows.Scan(&key); err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||
query := `
|
||||
SELECT tagKey
|
||||
FROM signoz_traces.distributed_span_attributes_keys
|
||||
WHERE tagType IN ('tag', 'resource')
|
||||
GROUP BY tagKey
|
||||
HAVING COUNT(DISTINCT tagType) > 1
|
||||
ORDER BY tagKey
|
||||
`
|
||||
|
||||
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||
if err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var keys []string
|
||||
for rows.Next() {
|
||||
var key string
|
||||
if err := rows.Scan(&key); err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
|
||||
logsKeys, err := migration.getLogDuplicateKeys(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tx, err := db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
var rules []struct {
|
||||
ID string `bun:"id"`
|
||||
Data map[string]any `bun:"data"`
|
||||
}
|
||||
|
||||
err = tx.NewSelect().
|
||||
Table("rule").
|
||||
Column("id", "data").
|
||||
Scan(ctx, &rules)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
|
||||
|
||||
count := 0
|
||||
|
||||
for _, rule := range rules {
|
||||
version, _ := rule.Data["version"].(string)
|
||||
|
||||
if version == "v5" {
|
||||
continue
|
||||
}
|
||||
|
||||
if version == "" {
|
||||
migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
|
||||
}
|
||||
|
||||
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
|
||||
|
||||
// Check if the queries envelope already exists and is non-empty
|
||||
hasQueriesEnvelope := false
|
||||
if condition, ok := rule.Data["condition"].(map[string]any); ok {
|
||||
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
|
||||
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
|
||||
hasQueriesEnvelope = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if hasQueriesEnvelope {
|
||||
// already has queries envelope, just bump version
|
||||
// this happens when the user picked the wrong version for the rule
|
||||
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
|
||||
rule.Data["version"] = "v5"
|
||||
} else {
|
||||
// old format, run full migration
|
||||
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
|
||||
updated := alertsMigrator.Migrate(ctx, rule.Data)
|
||||
if !updated {
|
||||
migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
|
||||
continue
|
||||
}
|
||||
rule.Data["version"] = "v5"
|
||||
}
|
||||
|
||||
dataJSON, err := json.Marshal(rule.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tx.NewUpdate().
|
||||
Table("rule").
|
||||
Set("data = ?", string(dataJSON)).
|
||||
Where("id = ?", rule.ID).
|
||||
Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 0 {
|
||||
migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
|
||||
return nil
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package authtypes
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
|
||||
@@ -10,8 +11,10 @@ import (
|
||||
var (
|
||||
nameRegex = regexp.MustCompile("^[a-z-]{1,50}$")
|
||||
|
||||
_ json.Marshaler = new(Name)
|
||||
_ json.Unmarshaler = new(Name)
|
||||
_ json.Marshaler = new(Name)
|
||||
_ json.Unmarshaler = new(Name)
|
||||
_ encoding.TextMarshaler = new(Name)
|
||||
_ encoding.TextUnmarshaler = new(Name)
|
||||
)
|
||||
|
||||
type Name struct {
|
||||
@@ -58,3 +61,16 @@ func (name *Name) UnmarshalJSON(data []byte) error {
|
||||
*name = shadow
|
||||
return nil
|
||||
}
|
||||
|
||||
func (name Name) MarshalText() ([]byte, error) {
|
||||
return []byte(name.val), nil
|
||||
}
|
||||
|
||||
func (name *Name) UnmarshalText(text []byte) error {
|
||||
shadow, err := NewName(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*name = shadow
|
||||
return nil
|
||||
}
|
||||
|
||||
177
pkg/types/authtypes/object.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package authtypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
)
|
||||
|
||||
type Resource struct {
|
||||
Name Name `json:"name" required:"true"`
|
||||
Type Type `json:"type" required:"true"`
|
||||
}
|
||||
|
||||
type GettableResources struct {
|
||||
Resources []*Resource `json:"resources" required:"true" nullable:"false"`
|
||||
Relations map[Relation][]Type `json:"relations" required:"true"`
|
||||
}
|
||||
|
||||
type Object struct {
|
||||
Resource Resource `json:"resource" required:"true"`
|
||||
Selector Selector `json:"selector" required:"true"`
|
||||
}
|
||||
|
||||
type GettableObjects struct {
|
||||
Resource Resource `json:"resource" required:"true"`
|
||||
Selectors []Selector `json:"selectors" required:"true" nullable:"false"`
|
||||
}
|
||||
|
||||
type PatchableObjects struct {
|
||||
Additions []*GettableObjects `json:"additions" required:"true" nullable:"true"`
|
||||
Deletions []*GettableObjects `json:"deletions" required:"true" nullable:"true"`
|
||||
}
|
||||
|
||||
func NewObject(resource Resource, selector Selector) (*Object, error) {
|
||||
err := IsValidSelector(resource.Type, selector.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Object{Resource: resource, Selector: selector}, nil
|
||||
}
|
||||
|
||||
func NewObjectsFromGettableObjects(patchableObjects []*GettableObjects) ([]*Object, error) {
|
||||
objects := make([]*Object, 0)
|
||||
|
||||
for _, patchObject := range patchableObjects {
|
||||
for _, selector := range patchObject.Selectors {
|
||||
object, err := NewObject(patchObject.Resource, selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
objects = append(objects, object)
|
||||
}
|
||||
}
|
||||
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
func NewPatchableObjects(additions []*GettableObjects, deletions []*GettableObjects, relation Relation) ([]*Object, []*Object, error) {
|
||||
if len(additions) == 0 && len(deletions) == 0 {
|
||||
return nil, nil, errors.New(errors.TypeInvalidInput, ErrCodeInvalidPatchObject, "empty object patch request received, at least one of additions or deletions must be present")
|
||||
}
|
||||
|
||||
for _, object := range additions {
|
||||
if !slices.Contains(TypeableRelations[object.Resource.Type], relation) {
|
||||
return nil, nil, errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
|
||||
}
|
||||
}
|
||||
|
||||
for _, object := range deletions {
|
||||
if !slices.Contains(TypeableRelations[object.Resource.Type], relation) {
|
||||
return nil, nil, errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
|
||||
}
|
||||
}
|
||||
|
||||
additionObjects, err := NewObjectsFromGettableObjects(additions)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
deletionsObjects, err := NewObjectsFromGettableObjects(deletions)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return additionObjects, deletionsObjects, nil
|
||||
}
|
||||
|
||||
func NewGettableResources(resources []*Resource) *GettableResources {
|
||||
return &GettableResources{
|
||||
Resources: resources,
|
||||
Relations: RelationsTypeable,
|
||||
}
|
||||
}
|
||||
|
||||
func NewGettableObjects(objects []*Object) []*GettableObjects {
|
||||
grouped := make(map[Resource][]Selector)
|
||||
for _, obj := range objects {
|
||||
key := obj.Resource
|
||||
if _, ok := grouped[key]; !ok {
|
||||
grouped[key] = make([]Selector, 0)
|
||||
}
|
||||
|
||||
grouped[key] = append(grouped[key], obj.Selector)
|
||||
}
|
||||
|
||||
gettableObjects := make([]*GettableObjects, 0, len(grouped))
|
||||
for resource, selectors := range grouped {
|
||||
gettableObjects = append(gettableObjects, &GettableObjects{
|
||||
Resource: resource,
|
||||
Selectors: selectors,
|
||||
})
|
||||
}
|
||||
|
||||
return gettableObjects
|
||||
}
|
||||
|
||||
func MustNewObject(resource Resource, selector Selector) *Object {
|
||||
object, err := NewObject(resource, selector)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return object
|
||||
}
|
||||
|
||||
func MustNewObjectFromString(input string) *Object {
|
||||
parts := strings.Split(input, "/")
|
||||
if len(parts) != 4 {
|
||||
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid input format: %s", input))
|
||||
}
|
||||
|
||||
typeParts := strings.Split(parts[0], ":")
|
||||
if len(typeParts) != 2 {
|
||||
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid type format: %s", parts[0]))
|
||||
}
|
||||
|
||||
resource := Resource{
|
||||
Type: MustNewType(typeParts[0]),
|
||||
Name: MustNewName(parts[2]),
|
||||
}
|
||||
|
||||
selector := MustNewSelector(resource.Type, parts[3])
|
||||
|
||||
return &Object{Resource: resource, Selector: selector}
|
||||
}
|
||||
|
||||
func MustNewObjectsFromStringSlice(input []string) []*Object {
|
||||
objects := make([]*Object, 0, len(input))
|
||||
for _, str := range input {
|
||||
objects = append(objects, MustNewObjectFromString(str))
|
||||
}
|
||||
return objects
|
||||
}
|
||||
|
||||
func (object *Object) UnmarshalJSON(data []byte) error {
|
||||
var shadow = struct {
|
||||
Resource Resource
|
||||
Selector Selector
|
||||
}{}
|
||||
|
||||
err := json.Unmarshal(data, &shadow)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
obj, err := NewObject(shadow.Resource, shadow.Selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*object = *obj
|
||||
return nil
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
var (
|
||||
ErrCodeAuthZInvalidRelation = errors.MustNewCode("authz_invalid_relation")
|
||||
ErrCodeInvalidPatchObject = errors.MustNewCode("authz_invalid_patch_objects")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,6 +27,14 @@ var TypeableRelations = map[Type][]Relation{
|
||||
TypeMetaResources: {RelationCreate, RelationList},
|
||||
}
|
||||
|
||||
var RelationsTypeable = map[Relation][]Type{
|
||||
RelationCreate: {TypeMetaResources},
|
||||
RelationRead: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
|
||||
RelationList: {TypeMetaResources},
|
||||
RelationUpdate: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
|
||||
RelationDelete: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
|
||||
}
|
||||
|
||||
type Relation struct{ valuer.String }
|
||||
|
||||
func NewRelation(relation string) (Relation, error) {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package authtypes
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"regexp"
|
||||
@@ -15,8 +16,10 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
_ json.Marshaler = new(Selector)
|
||||
_ json.Unmarshaler = new(Selector)
|
||||
_ json.Marshaler = new(Selector)
|
||||
_ json.Unmarshaler = new(Selector)
|
||||
_ encoding.TextMarshaler = new(Selector)
|
||||
_ encoding.TextUnmarshaler = new(Selector)
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -79,6 +82,15 @@ func (typed *Selector) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (selector Selector) MarshalText() ([]byte, error) {
|
||||
return []byte(selector.val), nil
|
||||
}
|
||||
|
||||
func (selector *Selector) UnmarshalText(text []byte) error {
|
||||
*selector = Selector{val: string(text)}
|
||||
return nil
|
||||
}
|
||||
|
||||
func IsValidSelector(typed Type, selector string) error {
|
||||
switch typed {
|
||||
case TypeUser:
|
||||
|
||||
@@ -3,24 +3,13 @@ package authtypes
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
type Resource struct {
|
||||
Name Name `json:"name" required:"true"`
|
||||
Type Type `json:"type" required:"true"`
|
||||
}
|
||||
|
||||
type Object struct {
|
||||
Resource Resource `json:"resource" required:"true"`
|
||||
Selector Selector `json:"selector" required:"true"`
|
||||
}
|
||||
|
||||
type Transaction struct {
|
||||
ID valuer.UUID `json:"id"`
|
||||
ID valuer.UUID `json:"-"`
|
||||
Relation Relation `json:"relation" required:"true"`
|
||||
Object Object `json:"object" required:"true"`
|
||||
}
|
||||
@@ -31,53 +20,6 @@ type GettableTransaction struct {
|
||||
Authorized bool `json:"authorized" required:"true"`
|
||||
}
|
||||
|
||||
func NewObject(resource Resource, selector Selector) (*Object, error) {
|
||||
err := IsValidSelector(resource.Type, selector.val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Object{Resource: resource, Selector: selector}, nil
|
||||
}
|
||||
|
||||
func MustNewObject(resource Resource, selector Selector) *Object {
|
||||
object, err := NewObject(resource, selector)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return object
|
||||
}
|
||||
|
||||
func MustNewObjectFromString(input string) *Object {
|
||||
parts := strings.Split(input, "/")
|
||||
if len(parts) != 4 {
|
||||
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid input format: %s", input))
|
||||
}
|
||||
|
||||
typeParts := strings.Split(parts[0], ":")
|
||||
if len(typeParts) != 2 {
|
||||
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid type format: %s", parts[0]))
|
||||
}
|
||||
|
||||
resource := Resource{
|
||||
Type: MustNewType(typeParts[0]),
|
||||
Name: MustNewName(parts[2]),
|
||||
}
|
||||
|
||||
selector := MustNewSelector(resource.Type, parts[3])
|
||||
|
||||
return &Object{Resource: resource, Selector: selector}
|
||||
}
|
||||
|
||||
func MustNewObjectsFromStringSlice(input []string) []*Object {
|
||||
objects := make([]*Object, 0, len(input))
|
||||
for _, str := range input {
|
||||
objects = append(objects, MustNewObjectFromString(str))
|
||||
}
|
||||
return objects
|
||||
}
|
||||
|
||||
func NewTransaction(relation Relation, object Object) (*Transaction, error) {
|
||||
if !slices.Contains(TypeableRelations[object.Resource.Type], relation) {
|
||||
return nil, errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidRelation, "invalid relation %s for type %s", relation.StringValue(), object.Resource.Type.StringValue())
|
||||
@@ -100,26 +42,6 @@ func NewGettableTransaction(transactions []*Transaction, results map[string]*Tup
|
||||
return gettableTransactions
|
||||
}
|
||||
|
||||
func (object *Object) UnmarshalJSON(data []byte) error {
|
||||
var shadow = struct {
|
||||
Resource Resource
|
||||
Selector Selector
|
||||
}{}
|
||||
|
||||
err := json.Unmarshal(data, &shadow)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
obj, err := NewObject(shadow.Resource, shadow.Selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*object = *obj
|
||||
return nil
|
||||
}
|
||||
|
||||
func (transaction *Transaction) UnmarshalJSON(data []byte) error {
|
||||
var shadow = struct {
|
||||
Relation Relation
|
||||
|
||||
@@ -33,8 +33,8 @@ type LimitConfig struct {
}

type LimitValue struct {
Size int64 `json:"size"`
Count int64 `json:"count"`
Size *int64 `json:"size,omitempty"`
Count *int64 `json:"count,omitempty"`
}

type LimitMetric struct {
@@ -5,5 +5,5 @@ import (
)

type Identifiable struct {
ID valuer.UUID `json:"id" bun:"id,pk,type:text"`
ID valuer.UUID `json:"id" bun:"id,pk,type:text" required:"true"`
}
247
pkg/types/integration.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
type IntegrationUserEmail string
|
||||
|
||||
const (
|
||||
AWSIntegrationUserEmail IntegrationUserEmail = "aws-integration@signoz.io"
|
||||
)
|
||||
|
||||
var AllIntegrationUserEmails = []IntegrationUserEmail{
|
||||
AWSIntegrationUserEmail,
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Normal integration uses just the installed_integration table
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
type InstalledIntegration struct {
|
||||
bun.BaseModel `bun:"table:installed_integration"`
|
||||
|
||||
Identifiable
|
||||
Type string `json:"type" bun:"type,type:text,unique:org_id_type"`
|
||||
Config InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
|
||||
InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
|
||||
OrgID string `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
|
||||
}
|
||||
|
||||
type InstalledIntegrationConfig map[string]interface{}
|
||||
|
||||
// For serializing from db
|
||||
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
|
||||
var data []byte
|
||||
switch v := src.(type) {
|
||||
case []byte:
|
||||
data = v
|
||||
case string:
|
||||
data = []byte(v)
|
||||
default:
|
||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, c)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
|
||||
filterSetJson, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
|
||||
}
|
||||
return filterSetJson, nil
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Cloud integration uses the cloud_integration table
|
||||
// and cloud_integrations_service table
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
type CloudIntegration struct {
|
||||
bun.BaseModel `bun:"table:cloud_integration"`
|
||||
|
||||
Identifiable
|
||||
TimeAuditable
|
||||
Provider string `json:"provider" bun:"provider,type:text,unique:provider_id"`
|
||||
Config *AccountConfig `json:"config" bun:"config,type:text"`
|
||||
AccountID *string `json:"account_id" bun:"account_id,type:text"`
|
||||
LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
|
||||
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
|
||||
OrgID string `bun:"org_id,type:text,unique:provider_id"`
|
||||
}
|
||||
|
||||
func (a *CloudIntegration) Status() AccountStatus {
|
||||
status := AccountStatus{}
|
||||
if a.LastAgentReport != nil {
|
||||
lastHeartbeat := a.LastAgentReport.TimestampMillis
|
||||
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
func (a *CloudIntegration) Account() Account {
|
||||
ca := Account{Id: a.ID.StringValue(), Status: a.Status()}
|
||||
|
||||
if a.AccountID != nil {
|
||||
ca.CloudAccountId = *a.AccountID
|
||||
}
|
||||
|
||||
if a.Config != nil {
|
||||
ca.Config = *a.Config
|
||||
} else {
|
||||
ca.Config = DefaultAccountConfig()
|
||||
}
|
||||
return ca
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
Id string `json:"id"`
|
||||
CloudAccountId string `json:"cloud_account_id"`
|
||||
Config AccountConfig `json:"config"`
|
||||
Status AccountStatus `json:"status"`
|
||||
}
|
||||
|
||||
type AccountStatus struct {
|
||||
Integration AccountIntegrationStatus `json:"integration"`
|
||||
}
|
||||
|
||||
type AccountIntegrationStatus struct {
|
||||
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
|
||||
}
|
||||
|
||||
func DefaultAccountConfig() AccountConfig {
|
||||
return AccountConfig{
|
||||
EnabledRegions: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
type AccountConfig struct {
|
||||
EnabledRegions []string `json:"regions"`
|
||||
}
|
||||
|
||||
// For serializing from db
|
||||
func (c *AccountConfig) Scan(src any) error {
|
||||
var data []byte
|
||||
switch v := src.(type) {
|
||||
case []byte:
|
||||
data = v
|
||||
case string:
|
||||
data = []byte(v)
|
||||
default:
|
||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, c)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (c *AccountConfig) Value() (driver.Value, error) {
|
||||
if c == nil {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "cloud account config is nil")
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize cloud account config to JSON")
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
}
|
||||
|
||||
type AgentReport struct {
|
||||
TimestampMillis int64 `json:"timestamp_millis"`
|
||||
Data map[string]any `json:"data"`
|
||||
}
|
||||
|
||||
// For serializing from db
|
||||
func (r *AgentReport) Scan(src any) error {
|
||||
var data []byte
|
||||
switch v := src.(type) {
|
||||
case []byte:
|
||||
data = v
|
||||
case string:
|
||||
data = []byte(v)
|
||||
default:
|
||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, r)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (r *AgentReport) Value() (driver.Value, error) {
|
||||
if r == nil {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(
|
||||
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
|
||||
)
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
}
|
||||
|
||||
type CloudIntegrationService struct {
|
||||
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
|
||||
|
||||
Identifiable
|
||||
TimeAuditable
|
||||
Type string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
|
||||
Config CloudServiceConfig `bun:"config,type:text"`
|
||||
CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
|
||||
}
|
||||
|
||||
type CloudServiceLogsConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
|
||||
}
|
||||
|
||||
type CloudServiceMetricsConfig struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
type CloudServiceConfig struct {
|
||||
Logs *CloudServiceLogsConfig `json:"logs,omitempty"`
|
||||
Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
|
||||
}
|
||||
|
||||
// For serializing from db
|
||||
func (c *CloudServiceConfig) Scan(src any) error {
|
||||
var data []byte
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
data = src
|
||||
case string:
|
||||
data = []byte(src)
|
||||
default:
|
||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, c)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (c *CloudServiceConfig) Value() (driver.Value, error) {
|
||||
if c == nil {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "cloud service config is nil")
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(
|
||||
err, errors.CodeInternal, "couldn't serialize cloud service config to JSON",
|
||||
)
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
}
|
||||
@@ -1,396 +0,0 @@
|
||||
package integrationtypes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
// Generic utility functions for JSON serialization/deserialization
|
||||
|
||||
// UnmarshalJSON is a generic function to unmarshal JSON data into any type
|
||||
func UnmarshalJSON[T any](src []byte, target *T) error {
|
||||
err := json.Unmarshal(src, target)
|
||||
if err != nil {
|
||||
return errors.WrapInternalf(
|
||||
err, errors.CodeInternal, "couldn't deserialize JSON",
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON is a generic function to marshal any type to JSON
|
||||
func MarshalJSON[T any](source *T) ([]byte, error) {
|
||||
if source == nil {
|
||||
return nil, errors.NewInternalf(errors.CodeInternal, "source is nil")
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(source)
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(
|
||||
err, errors.CodeInternal, "couldn't serialize to JSON",
|
||||
)
|
||||
}
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
// CloudProvider defines the interface to be implemented by different cloud providers.
|
||||
// This is a generic interface, so it accepts and returns generic types instead of concrete ones.
// It's the cloud provider's responsibility to cast them to the appropriate types and validate them.
|
||||
type CloudProvider interface {
|
||||
GetName() CloudProviderType
|
||||
|
||||
AgentCheckIn(ctx context.Context, req *PostableAgentCheckInPayload) (any, error)
|
||||
GenerateConnectionArtifact(ctx context.Context, req *PostableConnectionArtifact) (any, error)
|
||||
GetAccountStatus(ctx context.Context, orgID, accountID string) (*GettableAccountStatus, error)
|
||||
|
||||
ListServices(ctx context.Context, orgID string, accountID *string) (any, error) // returns either GettableAWSServices
|
||||
GetServiceDetails(ctx context.Context, req *GetServiceDetailsReq) (any, error)
|
||||
ListConnectedAccounts(ctx context.Context, orgID string) (*GettableConnectedAccountsList, error)
|
||||
GetDashboard(ctx context.Context, req *GettableDashboard) (*dashboardtypes.Dashboard, error)
|
||||
GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
|
||||
|
||||
UpdateAccountConfig(ctx context.Context, req *PatchableAccountConfig) (any, error) // req can be either PatchableAWSAccountConfig
|
||||
UpdateServiceConfig(ctx context.Context, req *PatchableServiceConfig) (any, error)
|
||||
|
||||
DisconnectAccount(ctx context.Context, orgID, accountID string) (*CloudIntegration, error)
|
||||
}
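
// Editor's note: an illustrative caller-side sketch (not part of this commit) of the
// cast-and-validate contract described above. The surrounding variables, and the
// assumption that the AWS provider returns *GettableAWSServices from ListServices,
// are hypothetical:
//
//     svcAny, err := provider.ListServices(ctx, orgID, nil)
//     if err != nil {
//         return err
//     }
//     awsServices, ok := svcAny.(*GettableAWSServices)
//     if !ok {
//         return errors.NewInternalf(errors.CodeInternal, "unexpected services payload %T", svcAny)
//     }
//     _ = awsServices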

type GettableDashboard struct {
    ID    string
    OrgID valuer.UUID
}

type GettableCloudIntegrationConnectionParams struct {
    IngestionUrl string `json:"ingestion_url,omitempty"`
    IngestionKey string `json:"ingestion_key,omitempty"`
    SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
    SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}

type GettableIngestionKey struct {
    Name  string `json:"name"`
    Value string `json:"value"`
    // other attributes from gateway response not included here since they are not being used.
}

type GettableIngestionKeysSearch struct {
    Status string                 `json:"status"`
    Data   []GettableIngestionKey `json:"data"`
    Error  string                 `json:"error"`
}

type GettableCreateIngestionKey struct {
    Status string               `json:"status"`
    Data   GettableIngestionKey `json:"data"`
    Error  string               `json:"error"`
}

type GettableDeployment struct {
    Name        string `json:"name"`
    ClusterInfo struct {
        Region struct {
            DNS string `json:"dns"`
        } `json:"region"`
    } `json:"cluster"`
}

type GettableConnectedAccountsList struct {
    Accounts []*Account `json:"accounts"`
}

// SigNozAWSAgentConfig represents requirements for agent deployment in user's AWS account
type SigNozAWSAgentConfig struct {
    // The region in which SigNoz agent should be installed.
    Region string `json:"region"`

    IngestionUrl string `json:"ingestion_url"`
    IngestionKey string `json:"ingestion_key"`
    SigNozAPIUrl string `json:"signoz_api_url"`
    SigNozAPIKey string `json:"signoz_api_key"`

    Version string `json:"version,omitempty"`
}

type PostableConnectionArtifact struct {
    OrgID string
    Data  []byte // either PostableAWSConnectionUrl
}

type PostableConnectionArtifactTyped[AgentConfigT any, AccountConfigT any] struct {
    AccountId     *string         `json:"account_id,omitempty"` // Optional. To be specified for updates.
    AgentConfig   *AgentConfigT   `json:"agent_config"`
    AccountConfig *AccountConfigT `json:"account_config"`
}

type PostableAWSConnectionUrl = PostableConnectionArtifactTyped[SigNozAWSAgentConfig, AWSAccountConfig]

// GettableConnectionArtifact represents base structure for connection artifacts
type GettableConnectionArtifact[T any] struct {
    AccountId string `json:"account_id"`
    Artifact  T      `json:",inline"`
}

type GettableAWSConnectionArtifact struct {
    ConnectionUrl string `json:"connection_url"`
}

type GettableAWSConnectionUrl struct {
    AccountId     string `json:"account_id"`
    ConnectionUrl string `json:"connection_url"`
}

type GettableAccountStatus struct {
    Id             string        `json:"id"`
    CloudAccountId *string       `json:"cloud_account_id,omitempty"`
    Status         AccountStatus `json:"status"`
}

type PostableAgentCheckInPayload struct {
    ID        string `json:"account_id"`
    AccountID string `json:"cloud_account_id"`
    // Arbitrary cloud specific Agent data
    Data  map[string]any `json:"data,omitempty"`
    OrgID string         `json:"-"`
}

type AWSAgentIntegrationConfig struct {
    EnabledRegions              []string               `json:"enabled_regions"`
    TelemetryCollectionStrategy *AWSCollectionStrategy `json:"telemetry,omitempty"`
}

type GettableAgentCheckIn[T any] struct {
    AccountId         string     `json:"account_id"`
    CloudAccountId    string     `json:"cloud_account_id"`
    RemovedAt         *time.Time `json:"removed_at"`
    IntegrationConfig T          `json:"integration_config"`
}

type GettableAWSAgentCheckIn = GettableAgentCheckIn[AWSAgentIntegrationConfig]

type PatchableServiceConfig struct {
    OrgID     string `json:"org_id"`
    ServiceId string `json:"service_id"`
    Config    []byte `json:"config"` // json serialized config
}

type UpdatableCloudServiceConfig[T any] struct {
    CloudAccountId string `json:"cloud_account_id"`
    Config         *T     `json:"config"`
}

type UpdatableAWSCloudServiceConfig = UpdatableCloudServiceConfig[AWSCloudServiceConfig]

type AWSCloudServiceConfig struct {
    Logs    *AWSCloudServiceLogsConfig    `json:"logs,omitempty"`
    Metrics *AWSCloudServiceMetricsConfig `json:"metrics,omitempty"`
}

func (a *AWSCloudServiceConfig) Validate(def *AWSDefinition) error {
    if def.Id != S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
        return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", S3Sync)
    } else if def.Id == S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
        for region := range a.Logs.S3Buckets {
            if _, found := ValidAWSRegions[region]; !found {
                return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
            }
        }
    }

    return nil
}

type PatchServiceConfigResponse struct {
    ServiceId string `json:"id"`
    Config    any    `json:"config"`
}

type PatchableAccountConfig struct {
    OrgID     string
    AccountId string
    Data      []byte // can be either AWSAccountConfig
}

type PatchableAccountConfigTyped[T any] struct {
    Config *T `json:"config"`
}

type PatchableAWSAccountConfig = PatchableAccountConfigTyped[AWSAccountConfig]

type AWSAccountConfig struct {
    EnabledRegions []string `json:"regions"`
}

type GettableServices[T any] struct {
    Services []T `json:"services"`
}

type GettableAWSServices = GettableServices[AWSServiceSummary]

type GetServiceDetailsReq struct {
    OrgID          valuer.UUID
    ServiceId      string
    CloudAccountID *string
}

// --------------------------------------------------------------------------
// DATABASE TYPES
// --------------------------------------------------------------------------

// --------------------------------------------------------------------------
// Cloud integration uses the cloud_integration table
// and cloud_integrations_service table
// --------------------------------------------------------------------------

type CloudIntegration struct {
    bun.BaseModel `bun:"table:cloud_integration"`

    types.Identifiable
    types.TimeAuditable
    Provider        string       `json:"provider" bun:"provider,type:text,unique:provider_id"`
    Config          string       `json:"config" bun:"config,type:text"` // json serialized config
    AccountID       *string      `json:"account_id" bun:"account_id,type:text"`
    LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
    RemovedAt       *time.Time   `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
    OrgID           string       `bun:"org_id,type:text,unique:provider_id"`
}

func (a *CloudIntegration) Status() AccountStatus {
    status := AccountStatus{}
    if a.LastAgentReport != nil {
        lastHeartbeat := a.LastAgentReport.TimestampMillis
        status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
    }
    return status
}

func (a *CloudIntegration) Account(cloudProvider CloudProviderType) *Account {
    ca := &Account{Id: a.ID.StringValue(), Status: a.Status()}

    if a.AccountID != nil {
        ca.CloudAccountId = *a.AccountID
    }

    ca.Config = map[string]interface{}{}

    if len(a.Config) < 1 {
        return ca
    }

    switch cloudProvider {
    case CloudProviderAWS:
        config := new(AWSAccountConfig)
        _ = UnmarshalJSON([]byte(a.Config), config)
        ca.Config = config
    default:
    }

    return ca
}

type Account struct {
    Id             string        `json:"id"`
    CloudAccountId string        `json:"cloud_account_id"`
    Config         any           `json:"config"` // AWSAccountConfig
    Status         AccountStatus `json:"status"`
}

type AccountStatus struct {
    Integration AccountIntegrationStatus `json:"integration"`
}

type AccountIntegrationStatus struct {
    LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}

func DefaultAWSAccountConfig() AWSAccountConfig {
    return AWSAccountConfig{
        EnabledRegions: []string{},
    }
}

type ServiceSummary[T any] struct {
    DefinitionMetadata
    Config *T `json:"config"`
}

type AWSServiceSummary = ServiceSummary[AWSCloudServiceConfig]

type GettableAWSServiceDetails struct {
    AWSDefinition
    Config           *AWSCloudServiceConfig   `json:"config"`
    ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}

type ServiceConnectionStatus struct {
    Logs    []*SignalConnectionStatus `json:"logs"`
    Metrics []*SignalConnectionStatus `json:"metrics"`
}

type SignalConnectionStatus struct {
    CategoryID           string `json:"category"`
    CategoryDisplayName  string `json:"category_display_name"`
    LastReceivedTsMillis int64  `json:"last_received_ts_ms"` // epoch milliseconds
    LastReceivedFrom     string `json:"last_received_from"`  // resource identifier
}

type AgentReport struct {
    TimestampMillis int64          `json:"timestamp_millis"`
    Data            map[string]any `json:"data"`
}

// Scan scans data from db
func (r *AgentReport) Scan(src any) error {
    var data []byte
    switch v := src.(type) {
    case []byte:
        data = v
    case string:
        data = []byte(v)
    default:
        return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
    }

    return json.Unmarshal(data, r)
}

// Value serializes data to bytes for db insertion
func (r *AgentReport) Value() (driver.Value, error) {
    if r == nil {
        return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
    }

    serialized, err := json.Marshal(r)
    if err != nil {
        return nil, errors.WrapInternalf(
            err, errors.CodeInternal, "couldn't serialize agent report to JSON",
        )
    }
    return serialized, nil
}

type CloudIntegrationService struct {
    bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

    types.Identifiable
    types.TimeAuditable
    Type               string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
    Config             string `bun:"config,type:text"` // json serialized config
    CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
}

type AWSCloudServiceLogsConfig struct {
    Enabled   bool                `json:"enabled"`
    S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

type AWSCloudServiceMetricsConfig struct {
    Enabled bool `json:"enabled"`
}
@@ -1,189 +0,0 @@
package integrationtypes

import (
    "fmt"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

const (
    S3Sync = "s3sync"
)

type AWSDefinition = ServiceDefinition[AWSCollectionStrategy]

var _ Definition = &AWSDefinition{}

type ServiceDefinition[T any] struct {
    DefinitionMetadata
    Overview             string                `json:"overview"` // markdown
    Assets               Assets                `json:"assets"`
    SupportedSignals     SupportedSignals      `json:"supported_signals"`
    DataCollected        DataCollected         `json:"data_collected"`
    IngestionStatusCheck *IngestionStatusCheck `json:"ingestion_status_check,omitempty"`
    Strategy             *T                    `json:"telemetry_collection_strategy"`
}

func (def *ServiceDefinition[T]) PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string) {
    for i := range def.Assets.Dashboards {
        dashboardId := def.Assets.Dashboards[i].Id
        url := "/dashboard/" + GetCloudIntegrationDashboardID(cloudProvider, svcId, dashboardId)
        def.Assets.Dashboards[i].Url = url
    }
}

func (def *ServiceDefinition[T]) GetId() string {
    return def.Id
}

func (def *ServiceDefinition[T]) Validate() error {
    seenDashboardIds := map[string]interface{}{}

    if def.Strategy == nil {
        return errors.NewInternalf(errors.CodeInternal, "telemetry_collection_strategy is required")
    }

    for _, dd := range def.Assets.Dashboards {
        if _, seen := seenDashboardIds[dd.Id]; seen {
            return errors.NewInternalf(errors.CodeInternal, "multiple dashboards found with id %s", dd.Id)
        }
        seenDashboardIds[dd.Id] = nil
    }

    return nil
}

type DefinitionMetadata struct {
    Id    string `json:"id"`
    Title string `json:"title"`
    Icon  string `json:"icon"`
}

type Definition interface {
    GetId() string
    Validate() error
    PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string)
}

type IngestionStatusCheck struct {
    Metrics []*IngestionStatusCheckCategory `json:"metrics"`
    Logs    []*IngestionStatusCheckCategory `json:"logs"`
}

type IngestionStatusCheckCategory struct {
    Category    string                           `json:"category"`
    DisplayName string                           `json:"display_name"`
    Checks      []*IngestionStatusCheckAttribute `json:"checks"`
}

type IngestionStatusCheckAttribute struct {
    Key        string                                  `json:"key"` // search key (metric name or log message)
    Attributes []*IngestionStatusCheckAttributeFilter  `json:"attributes"`
}

type IngestionStatusCheckAttributeFilter struct {
    Name     string `json:"name"`
    Operator string `json:"operator"`
    Value    string `json:"value"`
}
type Assets struct {
    Dashboards []Dashboard `json:"dashboards"`
}

type SupportedSignals struct {
    Logs    bool `json:"logs"`
    Metrics bool `json:"metrics"`
}

type DataCollected struct {
    Logs    []CollectedLogAttribute `json:"logs"`
    Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
    Name string `json:"name"`
    Path string `json:"path"`
    Type string `json:"type"`
}

type CollectedMetric struct {
    Name        string `json:"name"`
    Type        string `json:"type"`
    Unit        string `json:"unit"`
    Description string `json:"description"`
}

type AWSCollectionStrategy struct {
    Metrics   *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
    Logs      *AWSLogsStrategy    `json:"aws_logs,omitempty"`
    S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}

type AWSMetricsStrategy struct {
    // to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
    StreamFilters []struct {
        // json tags here are in the shape expected by AWS API as detailed at
        // https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
        Namespace   string   `json:"Namespace"`
        MetricNames []string `json:"MetricNames,omitempty"`
    } `json:"cloudwatch_metric_stream_filters"`
}

type AWSLogsStrategy struct {
    Subscriptions []struct {
        // subscribe to all logs groups with specified prefix.
        // eg: `/aws/rds/`
        LogGroupNamePrefix string `json:"log_group_name_prefix"`

        // https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
        // "" implies no filtering is required.
        FilterPattern string `json:"filter_pattern"`
    } `json:"cloudwatch_logs_subscriptions"`
}
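
// Editor's note: an illustrative example (not part of this commit) of the JSON an
// AWSCollectionStrategy marshals to, matching the struct tags above; the namespace,
// log-group prefix, and filter values are hypothetical:
//
//     {
//       "aws_metrics": {
//         "cloudwatch_metric_stream_filters": [{"Namespace": "AWS/RDS"}]
//       },
//       "aws_logs": {
//         "cloudwatch_logs_subscriptions": [
//           {"log_group_name_prefix": "/aws/rds/", "filter_pattern": ""}
//         ]
//       }
//     }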

type Dashboard struct {
    Id          string                                `json:"id"`
    Url         string                                `json:"url"`
    Title       string                                `json:"title"`
    Description string                                `json:"description"`
    Image       string                                `json:"image"`
    Definition  *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}

func GetCloudIntegrationDashboardID(cloudProvider valuer.String, svcId, dashboardId string) string {
    return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}

func GetDashboardsFromAssets(
    svcId string,
    orgID valuer.UUID,
    cloudProvider CloudProviderType,
    createdAt *time.Time,
    assets Assets,
) []*dashboardtypes.Dashboard {
    dashboards := make([]*dashboardtypes.Dashboard, 0)

    for _, d := range assets.Dashboards {
        author := fmt.Sprintf("%s-integration", cloudProvider)
        dashboards = append(dashboards, &dashboardtypes.Dashboard{
            ID:     GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
            Locked: true,
            OrgID:  orgID,
            Data:   *d.Definition,
            TimeAuditable: types.TimeAuditable{
                CreatedAt: *createdAt,
                UpdatedAt: *createdAt,
            },
            UserAuditable: types.UserAuditable{
                CreatedBy: author,
                UpdatedBy: author,
            },
        })
    }

    return dashboards
}
@@ -1,106 +0,0 @@
package integrationtypes

import (
    "database/sql/driver"
    "encoding/json"
    "strings"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/uptrace/bun"
)

// CloudProviderType type alias
type CloudProviderType = valuer.String

var (
    CloudProviderAWS = valuer.NewString("aws")
)

var (
    CodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
)

func NewCloudProvider(provider string) (CloudProviderType, error) {
    switch provider {
    case CloudProviderAWS.String():
        return valuer.NewString(provider), nil
    default:
        return CloudProviderType{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
    }
}

var (
    AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
)

var IntegrationUserEmails = []valuer.Email{
    AWSIntegrationUserEmail,
}

func IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
    parts := strings.SplitN(dashboardUuid, "--", 4)
    if len(parts) != 4 {
        return false
    }

    return parts[0] == "cloud-integration"
}

func GetCloudProviderFromDashboardID(dashboardUuid string) (CloudProviderType, error) {
    parts := strings.SplitN(dashboardUuid, "--", 4)
    if len(parts) != 4 {
        return valuer.String{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid dashboard uuid: %s", dashboardUuid)
    }

    providerStr := parts[1]

    cloudProvider, err := NewCloudProvider(providerStr)
    if err != nil {
        return CloudProviderType{}, err
    }

    return cloudProvider, nil
}
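
// Editor's note: an illustrative round trip (not part of this commit) of the dashboard
// ID helpers defined above, assuming valuer.String formats as its underlying value;
// the "rds" service ID and "overview" dashboard ID are hypothetical:
//
//     id := GetCloudIntegrationDashboardID(CloudProviderAWS, "rds", "overview")
//     // id == "cloud-integration--aws--rds--overview"
//     _ = IsCloudIntegrationDashboardUuid(id)            // true
//     provider, _ := GetCloudProviderFromDashboardID(id) // CloudProviderAWS
//     _ = provider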

// --------------------------------------------------------------------------
// Normal integration uses just the installed_integration table
// --------------------------------------------------------------------------

type InstalledIntegration struct {
    bun.BaseModel `bun:"table:installed_integration"`

    types.Identifiable
    Type        string                     `json:"type" bun:"type,type:text,unique:org_id_type"`
    Config      InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
    InstalledAt time.Time                  `json:"installed_at" bun:"installed_at,default:current_timestamp"`
    OrgID       string                     `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
}

type InstalledIntegrationConfig map[string]interface{}

// Scan scans data from db
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
    var data []byte
    switch v := src.(type) {
    case []byte:
        data = v
    case string:
        data = []byte(v)
    default:
        return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
    }

    return json.Unmarshal(data, c)
}

// Value serializes data to db
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
    filterSetJson, err := json.Marshal(c)
    if err != nil {
        return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
    }
    return filterSetJson, nil
}
@@ -3,7 +3,6 @@ package roletypes
import (
    "encoding/json"
    "regexp"
    "slices"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
@@ -84,16 +83,6 @@ type PatchableRole struct {
    Description string `json:"description" required:"true"`
}

type PatchableObjects struct {
    Additions []*authtypes.Object `json:"additions" required:"true"`
    Deletions []*authtypes.Object `json:"deletions" required:"true"`
}

type GettableResources struct {
    Resources []*authtypes.Resource                   `json:"resources" required:"true"`
    Relations map[authtypes.Type][]authtypes.Relation `json:"relations" required:"true"`
}

func NewStorableRoleFromRole(role *Role) *StorableRole {
    return &StorableRole{
        Identifiable: role.Identifiable,
@@ -142,15 +131,8 @@ func NewManagedRoles(orgID valuer.UUID) []*Role {

}

func NewGettableResources(resources []*authtypes.Resource) *GettableResources {
    return &GettableResources{
        Resources: resources,
        Relations: authtypes.TypeableRelations,
    }
}

func (role *Role) PatchMetadata(description string) error {
    err := role.CanEditDelete()
    err := role.ErrIfManaged()
    if err != nil {
        return err
    }
@@ -160,32 +142,7 @@ func (role *Role) PatchMetadata(description string) error {
    return nil
}

func (role *Role) NewPatchableObjects(additions []*authtypes.Object, deletions []*authtypes.Object, relation authtypes.Relation) (*PatchableObjects, error) {
    err := role.CanEditDelete()
    if err != nil {
        return nil, err
    }

    if len(additions) == 0 && len(deletions) == 0 {
        return nil, errors.New(errors.TypeInvalidInput, ErrCodeRoleEmptyPatch, "empty object patch request received, at least one of additions or deletions must be present")
    }

    for _, object := range additions {
        if !slices.Contains(authtypes.TypeableRelations[object.Resource.Type], relation) {
            return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
        }
    }

    for _, object := range deletions {
        if !slices.Contains(authtypes.TypeableRelations[object.Resource.Type], relation) {
            return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
        }
    }

    return &PatchableObjects{Additions: additions, Deletions: deletions}, nil
}

func (role *Role) CanEditDelete() error {
func (role *Role) ErrIfManaged() error {
    if role.Type == RoleTypeManaged {
        return errors.Newf(errors.TypeInvalidInput, ErrCodeRoleInvalidInput, "cannot edit/delete managed role: %s", role.Name)
    }

@@ -355,6 +355,10 @@ func (r *PostableRule) validate() error {
        errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
    }

    if r.Version != "v5" {
        errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
    }

    if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
        errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
    }

@@ -108,6 +108,7 @@ func TestParseIntoRule(t *testing.T) {
            "ruleType": "threshold_rule",
            "evalWindow": "5m",
            "frequency": "1m",
            "version": "v5",
            "condition": {
                "compositeQuery": {
                    "queryType": "builder",
@@ -150,6 +151,7 @@ func TestParseIntoRule(t *testing.T) {
            content: []byte(`{
                "alert": "DefaultsRule",
                "ruleType": "threshold_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -187,6 +189,7 @@ func TestParseIntoRule(t *testing.T) {
            initRule: PostableRule{},
            content: []byte(`{
                "alert": "PromQLRule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "promql",
@@ -256,6 +259,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
            content: []byte(`{
                "alert": "SeverityLabelTest",
                "schemaVersion": "v1",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -344,6 +348,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
            content: []byte(`{
                "alert": "NoLabelsTest",
                "schemaVersion": "v1",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -384,6 +389,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
            content: []byte(`{
                "alert": "OverwriteTest",
                "schemaVersion": "v1",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -474,6 +480,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
            content: []byte(`{
                "alert": "V2Test",
                "schemaVersion": "v2",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -517,6 +524,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
            initRule: PostableRule{},
            content: []byte(`{
                "alert": "DefaultSchemaTest",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -569,6 +577,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
    content := []byte(`{
        "alert": "TestThresholds",
        "version": "v5",
        "condition": {
            "compositeQuery": {
                "queryType": "builder",
@@ -639,6 +648,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
        "schemaVersion": "v2",
        "alert": "MultiThresholdAlert",
        "ruleType": "threshold_rule",
        "version": "v5",
        "condition": {
            "compositeQuery": {
                "queryType": "builder",
@@ -732,6 +742,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyBelowTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -766,6 +777,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyBelowTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -799,6 +811,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyAboveTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -833,6 +846,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyAboveTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -866,6 +880,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyBelowAllTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -901,6 +916,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyBelowAllTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -935,6 +951,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "AnomalyOutOfBoundsTest",
                "ruleType": "anomaly_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -969,6 +986,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "ThresholdTest",
                "ruleType": "threshold_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
@@ -1003,6 +1021,7 @@ func TestAnomalyNegationEval(t *testing.T) {
            ruleJSON: []byte(`{
                "alert": "ThresholdTest",
                "ruleType": "threshold_rule",
                "version": "v5",
                "condition": {
                    "compositeQuery": {
                        "queryType": "builder",
tests/integration/fixtures/gatewayutils.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import json
from typing import Optional

import requests
from wiremock.client import WireMockMatchers

from fixtures import types

TEST_KEY_ID = "test-key-id-001"
TEST_LIMIT_ID = "test-limit-id-001"


def common_gateway_headers():
    """Common headers expected on requests forwarded to the gateway."""
    return {
        "X-Signoz-Cloud-Api-Key": {WireMockMatchers.EQUAL_TO: "secret-key"},
        "X-Consumer-Username": {
            WireMockMatchers.EQUAL_TO: "lid:00000000-0000-0000-0000-000000000000"
        },
        "X-Consumer-Groups": {WireMockMatchers.EQUAL_TO: "ns:default"},
    }


def get_gateway_requests(signoz: types.SigNoz, method: str, url: str) -> list:
    """Return captured requests from the WireMock gateway journal.

    Returns an empty list when no requests match or the admin API is unreachable.
    """
    response = requests.post(
        signoz.gateway.host_configs["8080"].get("/__admin/requests/find"),
        json={"method": method, "url": url},
        timeout=5,
    )
    return response.json().get("requests", [])


def get_latest_gateway_request_body(
    signoz: types.SigNoz, method: str, url: str
) -> Optional[dict]:
    """Return the parsed JSON body of the most recent matching gateway request.

    WireMock returns requests in reverse chronological order, so ``matched[0]``
    is always the latest. Returns ``None`` when no matching request is found.
    """
    matched = get_gateway_requests(signoz, method, url)
    if not matched:
        return None
    return json.loads(matched[0]["body"])
Some files were not shown because too many files have changed in this diff.