Mirror of https://github.com/SigNoz/signoz.git (synced 2026-05-05 18:10:31 +01:00)

Compare commits: v0.67.1...pkg/config (24 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | b10a9061ec |  |
|  | fae0581d83 |  |
|  | 9d6928b3e6 |  |
|  | baaffd79de |  |
|  | 59c416c222 |  |
|  | 63a7ae586b |  |
|  | f0cef2d9d5 |  |
|  | 616ada91dd |  |
|  | aab6a1c914 |  |
|  | bc708cd891 |  |
|  | c2a11960c1 |  |
|  | 6d76d56dbd |  |
|  | 1977756591 |  |
|  | 1d9c10a214 |  |
|  | 57a25bf98f |  |
|  | 35c310aa9d |  |
|  | 7c0481de7d |  |
|  | 147cf28024 |  |
|  | 265c67e5bd |  |
|  | efc8c95d59 |  |
|  | 5708079c3c |  |
|  | dbe78e55a9 |  |
|  | a60371fb80 |  |
|  | d5b847c091 |  |
@@ -3,8 +3,36 @@
# Do not modify this file
#

##################### Instrumentation #####################
instrumentation:
  logs:
    level: info
    enabled: false
    processors:
      batch:
        exporter:
          otlp:
            endpoint: localhost:4317
  traces:
    enabled: false
    processors:
      batch:
        exporter:
          otlp:
            endpoint: localhost:4317
  metrics:
    enabled: true
    readers:
      pull:
        exporter:
          prometheus:
            host: "0.0.0.0"
            port: 9090

##################### Web #####################
web:
  # Whether to enable the web frontend
  enabled: true
  # The prefix to serve web on
  prefix: /
  # The directory containing the static build files.
@@ -29,4 +57,14 @@ cache:
    # The password for authenticating with the Redis server, if required.
    password:
    # The Redis database number to use
    db: 0

##################### SQLStore #####################
sqlstore:
  # specifies the SQLStore provider to use.
  provider: sqlite
  # The maximum number of open connections to the database.
  max_open_conns: 100
  sqlite:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db

@@ -130,7 +130,7 @@ services:
      restart_policy:
        condition: on-failure
  query-service:
    image: signoz/query-service:0.67.1
    image: signoz/query-service:0.68.0
    command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
    # ports:
    #   - "6060:6060" # pprof port
@@ -158,7 +158,7 @@ services:
        condition: on-failure
    !!merge <<: *db-depend
  frontend:
    image: signoz/frontend:0.67.1
    image: signoz/frontend:0.68.0
    deploy:
      restart_policy:
        condition: on-failure
@@ -170,7 +170,7 @@ services:
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    image: signoz/signoz-otel-collector:0.111.22
    image: signoz/signoz-otel-collector:0.111.23
    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    user: root # required for reading docker container logs
    volumes:
@@ -202,7 +202,7 @@ services:
      - otel-collector-migrator
      - query-service
  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:0.111.22
    image: signoz/signoz-schema-migrator:0.111.23
    deploy:
      restart_policy:
        condition: on-failure

@@ -57,7 +57,7 @@ services:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.23}
    container_name: otel-migrator
    command:
      - "sync"
@@ -73,7 +73,7 @@ services:
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  otel-collector:
    container_name: signoz-otel-collector
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.23}
    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    # user: root # required for reading docker container logs
    volumes:

@@ -145,7 +145,7 @@ services:
      - --storage.path=/data
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.67.1}
    image: signoz/query-service:${DOCKER_TAG:-0.68.0}
    container_name: signoz-query-service
    command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
    # ports:
@@ -172,7 +172,7 @@ services:
      retries: 3
    !!merge <<: *db-depend
  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.67.1}
    image: signoz/frontend:${DOCKER_TAG:-0.68.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
@@ -183,7 +183,7 @@ services:
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector-migrator-sync:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.23}
    container_name: otel-migrator-sync
    command:
      - "sync"
@@ -197,7 +197,7 @@ services:
      # clickhouse-3:
      #   condition: service_healthy
  otel-collector-migrator-async:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.23}
    container_name: otel-migrator-async
    command:
      - "async"
@@ -213,7 +213,7 @@ services:
      # clickhouse-3:
      #   condition: service_healthy
  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.23}
    container_name: signoz-otel-collector
    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    user: root # required for reading docker container logs

@@ -148,7 +148,7 @@ services:
      - --storage.path=/data
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.67.1}
    image: signoz/query-service:${DOCKER_TAG:-0.68.0}
    container_name: signoz-query-service
    command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
    # ports:
@@ -176,7 +176,7 @@ services:
      retries: 3
    !!merge <<: *db-depend
  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.67.1}
    image: signoz/frontend:${DOCKER_TAG:-0.68.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
@@ -187,7 +187,7 @@ services:
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.23}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
@@ -199,7 +199,7 @@ services:
      # clickhouse-3:
      #   condition: service_healthy
  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.22}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.23}
    container_name: signoz-otel-collector
    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    user: root # required for reading docker container logs

@@ -12,6 +12,7 @@ import (
    "go.signoz.io/signoz/ee/query-service/license"
    "go.signoz.io/signoz/ee/query-service/usage"
    baseapp "go.signoz.io/signoz/pkg/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
    "go.signoz.io/signoz/pkg/query-service/app/integrations"
    "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
    "go.signoz.io/signoz/pkg/query-service/cache"
@@ -34,6 +35,7 @@ type APIHandlerOptions struct {
    FeatureFlags baseint.FeatureLookup
    LicenseManager *license.Manager
    IntegrationsController *integrations.Controller
    CloudIntegrationsController *cloudintegrations.Controller
    LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
    Cache cache.Cache
    Gateway *httputil.ReverseProxy
@@ -62,6 +64,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
        RuleManager: opts.RulesManager,
        FeatureFlags: opts.FeatureFlags,
        IntegrationsController: opts.IntegrationsController,
        CloudIntegrationsController: opts.CloudIntegrationsController,
        LogsParsingPipelineController: opts.LogsParsingPipelineController,
        Cache: opts.Cache,
        FluxInterval: opts.FluxInterval,

@@ -40,6 +40,7 @@ import (

    "go.signoz.io/signoz/pkg/query-service/agentConf"
    baseapp "go.signoz.io/signoz/pkg/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
    "go.signoz.io/signoz/pkg/query-service/app/dashboards"
    baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
    "go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -63,7 +64,6 @@ import (
const AppDbEngine = "sqlite"

type ServerOptions struct {
    SigNoz *signoz.SigNoz
    PromConfigPath string
    SkipTopLvlOpsPath string
    HTTPHostPort string
@@ -81,7 +81,6 @@ type ServerOptions struct {
    GatewayUrl string
    UseLogsNewSchema bool
    UseTraceNewSchema bool
    SkipWebFrontend bool
}

// Server runs HTTP api service
@@ -111,26 +110,15 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
}

// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {

    modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
func NewServer(serverOptions *ServerOptions, config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
    modelDao, err := dao.InitDao(signoz.SQLStore.SQLxDB())
    if err != nil {
        return nil, err
    }

    baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

    if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
        return nil, err
    }

    localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

    if err != nil {
        return nil, err
    }

    localDB.SetMaxOpenConns(10)
    baseexplorer.InitWithDB(signoz.SQLStore.SQLxDB())
    preferences.InitDB(signoz.SQLStore.SQLxDB())
    dashboards.InitDB(signoz.SQLStore.SQLxDB())

    gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
    if err != nil {
@@ -138,7 +126,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    }

    // initiate license manager
    lm, err := licensepkg.StartManager("sqlite", localDB)
    lm, err := licensepkg.StartManager("sqlite", signoz.SQLStore.SQLxDB())
    if err != nil {
        return nil, err
    }
@@ -152,7 +140,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    if storage == "clickhouse" {
        zap.L().Info("Using ClickHouse as datastore ...")
        qb := db.NewDataConnector(
            localDB,
            signoz.SQLStore.SQLxDB(),
            serverOptions.PromConfigPath,
            lm,
            serverOptions.MaxIdleConns,
@@ -188,7 +176,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    rm, err := makeRulesManager(serverOptions.PromConfigPath,
        baseconst.GetAlertManagerApiPrefix(),
        serverOptions.RuleRepoURL,
        localDB,
        signoz.SQLStore.SQLxDB(),
        reader,
        c,
        serverOptions.DisableRules,
@@ -209,21 +197,25 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    }()

    // initiate opamp
    _, err = opAmpModel.InitDB(localDB)
    if err != nil {
        return nil, err
    }
    _ = opAmpModel.InitDB(signoz.SQLStore.SQLxDB())

    integrationsController, err := integrations.NewController(localDB)
    integrationsController, err := integrations.NewController(signoz.SQLStore.SQLxDB())
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't create integrations controller: %w", err,
        )
    }

    cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore.SQLxDB())
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't create cloud provider integrations controller: %w", err,
        )
    }

    // ingestion pipelines manager
    logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
        localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
        signoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
    )
    if err != nil {
        return nil, err
@@ -231,8 +223,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

    // initiate agent config handler
    agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
        DB: localDB,
        DBEngine: AppDbEngine,
        DB: signoz.SQLStore.SQLxDB(),
        AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
    })
    if err != nil {
@@ -271,6 +262,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        FeatureFlags: lm,
        LicenseManager: lm,
        IntegrationsController: integrationsController,
        CloudIntegrationsController: cloudIntegrationsController,
        LogsParsingPipelineController: logParsingPipelineController,
        Cache: c,
        FluxInterval: fluxInterval,
@@ -293,7 +285,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        usageManager: usageManager,
    }

    httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
    httpServer, err := s.createPublicServer(apiHandler, signoz.Web)

    if err != nil {
        return nil, err
@@ -342,7 +334,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
    }, nil
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*http.Server, error) {
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {

    r := baseapp.NewRouter()

@@ -370,6 +362,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*
    apiHandler.RegisterRoutes(r, am)
    apiHandler.RegisterLogsRoutes(r, am)
    apiHandler.RegisterIntegrationRoutes(r, am)
    apiHandler.RegisterCloudIntegrationsRoutes(r, am)
    apiHandler.RegisterQueryRangeV3Routes(r, am)
    apiHandler.RegisterInfraMetricsRoutes(r, am)
    apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -386,11 +379,9 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (*

    handler = handlers.CompressHandler(handler)

    if !s.serverOptions.SkipWebFrontend {
        err := web.AddToRouter(r)
        if err != nil {
            return nil, err
        }
    err := web.AddToRouter(r)
    if err != nil {
        return nil, err
    }

    return &http.Server{
@@ -502,32 +493,29 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
        zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
    }

    signozMetricsUsed := false
    signozLogsUsed := false
    signozTracesUsed := false
    if postData != nil {
        queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)

        if postData.CompositeQuery != nil {
            data["queryType"] = postData.CompositeQuery.QueryType
            data["panelType"] = postData.CompositeQuery.PanelType

            signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
        }
    }

    if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
        if signozMetricsUsed {
    if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
        if queryInfoResult.MetricsUsed {
            telemetry.GetInstance().AddActiveMetricsUser()
        }
        if signozLogsUsed {
        if queryInfoResult.LogsUsed {
            telemetry.GetInstance().AddActiveLogsUser()
        }
        if signozTracesUsed {
        if queryInfoResult.TracesUsed {
            telemetry.GetInstance().AddActiveTracesUser()
        }
        data["metricsUsed"] = signozMetricsUsed
        data["logsUsed"] = signozLogsUsed
        data["tracesUsed"] = signozTracesUsed
        data["metricsUsed"] = queryInfoResult.MetricsUsed
        data["logsUsed"] = queryInfoResult.LogsUsed
        data["tracesUsed"] = queryInfoResult.TracesUsed
        data["filterApplied"] = queryInfoResult.FilterApplied
        data["groupByApplied"] = queryInfoResult.GroupByApplied
        data["aggregateOperator"] = queryInfoResult.AggregateOperator
        data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
        data["numberOfQueries"] = queryInfoResult.NumberOfQueries
        data["queryType"] = queryInfoResult.QueryType
        data["panelType"] = queryInfoResult.PanelType

        userEmail, err := baseauth.GetEmailFromJwt(r.Context())
        if err == nil {
            // switch case to set data["screen"] based on the referrer

@@ -1,18 +1,11 @@
package dao

import (
    "fmt"

    "github.com/jmoiron/sqlx"
    "go.signoz.io/signoz/ee/query-service/dao/sqlite"
)

func InitDao(engine, path string) (ModelDao, error) {

    switch engine {
    case "sqlite":
        return sqlite.InitDB(path)
    default:
        return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
    }
func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
    return sqlite.InitDB(inputDB)

}

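For readers following the hunk above: `InitDao` no longer takes an engine name and a file path; it now receives an already-opened `*sqlx.DB`. A minimal sketch of the new call shape — the standalone `sqlx.Open` call and file path are illustrative only; in this branch the handle actually comes from `signoz.SQLStore.SQLxDB()`:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver

	"go.signoz.io/signoz/ee/query-service/dao"
)

func main() {
	// Illustrative: open a SQLite handle directly instead of via the SQLStore.
	db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db")
	if err != nil {
		log.Fatal(err)
	}

	// The DAO is now constructed from the injected *sqlx.DB.
	modelDao, err := dao.InitDao(db)
	if err != nil {
		log.Fatal(err)
	}
	_ = modelDao
}
```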
@@ -65,8 +65,8 @@ func columnExists(db *sqlx.DB, tableName, columnName string) bool {
}

// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
    dao, err := basedsql.InitDB(dataSourceName)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
    dao, err := basedsql.InitDB(inputDB)
    if err != nil {
        return nil, err
    }

@@ -3,86 +3,28 @@ package main
import (
    "context"
    "flag"
    "log"
    "fmt"
    "os"
    "os/signal"
    "strconv"
    "syscall"
    "time"

    "go.opentelemetry.io/collector/confmap"
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
    "go.signoz.io/signoz/ee/query-service/app"
    "go.signoz.io/signoz/pkg/config"
    signozconfig "go.signoz.io/signoz/pkg/config"
    "go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
    "go.signoz.io/signoz/pkg/config/envprovider"
    "go.signoz.io/signoz/pkg/instrumentation"
    "go.signoz.io/signoz/pkg/query-service/auth"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/migrate"
    "go.signoz.io/signoz/pkg/query-service/version"
    "go.signoz.io/signoz/pkg/signoz"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    pkgversion "go.signoz.io/signoz/pkg/version"

    prommodel "github.com/prometheus/common/model"

    zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
    zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
    config := zap.NewProductionConfig()
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
    config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
    config.EncoderConfig.TimeKey = "timestamp"
    config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

    otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
    consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
    defaultLogLevel := zapcore.InfoLevel

    res := resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.ServiceNameKey.String("query-service"),
    )

    core := zapcore.NewTee(
        zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
    )

    if enableQueryServiceLogOTLPExport {
        ctx, cancel := context.WithTimeout(ctx, time.Second*30)
        defer cancel()
        conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("failed to establish connection: %v", err)
        } else {
            logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
            if err != nil {
                logExportBatchSizeInt = 512
            }
            ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
                BatchSize: logExportBatchSizeInt,
                ResourceSchema: semconv.SchemaURL,
                Resource: res,
            }))
            core = zapcore.NewTee(
                zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
                zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
            )
        }
    }
    logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))

    return logger
}

func init() {
    prommodel.NameValidationScheme = prommodel.UTF8Validation
}
@@ -100,7 +42,6 @@ func main() {
    var useLogsNewSchema bool
    var useTraceNewSchema bool
    var cacheConfigPath, fluxInterval string
    var enableQueryServiceLogOTLPExport bool
    var preferSpanMetrics bool

    var maxIdleConns int
@@ -108,7 +49,6 @@ func main() {
    var dialTimeout time.Duration
    var gatewayUrl string
    var useLicensesV3 bool
    var skipWebFrontend bool

    flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
    flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
@@ -122,39 +62,39 @@ func main() {
    flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
    flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
    flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
    flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
    flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
    flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
    flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
    flag.BoolVar(&skipWebFrontend, "skip-web-frontend", false, "skip web frontend")
    flag.Parse()

    loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)

    zap.ReplaceGlobals(loggerMgr)
    defer loggerMgr.Sync() // flushes buffer, if any

    version.PrintVersion()

    config, err := signozconfig.New(context.Background(), signozconfig.ProviderSettings{
        ResolverSettings: confmap.ResolverSettings{
            URIs: []string{"signozenv:"},
            ProviderFactories: []confmap.ProviderFactory{
                signozenvprovider.NewFactory(),
            },
    config, err := signoz.NewConfig(context.Background(), signozconfig.ResolverConfig{
        Uris: []string{"env:"},
        ProviderFactories: []config.ProviderFactory{
            envprovider.NewFactory(),
        },
    })
    if err != nil {
        zap.L().Fatal("Failed to create config", zap.Error(err))
    }

    signoz, err := signoz.New(config, skipWebFrontend)
    instrumentation, err := instrumentation.New(context.Background(), pkgversion.Build{}, config.Instrumentation)
    if err != nil {
        fmt.Println(err, err.Error())
        zap.L().Fatal("Failed to create instrumentation", zap.Error(err))
    }
    defer instrumentation.Stop(context.Background())

    zap.ReplaceGlobals(instrumentation.Logger())
    defer instrumentation.Logger().Sync() // flushes buffer, if any

    signoz, err := signoz.New(context.Background(), instrumentation, config, signoz.NewProviderFactories())
    if err != nil {
        zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
    }

    serverOptions := &app.ServerOptions{
        SigNoz: signoz,
        HTTPHostPort: baseconst.HTTPHostPort,
        PromConfigPath: promConfigPath,
        SkipTopLvlOpsPath: skipTopLvlOpsPath,
@@ -171,7 +111,6 @@ func main() {
        GatewayUrl: gatewayUrl,
        UseLogsNewSchema: useLogsNewSchema,
        UseTraceNewSchema: useTraceNewSchema,
        SkipWebFrontend: skipWebFrontend,
    }

    // Read the jwt secret key
@@ -183,13 +122,7 @@ func main() {
        zap.L().Info("JWT secret key set successfully.")
    }

    if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
        zap.L().Error("Failed to migrate", zap.Error(err))
    } else {
        zap.L().Info("Migration successful")
    }

    server, err := app.NewServer(serverOptions)
    server, err := app.NewServer(serverOptions, config, signoz)
    if err != nil {
        zap.L().Fatal("Failed to create server", zap.Error(err))
    }

@@ -1,6 +1,7 @@
import './LogsExplorerList.style.scss';

import { Card } from 'antd';
import logEvent from 'api/common/logEvent';
import LogDetail from 'components/LogDetail';
import { VIEW_TYPES } from 'components/LogDetail/constants';
// components
@@ -18,7 +19,7 @@ import { FontSize } from 'container/OptionsMenu/types';
import { useActiveLog } from 'hooks/logs/useActiveLog';
import { useCopyLogLink } from 'hooks/logs/useCopyLogLink';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { memo, useCallback, useMemo, useRef } from 'react';
import { memo, useCallback, useEffect, useMemo, useRef } from 'react';
import { Virtuoso, VirtuosoHandle } from 'react-virtuoso';
// interfaces
import { ILog } from 'types/api/logs/log';
@@ -71,7 +72,13 @@ function LogsExplorerList({
    () => convertKeysToColumnFields(options.selectColumns),
    [options],
  );

  useEffect(() => {
    if (!isLoading && !isFetching && !isError && logs.length !== 0) {
      logEvent('Logs Explorer: Data present', {
        panelType: 'LIST',
      });
    }
  }, [isLoading, isFetching, isError, logs.length]);
  const getItemContent = useCallback(
    (_: number, log: ILog): JSX.Element => {
      if (options.format === 'raw') {

@@ -1,5 +1,6 @@
import './TimeSeriesView.styles.scss';

import logEvent from 'api/common/logEvent';
import Uplot from 'components/Uplot';
import { QueryParams } from 'constants/query';
import EmptyLogsSearch from 'container/EmptyLogsSearch/EmptyLogsSearch';
@@ -120,6 +121,20 @@ function TimeSeriesView({
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  useEffect(() => {
    if (chartData[0] && chartData[0]?.length !== 0 && !isLoading && !isError) {
      if (dataSource === DataSource.TRACES) {
        logEvent('Traces Explorer: Data present', {
          panelType: 'TIME_SERIES',
        });
      } else if (dataSource === DataSource.LOGS) {
        logEvent('Logs Explorer: Data present', {
          panelType: 'TIME_SERIES',
        });
      }
    }
  }, [isLoading, isError, chartData, dataSource]);

  const { timezone } = useTimezone();

  const chartOptions = getUPlotChartOptions({

@@ -1,3 +1,4 @@
import logEvent from 'api/common/logEvent';
import { ResizeTable } from 'components/ResizeTable';
import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { LOCALSTORAGE } from 'constants/localStorage';
@@ -18,7 +19,7 @@ import { getDraggedColumns } from 'hooks/useDragColumns/utils';
import useUrlQueryData from 'hooks/useUrlQueryData';
import { RowData } from 'lib/query/createTableColumnsFromQuery';
import { useTimezone } from 'providers/Timezone';
import { memo, useCallback, useMemo } from 'react';
import { memo, useCallback, useEffect, useMemo } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { DataSource } from 'types/common/queryBuilder';
@@ -145,12 +146,24 @@ function ListView({ isFilterApplied }: ListViewProps): JSX.Element {
    [columns, onDragColumns],
  );

  const isDataPresent =
  const isDataAbsent =
    !isLoading &&
    !isFetching &&
    !isError &&
    transformedQueryTableData.length === 0;

  useEffect(() => {
    if (
      !isLoading &&
      !isFetching &&
      !isError &&
      transformedQueryTableData.length !== 0
    ) {
      logEvent('Traces Explorer: Data present', {
        panelType,
      });
    }
  }, [isLoading, isFetching, isError, transformedQueryTableData, panelType]);
  return (
    <Container>
      {transformedQueryTableData.length !== 0 && (
@@ -168,11 +181,11 @@ function ListView({ isFilterApplied }: ListViewProps): JSX.Element {
        <TracesLoading />
      )}

      {isDataPresent && !isFilterApplied && (
      {isDataAbsent && !isFilterApplied && (
        <NoLogs dataSource={DataSource.TRACES} />
      )}

      {isDataPresent && isFilterApplied && (
      {isDataAbsent && isFilterApplied && (
        <EmptyLogsSearch dataSource={DataSource.TRACES} panelType="LIST" />
      )}

@@ -1,4 +1,5 @@
import { Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import { ResizeTable } from 'components/ResizeTable';
import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { QueryParams } from 'constants/query';
@@ -10,7 +11,7 @@ import { useGetQueryRange } from 'hooks/queryBuilder/useGetQueryRange';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { Pagination } from 'hooks/queryPagination';
import useUrlQueryData from 'hooks/useUrlQueryData';
import { memo, useMemo } from 'react';
import { memo, useEffect, useMemo } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { DataSource } from 'types/common/queryBuilder';
@@ -72,6 +73,14 @@ function TracesView({ isFilterApplied }: TracesViewProps): JSX.Element {
    [responseData],
  );

  useEffect(() => {
    if (!isLoading && !isFetching && !isError && (tableData || []).length !== 0) {
      logEvent('Traces Explorer: Data present', {
        panelType: 'TRACE',
      });
    }
  }, [isLoading, isFetching, isError, panelType, tableData]);

  return (
    <Container>
      {(tableData || []).length !== 0 && (

@@ -572,29 +572,6 @@ export const createTableColumnsFromQuery: CreateTableDataFromQuery = ({
    a.queryName < b.queryName ? -1 : 1,
  );

  // the reason we need this is because the filling of values in rows doesn't account for mismatch enteries
  // in the response. Example : Series A -> [label1, label2] and Series B -> [label2,label1] this isn't accounted for
  sortedQueryTableData.forEach((q) => {
    q.series?.forEach((s) => {
      s.labelsArray?.sort((a, b) =>
        Object.keys(a)[0] < Object.keys(b)[0] ? -1 : 1,
      );
    });
    q.series?.sort((a, b) => {
      let labelA = '';
      let labelB = '';
      a.labelsArray?.forEach((lab) => {
        labelA += Object.values(lab)[0];
      });

      b.labelsArray?.forEach((lab) => {
        labelB += Object.values(lab)[0];
      });

      return labelA < labelB ? -1 : 1;
    });
  });

  const dynamicColumns = getDynamicColumns(sortedQueryTableData, query);

  const { filledDynamicColumns, rowsLength } = fillColumnsData(

@@ -11,7 +11,7 @@ import logEvent from 'api/common/logEvent';
import { getMs } from 'container/Trace/Filters/Panel/PanelBody/Duration/util';
import { useGetCompositeQueryParam } from 'hooks/queryBuilder/useGetCompositeQueryParam';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { isArray, isEqual } from 'lodash-es';
import { isArray, isEmpty, isEqual } from 'lodash-es';
import {
  Dispatch,
  SetStateAction,
@@ -198,7 +198,7 @@ export function Filter(props: FilterProps): JSX.Element {
      })),
    },
  };
  if (selectedFilters) {
  if (!isEmpty(selectedFilters)) {
    logEvent('Traces Explorer: Sidebar filter used', {
      selectedFilters,
    });

go.mod (22)
@@ -9,8 +9,6 @@ require (
    github.com/DATA-DOG/go-sqlmock v1.5.2
    github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
    github.com/SigNoz/signoz-otel-collector v0.111.16
    github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
    github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
    github.com/antonmedv/expr v1.15.3
    github.com/auth0/go-jwt-middleware v1.0.1
    github.com/cespare/xxhash/v2 v2.3.0
@@ -20,6 +18,7 @@ require (
    github.com/go-kit/log v0.2.1
    github.com/go-redis/redis/v8 v8.11.5
    github.com/go-redis/redismock/v8 v8.11.5
    github.com/go-viper/mapstructure/v2 v2.1.0
    github.com/golang-jwt/jwt v3.2.2+incompatible
    github.com/google/uuid v1.6.0
    github.com/gorilla/handlers v1.5.1
@@ -29,8 +28,9 @@ require (
    github.com/jmoiron/sqlx v1.3.4
    github.com/json-iterator/go v1.1.12
    github.com/knadh/koanf v1.5.0
    github.com/knadh/koanf/v2 v2.1.1
    github.com/mailru/easyjson v0.7.7
    github.com/mattn/go-sqlite3 v2.0.3+incompatible
    github.com/mattn/go-sqlite3 v1.14.24
    github.com/oklog/oklog v0.3.2
    github.com/open-telemetry/opamp-go v0.5.0
    github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0
@@ -48,6 +48,8 @@ require (
    github.com/soheilhy/cmux v0.1.5
    github.com/srikanthccv/ClickHouse-go-mock v0.9.0
    github.com/stretchr/testify v1.9.0
    github.com/uptrace/bun v1.2.8
    github.com/uptrace/bun/dialect/sqlitedialect v1.2.8
    go.opentelemetry.io/collector/confmap v1.17.0
    go.opentelemetry.io/collector/pdata v1.17.0
    go.opentelemetry.io/collector/processor v0.111.0
@@ -62,10 +64,9 @@ require (
    go.uber.org/zap v1.27.0
    golang.org/x/crypto v0.31.0
    golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
    golang.org/x/net v0.29.0
    golang.org/x/net v0.33.0
    golang.org/x/oauth2 v0.23.0
    golang.org/x/text v0.21.0
    google.golang.org/grpc v1.67.1
    google.golang.org/protobuf v1.34.2
    gopkg.in/segmentio/analytics-go.v3 v3.1.0
    gopkg.in/yaml.v2 v2.4.0
@@ -99,6 +100,7 @@ require (
    github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/go-faster/city v1.0.1 // indirect
    github.com/go-faster/errors v0.7.1 // indirect
    github.com/go-jose/go-jose/v4 v4.0.2 // indirect
@@ -106,7 +108,6 @@ require (
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
    github.com/goccy/go-json v0.10.3 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
@@ -120,13 +121,13 @@ require (
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
    github.com/hashicorp/go-version v1.7.0 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jinzhu/inflection v1.0.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/jonboulle/clockwork v0.4.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/jpillora/backoff v1.0.0 // indirect
    github.com/jtolds/gls v4.20.0+incompatible // indirect
    github.com/klauspost/compress v1.17.10 // indirect
    github.com/knadh/koanf/v2 v2.1.1 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/leodido/go-syslog/v4 v4.2.0 // indirect
    github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
@@ -151,6 +152,7 @@ require (
    github.com/prometheus/client_model v0.6.1 // indirect
    github.com/prometheus/common/sigv4 v0.1.0 // indirect
    github.com/prometheus/procfs v0.15.1 // indirect
    github.com/puzpuzpuz/xsync/v3 v3.4.0 // indirect
    github.com/robfig/cron/v3 v3.0.1 // indirect
    github.com/segmentio/asm v1.2.0 // indirect
    github.com/segmentio/backo-go v1.0.1 // indirect
@@ -162,8 +164,11 @@ require (
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/tklauser/go-sysconf v0.3.13 // indirect
    github.com/tklauser/numcpus v0.7.0 // indirect
    github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
    github.com/valyala/fastjson v1.6.4 // indirect
    github.com/vjeantet/grok v1.0.1 // indirect
    github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
    github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
    github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    go.opencensus.io v0.24.0 // indirect
@@ -212,12 +217,13 @@ require (
    go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
    go.opentelemetry.io/proto/otlp v1.3.1 // indirect
    go.uber.org/atomic v1.11.0 // indirect
    golang.org/x/sys v0.28.0 // indirect
    golang.org/x/sys v0.29.0 // indirect
    golang.org/x/time v0.6.0 // indirect
    gonum.org/v1/gonum v0.15.1 // indirect
    google.golang.org/api v0.199.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
    google.golang.org/grpc v1.67.1 // indirect
    k8s.io/client-go v0.31.1 // indirect
    k8s.io/klog/v2 v2.130.1 // indirect
    k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect

go.sum (32)
@@ -70,12 +70,6 @@ github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PU
github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974/go.mod h1:fpiHtiboLJpIE5TtkQfiWx6xtnlA+uWmv+N9opETqKY=
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 h1:G2JzCrqdeOTtAn4tDFZEg5gCAEYVRXcddG3ZlrFMumo=
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974/go.mod h1:YtDal1xBRQfPRNo7iSU3W37RGT0jMW7Rnzk6EON3a4M=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -436,6 +430,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -519,8 +515,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
@@ -661,6 +657,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -740,12 +738,22 @@ github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
github.com/uptrace/bun v1.2.8 h1:HEiLvy9wc7ehU5S02+O6NdV5BLz48lL4REPhTkMX3Dg=
github.com/uptrace/bun v1.2.8/go.mod h1:JBq0uBKsKqNT0Ccce1IAFZY337Wkf08c6F6qlmfOHE8=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.8 h1:Huqw7YhLFTbocbSv8NETYYXqKtwLa6XsciCWtjzWSWU=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.8/go.mod h1:ni7h2uwIc5zPhxgmCMTEbefONc4XsVr/ATfz1Q7d3CE=
github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4=
github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -1002,8 +1010,8 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1086,8 +1094,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=

pkg/cache/config.go (14, vendored)
@@ -4,12 +4,9 @@ import (
    "time"

    go_cache "github.com/patrickmn/go-cache"
    "go.signoz.io/signoz/pkg/confmap"
    "go.signoz.io/signoz/pkg/factory"
)

// Config satisfies the confmap.Config interface
var _ confmap.Config = (*Config)(nil)

type Memory struct {
    TTL time.Duration `mapstructure:"ttl"`
    CleanupInterval time.Duration `mapstructure:"cleanupInterval"`
@@ -28,7 +25,11 @@ type Config struct {
    Redis Redis `mapstructure:"redis"`
}

func (c *Config) NewWithDefaults() confmap.Config {
func NewConfigFactory() factory.ConfigFactory {
    return factory.NewConfigFactory(factory.MustNewName("cache"), newConfig)
}

func newConfig() factory.Config {
    return &Config{
        Provider: "memory",
        Memory: Memory{
@@ -42,8 +43,9 @@ func (c *Config) NewWithDefaults() confmap.Config {
            DB: 0,
        },
    }

}

func (c *Config) Validate() error {
func (c Config) Validate() error {
    return nil
}

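The hunks above replace the old `NewWithDefaults`/`confmap.Config` hook with the new `factory` package: a component now exposes a `ConfigFactory` built from a name plus a default-constructor, and `Validate` moves to a value receiver. A sketch of the same pattern applied to a hypothetical component named "example" — the package, struct field, and name are invented for illustration; only the `factory` calls already shown in this diff are assumed to exist:

```go
// Hypothetical package following the pattern introduced for pkg/cache.
package exampleconf

import "go.signoz.io/signoz/pkg/factory"

type Config struct {
	// Illustrative setting; not part of the SigNoz codebase.
	Enabled bool `mapstructure:"enabled"`
}

// NewConfigFactory registers the component name together with its defaults.
func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("example"), newConfig)
}

// newConfig returns the default configuration, mirroring newConfig in pkg/cache.
func newConfig() factory.Config {
	return &Config{Enabled: true}
}

// Validate is defined on the value receiver, as in the cache Config above.
func (c Config) Validate() error {
	return nil
}
```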
pkg/cache/memorycache/memory.go (101, new file, vendored)
@@ -0,0 +1,101 @@
package memorycache

import (
    "context"
    "fmt"
    "reflect"
    "time"

    gocache "github.com/patrickmn/go-cache"
    "go.signoz.io/signoz/pkg/cache"
    "go.signoz.io/signoz/pkg/factory"
)

type memory struct {
    cc *gocache.Cache
}

func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
    return factory.NewProviderFactory(factory.MustNewName("memory"), New)
}

func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
    return &memory{cc: gocache.New(config.Memory.TTL, config.Memory.CleanupInterval)}, nil
}

// Connect does nothing
func (c *memory) Connect(_ context.Context) error {
    return nil
}

// Store stores the data in the cache
func (c *memory) Store(_ context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
    // check if the data being passed is a pointer and is not nil
    rv := reflect.ValueOf(data)
    if rv.Kind() != reflect.Pointer || rv.IsNil() {
        return cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
    }

    c.cc.Set(cacheKey, data, ttl)
    return nil
}

// Retrieve retrieves the data from the cache
func (c *memory) Retrieve(_ context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
    // check if the destination being passed is a pointer and is not nil
    dstv := reflect.ValueOf(dest)
    if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
        return cache.RetrieveStatusError, cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
    }

    // check if the destination value is settable
    if !dstv.Elem().CanSet() {
        return cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
    }

    data, found := c.cc.Get(cacheKey)
    if !found {
        return cache.RetrieveStatusKeyMiss, nil
    }

    // check the type compatbility between the src and dest
    srcv := reflect.ValueOf(data)
    if !srcv.Type().AssignableTo(dstv.Type()) {
        return cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
    }

    // set the value to from src to dest
    dstv.Elem().Set(srcv.Elem())
    return cache.RetrieveStatusHit, nil
}

// SetTTL sets the TTL for the cache entry
func (c *memory) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
    item, found := c.cc.Get(cacheKey)
    if !found {
        return
    }
    c.cc.Replace(cacheKey, item, ttl)
}

// Remove removes the cache entry
func (c *memory) Remove(_ context.Context, cacheKey string) {
    c.cc.Delete(cacheKey)
}

// BulkRemove removes the cache entries
func (c *memory) BulkRemove(_ context.Context, cacheKeys []string) {
    for _, cacheKey := range cacheKeys {
        c.cc.Delete(cacheKey)
    }
}

// Close does nothing
func (c *memory) Close(_ context.Context) error {
    return nil
}

// Configuration returns the cache configuration
func (c *memory) Configuration() *cache.Memory {
    return nil
}
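A hedged usage sketch of the new in-memory provider defined above, based on how the test file below constructs it. The entity type is illustrative and assumes `cache.CacheableEntity` is satisfied by `encoding.BinaryMarshaler`/`BinaryUnmarshaler` (suggested by the test's `UnmarshalBinary` method); the factory settings and key names are placeholders:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/memorycache"
	"go.signoz.io/signoz/pkg/factory"
)

// entity is an illustrative cacheable type; the binary (un)marshalers are
// assumed to satisfy cache.CacheableEntity.
type entity struct {
	Key   string
	Value int
}

func (e *entity) MarshalBinary() ([]byte, error) { return json.Marshal(e) }
func (e *entity) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, e) }

func main() {
	ctx := context.Background()

	// Construct the provider through its New function, as the tests do.
	c, err := memorycache.New(ctx, factory.ProviderSettings{}, cache.Config{
		Provider: "memory",
		Memory:   cache.Memory{TTL: 10 * time.Second, CleanupInterval: time.Minute},
	})
	if err != nil {
		panic(err)
	}

	// Store requires a non-nil pointer; the reflection checks above reject values and nil pointers.
	if err := c.Store(ctx, "svc:latency", &entity{Key: "svc:latency", Value: 42}, 10*time.Second); err != nil {
		panic(err)
	}

	// Retrieve copies the cached value into the destination pointer and reports hit/miss status.
	got := new(entity)
	status, err := c.Retrieve(ctx, "svc:latency", got, false)
	fmt.Println(status, got.Value, err)
}
```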
@@ -1,4 +1,4 @@
package memory
package memorycache

import (
    "context"
@@ -7,18 +7,21 @@ import (
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    _cache "go.signoz.io/signoz/pkg/cache"
    "go.signoz.io/signoz/pkg/factory"
)

// TestNew tests the New function
func TestNew(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    assert.NotNil(t, c)
    assert.NotNil(t, c.cc)
    assert.NotNil(t, c.(*memory).cc)
    assert.NoError(t, c.Connect(context.Background()))
}

@@ -53,32 +56,35 @@ func (dce DCacheableEntity) UnmarshalBinary(data []byte) error {
// TestStore tests the Store function
// this should fail because of nil pointer error
func TestStoreWithNilPointer(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    var storeCacheableEntity *CacheableEntity
    assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
}

// this should fail because of no pointer error
func TestStoreWithStruct(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    var storeCacheableEntity CacheableEntity
    assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
}

func TestStoreWithNonNilPointer(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -89,11 +95,12 @@ func TestStoreWithNonNilPointer(t *testing.T) {

// TestRetrieve tests the Retrieve function
func TestRetrieveWithNilPointer(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -109,11 +116,12 @@ func TestRetrieveWithNilPointer(t *testing.T) {
}

func TestRetrieveWitNonPointer(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -129,11 +137,12 @@ func TestRetrieveWitNonPointer(t *testing.T) {
}

func TestRetrieveWithDifferentTypes(t *testing.T) {
    opts := &_cache.Memory{
    opts := _cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -148,11 +157,8 @@ func TestRetrieveWithDifferentTypes(t *testing.T) {
}

func TestRetrieveWithSameTypes(t *testing.T) {
    opts := &_cache.Memory{
        TTL: 10 * time.Second,
        CleanupInterval: 10 * time.Second,
    }
    c := New(opts)
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: _cache.Memory{TTL: 10 * time.Second, CleanupInterval: 10 * time.Second}})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -169,7 +175,8 @@ func TestRetrieveWithSameTypes(t *testing.T) {

// TestSetTTL tests the SetTTL function
func TestSetTTL(t *testing.T) {
    c := New(&_cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second})
    c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: _cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second}})
    require.NoError(t, err)
    storeCacheableEntity := &CacheableEntity{
        Key: "some-random-key",
        Value: 1,
@@ -194,11 +201,11 @@ func TestSetTTL(t *testing.T) {
|
||||
|
||||
// TestRemove tests the Remove function
|
||||
func TestRemove(t *testing.T) {
|
||||
opts := &_cache.Memory{
|
||||
opts := _cache.Memory{
|
||||
TTL: 10 * time.Second,
|
||||
CleanupInterval: 10 * time.Second,
|
||||
}
|
||||
c := New(opts)
|
||||
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
|
||||
storeCacheableEntity := &CacheableEntity{
|
||||
Key: "some-random-key",
|
||||
Value: 1,
|
||||
@@ -216,11 +223,12 @@ func TestRemove(t *testing.T) {
|
||||
|
||||
// TestBulkRemove tests the BulkRemove function
|
||||
func TestBulkRemove(t *testing.T) {
|
||||
opts := &_cache.Memory{
|
||||
opts := _cache.Memory{
|
||||
TTL: 10 * time.Second,
|
||||
CleanupInterval: 10 * time.Second,
|
||||
}
|
||||
c := New(opts)
|
||||
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
|
||||
require.NoError(t, err)
|
||||
storeCacheableEntity := &CacheableEntity{
|
||||
Key: "some-random-key",
|
||||
Value: 1,
|
||||
@@ -244,11 +252,12 @@ func TestBulkRemove(t *testing.T) {
|
||||
|
||||
// TestCache tests the cache
|
||||
func TestCache(t *testing.T) {
|
||||
opts := &_cache.Memory{
|
||||
opts := _cache.Memory{
|
||||
TTL: 10 * time.Second,
|
||||
CleanupInterval: 10 * time.Second,
|
||||
}
|
||||
c := New(opts)
|
||||
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
|
||||
require.NoError(t, err)
|
||||
storeCacheableEntity := &CacheableEntity{
|
||||
Key: "some-random-key",
|
||||
Value: 1,
|
||||
@@ -1,4 +1,4 @@
|
||||
package redis
|
||||
package rediscache
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -8,16 +8,21 @@ import (
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
_cache "go.signoz.io/signoz/pkg/cache"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type cache struct {
|
||||
client *redis.Client
|
||||
opts *_cache.Redis
|
||||
opts _cache.Redis
|
||||
}
|
||||
|
||||
func New(opts *_cache.Redis) *cache {
|
||||
return &cache{opts: opts}
|
||||
func NewFactory() factory.ProviderFactory[_cache.Cache, _cache.Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("redis"), New)
|
||||
}
|
||||
|
||||
func New(ctx context.Context, settings factory.ProviderSettings, config _cache.Config) (_cache.Cache, error) {
|
||||
return &cache{opts: config.Redis}, nil
|
||||
}
|
||||
|
||||
// WithClient creates a new cache with the given client
|
||||
@@ -87,11 +92,6 @@ func (c *cache) GetClient() *redis.Client {
|
||||
return c.client
|
||||
}
|
||||
|
||||
// GetOptions returns the options
|
||||
func (c *cache) GetOptions() *_cache.Redis {
|
||||
return c.opts
|
||||
}
|
||||
|
||||
// GetTTL returns the TTL for the cache entry
|
||||
func (c *cache) GetTTL(ctx context.Context, cacheKey string) time.Duration {
|
||||
ttl, err := c.client.TTL(ctx, cacheKey).Result()
|
||||
@@ -1,4 +1,4 @@
|
||||
package redis
|
||||
package rediscache
|
||||
|
||||
import (
|
||||
"context"
|
||||
pkg/cache/strategy/memory/memory.go (vendored, 96 lines)
@@ -1,96 +0,0 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
go_cache "github.com/patrickmn/go-cache"
|
||||
_cache "go.signoz.io/signoz/pkg/cache"
|
||||
)
|
||||
|
||||
type cache struct {
|
||||
cc *go_cache.Cache
|
||||
}
|
||||
|
||||
func New(opts *_cache.Memory) *cache {
|
||||
return &cache{cc: go_cache.New(opts.TTL, opts.CleanupInterval)}
|
||||
}
|
||||
|
||||
// Connect does nothing
|
||||
func (c *cache) Connect(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Store stores the data in the cache
|
||||
func (c *cache) Store(_ context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
|
||||
// check if the data being passed is a pointer and is not nil
|
||||
rv := reflect.ValueOf(data)
|
||||
if rv.Kind() != reflect.Pointer || rv.IsNil() {
|
||||
return _cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
|
||||
}
|
||||
|
||||
c.cc.Set(cacheKey, data, ttl)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves the data from the cache
|
||||
func (c *cache) Retrieve(_ context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
|
||||
// check if the destination being passed is a pointer and is not nil
|
||||
dstv := reflect.ValueOf(dest)
|
||||
if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
|
||||
return _cache.RetrieveStatusError, _cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
|
||||
}
|
||||
|
||||
// check if the destination value is settable
|
||||
if !dstv.Elem().CanSet() {
|
||||
return _cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
|
||||
}
|
||||
|
||||
data, found := c.cc.Get(cacheKey)
|
||||
if !found {
|
||||
return _cache.RetrieveStatusKeyMiss, nil
|
||||
}
|
||||
|
||||
// check the type compatbility between the src and dest
|
||||
srcv := reflect.ValueOf(data)
|
||||
if !srcv.Type().AssignableTo(dstv.Type()) {
|
||||
return _cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
|
||||
}
|
||||
|
||||
// set the value to from src to dest
|
||||
dstv.Elem().Set(srcv.Elem())
|
||||
return _cache.RetrieveStatusHit, nil
|
||||
}
|
||||
|
||||
// SetTTL sets the TTL for the cache entry
|
||||
func (c *cache) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
|
||||
item, found := c.cc.Get(cacheKey)
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
c.cc.Replace(cacheKey, item, ttl)
|
||||
}
|
||||
|
||||
// Remove removes the cache entry
|
||||
func (c *cache) Remove(_ context.Context, cacheKey string) {
|
||||
c.cc.Delete(cacheKey)
|
||||
}
|
||||
|
||||
// BulkRemove removes the cache entries
|
||||
func (c *cache) BulkRemove(_ context.Context, cacheKeys []string) {
|
||||
for _, cacheKey := range cacheKeys {
|
||||
c.cc.Delete(cacheKey)
|
||||
}
|
||||
}
|
||||
|
||||
// Close does nothing
|
||||
func (c *cache) Close(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Configuration returns the cache configuration
|
||||
func (c *cache) Configuration() *_cache.Memory {
|
||||
return nil
|
||||
}
|
||||
pkg/config/conf.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package config

import (
	"github.com/go-viper/mapstructure/v2"
	"github.com/knadh/koanf/providers/confmap"
	"github.com/knadh/koanf/v2"
)

const (
	KoanfDelimiter string = "::"
)

// Conf is a wrapper around the koanf library.
type Conf struct {
	*koanf.Koanf
}

// NewConf creates a new Conf instance.
func NewConf() *Conf {
	return &Conf{koanf.New(KoanfDelimiter)}
}

// NewConfFromMap creates a new Conf instance from a map.
func NewConfFromMap(m map[string]any) (*Conf, error) {
	conf := NewConf()
	if err := conf.Koanf.Load(confmap.Provider(m, KoanfDelimiter), nil); err != nil {
		return nil, err
	}

	return conf, nil
}

// MustNewConfFromMap creates a new Conf instance from a map.
// It panics if the conf cannot be created.
func MustNewConfFromMap(m map[string]any) *Conf {
	conf, err := NewConfFromMap(m)
	if err != nil {
		panic(err)
	}

	return conf
}

// Merge merges the current configuration with the input configuration.
func (conf *Conf) Merge(input *Conf) error {
	return conf.Koanf.Merge(input.Koanf)
}

// Merge merges the current configuration with the input configuration.
func (conf *Conf) MergeAt(input *Conf, path string) error {
	return conf.Koanf.MergeAt(input.Koanf, path)
}

// Unmarshal unmarshals the configuration at the given path into the input.
// It uses a WeaklyTypedInput to allow for more flexible unmarshalling.
func (conf *Conf) Unmarshal(path string, input any) error {
	dc := &mapstructure.DecoderConfig{
		TagName:          "mapstructure",
		WeaklyTypedInput: true,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToSliceHookFunc(","),
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.TextUnmarshallerHookFunc(),
		),
		Result: input,
	}

	return conf.Koanf.UnmarshalWithConf(path, input, koanf.UnmarshalConf{Tag: "mapstructure", DecoderConfig: dc})
}

// Set sets the configuration at the given key.
// It decodes the input into a map as per mapstructure.Decode and then merges it into the configuration.
func (conf *Conf) Set(key string, input any) error {
	m := map[string]any{}
	err := mapstructure.Decode(input, &m)
	if err != nil {
		return err
	}

	newConf := NewConf()
	if err := newConf.Koanf.Load(confmap.Provider(m, KoanfDelimiter), nil); err != nil {
		return err
	}

	if err := conf.Koanf.MergeAt(newConf.Koanf, key); err != nil {
		return err
	}

	return nil
}
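The tests that follow only exercise Merge; here is a short hedged sketch of how Set and Unmarshal from the file above compose. The `server` key and struct are illustrative, not taken from the diff:

// Sketch only: Conf.Set merges at a key, Conf.Unmarshal decodes weakly typed values.
package main

import (
	"fmt"
	"time"

	"go.signoz.io/signoz/pkg/config"
)

// serverConfig is a hypothetical struct used purely for illustration.
type serverConfig struct {
	Address string        `mapstructure:"address"`
	Timeout time.Duration `mapstructure:"timeout"`
}

func main() {
	conf := config.MustNewConfFromMap(map[string]any{
		"server": map[string]any{"address": "0.0.0.0:8080", "timeout": "5s"},
	})

	// Set decodes the input into a map and merges it at "server",
	// overriding server::address while leaving server::timeout in place.
	if err := conf.Set("server", map[string]any{"address": "127.0.0.1:9090"}); err != nil {
		panic(err)
	}

	// Unmarshal is weakly typed and registers the duration hook,
	// so the "5s" string decodes into a time.Duration.
	var sc serverConfig
	if err := conf.Unmarshal("server", &sc); err != nil {
		panic(err)
	}

	fmt.Println(sc.Address, sc.Timeout) // 127.0.0.1:9090 5s
}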
pkg/config/conf_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfMerge(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
conf *Conf
|
||||
input *Conf
|
||||
expected *Conf
|
||||
pass bool
|
||||
}{
|
||||
{name: "Empty", conf: NewConf(), input: NewConf(), expected: NewConf(), pass: true},
|
||||
{name: "Merge", conf: MustNewConfFromMap(map[string]any{"a": "b"}), input: MustNewConfFromMap(map[string]any{"c": "d"}), expected: MustNewConfFromMap(map[string]any{"a": "b", "c": "d"}), pass: true},
|
||||
{name: "NestedMerge", conf: MustNewConfFromMap(map[string]any{"a": map[string]any{"b": "v1", "c": "v2"}}), input: MustNewConfFromMap(map[string]any{"a": map[string]any{"d": "v1", "e": "v2"}}), expected: MustNewConfFromMap(map[string]any{"a": map[string]any{"b": "v1", "c": "v2", "d": "v1", "e": "v2"}}), pass: true},
|
||||
{name: "Override", conf: MustNewConfFromMap(map[string]any{"a": "b"}), input: MustNewConfFromMap(map[string]any{"a": "c"}), expected: MustNewConfFromMap(map[string]any{"a": "c"}), pass: true},
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.conf.Merge(tc.input)
|
||||
if !tc.pass {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expected.Raw(), tc.conf.Raw())
|
||||
assert.Equal(t, tc.expected.Raw(), tc.conf.Raw())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -3,33 +3,34 @@ package config
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.signoz.io/signoz/pkg/cache"
|
||||
signozconfmap "go.signoz.io/signoz/pkg/confmap"
|
||||
"go.signoz.io/signoz/pkg/instrumentation"
|
||||
"go.signoz.io/signoz/pkg/web"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
)
|
||||
|
||||
// This map contains the default values of all config structs
|
||||
var (
|
||||
defaults = map[string]signozconfmap.Config{
|
||||
"instrumentation": &instrumentation.Config{},
|
||||
"web": &web.Config{},
|
||||
"cache": &cache.Config{},
|
||||
}
|
||||
)
|
||||
|
||||
// Config defines the entire configuration of signoz.
|
||||
type Config struct {
|
||||
Instrumentation instrumentation.Config `mapstructure:"instrumentation"`
|
||||
Web web.Config `mapstructure:"web"`
|
||||
Cache cache.Config `mapstructure:"cache"`
|
||||
}
|
||||
|
||||
func New(ctx context.Context, settings ProviderSettings) (*Config, error) {
|
||||
provider, err := NewProvider(settings)
|
||||
func New(ctx context.Context, resolverConfig ResolverConfig, configFactories []factory.ConfigFactory) (*Conf, error) {
|
||||
// Get the config from the resolver
|
||||
resolver, err := NewResolver(resolverConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return provider.Get(ctx)
|
||||
resolvedConf, err := resolver.Do(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf := NewConf()
|
||||
// Set the default configs
|
||||
for _, factory := range configFactories {
|
||||
c := factory.New()
|
||||
if err := conf.Set(factory.Name().String(), c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = conf.Merge(resolvedConf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
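Read together, the rewritten New is glue: resolve the URIs into a Conf, seed defaults from the config factories, then merge the resolved values over those defaults. A hedged sketch of how a caller could wire it with the providers added in this branch (the config file path is illustrative, and only the instrumentation config factory is visible in this diff):

// Sketch only: wiring config.New with the env and file providers.
package main

import (
	"context"

	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/config/envprovider"
	"go.signoz.io/signoz/pkg/config/fileprovider"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/instrumentation"
)

func main() {
	conf, err := config.New(
		context.Background(),
		config.ResolverConfig{
			// Later URIs take precedence, so environment variables override the file here.
			Uris: []string{"file:/etc/signoz/config.yaml", "env:"},
			ProviderFactories: []config.ProviderFactory{
				fileprovider.NewFactory(),
				envprovider.NewFactory(),
			},
		},
		[]factory.ConfigFactory{
			instrumentation.NewConfigFactory(),
			// other packages would register their own ConfigFactory here
		},
	)
	if err != nil {
		panic(err)
	}

	var instrCfg instrumentation.Config
	if err := conf.Unmarshal("instrumentation", &instrCfg); err != nil {
		panic(err)
	}
	_ = instrCfg
}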
@@ -1,54 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
"go.signoz.io/signoz/pkg/cache"
|
||||
"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
|
||||
"go.signoz.io/signoz/pkg/web"
|
||||
)
|
||||
|
||||
func TestNewWithSignozEnvProvider(t *testing.T) {
|
||||
|
||||
t.Setenv("SIGNOZ__WEB__PREFIX", "/web")
|
||||
t.Setenv("SIGNOZ__WEB__DIRECTORY", "/build")
|
||||
t.Setenv("SIGNOZ__CACHE__PROVIDER", "redis")
|
||||
t.Setenv("SIGNOZ__CACHE__REDIS__HOST", "127.0.0.1")
|
||||
|
||||
config, err := New(context.Background(), ProviderSettings{
|
||||
ResolverSettings: confmap.ResolverSettings{
|
||||
URIs: []string{"signozenv:"},
|
||||
ProviderFactories: []confmap.ProviderFactory{
|
||||
signozenvprovider.NewFactory(),
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := &Config{
|
||||
Web: web.Config{
|
||||
Prefix: "/web",
|
||||
Directory: "/build",
|
||||
},
|
||||
Cache: cache.Config{
|
||||
Provider: "redis",
|
||||
Memory: cache.Memory{
|
||||
TTL: time.Duration(-1),
|
||||
CleanupInterval: 1 * time.Minute,
|
||||
},
|
||||
Redis: cache.Redis{
|
||||
Host: "127.0.0.1",
|
||||
Port: 6379,
|
||||
Password: "",
|
||||
DB: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, expected, config)
|
||||
}
|
||||
pkg/config/envprovider/provider.go (new file, 71 lines)
@@ -0,0 +1,71 @@
|
||||
package envprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
koanfenv "github.com/knadh/koanf/providers/env"
|
||||
"go.signoz.io/signoz/pkg/config"
|
||||
)
|
||||
|
||||
const (
|
||||
prefix string = "SIGNOZ_"
|
||||
scheme string = "env"
|
||||
)
|
||||
|
||||
type provider struct{}
|
||||
|
||||
func NewFactory() config.ProviderFactory {
|
||||
return config.NewProviderFactory(New)
|
||||
}
|
||||
|
||||
func New(config config.ProviderConfig) config.Provider {
|
||||
return &provider{}
|
||||
}
|
||||
|
||||
func (provider *provider) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
func (provider *provider) Get(ctx context.Context, uri config.Uri) (*config.Conf, error) {
|
||||
conf := config.NewConf()
|
||||
err := conf.Load(
|
||||
koanfenv.Provider(
|
||||
prefix,
|
||||
// Do not set this to `_`. The correct delimiter is being set by the custom callback provided below.
|
||||
// Since this had to be passed, using `config.KoanfDelimiter` eliminates any possible side effect.
|
||||
config.KoanfDelimiter,
|
||||
func(s string) string {
|
||||
s = strings.ToLower(strings.TrimPrefix(s, prefix))
|
||||
return provider.cb(s, config.KoanfDelimiter)
|
||||
},
|
||||
),
|
||||
nil,
|
||||
)
|
||||
|
||||
return conf, err
|
||||
}
|
||||
|
||||
func (provider *provider) cb(s string, delim string) string {
|
||||
delims := []rune(delim)
|
||||
runes := []rune(s)
|
||||
result := make([]rune, 0, len(runes))
|
||||
|
||||
for i := 0; i < len(runes); i++ {
|
||||
// Check for double underscore pattern
|
||||
if i < len(runes)-1 && runes[i] == '_' && runes[i+1] == '_' {
|
||||
result = append(result, '_')
|
||||
i++ // Skip next underscore
|
||||
continue
|
||||
}
|
||||
|
||||
if runes[i] == '_' {
|
||||
result = append(result, delims...)
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, runes[i])
|
||||
}
|
||||
|
||||
return string(result)
|
||||
}
|
||||
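Worth calling out, since it differs from the signozenvprovider deleted later in this diff: the prefix here is SIGNOZ_ with a single underscore, a single `_` in the remainder becomes the `::` key delimiter (one nesting level), and a double `__` escapes a literal underscore inside a key. So `SIGNOZ_INSTRUMENTATION_LOGS_ENABLED=true` targets `instrumentation::logs::enabled`, while `SIGNOZ_K3__K4=string` targets the flat key `k3_k4`, exactly as the tests below expect.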
pkg/config/envprovider/provider_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
package envprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.signoz.io/signoz/pkg/config"
|
||||
)
|
||||
|
||||
func TestGetWithStrings(t *testing.T) {
|
||||
t.Setenv("SIGNOZ_K1_K2", "string")
|
||||
t.Setenv("SIGNOZ_K3__K4", "string")
|
||||
t.Setenv("SIGNOZ_K5__K6_K7__K8", "string")
|
||||
t.Setenv("SIGNOZ_K9___K10", "string")
|
||||
t.Setenv("SIGNOZ_K11____K12", "string")
|
||||
expected := map[string]any{
|
||||
"k1::k2": "string",
|
||||
"k3_k4": "string",
|
||||
"k5_k6::k7_k8": "string",
|
||||
"k9_::k10": "string",
|
||||
"k11__k12": "string",
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
actual, err := provider.Get(context.Background(), config.MustNewUri("env:"))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual.All())
|
||||
}
|
||||
|
||||
func TestGetWithGoTypes(t *testing.T) {
|
||||
t.Setenv("SIGNOZ_BOOL", "true")
|
||||
t.Setenv("SIGNOZ_STRING", "string")
|
||||
t.Setenv("SIGNOZ_INT", "1")
|
||||
t.Setenv("SIGNOZ_SLICE", "[1,2]")
|
||||
expected := map[string]any{
|
||||
"bool": "true",
|
||||
"int": "1",
|
||||
"slice": "[1,2]",
|
||||
"string": "string",
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
actual, err := provider.Get(context.Background(), config.MustNewUri("env:"))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual.All())
|
||||
}
|
||||
|
||||
func TestGetWithGoTypesWithUnmarshal(t *testing.T) {
|
||||
t.Setenv("SIGNOZ_BOOL", "true")
|
||||
t.Setenv("SIGNOZ_STRING", "string")
|
||||
t.Setenv("SIGNOZ_INT", "1")
|
||||
|
||||
type test struct {
|
||||
Bool bool `mapstructure:"bool"`
|
||||
String string `mapstructure:"string"`
|
||||
Int int `mapstructure:"int"`
|
||||
}
|
||||
|
||||
expected := test{
|
||||
Bool: true,
|
||||
String: "string",
|
||||
Int: 1,
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
conf, err := provider.Get(context.Background(), config.MustNewUri("env:"))
|
||||
require.NoError(t, err)
|
||||
|
||||
actual := test{}
|
||||
err = conf.Unmarshal("", &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
pkg/config/fileprovider/provider.go (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
package fileprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
koanfyaml "github.com/knadh/koanf/parsers/yaml"
|
||||
koanffile "github.com/knadh/koanf/providers/file"
|
||||
"go.signoz.io/signoz/pkg/config"
|
||||
)
|
||||
|
||||
const (
|
||||
scheme string = "file"
|
||||
)
|
||||
|
||||
type provider struct{}
|
||||
|
||||
func NewFactory() config.ProviderFactory {
|
||||
return config.NewProviderFactory(New)
|
||||
}
|
||||
|
||||
func New(config config.ProviderConfig) config.Provider {
|
||||
return &provider{}
|
||||
}
|
||||
|
||||
func (provider *provider) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
func (provider *provider) Get(ctx context.Context, uri config.Uri) (*config.Conf, error) {
|
||||
conf := config.NewConf()
|
||||
err := conf.Load(koanffile.Provider(uri.Value()), koanfyaml.Parser())
|
||||
|
||||
return conf, err
|
||||
}
|
||||
pkg/config/fileprovider/provider_test.go (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
package fileprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.signoz.io/signoz/pkg/config"
|
||||
)
|
||||
|
||||
func TestGetWithStrings(t *testing.T) {
|
||||
expected := map[string]any{
|
||||
"k1::k2": "string",
|
||||
"k3_k4": "string",
|
||||
"k5_k6::k7_k8": "string",
|
||||
"k9_::k10": "string",
|
||||
"k11__k12": "string",
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
actual, err := provider.Get(context.Background(), config.MustNewUri("file:"+filepath.Join("testdata", "strings.yaml")))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual.All())
|
||||
}
|
||||
|
||||
func TestGetWithGoTypes(t *testing.T) {
|
||||
expected := map[string]any{
|
||||
"bool": true,
|
||||
"int": 1,
|
||||
"slice": []any{1, 2},
|
||||
"string": "string",
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
actual, err := provider.Get(context.Background(), config.MustNewUri("file:"+filepath.Join("testdata", "gotypes.yaml")))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual.All())
|
||||
}
|
||||
|
||||
func TestGetWithGoTypesWithUnmarshal(t *testing.T) {
|
||||
type test struct {
|
||||
Bool bool `mapstructure:"bool"`
|
||||
String string `mapstructure:"string"`
|
||||
Int int `mapstructure:"int"`
|
||||
Slice []any `mapstructure:"slice"`
|
||||
}
|
||||
|
||||
expected := test{
|
||||
Bool: true,
|
||||
String: "string",
|
||||
Int: 1,
|
||||
Slice: []any{1, 2},
|
||||
}
|
||||
|
||||
provider := New(config.ProviderConfig{})
|
||||
conf, err := provider.Get(context.Background(), config.MustNewUri("file:"+filepath.Join("testdata", "gotypes.yaml")))
|
||||
require.NoError(t, err)
|
||||
|
||||
actual := test{}
|
||||
err = conf.Unmarshal("", &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
pkg/config/fileprovider/testdata/gotypes.yaml (new file, vendored, 6 lines)
@@ -0,0 +1,6 @@
|
||||
bool: true
|
||||
string: string
|
||||
int: 1
|
||||
slice:
|
||||
- 1
|
||||
- 2
|
||||
pkg/config/fileprovider/testdata/strings.yaml (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
|
||||
k1:
|
||||
k2: string
|
||||
k3_k4: string
|
||||
k5_k6:
|
||||
k7_k8: string
|
||||
k9_:
|
||||
k10: string
|
||||
k11__k12: string
|
||||
@@ -2,51 +2,38 @@ package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
)
|
||||
|
||||
// Provides the configuration for signoz.
|
||||
// NewProviderFunc is a function that creates a new provider.
|
||||
type NewProviderFunc = func(ProviderConfig) Provider
|
||||
|
||||
// ProviderFactory is a factory that creates a new provider.
|
||||
type ProviderFactory interface {
|
||||
New(ProviderConfig) Provider
|
||||
}
|
||||
|
||||
// NewProviderFactory creates a new provider factory.
|
||||
func NewProviderFactory(f NewProviderFunc) ProviderFactory {
|
||||
return &providerFactory{f: f}
|
||||
}
|
||||
|
||||
// providerFactory is a factory that implements the ProviderFactory interface.
|
||||
type providerFactory struct {
|
||||
f NewProviderFunc
|
||||
}
|
||||
|
||||
// New creates a new provider.
|
||||
func (factory *providerFactory) New(config ProviderConfig) Provider {
|
||||
return factory.f(config)
|
||||
}
|
||||
|
||||
// ProviderConfig is the configuration for a provider.
|
||||
type ProviderConfig struct{}
|
||||
|
||||
// Provider is an interface that represents a provider.
|
||||
type Provider interface {
|
||||
// Get returns the configuration, or error otherwise.
|
||||
Get(ctx context.Context) (*Config, error)
|
||||
}
|
||||
|
||||
type provider struct {
|
||||
resolver *confmap.Resolver
|
||||
}
|
||||
|
||||
// ProviderSettings are the settings to configure the behavior of the Provider.
|
||||
type ProviderSettings struct {
|
||||
// ResolverSettings are the settings to configure the behavior of the confmap.Resolver.
|
||||
ResolverSettings confmap.ResolverSettings
|
||||
}
|
||||
|
||||
// NewProvider returns a new Provider that provides the entire configuration.
|
||||
// See https://github.com/open-telemetry/opentelemetry-collector/blob/main/otelcol/configprovider.go for
|
||||
// more details
|
||||
func NewProvider(settings ProviderSettings) (Provider, error) {
|
||||
resolver, err := confmap.NewResolver(settings.ResolverSettings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &provider{
|
||||
resolver: resolver,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (provider *provider) Get(ctx context.Context) (*Config, error) {
|
||||
conf, err := provider.resolver.Resolve(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot resolve configuration: %w", err)
|
||||
}
|
||||
|
||||
config, err := unmarshal(conf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal configuration: %w", err)
|
||||
}
|
||||
|
||||
return config, nil
|
||||
// Get returns the configuration for the given URI.
|
||||
Get(context.Context, Uri) (*Conf, error)
|
||||
// Scheme returns the scheme of the provider.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
pkg/config/resolver.go (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ResolverConfig struct {
|
||||
// Each string or `uri` must follow "<scheme>:<value>" format. This format is compatible with the URI definition
|
||||
// defined at https://datatracker.ietf.org/doc/html/rfc3986".
|
||||
// It is required to have at least one uri.
|
||||
Uris []string
|
||||
|
||||
// ProviderFactories is a slice of Provider factories.
|
||||
// It is required to have at least one factory.
|
||||
ProviderFactories []ProviderFactory
|
||||
}
|
||||
|
||||
type Resolver struct {
|
||||
uris []Uri
|
||||
providers map[string]Provider
|
||||
}
|
||||
|
||||
func NewResolver(config ResolverConfig) (*Resolver, error) {
|
||||
if len(config.Uris) == 0 {
|
||||
return nil, errors.New("cannot build resolver, no uris have been provided")
|
||||
}
|
||||
|
||||
if len(config.ProviderFactories) == 0 {
|
||||
return nil, errors.New("cannot build resolver, no providers have been provided")
|
||||
}
|
||||
|
||||
uris := make([]Uri, len(config.Uris))
|
||||
for i, inputUri := range config.Uris {
|
||||
uri, err := NewUri(inputUri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uris[i] = uri
|
||||
}
|
||||
|
||||
providers := make(map[string]Provider, len(config.ProviderFactories))
|
||||
for _, factory := range config.ProviderFactories {
|
||||
provider := factory.New(ProviderConfig{})
|
||||
|
||||
scheme := provider.Scheme()
|
||||
// Check that the scheme is unique.
|
||||
if _, ok := providers[scheme]; ok {
|
||||
return nil, fmt.Errorf("cannot build resolver, duplicate scheme %q found", scheme)
|
||||
}
|
||||
|
||||
providers[provider.Scheme()] = provider
|
||||
}
|
||||
|
||||
return &Resolver{
|
||||
uris: uris,
|
||||
providers: providers,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (resolver *Resolver) Do(ctx context.Context) (*Conf, error) {
|
||||
conf := NewConf()
|
||||
|
||||
for _, uri := range resolver.uris {
|
||||
currentConf, err := resolver.get(ctx, uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = conf.Merge(currentConf); err != nil {
|
||||
return nil, fmt.Errorf("cannot merge config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func (resolver *Resolver) get(ctx context.Context, uri Uri) (*Conf, error) {
|
||||
provider, ok := resolver.providers[uri.scheme]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot find provider with schema %q", uri.scheme)
|
||||
}
|
||||
|
||||
return provider.Get(ctx, uri)
|
||||
}
|
||||
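Note the precedence this gives: Do walks resolver.uris in the order they were supplied and merges each provider's result into a single Conf, so later URIs win for overlapping keys (the Override case in conf_test.go shows the same last-wins behaviour at the Conf level). Every scheme referenced in Uris must also have a registered ProviderFactory, otherwise get fails because no provider matches the scheme.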
@@ -1,49 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
)
|
||||
|
||||
// unmarshal converts a confmap.Conf into a Config struct.
|
||||
// It splits the input confmap into a map of key-value pairs, fetches the corresponding
|
||||
// signozconfmap.Config interface by name, merges it with the default config, validates it,
|
||||
// and then creates a new confmap from the parsed map to unmarshal into the Config struct.
|
||||
func unmarshal(conf *confmap.Conf) (*Config, error) {
|
||||
raw := make(map[string]any)
|
||||
if err := conf.Unmarshal(&raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parsed := make(map[string]any)
|
||||
|
||||
// To help the defaults kick in, we need iterate over the default map instead of the raw values
|
||||
for k, v := range defaults {
|
||||
sub, err := conf.Sub(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read config for %q: %w", k, err)
|
||||
}
|
||||
|
||||
d := v.NewWithDefaults()
|
||||
if err := sub.Unmarshal(&d); err != nil {
|
||||
return nil, fmt.Errorf("cannot merge config for %q: %w", k, err)
|
||||
}
|
||||
|
||||
err = d.Validate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to validate config for for %q: %w", k, err)
|
||||
}
|
||||
|
||||
parsed[k] = d
|
||||
}
|
||||
|
||||
parsedConf := confmap.NewFromStringMap(parsed)
|
||||
config := new(Config)
|
||||
err := parsedConf.Unmarshal(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal config: %w", err)
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
"go.signoz.io/signoz/pkg/instrumentation"
|
||||
)
|
||||
|
||||
func TestUnmarshalForInstrumentation(t *testing.T) {
|
||||
input := confmap.NewFromStringMap(
|
||||
map[string]any{
|
||||
"instrumentation": map[string]any{
|
||||
"logs": map[string]bool{
|
||||
"enabled": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
expected := &Config{
|
||||
Instrumentation: instrumentation.Config{
|
||||
Logs: instrumentation.LogsConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg, err := unmarshal(input)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected.Instrumentation, cfg.Instrumentation)
|
||||
}
|
||||
pkg/config/uri.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package config

import (
	"fmt"
	"regexp"
)

var (
	// uriRegex is a regex that matches the URI format. It complies with the URI definition defined at https://datatracker.ietf.org/doc/html/rfc3986.
	// The format is "<scheme>:<value>".
	uriRegex = regexp.MustCompile(`(?s:^(?P<Scheme>[A-Za-z][A-Za-z0-9+.-]+):(?P<Value>.*)$)`)
)

type Uri struct {
	scheme string
	value  string
}

func NewUri(input string) (Uri, error) {
	submatches := uriRegex.FindStringSubmatch(input)

	if len(submatches) != 3 {
		return Uri{}, fmt.Errorf("invalid uri: %q", input)
	}
	return Uri{
		scheme: submatches[1],
		value:  submatches[2],
	}, nil
}

func MustNewUri(input string) Uri {
	uri, err := NewUri(input)
	if err != nil {
		panic(err)
	}

	return uri
}

func (uri Uri) Scheme() string {
	return uri.scheme
}

func (uri Uri) Value() string {
	return uri.value
}
pkg/config/uri_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewUri(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expected Uri
|
||||
pass bool
|
||||
}{
|
||||
{input: "file:/path/1", expected: Uri{scheme: "file", value: "/path/1"}, pass: true},
|
||||
{input: "file:", expected: Uri{scheme: "file", value: ""}, pass: true},
|
||||
{input: "env:", expected: Uri{scheme: "env", value: ""}, pass: true},
|
||||
{input: "scheme", expected: Uri{}, pass: false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
uri, err := NewUri(tc.input)
|
||||
if !tc.pass {
|
||||
assert.Error(t, err)
|
||||
continue
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotPanics(t, func() { MustNewUri(tc.input) })
|
||||
assert.Equal(t, tc.expected, uri)
|
||||
assert.Equal(t, tc.expected.Scheme(), uri.scheme)
|
||||
assert.Equal(t, tc.expected.Value(), uri.value)
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package confmap
|
||||
|
||||
// Config is an interface that defines methods for creating and validating configurations.
|
||||
type Config interface {
|
||||
// New creates a new instance of the configuration with default values.
|
||||
NewWithDefaults() Config
|
||||
// Validate the configuration and returns an error if invalid.
|
||||
Validate() error
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
// Package confmap is a wrapper on top of the confmap defined here:
|
||||
// https://github.com/open-telemetry/opentelemetry-collector/blob/main/otelcol/configprovider.go/
|
||||
package confmap
|
||||
@@ -1,94 +0,0 @@
|
||||
package signozenvprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
"go.uber.org/zap"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
schemeName string = "signozenv"
|
||||
envPrefix string = "signoz"
|
||||
separator string = "__"
|
||||
envPrefixWithOneSeparator string = "signoz_"
|
||||
envRegexString string = `^[a-zA-Z][a-zA-Z0-9_]*$`
|
||||
)
|
||||
|
||||
var (
|
||||
envRegex = regexp.MustCompile(envRegexString)
|
||||
)
|
||||
|
||||
type provider struct {
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewFactory returns a factory for a confmap.Provider that reads the configuration from the environment.
|
||||
// All variables starting with `SIGNOZ__` are read from the environment.
|
||||
// The separator is `__` (2 underscores) in order to incorporate env variables having keys with a single `_`
|
||||
func NewFactory() confmap.ProviderFactory {
|
||||
return confmap.NewProviderFactory(newProvider)
|
||||
}
|
||||
|
||||
func newProvider(settings confmap.ProviderSettings) confmap.Provider {
|
||||
return &provider{
|
||||
logger: settings.Logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (provider *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) {
|
||||
if !strings.HasPrefix(uri, schemeName+":") {
|
||||
return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName)
|
||||
}
|
||||
|
||||
// Read and Sort environment variables for consistent output
|
||||
envvars := os.Environ()
|
||||
sort.Strings(envvars)
|
||||
|
||||
// Create a map m containing key value pairs
|
||||
m := make(map[string]any)
|
||||
for _, envvar := range envvars {
|
||||
parts := strings.SplitN(envvar, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
key := strings.ToLower(parts[0])
|
||||
val := parts[1]
|
||||
|
||||
if strings.HasPrefix(key, envPrefixWithOneSeparator) {
|
||||
// Remove the envPrefix from the key
|
||||
key = strings.Replace(key, envPrefix+separator, "", 1)
|
||||
|
||||
// Check whether the resulting key matches with the regex
|
||||
if !envRegex.MatchString(key) {
|
||||
provider.logger.Warn("Configuration references invalid environment variable key", zap.String("key", key))
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert key into yaml format
|
||||
key = strings.ToLower(strings.ReplaceAll(key, separator, confmap.KeyDelimiter))
|
||||
m[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
out, err := yaml.Marshal(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return confmap.NewRetrievedFromYAML(out)
|
||||
}
|
||||
|
||||
func (*provider) Scheme() string {
|
||||
return schemeName
|
||||
}
|
||||
|
||||
func (*provider) Shutdown(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
package signozenvprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/collector/confmap"
|
||||
"go.opentelemetry.io/collector/confmap/confmaptest"
|
||||
)
|
||||
|
||||
func createProvider() confmap.Provider {
|
||||
return NewFactory().Create(confmaptest.NewNopProviderSettings())
|
||||
}
|
||||
|
||||
func TestValidateProviderScheme(t *testing.T) {
|
||||
assert.NoError(t, confmaptest.ValidateProviderScheme(createProvider()))
|
||||
}
|
||||
|
||||
func TestRetrieve(t *testing.T) {
|
||||
t.Setenv("SIGNOZ__STORAGE__DSN", "localhost:9000")
|
||||
t.Setenv("SIGNOZ__SIGNOZ_ENABLED", "true")
|
||||
t.Setenv("SIGNOZ__INSTRUMENTATION__LOGS__ENABLED", "true")
|
||||
expected := confmap.NewFromStringMap(map[string]any{
|
||||
"storage::dsn": "localhost:9000",
|
||||
"signoz_enabled": "true",
|
||||
"instrumentation::logs::enabled": "true",
|
||||
})
|
||||
|
||||
signoz := createProvider()
|
||||
retrieved, err := signoz.Retrieve(context.Background(), schemeName+":", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := retrieved.AsConf()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expected.ToStringMap(), actual.ToStringMap())
|
||||
assert.NoError(t, signoz.Shutdown(context.Background()))
|
||||
}
|
||||
pkg/factory/config.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package factory
|
||||
|
||||
// Config is an interface that defines methods for creating and validating configurations.
|
||||
type Config interface {
|
||||
// Validate the configuration and returns an error if invalid.
|
||||
Validate() error
|
||||
}
|
||||
|
||||
// NewConfigFunc is a function that creates a new config.
|
||||
type NewConfigFunc func() Config
|
||||
|
||||
// ConfigFactory is a factory that creates a new config.
|
||||
type ConfigFactory interface {
|
||||
Named
|
||||
New() Config
|
||||
}
|
||||
|
||||
// configFactory is a factory that implements the ConfigFactory interface.
|
||||
type configFactory struct {
|
||||
name Name
|
||||
newConfigFunc NewConfigFunc
|
||||
}
|
||||
|
||||
// New creates a new config.
|
||||
func (factory *configFactory) Name() Name {
|
||||
return factory.name
|
||||
}
|
||||
|
||||
// New creates a new config.
|
||||
func (factory *configFactory) New() Config {
|
||||
return factory.newConfigFunc()
|
||||
}
|
||||
|
||||
// Creates a new config factory.
|
||||
func NewConfigFactory(name Name, f NewConfigFunc) ConfigFactory {
|
||||
return &configFactory{name: name, newConfigFunc: f}
|
||||
}
|
||||
@@ -1,12 +1,14 @@
|
||||
package registry
|
||||
package factorytest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
)
|
||||
|
||||
var _ NamedService = (*httpService)(nil)
|
||||
var _ factory.Service = (*httpService)(nil)
|
||||
|
||||
type httpService struct {
|
||||
Listener net.Listener
|
||||
@@ -14,15 +16,15 @@ type httpService struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func newHttpService(name string) (*httpService, error) {
|
||||
func NewHttpService(name string) (*httpService, error) {
|
||||
return &httpService{
|
||||
name: name,
|
||||
Server: &http.Server{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (service *httpService) Name() string {
|
||||
return service.name
|
||||
func (service *httpService) Name() factory.Name {
|
||||
return factory.MustNewName(service.name)
|
||||
}
|
||||
|
||||
func (service *httpService) Start(ctx context.Context) error {
|
||||
pkg/factory/name.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package factory

import (
	"fmt"
	"regexp"
)

var (
	// nameRegex is a regex that matches a valid name.
	// It must start with a alphabet, and can only contain alphabets, numbers, underscores or hyphens.
	nameRegex = regexp.MustCompile(`^[a-z][a-z0-9_-]{0,30}$`)
)

type Name struct {
	name string
}

func (n Name) String() string {
	return n.name
}

// NewName creates a new name.
func NewName(name string) (Name, error) {
	if !nameRegex.MatchString(name) {
		return Name{}, fmt.Errorf("invalid factory name %q", name)
	}
	return Name{name: name}, nil
}

// MustNewName creates a new name.
// It panics if the name is invalid.
func MustNewName(name string) Name {
	n, err := NewName(name)
	if err != nil {
		panic(err)
	}
	return n
}
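One nuance the comment glosses over: the pattern only accepts a lowercase first letter and caps names at 31 characters. A tiny sketch of both paths, assuming nothing beyond the file above:

// Sketch only: exercising NewName / MustNewName.
package main

import (
	"fmt"

	"go.signoz.io/signoz/pkg/factory"
)

func main() {
	name := factory.MustNewName("memory") // valid: lowercase, within the length cap
	fmt.Println(name.String())            // memory

	if _, err := factory.NewName("Redis"); err != nil {
		fmt.Println(err) // invalid factory name "Redis"
	}
}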
pkg/factory/named.go (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
package factory
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Named is implemented by all types of factories.
|
||||
type Named interface {
|
||||
Name() Name
|
||||
}
|
||||
|
||||
type NamedMap[T Named] struct {
|
||||
factories map[Name]T
|
||||
factoriesInOrder []T
|
||||
}
|
||||
|
||||
func NewNamedMap[T Named](factories ...T) (NamedMap[T], error) {
|
||||
fmap := make(map[Name]T)
|
||||
for _, factory := range factories {
|
||||
if _, ok := fmap[factory.Name()]; ok {
|
||||
return NamedMap[T]{}, fmt.Errorf("cannot build factory map, duplicate name %q found", factory.Name())
|
||||
}
|
||||
|
||||
fmap[factory.Name()] = factory
|
||||
}
|
||||
|
||||
return NamedMap[T]{factories: fmap, factoriesInOrder: factories}, nil
|
||||
}
|
||||
|
||||
func MustNewNamedMap[T Named](factories ...T) NamedMap[T] {
|
||||
nm, err := NewNamedMap(factories...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nm
|
||||
}
|
||||
|
||||
func (n NamedMap[T]) Get(namestr string) (t T, err error) {
|
||||
name, err := NewName(namestr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
factory, ok := n.factories[name]
|
||||
if !ok {
|
||||
err = fmt.Errorf("factory %q not found or not registered", name)
|
||||
return
|
||||
}
|
||||
|
||||
t = factory
|
||||
return
|
||||
}
|
||||
|
||||
func (n NamedMap[T]) Add(factory T) (err error) {
|
||||
name := factory.Name()
|
||||
if _, ok := n.factories[name]; ok {
|
||||
return fmt.Errorf("factory %q already exists", name)
|
||||
}
|
||||
|
||||
n.factories[name] = factory
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n NamedMap[T]) GetInOrder() []T {
|
||||
return n.factoriesInOrder
|
||||
}
|
||||
pkg/factory/provider.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package factory

import "context"

type Provider = any

// NewProviderFunc is a function that creates a new Provider.
type NewProviderFunc[P Provider, C Config] func(context.Context, ProviderSettings, C) (P, error)

type ProviderFactory[P Provider, C Config] interface {
	Named
	New(context.Context, ProviderSettings, C) (P, error)
}

type providerFactory[P Provider, C Config] struct {
	name            Name
	newProviderFunc NewProviderFunc[P, C]
}

func (factory *providerFactory[P, C]) Name() Name {
	return factory.name
}

func (factory *providerFactory[P, C]) New(ctx context.Context, settings ProviderSettings, config C) (P, error) {
	return factory.newProviderFunc(ctx, settings, config)
}

func NewProviderFactory[P Provider, C Config](name Name, newProviderFunc NewProviderFunc[P, C]) ProviderFactory[P, C] {
	return &providerFactory[P, C]{
		name:            name,
		newProviderFunc: newProviderFunc,
	}
}

func NewFromFactory[P Provider, C Config](ctx context.Context, settings ProviderSettings, config C, factories NamedMap[ProviderFactory[P, C]], key string) (p P, err error) {
	providerFactory, err := factories.Get(key)
	if err != nil {
		return
	}

	provider, err := providerFactory.New(ctx, settings, config)
	if err != nil {
		return
	}

	p = provider
	return
}
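This generic factory is what ties the renamed cache packages to configuration: each provider package exposes a NewFactory() (the redis one is shown earlier in this diff), the factories are collected in a NamedMap, and NewFromFactory picks one by the provider name coming from config. A hedged sketch, assuming memorycache exposes a NewFactory() symmetric to rediscache.NewFactory() and that the import paths below are where the renamed packages live:

// Sketch only: selecting a cache provider through factory.NewFromFactory.
package main

import (
	"context"
	"time"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/memorycache"
	"go.signoz.io/signoz/pkg/cache/rediscache"
	"go.signoz.io/signoz/pkg/factory"
)

func main() {
	factories := factory.MustNewNamedMap(
		memorycache.NewFactory(), // assumed, mirroring rediscache.NewFactory()
		rediscache.NewFactory(),
	)

	cfg := cache.Config{
		Provider: "memory",
		Memory:   cache.Memory{TTL: 10 * time.Second, CleanupInterval: time.Minute},
	}

	// cfg.Provider ("memory" or "redis") picks the factory by name.
	c, err := factory.NewFromFactory(
		context.Background(),
		factory.ProviderSettings{},
		cfg,
		factories,
		cfg.Provider,
	)
	if err != nil {
		panic(err)
	}
	_ = c // c is a cache.Cache
}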
@@ -1,4 +1,4 @@
|
||||
package registry
|
||||
package factory
|
||||
|
||||
import "context"
|
||||
|
||||
@@ -8,9 +8,3 @@ type Service interface {
|
||||
// Stops a service.
|
||||
Stop(context.Context) error
|
||||
}
|
||||
|
||||
type NamedService interface {
|
||||
// Identifier of a service. It should be unique across all services.
|
||||
Name() string
|
||||
Service
|
||||
}
|
||||
pkg/factory/setting.go (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
package factory
|
||||
|
||||
import (
|
||||
sdklog "go.opentelemetry.io/otel/log"
|
||||
sdkmetric "go.opentelemetry.io/otel/metric"
|
||||
sdktrace "go.opentelemetry.io/otel/trace"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type ProviderSettings struct {
|
||||
// LoggerProvider is the otel logger.
|
||||
LoggerProvider sdklog.LoggerProvider
|
||||
// ZapLogger is the zap logger.
|
||||
ZapLogger *zap.Logger
|
||||
// MeterProvider is the meter provider.
|
||||
MeterProvider sdkmetric.MeterProvider
|
||||
// TracerProvider is the tracer provider.
|
||||
TracerProvider sdktrace.TracerProvider
|
||||
}
|
||||
|
||||
type ScopedProviderSettings interface {
|
||||
Logger() sdklog.Logger
|
||||
ZapLogger() *zap.Logger
|
||||
Meter() sdkmetric.Meter
|
||||
Tracer() sdktrace.Tracer
|
||||
}
|
||||
|
||||
type scoped struct {
|
||||
logger sdklog.Logger
|
||||
zapLogger *zap.Logger
|
||||
meter sdkmetric.Meter
|
||||
tracer sdktrace.Tracer
|
||||
}
|
||||
|
||||
func NewScopedProviderSettings(settings ProviderSettings, pkgName string) *scoped {
|
||||
return &scoped{
|
||||
logger: settings.LoggerProvider.Logger(pkgName),
|
||||
zapLogger: settings.ZapLogger.Named(pkgName),
|
||||
meter: settings.MeterProvider.Meter(pkgName),
|
||||
tracer: settings.TracerProvider.Tracer(pkgName),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scoped) Logger() sdklog.Logger {
|
||||
return s.logger
|
||||
}
|
||||
|
||||
func (s *scoped) ZapLogger() *zap.Logger {
|
||||
return s.zapLogger
|
||||
}
|
||||
|
||||
func (s *scoped) Meter() sdkmetric.Meter {
|
||||
return s.meter
|
||||
}
|
||||
|
||||
func (s *scoped) Tracer() sdktrace.Tracer {
|
||||
return s.tracer
|
||||
}
|
||||
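NewScopedProviderSettings is the bridge between the SDK-level providers and per-package telemetry. A small hedged sketch of how a provider might use it (the package name string and doWork function are illustrative):

// Sketch only: deriving package-scoped telemetry from factory.ProviderSettings.
package example

import (
	"context"

	"go.signoz.io/signoz/pkg/factory"
)

func doWork(ctx context.Context, settings factory.ProviderSettings) {
	scoped := factory.NewScopedProviderSettings(settings, "go.signoz.io/signoz/pkg/example")

	// Spans and logs are scoped to the package name passed above.
	_, span := scoped.Tracer().Start(ctx, "example.doWork")
	defer span.End()

	scoped.ZapLogger().Info("doing work")
}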
@@ -1,12 +1,5 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"go.signoz.io/signoz/pkg/confmap"
|
||||
)
|
||||
|
||||
// Config satisfies the confmap.Config interface
|
||||
var _ confmap.Config = (*Config)(nil)
|
||||
|
||||
// Config holds the configuration for http.
|
||||
type Config struct {
|
||||
//Address specifies the TCP address for the server to listen on, in the form "host:port".
|
||||
@@ -15,7 +8,7 @@ type Config struct {
|
||||
Address string `mapstructure:"address"`
|
||||
}
|
||||
|
||||
func (c *Config) NewWithDefaults() confmap.Config {
|
||||
func (c *Config) NewWithDefaults() *Config {
|
||||
return &Config{
|
||||
Address: "0.0.0.0:8080",
|
||||
}
|
||||
|
||||
@@ -6,21 +6,21 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/pkg/registry"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var _ registry.NamedService = (*Server)(nil)
|
||||
var _ factory.Service = (*Server)(nil)
|
||||
|
||||
type Server struct {
|
||||
srv *http.Server
|
||||
logger *zap.Logger
|
||||
handler http.Handler
|
||||
cfg Config
|
||||
name string
|
||||
name factory.Name
|
||||
}
|
||||
|
||||
func New(logger *zap.Logger, name string, cfg Config, handler http.Handler) (*Server, error) {
|
||||
func New(logger *zap.Logger, name factory.Name, cfg Config, handler http.Handler) (*Server, error) {
|
||||
if handler == nil {
|
||||
return nil, fmt.Errorf("cannot build http server, handler is required")
|
||||
}
|
||||
@@ -29,10 +29,6 @@ func New(logger *zap.Logger, name string, cfg Config, handler http.Handler) (*Se
|
||||
return nil, fmt.Errorf("cannot build http server, logger is required")
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
return nil, fmt.Errorf("cannot build http server, name is required")
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: cfg.Address,
|
||||
Handler: handler,
|
||||
@@ -50,7 +46,7 @@ func New(logger *zap.Logger, name string, cfg Config, handler http.Handler) (*Se
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (server *Server) Name() string {
|
||||
func (server *Server) Name() factory.Name {
|
||||
return server.name
|
||||
}
|
||||
|
||||
|
||||
@@ -2,13 +2,10 @@ package instrumentation
|
||||
|
||||
import (
|
||||
contribsdkconfig "go.opentelemetry.io/contrib/config"
|
||||
"go.signoz.io/signoz/pkg/confmap"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// Config satisfies the confmap.Config interface
|
||||
var _ confmap.Config = (*Config)(nil)
|
||||
|
||||
// Config holds the configuration for all instrumentation components.
|
||||
type Config struct {
|
||||
Logs LogsConfig `mapstructure:"logs"`
|
||||
@@ -24,39 +21,69 @@ type Resource struct {
|
||||
|
||||
// LogsConfig holds the configuration for the logging component.
|
||||
type LogsConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
Level zapcore.Level `mapstructure:"level"`
|
||||
contribsdkconfig.LoggerProvider `mapstructure:",squash"`
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
Level zapcore.Level `mapstructure:"level"`
|
||||
Processors LogsProcessors `mapstructure:"processors"`
|
||||
}
|
||||
|
||||
type LogsProcessors struct {
|
||||
Batch contribsdkconfig.BatchLogRecordProcessor `mapstructure:"batch"`
|
||||
}
|
||||
|
||||
// TracesConfig holds the configuration for the tracing component.
|
||||
type TracesConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
contribsdkconfig.TracerProvider `mapstructure:",squash"`
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
Processors TracesProcessors `mapstructure:"processors"`
|
||||
Sampler contribsdkconfig.Sampler `mapstructure:"sampler"`
|
||||
}
|
||||
|
||||
type TracesProcessors struct {
|
||||
Batch contribsdkconfig.BatchSpanProcessor `mapstructure:"batch"`
|
||||
}
|
||||
|
||||
// MetricsConfig holds the configuration for the metrics component.
|
||||
type MetricsConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
contribsdkconfig.MeterProvider `mapstructure:",squash"`
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
Readers MetricsReaders `mapstructure:"readers"`
|
||||
}
|
||||
|
||||
func (c *Config) NewWithDefaults() confmap.Config {
|
||||
return &Config{
|
||||
type MetricsReaders struct {
|
||||
Pull contribsdkconfig.PullMetricReader `mapstructure:"pull"`
|
||||
}
|
||||
|
||||
func NewConfigFactory() factory.ConfigFactory {
|
||||
return factory.NewConfigFactory(factory.MustNewName("instrumentation"), newConfig)
|
||||
}
|
||||
|
||||
func newConfig() factory.Config {
|
||||
host := "0.0.0.0"
|
||||
port := 9090
|
||||
|
||||
return Config{
|
||||
Logs: LogsConfig{
|
||||
Enabled: false,
|
||||
Level: zapcore.InfoLevel,
|
||||
Level: zapcore.DebugLevel,
|
||||
},
|
||||
Traces: TracesConfig{
|
||||
Enabled: false,
|
||||
},
|
||||
Metrics: MetricsConfig{
|
||||
Enabled: false,
|
||||
Enabled: true,
|
||||
Readers: MetricsReaders{
|
||||
Pull: contribsdkconfig.PullMetricReader{
|
||||
Exporter: contribsdkconfig.MetricExporter{
|
||||
Prometheus: &contribsdkconfig.Prometheus{
|
||||
Host: &host,
|
||||
Port: &port,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
func (c Config) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package instrumentation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
contribsdkconfig "go.opentelemetry.io/contrib/config"
|
||||
sdklog "go.opentelemetry.io/otel/log"
|
||||
@@ -10,21 +9,31 @@ import (
|
||||
sdkresource "go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
sdktrace "go.opentelemetry.io/otel/trace"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.signoz.io/signoz/pkg/version"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Instrumentation holds the core components for application instrumentation.
|
||||
type Instrumentation struct {
|
||||
LoggerProvider sdklog.LoggerProvider
|
||||
Logger *zap.Logger
|
||||
MeterProvider sdkmetric.MeterProvider
|
||||
TracerProvider sdktrace.TracerProvider
|
||||
var _ factory.Service = (*SDK)(nil)
|
||||
var _ Instrumentation = (*SDK)(nil)
|
||||
|
||||
type Instrumentation interface {
|
||||
LoggerProvider() sdklog.LoggerProvider
|
||||
Logger() *zap.Logger
|
||||
MeterProvider() sdkmetric.MeterProvider
|
||||
TracerProvider() sdktrace.TracerProvider
|
||||
ToProviderSettings() factory.ProviderSettings
|
||||
}
|
||||
|
||||
// SDK holds the core components for application instrumentation.
|
||||
type SDK struct {
|
||||
sdk contribsdkconfig.SDK
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// New creates a new Instrumentation instance with configured providers.
|
||||
// It sets up logging, tracing, and metrics based on the provided configuration.
|
||||
func New(ctx context.Context, build version.Build, cfg Config) (*Instrumentation, error) {
|
||||
func New(ctx context.Context, build version.Build, cfg Config) (*SDK, error) {
|
||||
// Set default resource attributes if not provided
|
||||
if cfg.Resource.Attributes == nil {
|
||||
cfg.Resource.Attributes = map[string]any{
|
||||
@@ -55,29 +64,86 @@ func New(ctx context.Context, build version.Build, cfg Config) (*Instrumentation
|
||||
SchemaUrl: &sch,
|
||||
}
|
||||
|
||||
loggerProvider, err := newLoggerProvider(ctx, cfg, configResource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create logger provider: %w", err)
|
||||
var loggerProvider *contribsdkconfig.LoggerProvider
|
||||
if cfg.Logs.Enabled {
|
||||
loggerProvider = &contribsdkconfig.LoggerProvider{
|
||||
Processors: []contribsdkconfig.LogRecordProcessor{
|
||||
{Batch: &cfg.Logs.Processors.Batch},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
tracerProvider, err := newTracerProvider(ctx, cfg, configResource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create tracer provider: %w", err)
|
||||
var tracerProvider *contribsdkconfig.TracerProvider
|
||||
if cfg.Traces.Enabled {
|
||||
tracerProvider = &contribsdkconfig.TracerProvider{
|
||||
Processors: []contribsdkconfig.SpanProcessor{
|
||||
{Batch: &cfg.Traces.Processors.Batch},
|
||||
},
|
||||
Sampler: &cfg.Traces.Sampler,
|
||||
}
|
||||
}
|
||||
|
||||
meterProvider, err := newMeterProvider(ctx, cfg, configResource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create meter provider: %w", err)
|
||||
var meterProvider *contribsdkconfig.MeterProvider
|
||||
if cfg.Metrics.Enabled {
|
||||
meterProvider = &contribsdkconfig.MeterProvider{
|
||||
Readers: []contribsdkconfig.MetricReader{
|
||||
{Pull: &cfg.Metrics.Readers.Pull},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return &Instrumentation{
|
||||
LoggerProvider: loggerProvider,
|
||||
TracerProvider: tracerProvider,
|
||||
MeterProvider: meterProvider,
|
||||
Logger: newLogger(cfg, loggerProvider),
|
||||
sdk, err := contribsdkconfig.NewSDK(
|
||||
contribsdkconfig.WithContext(ctx),
|
||||
contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
|
||||
LoggerProvider: loggerProvider,
|
||||
TracerProvider: tracerProvider,
|
||||
MeterProvider: meterProvider,
|
||||
Resource: &configResource,
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SDK{
|
||||
sdk: sdk,
|
||||
logger: newLogger(cfg, sdk.LoggerProvider()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (i *SDK) Start(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *SDK) Stop(ctx context.Context) error {
|
||||
return i.sdk.Shutdown(ctx)
|
||||
}
|
||||
|
||||
func (i *SDK) LoggerProvider() sdklog.LoggerProvider {
|
||||
return i.sdk.LoggerProvider()
|
||||
}
|
||||
|
||||
func (i *SDK) Logger() *zap.Logger {
|
||||
return i.logger
|
||||
}
|
||||
|
||||
func (i *SDK) MeterProvider() sdkmetric.MeterProvider {
|
||||
return i.sdk.MeterProvider()
|
||||
}
|
||||
|
||||
func (i *SDK) TracerProvider() sdktrace.TracerProvider {
|
||||
return i.sdk.TracerProvider()
|
||||
}
|
||||
|
||||
func (i *SDK) ToProviderSettings() factory.ProviderSettings {
|
||||
return factory.ProviderSettings{
|
||||
LoggerProvider: i.LoggerProvider(),
|
||||
ZapLogger: i.Logger(),
|
||||
MeterProvider: i.MeterProvider(),
|
||||
TracerProvider: i.TracerProvider(),
|
||||
}
|
||||
}
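// Editor's note: illustrative usage sketch, not part of this diff. A caller that owns
// the SDK lifecycle would typically do something like:
//
//	sdk, err := instrumentation.New(ctx, build, cfg)
//	if err != nil {
//		return err
//	}
//	defer func() { _ = sdk.Stop(ctx) }()
//	settings := sdk.ToProviderSettings()
//	// settings.ZapLogger, settings.MeterProvider and settings.TracerProvider can now
//	// be handed to factories that accept factory.ProviderSettings.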
|
||||
|
||||
// attributes merges the input attributes with the resource attributes.
|
||||
func attributes(input map[string]any, resource *sdkresource.Resource) map[string]any {
|
||||
output := make(map[string]any)
|
||||
|
||||
pkg/instrumentation/instrumentationtest/instrumentation.go (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
package instrumentationtest
|
||||
|
||||
import (
|
||||
sdklog "go.opentelemetry.io/otel/log"
|
||||
nooplog "go.opentelemetry.io/otel/log/noop"
|
||||
sdkmetric "go.opentelemetry.io/otel/metric"
|
||||
noopmetric "go.opentelemetry.io/otel/metric/noop"
|
||||
sdktrace "go.opentelemetry.io/otel/trace"
|
||||
nooptrace "go.opentelemetry.io/otel/trace/noop"
|
||||
"go.signoz.io/signoz/pkg/factory"
|
||||
"go.signoz.io/signoz/pkg/instrumentation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type noopInstrumentation struct {
|
||||
logger *zap.Logger
|
||||
loggerProvider sdklog.LoggerProvider
|
||||
meterProvider sdkmetric.MeterProvider
|
||||
tracerProvider sdktrace.TracerProvider
|
||||
}
|
||||
|
||||
func New() instrumentation.Instrumentation {
|
||||
return &noopInstrumentation{
|
||||
logger: zap.NewNop(),
|
||||
loggerProvider: nooplog.NewLoggerProvider(),
|
||||
meterProvider: noopmetric.NewMeterProvider(),
|
||||
tracerProvider: nooptrace.NewTracerProvider(),
|
||||
}
|
||||
}
|
||||
|
||||
func (i *noopInstrumentation) LoggerProvider() sdklog.LoggerProvider {
|
||||
return i.loggerProvider
|
||||
}
|
||||
|
||||
func (i *noopInstrumentation) Logger() *zap.Logger {
|
||||
return i.logger
|
||||
}
|
||||
|
||||
func (i *noopInstrumentation) MeterProvider() sdkmetric.MeterProvider {
|
||||
return i.meterProvider
|
||||
}
|
||||
|
||||
func (i *noopInstrumentation) TracerProvider() sdktrace.TracerProvider {
|
||||
return i.tracerProvider
|
||||
}
|
||||
|
||||
func (i *noopInstrumentation) ToProviderSettings() factory.ProviderSettings {
|
||||
return factory.ProviderSettings{
|
||||
LoggerProvider: i.LoggerProvider(),
|
||||
ZapLogger: i.Logger(),
|
||||
MeterProvider: i.MeterProvider(),
|
||||
TracerProvider: i.TracerProvider(),
|
||||
}
|
||||
}
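// Editor's note: illustrative only, not part of this diff. In unit tests the noop
// implementation can stand in for the real SDK, e.g.:
//
//	func TestSomething(t *testing.T) {
//		instr := instrumentationtest.New()
//		settings := instr.ToProviderSettings()
//		_ = settings // pass to the component under test
//	}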
|
||||
@@ -1,38 +1,14 @@
|
||||
package instrumentation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"go.opentelemetry.io/contrib/bridges/otelzap"
|
||||
contribsdkconfig "go.opentelemetry.io/contrib/config"
|
||||
sdklog "go.opentelemetry.io/otel/log"
|
||||
nooplog "go.opentelemetry.io/otel/log/noop"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// newLoggerProvider creates a new logger provider based on the configuration.
|
||||
// If logging is disabled, it returns a no-op logger provider.
|
||||
func newLoggerProvider(ctx context.Context, cfg Config, cfgResource contribsdkconfig.Resource) (sdklog.LoggerProvider, error) {
|
||||
if !cfg.Logs.Enabled {
|
||||
return nooplog.NewLoggerProvider(), nil
|
||||
}
|
||||
|
||||
sdk, err := contribsdkconfig.NewSDK(
|
||||
contribsdkconfig.WithContext(ctx),
|
||||
contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
|
||||
LoggerProvider: &cfg.Logs.LoggerProvider,
|
||||
Resource: &cfgResource,
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sdk.LoggerProvider(), nil
|
||||
}
|
||||
|
||||
// newLogger creates a new Zap logger with the configured level and output.
|
||||
// It combines a JSON encoder for stdout and an OpenTelemetry bridge.
|
||||
func newLogger(cfg Config, provider sdklog.LoggerProvider) *zap.Logger {
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
package instrumentation
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
contribsdkconfig "go.opentelemetry.io/contrib/config"
|
||||
sdkmetric "go.opentelemetry.io/otel/metric"
|
||||
noopmetric "go.opentelemetry.io/otel/metric/noop"
|
||||
)
|
||||
|
||||
// newMeterProvider creates a new meter provider based on the configuration.
|
||||
// If metrics are disabled, it returns a no-op meter provider.
|
||||
func newMeterProvider(ctx context.Context, cfg Config, cfgResource contribsdkconfig.Resource) (sdkmetric.MeterProvider, error) {
|
||||
if !cfg.Metrics.Enabled {
|
||||
return noopmetric.NewMeterProvider(), nil
|
||||
}
|
||||
|
||||
sdk, err := contribsdkconfig.NewSDK(
|
||||
contribsdkconfig.WithContext(ctx),
|
||||
contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
|
||||
MeterProvider: &cfg.Metrics.MeterProvider,
|
||||
Resource: &cfgResource,
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sdk.MeterProvider(), nil
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package instrumentation
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
contribsdkconfig "go.opentelemetry.io/contrib/config"
|
||||
sdktrace "go.opentelemetry.io/otel/trace"
|
||||
nooptrace "go.opentelemetry.io/otel/trace/noop"
|
||||
)
|
||||
|
||||
// newTracerProvider creates a new tracer provider based on the configuration.
|
||||
// If tracing is disabled, it returns a no-op tracer provider.
|
||||
func newTracerProvider(ctx context.Context, cfg Config, cfgResource contribsdkconfig.Resource) (sdktrace.TracerProvider, error) {
|
||||
if !cfg.Traces.Enabled {
|
||||
return nooptrace.NewTracerProvider(), nil
|
||||
}
|
||||
|
||||
sdk, err := contribsdkconfig.NewSDK(
|
||||
contribsdkconfig.WithContext(ctx),
|
||||
contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
|
||||
TracerProvider: &cfg.Traces.TracerProvider,
|
||||
Resource: &cfgResource,
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sdk.TracerProvider(), nil
|
||||
}
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
"go.signoz.io/signoz/pkg/query-service/agentConf/sqlite"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/slices"
|
||||
@@ -19,15 +18,6 @@ type Repo struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
func (r *Repo) initDB(engine string) error {
|
||||
switch engine {
|
||||
case "sqlite3", "sqlite":
|
||||
return sqlite.InitDB(r.db)
|
||||
default:
|
||||
return fmt.Errorf("unsupported db")
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repo) GetConfigHistory(
|
||||
ctx context.Context, typ ElementTypeDef, limit int,
|
||||
) ([]ConfigVersion, *model.ApiError) {
|
||||
|
||||
@@ -39,8 +39,7 @@ type Manager struct {
|
||||
}
|
||||
|
||||
type ManagerOptions struct {
|
||||
DB *sqlx.DB
|
||||
DBEngine string
|
||||
DB *sqlx.DB
|
||||
|
||||
// When acting as opamp.AgentConfigProvider, agent conf recommendations are
|
||||
// applied to the base conf in the order the features have been specified here.
|
||||
@@ -66,10 +65,6 @@ func Initiate(options *ManagerOptions) (*Manager, error) {
|
||||
configSubscribers: map[string]func(){},
|
||||
}
|
||||
|
||||
err := m.initDB(options.DBEngine)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not init agentConf db")
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
func InitDB(db *sqlx.DB) error {
|
||||
var err error
|
||||
if db == nil {
|
||||
return fmt.Errorf("invalid db connection")
|
||||
}
|
||||
|
||||
table_schema := `CREATE TABLE IF NOT EXISTS agent_config_versions(
|
||||
id TEXT PRIMARY KEY,
|
||||
created_by TEXT,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_by TEXT,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
version INTEGER DEFAULT 1,
|
||||
active int,
|
||||
is_valid int,
|
||||
disabled int,
|
||||
element_type VARCHAR(120) NOT NULL,
|
||||
deploy_status VARCHAR(80) NOT NULL DEFAULT 'DIRTY',
|
||||
deploy_sequence INTEGER,
|
||||
deploy_result TEXT,
|
||||
last_hash TEXT,
|
||||
last_config TEXT,
|
||||
UNIQUE(element_type, version)
|
||||
);
|
||||
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS agent_config_versions_u1
|
||||
ON agent_config_versions(element_type, version);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS agent_config_versions_nu1
|
||||
ON agent_config_versions(last_hash);
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS agent_config_elements(
|
||||
id TEXT PRIMARY KEY,
|
||||
created_by TEXT,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_by TEXT,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
element_id TEXT NOT NULL,
|
||||
element_type VARCHAR(120) NOT NULL,
|
||||
version_id TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS agent_config_elements_u1
|
||||
ON agent_config_elements(version_id, element_id, element_type);
|
||||
|
||||
`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error in creating agent config tables")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -2640,7 +2640,7 @@ func (r *ClickHouseReader) GetLogsInfoInLastHeartBeatInterval(ctx context.Contex
|
||||
|
||||
var totalLogLines uint64
|
||||
|
||||
queryStr := fmt.Sprintf("select count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))*1000000000;", r.logsDB, r.logsTable, int(interval.Minutes()))
|
||||
queryStr := fmt.Sprintf("select count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))*1000000000;", r.logsDB, r.logsTableV2, int(interval.Minutes()))
|
||||
|
||||
err := r.db.QueryRow(ctx, queryStr).Scan(&totalLogLines)
|
||||
|
||||
|
||||
pkg/query-service/app/cloudintegrations/Readme.md (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
# SigNoz Cloud Integrations

Cloud integrations are unlike the rest of SigNoz integrations.
They have a different UX and so require a different API.
They will also be limited in number and are not expected to have community-contributed implementations.
|
||||
pkg/query-service/app/cloudintegrations/controller.go (new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
var SupportedCloudProviders = []string{
|
||||
"aws",
|
||||
}
|
||||
|
||||
func validateCloudProviderName(name string) *model.ApiError {
|
||||
if !slices.Contains(SupportedCloudProviders, name) {
|
||||
return model.BadRequest(fmt.Errorf("invalid cloud provider: %s", name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Controller struct {
|
||||
repo cloudProviderAccountsRepository
|
||||
}
|
||||
|
||||
func NewController(db *sqlx.DB) (
|
||||
*Controller, error,
|
||||
) {
|
||||
repo, err := newCloudProviderAccountsRepository(db)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
|
||||
}
|
||||
|
||||
return &Controller{
|
||||
repo: repo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
Id string `json:"id"`
|
||||
CloudAccountId string `json:"cloud_account_id"`
|
||||
Config AccountConfig `json:"config"`
|
||||
Status AccountStatus `json:"status"`
|
||||
}
|
||||
|
||||
type ConnectedAccountsListResponse struct {
|
||||
Accounts []Account `json:"accounts"`
|
||||
}
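// Editor's note: illustrative response shape for the connected-accounts listing
// (field values are made up):
//
//	{
//	  "accounts": [
//	    {
//	      "id": "9e1b0c9a-...",
//	      "cloud_account_id": "546311234",
//	      "config": {"regions": ["us-east-1"]},
//	      "status": {"integration": {"last_heartbeat_ts_ms": 1700000000000}}
//	    }
//	  ]
//	}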
|
||||
|
||||
func (c *Controller) ListConnectedAccounts(
|
||||
ctx context.Context, cloudProvider string,
|
||||
) (
|
||||
*ConnectedAccountsListResponse, *model.ApiError,
|
||||
) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
accountRecords, apiErr := c.repo.listConnected(ctx, cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
|
||||
}
|
||||
|
||||
connectedAccounts := []Account{}
|
||||
for _, a := range accountRecords {
|
||||
connectedAccounts = append(connectedAccounts, a.account())
|
||||
}
|
||||
|
||||
return &ConnectedAccountsListResponse{
|
||||
Accounts: connectedAccounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type GenerateConnectionUrlRequest struct {
|
||||
// Optional. To be specified for updates.
|
||||
AccountId *string `json:"account_id,omitempty"`
|
||||
|
||||
AccountConfig AccountConfig `json:"account_config"`
|
||||
|
||||
AgentConfig SigNozAgentConfig `json:"agent_config"`
|
||||
}
|
||||
|
||||
type SigNozAgentConfig struct {
|
||||
// The region in which SigNoz agent should be installed.
|
||||
Region string `json:"region"`
|
||||
}
|
||||
|
||||
type GenerateConnectionUrlResponse struct {
|
||||
AccountId string `json:"account_id"`
|
||||
ConnectionUrl string `json:"connection_url"`
|
||||
}
|
||||
|
||||
func (c *Controller) GenerateConnectionUrl(
|
||||
ctx context.Context, cloudProvider string, req GenerateConnectionUrlRequest,
|
||||
) (*GenerateConnectionUrlResponse, *model.ApiError) {
|
||||
// Account connection with a simple connection URL may not be available for all providers.
|
||||
if cloudProvider != "aws" {
|
||||
return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
|
||||
}
|
||||
|
||||
account, apiErr := c.repo.upsert(
|
||||
ctx, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
// TODO(Raj): Add actual cloudformation template for AWS integration after it has been shipped.
|
||||
connectionUrl := fmt.Sprintf(
|
||||
"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?stackName=SigNozIntegration/",
|
||||
req.AgentConfig.Region, req.AgentConfig.Region,
|
||||
)
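	// Editor's note, for illustration: with AgentConfig.Region set to "us-east-1" the
	// Sprintf above yields
	// https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/quickcreate?stackName=SigNozIntegration/
	// i.e. an AWS CloudFormation quick-create link; the actual template URL is still a TODO.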
|
||||
|
||||
return &GenerateConnectionUrlResponse{
|
||||
AccountId: account.Id,
|
||||
ConnectionUrl: connectionUrl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type AccountStatusResponse struct {
|
||||
Id string `json:"id"`
|
||||
Status AccountStatus `json:"status"`
|
||||
}
|
||||
|
||||
func (c *Controller) GetAccountStatus(
|
||||
ctx context.Context, cloudProvider string, accountId string,
|
||||
) (
|
||||
*AccountStatusResponse, *model.ApiError,
|
||||
) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
account, apiErr := c.repo.get(ctx, cloudProvider, accountId)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
resp := AccountStatusResponse{
|
||||
Id: account.Id,
|
||||
Status: account.status(),
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
type AgentCheckInRequest struct {
|
||||
AccountId string `json:"account_id"`
|
||||
CloudAccountId string `json:"cloud_account_id"`
|
||||
// Arbitrary cloud specific Agent data
|
||||
Data map[string]any `json:"data,omitempty"`
|
||||
}
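// Editor's note: example payload, illustrative only. An agent checking in for an AWS
// account would send something like the following (the "agent_version" key is a
// hypothetical example of the arbitrary Data field):
//
//	AgentCheckInRequest{
//		AccountId:      "9e1b0c9a-...",   // SigNoz-side account record id
//		CloudAccountId: "546311234",      // AWS account id
//		Data:           map[string]any{"agent_version": "v0.0.1"},
//	}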
|
||||
|
||||
type AgentCheckInResponse struct {
|
||||
Account AccountRecord `json:"account"`
|
||||
}
|
||||
|
||||
func (c *Controller) CheckInAsAgent(
|
||||
ctx context.Context, cloudProvider string, req AgentCheckInRequest,
|
||||
) (*AgentCheckInResponse, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
existingAccount, apiErr := c.repo.get(ctx, cloudProvider, req.AccountId)
|
||||
if existingAccount != nil && existingAccount.CloudAccountId != nil && *existingAccount.CloudAccountId != req.CloudAccountId {
|
||||
return nil, model.BadRequest(fmt.Errorf(
|
||||
"can't check in with new %s account id %s for account %s with existing %s id %s",
|
||||
cloudProvider, req.CloudAccountId, existingAccount.Id, cloudProvider, *existingAccount.CloudAccountId,
|
||||
))
|
||||
}
|
||||
|
||||
existingAccount, apiErr = c.repo.getConnectedCloudAccount(ctx, cloudProvider, req.CloudAccountId)
|
||||
if existingAccount != nil && existingAccount.Id != req.AccountId {
|
||||
return nil, model.BadRequest(fmt.Errorf(
|
||||
"can't check in to %s account %s with id %s. already connected with id %s",
|
||||
cloudProvider, req.CloudAccountId, req.AccountId, existingAccount.Id,
|
||||
))
|
||||
}
|
||||
|
||||
agentReport := AgentReport{
|
||||
TimestampMillis: time.Now().UnixMilli(),
|
||||
Data: req.Data,
|
||||
}
|
||||
|
||||
account, apiErr := c.repo.upsert(
|
||||
ctx, cloudProvider, &req.AccountId, nil, &req.CloudAccountId, &agentReport, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
return &AgentCheckInResponse{
|
||||
Account: *account,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type UpdateAccountConfigRequest struct {
|
||||
Config AccountConfig `json:"config"`
|
||||
}
|
||||
|
||||
func (c *Controller) UpdateAccountConfig(
|
||||
ctx context.Context,
|
||||
cloudProvider string,
|
||||
accountId string,
|
||||
req UpdateAccountConfigRequest,
|
||||
) (*Account, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
accountRecord, apiErr := c.repo.upsert(
|
||||
ctx, cloudProvider, &accountId, &req.Config, nil, nil, nil,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||
}
|
||||
|
||||
account := accountRecord.account()
|
||||
|
||||
return &account, nil
|
||||
}
|
||||
|
||||
func (c *Controller) DisconnectAccount(
|
||||
ctx context.Context, cloudProvider string, accountId string,
|
||||
) (*AccountRecord, *model.ApiError) {
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
account, apiErr := c.repo.get(ctx, cloudProvider, accountId)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
||||
}
|
||||
|
||||
tsNow := time.Now()
|
||||
account, apiErr = c.repo.upsert(
|
||||
ctx, cloudProvider, &accountId, nil, nil, nil, &tsNow,
|
||||
)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
pkg/query-service/app/cloudintegrations/controller_test.go (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
)
|
||||
|
||||
func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
testDB := utils.NewQueryServiceDBForTests(t)
|
||||
controller, err := NewController(testDB)
|
||||
require.NoError(err)
|
||||
|
||||
// should be able to generate connection url for
|
||||
// same account id again with updated config
|
||||
testAccountConfig1 := AccountConfig{EnabledRegions: []string{"us-east-1", "us-west-1"}}
|
||||
resp1, apiErr := controller.GenerateConnectionUrl(
|
||||
context.TODO(), "aws", GenerateConnectionUrlRequest{
|
||||
AccountConfig: testAccountConfig1,
|
||||
AgentConfig: SigNozAgentConfig{Region: "us-east-2"},
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.NotEmpty(resp1.ConnectionUrl)
|
||||
require.NotEmpty(resp1.AccountId)
|
||||
|
||||
testAccountId := resp1.AccountId
|
||||
account, apiErr := controller.repo.get(
|
||||
context.TODO(), "aws", testAccountId,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountConfig1, *account.Config)
|
||||
|
||||
testAccountConfig2 := AccountConfig{EnabledRegions: []string{"us-east-2", "us-west-2"}}
|
||||
resp2, apiErr := controller.GenerateConnectionUrl(
|
||||
context.TODO(), "aws", GenerateConnectionUrlRequest{
|
||||
AccountId: &testAccountId,
|
||||
AccountConfig: testAccountConfig2,
|
||||
AgentConfig: SigNozAgentConfig{Region: "us-east-2"},
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountId, resp2.AccountId)
|
||||
|
||||
account, apiErr = controller.repo.get(
|
||||
context.TODO(), "aws", testAccountId,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountConfig2, *account.Config)
|
||||
}
|
||||
|
||||
func TestAgentCheckIns(t *testing.T) {
|
||||
require := require.New(t)
|
||||
testDB := utils.NewQueryServiceDBForTests(t)
|
||||
controller, err := NewController(testDB)
|
||||
require.NoError(err)
|
||||
|
||||
// An agent should be able to check in from a cloud account even
|
||||
// if no connection url was requested (no account with agent's account id exists)
|
||||
testAccountId1 := uuid.NewString()
|
||||
testCloudAccountId1 := "546311234"
|
||||
resp1, apiErr := controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId1,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.Equal(testAccountId1, resp1.Account.Id)
|
||||
require.Equal(testCloudAccountId1, *resp1.Account.CloudAccountId)
|
||||
|
||||
// The agent should not be able to check in with a different
|
||||
// cloud account id for the same account.
|
||||
testCloudAccountId2 := "99999999"
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId1,
|
||||
CloudAccountId: testCloudAccountId2,
|
||||
},
|
||||
)
|
||||
require.NotNil(apiErr)
|
||||
|
||||
// The agent should not be able to check-in with a particular cloud account id
|
||||
// if another connected AccountRecord exists for the same cloud account,
|
||||
// i.e. there can't be 2 connected account records for the same cloud account id
|
||||
// at any point in time.
|
||||
existingConnected, apiErr := controller.repo.getConnectedCloudAccount(
|
||||
context.TODO(), "aws", testCloudAccountId1,
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
require.NotNil(existingConnected)
|
||||
require.Equal(testCloudAccountId1, *existingConnected.CloudAccountId)
|
||||
require.Nil(existingConnected.RemovedAt)
|
||||
|
||||
testAccountId2 := uuid.NewString()
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId2,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.NotNil(apiErr)
|
||||
|
||||
// After disconnecting the existing account record, the agent should be able to
// connect again for that cloud account id
|
||||
_, apiErr = controller.DisconnectAccount(
|
||||
context.TODO(), "aws", testAccountId1,
|
||||
)
|
||||
|
||||
existingConnected, apiErr = controller.repo.getConnectedCloudAccount(
|
||||
context.TODO(), "aws", testCloudAccountId1,
|
||||
)
|
||||
require.Nil(existingConnected)
|
||||
require.NotNil(apiErr)
|
||||
require.Equal(model.ErrorNotFound, apiErr.Type())
|
||||
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId2,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
|
||||
// should be able to keep checking in
|
||||
_, apiErr = controller.CheckInAsAgent(
|
||||
context.TODO(), "aws", AgentCheckInRequest{
|
||||
AccountId: testAccountId2,
|
||||
CloudAccountId: testCloudAccountId1,
|
||||
},
|
||||
)
|
||||
require.Nil(apiErr)
|
||||
}
|
||||
|
||||
func TestCantDisconnectNonExistentAccount(t *testing.T) {
|
||||
require := require.New(t)
|
||||
testDB := utils.NewQueryServiceDBForTests(t)
|
||||
controller, err := NewController(testDB)
|
||||
require.NoError(err)
|
||||
|
||||
// Attempting to disconnect a non-existent account should return error
|
||||
account, apiErr := controller.DisconnectAccount(
|
||||
context.TODO(), "aws", uuid.NewString(),
|
||||
)
|
||||
require.NotNil(apiErr)
|
||||
require.Equal(model.ErrorNotFound, apiErr.Type())
|
||||
require.Nil(account)
|
||||
}
|
||||
pkg/query-service/app/cloudintegrations/model.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Represents a cloud provider account for cloud integrations
|
||||
type AccountRecord struct {
|
||||
CloudProvider string `json:"cloud_provider" db:"cloud_provider"`
|
||||
Id string `json:"id" db:"id"`
|
||||
Config *AccountConfig `json:"config" db:"config_json"`
|
||||
CloudAccountId *string `json:"cloud_account_id" db:"cloud_account_id"`
|
||||
LastAgentReport *AgentReport `json:"last_agent_report" db:"last_agent_report_json"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
RemovedAt *time.Time `json:"removed_at" db:"removed_at"`
|
||||
}
|
||||
|
||||
type AccountConfig struct {
|
||||
EnabledRegions []string `json:"regions"`
|
||||
}
|
||||
|
||||
func DefaultAccountConfig() AccountConfig {
|
||||
return AccountConfig{
|
||||
EnabledRegions: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
// For serializing from db
|
||||
func (c *AccountConfig) Scan(src any) error {
|
||||
data, ok := src.([]byte)
|
||||
if !ok {
|
||||
return fmt.Errorf("tried to scan from %T instead of bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, &c)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (c *AccountConfig) Value() (driver.Value, error) {
|
||||
if c == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't serialize cloud account config to JSON: %w", err,
|
||||
)
|
||||
}
|
||||
return serialized, nil
|
||||
}
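// Editor's note: illustrative round trip, not part of this diff. AccountConfig is stored
// as JSON text in the config_json column:
//
//	cfg := AccountConfig{EnabledRegions: []string{"us-east-1"}}
//	v, _ := cfg.Value()      // -> []byte(`{"regions":["us-east-1"]}`)
//	var out AccountConfig
//	_ = out.Scan(v)          // -> out.EnabledRegions == []string{"us-east-1"}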
|
||||
|
||||
type AgentReport struct {
|
||||
TimestampMillis int64 `json:"timestamp_millis"`
|
||||
Data map[string]any `json:"data"`
|
||||
}
|
||||
|
||||
// For serializing from db
|
||||
func (r *AgentReport) Scan(src any) error {
|
||||
data, ok := src.([]byte)
|
||||
if !ok {
|
||||
return fmt.Errorf("tried to scan from %T instead of bytes", src)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, &r)
|
||||
}
|
||||
|
||||
// For serializing to db
|
||||
func (r *AgentReport) Value() (driver.Value, error) {
|
||||
if r == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't serialize agent report to JSON: %w", err,
|
||||
)
|
||||
}
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
type AccountStatus struct {
|
||||
Integration AccountIntegrationStatus `json:"integration"`
|
||||
}
|
||||
|
||||
type AccountIntegrationStatus struct {
|
||||
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
|
||||
}
|
||||
|
||||
func (a *AccountRecord) status() AccountStatus {
|
||||
status := AccountStatus{}
|
||||
if a.LastAgentReport != nil {
|
||||
lastHeartbeat := a.LastAgentReport.TimestampMillis
|
||||
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
func (a *AccountRecord) account() Account {
|
||||
ca := Account{Id: a.Id, Status: a.status()}
|
||||
|
||||
if a.CloudAccountId != nil {
|
||||
ca.CloudAccountId = *a.CloudAccountId
|
||||
}
|
||||
|
||||
if a.Config != nil {
|
||||
ca.Config = *a.Config
|
||||
} else {
|
||||
ca.Config = DefaultAccountConfig()
|
||||
}
|
||||
|
||||
return ca
|
||||
}
|
||||
pkg/query-service/app/cloudintegrations/repo.go (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
package cloudintegrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type cloudProviderAccountsRepository interface {
|
||||
listConnected(ctx context.Context, cloudProvider string) ([]AccountRecord, *model.ApiError)
|
||||
|
||||
get(ctx context.Context, cloudProvider string, id string) (*AccountRecord, *model.ApiError)
|
||||
|
||||
getConnectedCloudAccount(
|
||||
ctx context.Context, cloudProvider string, cloudAccountId string,
|
||||
) (*AccountRecord, *model.ApiError)
|
||||
|
||||
// Insert an account or update it by (cloudProvider, id)
|
||||
// for specified non-empty fields
|
||||
upsert(
|
||||
ctx context.Context,
|
||||
cloudProvider string,
|
||||
id *string,
|
||||
config *AccountConfig,
|
||||
cloudAccountId *string,
|
||||
agentReport *AgentReport,
|
||||
removedAt *time.Time,
|
||||
) (*AccountRecord, *model.ApiError)
|
||||
}
|
||||
|
||||
func newCloudProviderAccountsRepository(db *sqlx.DB) (
|
||||
*cloudProviderAccountsSQLRepository, error,
|
||||
) {
|
||||
return &cloudProviderAccountsSQLRepository{
|
||||
db: db,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type cloudProviderAccountsSQLRepository struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) listConnected(
|
||||
ctx context.Context, cloudProvider string,
|
||||
) ([]AccountRecord, *model.ApiError) {
|
||||
accounts := []AccountRecord{}
|
||||
|
||||
err := r.db.SelectContext(
|
||||
ctx, &accounts, `
|
||||
select
|
||||
cloud_provider,
|
||||
id,
|
||||
config_json,
|
||||
cloud_account_id,
|
||||
last_agent_report_json,
|
||||
created_at,
|
||||
removed_at
|
||||
from cloud_integrations_accounts
|
||||
where
|
||||
cloud_provider=$1
|
||||
and removed_at is NULL
|
||||
and cloud_account_id is not NULL
|
||||
and last_agent_report_json is not NULL
|
||||
order by created_at
|
||||
`, cloudProvider,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"could not query connected cloud accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) get(
|
||||
ctx context.Context, cloudProvider string, id string,
|
||||
) (*AccountRecord, *model.ApiError) {
|
||||
var result AccountRecord
|
||||
|
||||
err := r.db.GetContext(
|
||||
ctx, &result, `
|
||||
select
|
||||
cloud_provider,
|
||||
id,
|
||||
config_json,
|
||||
cloud_account_id,
|
||||
last_agent_report_json,
|
||||
created_at,
|
||||
removed_at
|
||||
from cloud_integrations_accounts
|
||||
where
|
||||
cloud_provider=$1
|
||||
and id=$2
|
||||
`,
|
||||
cloudProvider, id,
|
||||
)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, model.NotFoundError(fmt.Errorf(
|
||||
"couldn't find account with Id %s", id,
|
||||
))
|
||||
} else if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't query cloud provider accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
|
||||
ctx context.Context, cloudProvider string, cloudAccountId string,
|
||||
) (*AccountRecord, *model.ApiError) {
|
||||
var result AccountRecord
|
||||
|
||||
err := r.db.GetContext(
|
||||
ctx, &result, `
|
||||
select
|
||||
cloud_provider,
|
||||
id,
|
||||
config_json,
|
||||
cloud_account_id,
|
||||
last_agent_report_json,
|
||||
created_at,
|
||||
removed_at
|
||||
from cloud_integrations_accounts
|
||||
where
|
||||
cloud_provider=$1
|
||||
and cloud_account_id=$2
|
||||
and last_agent_report_json is not NULL
|
||||
and removed_at is NULL
|
||||
`,
|
||||
cloudProvider, cloudAccountId,
|
||||
)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, model.NotFoundError(fmt.Errorf(
|
||||
"couldn't find connected cloud account %s", cloudAccountId,
|
||||
))
|
||||
} else if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't query cloud provider accounts: %w", err,
|
||||
))
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (r *cloudProviderAccountsSQLRepository) upsert(
|
||||
ctx context.Context,
|
||||
cloudProvider string,
|
||||
id *string,
|
||||
config *AccountConfig,
|
||||
cloudAccountId *string,
|
||||
agentReport *AgentReport,
|
||||
removedAt *time.Time,
|
||||
) (*AccountRecord, *model.ApiError) {
|
||||
// Insert
|
||||
if id == nil {
|
||||
newId := uuid.NewString()
|
||||
id = &newId
|
||||
}
|
||||
|
||||
// Prepare clause for setting values in `on conflict do update`
|
||||
onConflictSetStmts := []string{}
|
||||
setColStatement := func(col string) string {
|
||||
return fmt.Sprintf("%s=excluded.%s", col, col)
|
||||
}
|
||||
|
||||
if config != nil {
|
||||
onConflictSetStmts = append(
|
||||
onConflictSetStmts, setColStatement("config_json"),
|
||||
)
|
||||
}
|
||||
|
||||
if cloudAccountId != nil {
|
||||
onConflictSetStmts = append(
|
||||
onConflictSetStmts, setColStatement("cloud_account_id"),
|
||||
)
|
||||
}
|
||||
|
||||
if agentReport != nil {
|
||||
onConflictSetStmts = append(
|
||||
onConflictSetStmts, setColStatement("last_agent_report_json"),
|
||||
)
|
||||
}
|
||||
|
||||
if removedAt != nil {
|
||||
onConflictSetStmts = append(
|
||||
onConflictSetStmts, setColStatement("removed_at"),
|
||||
)
|
||||
}
|
||||
|
||||
onConflictClause := ""
|
||||
if len(onConflictSetStmts) > 0 {
|
||||
onConflictClause = fmt.Sprintf(
|
||||
"on conflict(cloud_provider, id) do update SET\n%s",
|
||||
strings.Join(onConflictSetStmts, ",\n"),
|
||||
)
|
||||
}
|
||||
|
||||
insertQuery := fmt.Sprintf(`
|
||||
INSERT INTO cloud_integrations_accounts (
|
||||
cloud_provider,
|
||||
id,
|
||||
config_json,
|
||||
cloud_account_id,
|
||||
last_agent_report_json,
|
||||
removed_at
|
||||
) values ($1, $2, $3, $4, $5, $6)
|
||||
%s`, onConflictClause,
|
||||
)
|
||||
|
||||
_, dbErr := r.db.ExecContext(
|
||||
ctx, insertQuery,
|
||||
cloudProvider, id, config, cloudAccountId, agentReport, removedAt,
|
||||
)
|
||||
if dbErr != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"could not upsert cloud account record: %w", dbErr,
|
||||
))
|
||||
}
|
||||
|
||||
upsertedAccount, apiErr := r.get(ctx, cloudProvider, *id)
|
||||
if apiErr != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't fetch upserted account by id: %w", apiErr.ToError(),
|
||||
))
|
||||
}
|
||||
|
||||
return upsertedAccount, nil
|
||||
}
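// Editor's note, for illustration: when only config is non-nil, the statement built
// above is roughly
//
//	INSERT INTO cloud_integrations_accounts (
//		cloud_provider, id, config_json, cloud_account_id, last_agent_report_json, removed_at
//	) values ($1, $2, $3, $4, $5, $6)
//	on conflict(cloud_provider, id) do update SET
//	config_json=excluded.config_json
//
// so an existing row keeps its cloud_account_id, last_agent_report_json and removed_at values.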
|
||||
@@ -35,126 +35,10 @@ var (
|
||||
)
|
||||
|
||||
// InitDB sets up setting up the connection pool global variable.
|
||||
func InitDB(dataSourceName string) (*sqlx.DB, error) {
|
||||
var err error
|
||||
|
||||
db, err = sqlx.Open("sqlite3", dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table_schema := `CREATE TABLE IF NOT EXISTS dashboards (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
uuid TEXT NOT NULL UNIQUE,
|
||||
created_at datetime NOT NULL,
|
||||
updated_at datetime NOT NULL,
|
||||
data TEXT NOT NULL
|
||||
);`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating dashboard table: %s", err.Error())
|
||||
}
|
||||
|
||||
table_schema = `CREATE TABLE IF NOT EXISTS rules (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
updated_at datetime NOT NULL,
|
||||
deleted INTEGER DEFAULT 0,
|
||||
data TEXT NOT NULL
|
||||
);`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating rules table: %s", err.Error())
|
||||
}
|
||||
|
||||
table_schema = `CREATE TABLE IF NOT EXISTS notification_channels (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
created_at datetime NOT NULL,
|
||||
updated_at datetime NOT NULL,
|
||||
name TEXT NOT NULL UNIQUE,
|
||||
type TEXT NOT NULL,
|
||||
deleted INTEGER DEFAULT 0,
|
||||
data TEXT NOT NULL
|
||||
);`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating notification_channles table: %s", err.Error())
|
||||
}
|
||||
|
||||
tableSchema := `CREATE TABLE IF NOT EXISTS planned_maintenance (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT NOT NULL,
|
||||
description TEXT,
|
||||
alert_ids TEXT,
|
||||
schedule TEXT NOT NULL,
|
||||
created_at datetime NOT NULL,
|
||||
created_by TEXT NOT NULL,
|
||||
updated_at datetime NOT NULL,
|
||||
updated_by TEXT NOT NULL
|
||||
);`
|
||||
_, err = db.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating planned_maintenance table: %s", err.Error())
|
||||
}
|
||||
|
||||
table_schema = `CREATE TABLE IF NOT EXISTS ttl_status (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
transaction_id TEXT NOT NULL,
|
||||
created_at datetime NOT NULL,
|
||||
updated_at datetime NOT NULL,
|
||||
table_name TEXT NOT NULL,
|
||||
ttl INTEGER DEFAULT 0,
|
||||
cold_storage_ttl INTEGER DEFAULT 0,
|
||||
status TEXT NOT NULL
|
||||
);`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating ttl_status table: %s", err.Error())
|
||||
}
|
||||
|
||||
// sqlite does not support "IF NOT EXISTS"
|
||||
createdAt := `ALTER TABLE rules ADD COLUMN created_at datetime;`
|
||||
_, err = db.Exec(createdAt)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column created_at to rules table: %s", err.Error())
|
||||
}
|
||||
|
||||
createdBy := `ALTER TABLE rules ADD COLUMN created_by TEXT;`
|
||||
_, err = db.Exec(createdBy)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column created_by to rules table: %s", err.Error())
|
||||
}
|
||||
|
||||
updatedBy := `ALTER TABLE rules ADD COLUMN updated_by TEXT;`
|
||||
_, err = db.Exec(updatedBy)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column updated_by to rules table: %s", err.Error())
|
||||
}
|
||||
|
||||
createdBy = `ALTER TABLE dashboards ADD COLUMN created_by TEXT;`
|
||||
_, err = db.Exec(createdBy)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column created_by to dashboards table: %s", err.Error())
|
||||
}
|
||||
|
||||
updatedBy = `ALTER TABLE dashboards ADD COLUMN updated_by TEXT;`
|
||||
_, err = db.Exec(updatedBy)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column updated_by to dashboards table: %s", err.Error())
|
||||
}
|
||||
|
||||
locked := `ALTER TABLE dashboards ADD COLUMN locked INTEGER DEFAULT 0;`
|
||||
_, err = db.Exec(locked)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
|
||||
return nil, fmt.Errorf("error in adding column locked to dashboards table: %s", err.Error())
|
||||
}
|
||||
|
||||
// @deprecated
|
||||
func InitDB(inputDB *sqlx.DB) {
|
||||
db = inputDB
|
||||
telemetry.GetInstance().SetDashboardsInfoCallback(GetDashboardsInfo)
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
type Dashboard struct {
|
||||
@@ -470,7 +354,7 @@ func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
|
||||
dashboardInfo := countPanelsInDashboard(dashboard.Data)
|
||||
dashboardsInfo.LogsBasedPanels += dashboardInfo.LogsBasedPanels
|
||||
dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels
|
||||
dashboardsInfo.MetricBasedPanels += dashboardsInfo.MetricBasedPanels
|
||||
dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels
|
||||
dashboardsInfo.LogsPanelsWithAttrContainsOp += dashboardInfo.LogsPanelsWithAttrContainsOp
|
||||
dashboardsInfo.DashboardsWithLogsChQuery += dashboardInfo.DashboardsWithLogsChQuery
|
||||
dashboardsInfo.DashboardsWithTraceChQuery += dashboardInfo.DashboardsWithTraceChQuery
|
||||
|
||||
@@ -33,41 +33,9 @@ type SavedView struct {
|
||||
ExtraData string `json:"extra_data" db:"extra_data"`
|
||||
}
|
||||
|
||||
// InitWithDSN sets up setting up the connection pool global variable.
|
||||
func InitWithDSN(dataSourceName string) (*sqlx.DB, error) {
|
||||
var err error
|
||||
|
||||
db, err = sqlx.Open("sqlite3", dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tableSchema := `CREATE TABLE IF NOT EXISTS saved_views (
|
||||
uuid TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
category TEXT NOT NULL,
|
||||
created_at datetime NOT NULL,
|
||||
created_by TEXT,
|
||||
updated_at datetime NOT NULL,
|
||||
updated_by TEXT,
|
||||
source_page TEXT NOT NULL,
|
||||
tags TEXT,
|
||||
data TEXT NOT NULL,
|
||||
extra_data TEXT
|
||||
);`
|
||||
|
||||
_, err = db.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating saved views table: %s", err.Error())
|
||||
}
|
||||
|
||||
telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
func InitWithDB(sqlDB *sqlx.DB) {
|
||||
db = sqlDB
|
||||
telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)
|
||||
}
|
||||
|
||||
func GetViews() ([]*v3.SavedView, error) {
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/agentConf"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/explorer"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/inframetrics"
|
||||
@@ -102,6 +103,8 @@ type APIHandler struct {
|
||||
|
||||
IntegrationsController *integrations.Controller
|
||||
|
||||
CloudIntegrationsController *cloudintegrations.Controller
|
||||
|
||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||
|
||||
// SetupCompleted indicates if SigNoz is ready for general use.
|
||||
@@ -155,6 +158,9 @@ type APIHandlerOpts struct {
|
||||
// Integrations
|
||||
IntegrationsController *integrations.Controller
|
||||
|
||||
// Cloud Provider Integrations
|
||||
CloudIntegrationsController *cloudintegrations.Controller
|
||||
|
||||
// Log parsing pipelines
|
||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||
|
||||
@@ -226,6 +232,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
|
||||
ruleManager: opts.RuleManager,
|
||||
featureFlags: opts.FeatureFlags,
|
||||
IntegrationsController: opts.IntegrationsController,
|
||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||
querier: querier,
|
||||
querierV2: querierv2,
|
||||
@@ -3867,6 +3874,157 @@ func (aH *APIHandler) UninstallIntegration(
|
||||
aH.Respond(w, map[string]interface{}{})
|
||||
}
|
||||
|
||||
// cloud provider integrations
|
||||
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *AuthMiddleware) {
|
||||
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionUrl),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts", am.ViewAccess(aH.CloudIntegrationsListConnectedAccounts),
|
||||
).Methods(http.MethodGet)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts/{accountId}/status", am.ViewAccess(aH.CloudIntegrationsGetAccountStatus),
|
||||
).Methods(http.MethodGet)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts/{accountId}/config", am.EditAccess(aH.CloudIntegrationsUpdateAccountConfig),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/accounts/{accountId}/disconnect", am.EditAccess(aH.CloudIntegrationsDisconnectAccount),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
subRouter.HandleFunc(
|
||||
"/{cloudProvider}/agent-check-in", am.EditAccess(aH.CloudIntegrationsAgentCheckIn),
|
||||
).Methods(http.MethodPost)
|
||||
|
||||
}
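// Editor's note, for illustration: with the "aws" provider the registrations above
// resolve to these endpoints:
//
//	POST /api/v1/cloud-integrations/aws/accounts/generate-connection-url
//	GET  /api/v1/cloud-integrations/aws/accounts
//	GET  /api/v1/cloud-integrations/aws/accounts/{accountId}/status
//	POST /api/v1/cloud-integrations/aws/accounts/{accountId}/config
//	POST /api/v1/cloud-integrations/aws/accounts/{accountId}/disconnect
//	POST /api/v1/cloud-integrations/aws/agent-check-in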
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
|
||||
r.Context(), cloudProvider,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
aH.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
req := cloudintegrations.GenerateConnectionUrlRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
|
||||
r.Context(), cloudProvider, req,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
|
||||
r.Context(), cloudProvider, accountId,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
aH.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
|
||||
req := cloudintegrations.AgentCheckInRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
result, apiErr := aH.CloudIntegrationsController.CheckInAsAgent(
|
||||
r.Context(), cloudProvider, req,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
req := cloudintegrations.UpdateAccountConfigRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
|
||||
r.Context(), cloudProvider, accountId, req,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
|
||||
w http.ResponseWriter, r *http.Request,
|
||||
) {
|
||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
||||
accountId := mux.Vars(r)["accountId"]
|
||||
|
||||
result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
|
||||
r.Context(), cloudProvider, accountId,
|
||||
)
|
||||
|
||||
if apiErr != nil {
|
||||
RespondError(w, apiErr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
aH.Respond(w, result)
|
||||
}
|
||||
|
||||
// logs
|
||||
func (aH *APIHandler) RegisterLogsRoutes(router *mux.Router, am *AuthMiddleware) {
|
||||
subRouter := router.PathPrefix("/api/v1/logs").Subrouter()
|
||||
@@ -4504,8 +4662,8 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
|
||||
|
||||
userEmail, err := auth.GetEmailFromJwt(r.Context())
|
||||
if err == nil {
|
||||
signozLogsUsed, signozMetricsUsed, signozTracesUsed := telemetry.GetInstance().CheckSigNozSignals(queryRangeParams)
|
||||
if signozLogsUsed || signozMetricsUsed || signozTracesUsed {
|
||||
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(queryRangeParams)
|
||||
if queryInfoResult.LogsUsed || queryInfoResult.MetricsUsed || queryInfoResult.TracesUsed {
|
||||
|
||||
if dashboardMatched {
|
||||
var dashboardID, widgetID string
|
||||
@@ -4531,13 +4689,18 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
|
||||
widgetID = widgetIDMatch[1]
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY, map[string]interface{}{
|
||||
"queryType": queryRangeParams.CompositeQuery.QueryType,
|
||||
"panelType": queryRangeParams.CompositeQuery.PanelType,
|
||||
"tracesUsed": signozTracesUsed,
|
||||
"logsUsed": signozLogsUsed,
|
||||
"metricsUsed": signozMetricsUsed,
|
||||
"dashboardId": dashboardID,
|
||||
"widgetId": widgetID,
|
||||
"queryType": queryRangeParams.CompositeQuery.QueryType,
|
||||
"panelType": queryRangeParams.CompositeQuery.PanelType,
|
||||
"tracesUsed": queryInfoResult.TracesUsed,
|
||||
"logsUsed": queryInfoResult.LogsUsed,
|
||||
"metricsUsed": queryInfoResult.MetricsUsed,
|
||||
"numberOfQueries": queryInfoResult.NumberOfQueries,
|
||||
"groupByApplied": queryInfoResult.GroupByApplied,
|
||||
"aggregateOperator": queryInfoResult.AggregateOperator,
|
||||
"aggregateAttributeKey": queryInfoResult.AggregateAttributeKey,
|
||||
"filterApplied": queryInfoResult.FilterApplied,
|
||||
"dashboardId": dashboardID,
|
||||
"widgetId": widgetID,
|
||||
}, userEmail, true, false)
|
||||
}
|
||||
if alertMatched {
|
||||
@@ -4554,12 +4717,17 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
|
||||
alertID = alertIDMatch[1]
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY, map[string]interface{}{
|
||||
"queryType": queryRangeParams.CompositeQuery.QueryType,
|
||||
"panelType": queryRangeParams.CompositeQuery.PanelType,
|
||||
"tracesUsed": signozTracesUsed,
|
||||
"logsUsed": signozLogsUsed,
|
||||
"metricsUsed": signozMetricsUsed,
|
||||
"alertId": alertID,
|
||||
"queryType": queryRangeParams.CompositeQuery.QueryType,
|
||||
"panelType": queryRangeParams.CompositeQuery.PanelType,
|
||||
"tracesUsed": queryInfoResult.TracesUsed,
|
||||
"logsUsed": queryInfoResult.LogsUsed,
|
||||
"metricsUsed": queryInfoResult.MetricsUsed,
|
||||
"numberOfQueries": queryInfoResult.NumberOfQueries,
|
||||
"groupByApplied": queryInfoResult.GroupByApplied,
|
||||
"aggregateOperator": queryInfoResult.AggregateOperator,
|
||||
"aggregateAttributeKey": queryInfoResult.AggregateAttributeKey,
|
||||
"filterApplied": queryInfoResult.FilterApplied,
|
||||
"alertId": alertID,
|
||||
}, userEmail, true, false)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,12 +123,7 @@ type Manager struct {
|
||||
}
|
||||
|
||||
func NewManager(db *sqlx.DB) (*Manager, error) {
|
||||
iiRepo, err := NewInstalledIntegrationsSqliteRepo(db)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"could not init sqlite DB for installed integrations: %w", err,
|
||||
)
|
||||
}
|
||||
iiRepo := NewInstalledIntegrationsSqliteRepo(db)
|
||||
|
||||
return &Manager{
|
||||
availableIntegrationsRepo: &BuiltInIntegrations{},
|
||||
|
||||
@@ -9,45 +9,14 @@ import (
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
func InitSqliteDBIfNeeded(db *sqlx.DB) error {
|
||||
if db == nil {
|
||||
return fmt.Errorf("db is required")
|
||||
}
|
||||
|
||||
createTablesStatements := `
|
||||
CREATE TABLE IF NOT EXISTS integrations_installed(
|
||||
integration_id TEXT PRIMARY KEY,
|
||||
config_json TEXT,
|
||||
installed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
`
|
||||
_, err := db.Exec(createTablesStatements)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"could not ensure integrations schema in sqlite DB: %w", err,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type InstalledIntegrationsSqliteRepo struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) (
|
||||
*InstalledIntegrationsSqliteRepo, error,
|
||||
) {
|
||||
err := InitSqliteDBIfNeeded(db)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't ensure sqlite schema for installed integrations: %w", err,
|
||||
)
|
||||
}
|
||||
|
||||
func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) *InstalledIntegrationsSqliteRepo {
|
||||
return &InstalledIntegrationsSqliteRepo{
|
||||
db: db,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (r *InstalledIntegrationsSqliteRepo) list(
|
||||
|
||||
@@ -15,11 +15,7 @@ import (

func NewTestIntegrationsManager(t *testing.T) *Manager {
testDB := utils.NewQueryServiceDBForTests(t)

installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB)
if err != nil {
t.Fatalf("could not init sqlite DB for installed integrations: %v", err)
}
installedIntegrationsRepo := NewInstalledIntegrationsSqliteRepo(testDB)

return &Manager{
availableIntegrationsRepo: &TestAvailableIntegrationsRepo{},

@@ -27,15 +27,13 @@ type LogParsingPipelineController struct {

func NewLogParsingPipelinesController(
db *sqlx.DB,
engine string,
getIntegrationPipelines func(context.Context) ([]Pipeline, *model.ApiError),
) (*LogParsingPipelineController, error) {
repo := NewRepo(db)
err := repo.InitDB(engine)
return &LogParsingPipelineController{
Repo: repo,
GetIntegrationPipelines: getIntegrationPipelines,
}, err
}, nil
}

// PipelinesResponse is used to prepare http response for pipelines config related requests
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline/sqlite"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
@@ -29,15 +28,6 @@ func NewRepo(db *sqlx.DB) Repo {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repo) InitDB(engine string) error {
|
||||
switch engine {
|
||||
case "sqlite3", "sqlite":
|
||||
return sqlite.InitDB(r.db)
|
||||
default:
|
||||
return fmt.Errorf("unsupported db")
|
||||
}
|
||||
}
|
||||
|
||||
// insertPipeline stores a given postable pipeline to database
|
||||
func (r *Repo) insertPipeline(
|
||||
ctx context.Context, postable *PostablePipeline,
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
func InitDB(db *sqlx.DB) error {
|
||||
var err error
|
||||
if db == nil {
|
||||
return fmt.Errorf("invalid db connection")
|
||||
}
|
||||
|
||||
table_schema := `CREATE TABLE IF NOT EXISTS pipelines(
|
||||
id TEXT PRIMARY KEY,
|
||||
order_id INTEGER,
|
||||
enabled BOOLEAN,
|
||||
created_by TEXT,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
name VARCHAR(400) NOT NULL,
|
||||
alias VARCHAR(20) NOT NULL,
|
||||
description TEXT,
|
||||
filter TEXT NOT NULL,
|
||||
config_json TEXT
|
||||
);
|
||||
`
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error in creating pipelines table")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,30 @@
package helpers

import (
"fmt"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func AddSecondaryAggregation(seriesAggregator v3.SecondaryAggregation, query string) string {
queryImpl := "SELECT %s as aggregated_value, ts" +
" FROM (%s)" +
" GROUP BY ts" +
" ORDER BY ts"

var op string
switch seriesAggregator {
case v3.SecondaryAggregationAvg:
op = "avg(value)"
query = fmt.Sprintf(queryImpl, op, query)
case v3.SecondaryAggregationSum:
op = "sum(value)"
query = fmt.Sprintf(queryImpl, op, query)
case v3.SecondaryAggregationMin:
op = "min(value)"
query = fmt.Sprintf(queryImpl, op, query)
case v3.SecondaryAggregationMax:
op = "max(value)"
query = fmt.Sprintf(queryImpl, op, query)
}
return query
}
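Note on the new helper above: it wraps the already-grouped inner metric query with an outer aggregation over ts. A minimal usage sketch follows; the inner query string and the helpers import path are illustrative assumptions, not taken from this diff.

package main

import (
    "fmt"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" // assumed import path for the helpers package shown above
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
    // Illustrative inner query; in practice this is the per-series query built by PrepareMetricQuery.
    inner := "SELECT state, ts, avg(per_series_value) as value FROM samples GROUP BY state, ts"
    outer := helpers.AddSecondaryAggregation(v3.SecondaryAggregationMax, inner)
    fmt.Println(outer)
    // SELECT max(value) as aggregated_value, ts FROM (SELECT state, ts, ... GROUP BY state, ts) GROUP BY ts ORDER BY ts
}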
@@ -89,6 +89,10 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
mq.SpaceAggregation = percentileOperator
}

if panelType == v3.PanelTypeValue && len(mq.GroupBy) > 0 {
query = helpers.AddSecondaryAggregation(mq.SecondaryAggregation, query)
}

return query, nil
}


@@ -614,3 +614,149 @@ func TestPrepareMetricQueryGauge(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrepareMetricQueryValueTypePanelWithGroupBY(t *testing.T) {
|
||||
t.Setenv("USE_METRICS_PRE_AGGREGATION", "false")
|
||||
testCases := []struct {
|
||||
name string
|
||||
builderQuery *v3.BuilderQuery
|
||||
expectedQueryContains string
|
||||
}{
|
||||
{
|
||||
name: "test temporality = cumulative, panel = value, series agg = max group by state",
|
||||
builderQuery: &v3.BuilderQuery{
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateOperator: v3.AggregateOperatorMin,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_memory_usage",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
Type: v3.AttributeKeyType("Gauge"),
|
||||
IsColumn: true,
|
||||
},
|
||||
Temporality: v3.Delta,
|
||||
TimeAggregation: v3.TimeAggregationAnyLast,
|
||||
SpaceAggregation: v3.SpaceAggregationAvg,
|
||||
SecondaryAggregation: v3.SecondaryAggregationMax,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
IsColumn: false,
|
||||
},
|
||||
Operator: v3.FilterOperatorEqual,
|
||||
Value: "linux",
|
||||
},
|
||||
},
|
||||
},
|
||||
Expression: "A",
|
||||
Disabled: false,
|
||||
StepInterval: 60,
|
||||
OrderBy: []v3.OrderBy{
|
||||
{
|
||||
ColumnName: "state",
|
||||
Order: v3.DirectionDesc,
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
IsColumn: false,
|
||||
},
|
||||
},
|
||||
Legend: "",
|
||||
ReduceTo: v3.ReduceToOperatorSum,
|
||||
Having: []v3.Having{
|
||||
{
|
||||
ColumnName: "AVG(system_memory_usage)",
|
||||
Operator: v3.HavingOperatorGreaterThan,
|
||||
Value: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'system_memory_usage' AND temporality = 'Delta' AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name = 'system_memory_usage' AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC) GROUP BY ts ORDER BY ts",
|
||||
},
|
||||
{
|
||||
name: "test temporality = cumulative, panel = value, series agg = max group by state, host_name",
|
||||
builderQuery: &v3.BuilderQuery{
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateOperator: v3.AggregateOperatorMin,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_memory_usage",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
Type: v3.AttributeKeyType("Gauge"),
|
||||
IsColumn: true,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
TimeAggregation: v3.TimeAggregationAnyLast,
|
||||
SpaceAggregation: v3.SpaceAggregationAvg,
|
||||
SecondaryAggregation: v3.SecondaryAggregationMax,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
IsColumn: false,
|
||||
},
|
||||
Operator: v3.FilterOperatorEqual,
|
||||
Value: "linux",
|
||||
},
|
||||
},
|
||||
},
|
||||
Expression: "A",
|
||||
Disabled: false,
|
||||
StepInterval: 60,
|
||||
OrderBy: []v3.OrderBy{
|
||||
{
|
||||
ColumnName: "state",
|
||||
Order: v3.DirectionDesc,
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
IsColumn: false,
|
||||
},
|
||||
{
|
||||
Key: "host_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
IsColumn: false,
|
||||
},
|
||||
},
|
||||
Legend: "",
|
||||
ReduceTo: v3.ReduceToOperatorSum,
|
||||
Having: []v3.Having{
|
||||
{
|
||||
ColumnName: "AVG(system_memory_usage)",
|
||||
Operator: v3.HavingOperatorGreaterThan,
|
||||
Value: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, host_name, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'system_memory_usage' AND temporality = 'Cumulative' AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name = 'system_memory_usage' AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, host_name, ts ORDER BY state desc, host_name ASC, ts ASC) GROUP BY ts ORDER BY ts",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
// 1735891811000 - Friday, 3 January 2025 13:40:11 GMT+05:30
|
||||
// 1735894811000 - Friday, 3 January 2025 14:30:11 GMT+05:30
|
||||
query, err := PrepareMetricQuery(1735891811000, 1735894811000, v3.QueryTypeBuilder, v3.PanelTypeValue, testCase.builderQuery, metricsV3.Options{})
|
||||
assert.Nil(t, err)
|
||||
assert.Contains(t, query, testCase.expectedQueryContains)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,10 +166,7 @@ type testbed struct {
|
||||
|
||||
func newTestbed(t *testing.T) *testbed {
|
||||
testDB := utils.NewQueryServiceDBForTests(t)
|
||||
_, err := model.InitDB(testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("could not init opamp model: %v", err)
|
||||
}
|
||||
model.InitDB(testDB)
|
||||
|
||||
testConfigProvider := NewMockAgentConfigProvider()
|
||||
opampServer := InitializeServer(nil, testConfigProvider)
|
||||
|
||||
@@ -30,28 +30,15 @@ func (a *Agents) Count() int {
|
||||
}
|
||||
|
||||
// Initialize the database and create schema if needed
|
||||
func InitDB(qsDB *sqlx.DB) (*sqlx.DB, error) {
|
||||
func InitDB(qsDB *sqlx.DB) *sqlx.DB {
|
||||
db = qsDB
|
||||
|
||||
tableSchema := `CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id TEXT PRIMARY KEY UNIQUE,
|
||||
started_at datetime NOT NULL,
|
||||
terminated_at datetime,
|
||||
current_status TEXT NOT NULL,
|
||||
effective_config TEXT NOT NULL
|
||||
);`
|
||||
|
||||
_, err := db.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating agents table: %s", err.Error())
|
||||
}
|
||||
|
||||
AllAgents = Agents{
|
||||
agentsById: make(map[string]*Agent),
|
||||
connections: make(map[types.Connection]map[string]bool),
|
||||
mux: sync.RWMutex{},
|
||||
}
|
||||
return db, nil
|
||||
|
||||
return qsDB
|
||||
}
|
||||
|
||||
// RemoveConnection removes the connection all Agent instances associated with the
|
||||
|
||||
@@ -203,53 +203,8 @@ type UpdatePreference struct {
|
||||
|
||||
var db *sqlx.DB
|
||||
|
||||
func InitDB(datasourceName string) error {
|
||||
var err error
|
||||
db, err = sqlx.Open("sqlite3", datasourceName)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create the user preference table
|
||||
tableSchema := `
|
||||
PRAGMA foreign_keys = ON;
|
||||
CREATE TABLE IF NOT EXISTS user_preference(
|
||||
preference_id TEXT NOT NULL,
|
||||
preference_value TEXT,
|
||||
user_id TEXT NOT NULL,
|
||||
PRIMARY KEY (preference_id,user_id),
|
||||
FOREIGN KEY (user_id)
|
||||
REFERENCES users(id)
|
||||
ON UPDATE CASCADE
|
||||
ON DELETE CASCADE
|
||||
);`
|
||||
|
||||
_, err = db.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in creating user_preference table: %s", err.Error())
|
||||
}
|
||||
|
||||
// create the org preference table
|
||||
tableSchema = `
|
||||
PRAGMA foreign_keys = ON;
|
||||
CREATE TABLE IF NOT EXISTS org_preference(
|
||||
preference_id TEXT NOT NULL,
|
||||
preference_value TEXT,
|
||||
org_id TEXT NOT NULL,
|
||||
PRIMARY KEY (preference_id,org_id),
|
||||
FOREIGN KEY (org_id)
|
||||
REFERENCES organizations(id)
|
||||
ON UPDATE CASCADE
|
||||
ON DELETE CASCADE
|
||||
);`
|
||||
|
||||
_, err = db.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in creating org_preference table: %s", err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
func InitDB(inputDB *sqlx.DB) {
|
||||
db = inputDB
|
||||
}
|
||||
|
||||
// org preference functions
|
||||
|
||||
@@ -425,6 +425,9 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
for idx, groupBy := range query.GroupBy {
parts = append(parts, fmt.Sprintf("groupBy-%d=%s", idx, groupBy.CacheKey()))
}
if params.CompositeQuery.PanelType == v3.PanelTypeValue {
parts = append(parts, fmt.Sprintf("secondaryAggregation=%s", query.SecondaryAggregation))
}
}

if len(query.Having) > 0 {

@@ -1300,13 +1300,14 @@ func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
QueryName: "A",
|
||||
StepInterval: 60,
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateOperator: v3.AggregateOperatorSumRate,
|
||||
Expression: "A",
|
||||
AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
|
||||
Temporality: v3.Delta,
|
||||
QueryName: "A",
|
||||
StepInterval: 60,
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateOperator: v3.AggregateOperatorSumRate,
|
||||
SecondaryAggregation: v3.SecondaryAggregationMax,
|
||||
Expression: "A",
|
||||
AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
|
||||
Temporality: v3.Delta,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
@@ -1333,7 +1334,7 @@ func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expectedCacheKeys: map[string]string{
|
||||
"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
|
||||
"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&secondaryAggregation=max&having-0=column:value,op:>,value:100",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
_ "net/http/pprof" // http profiler
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -24,7 +25,9 @@ import (
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.signoz.io/signoz/pkg/query-service/agentConf"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/explorer"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/integrations"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
||||
@@ -33,8 +36,8 @@ import (
|
||||
"go.signoz.io/signoz/pkg/query-service/common"
|
||||
"go.signoz.io/signoz/pkg/query-service/migrate"
|
||||
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
"go.signoz.io/signoz/pkg/signoz"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/app/explorer"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
||||
@@ -94,24 +97,13 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
|
||||
}
|
||||
|
||||
// NewServer creates and initializes Server
|
||||
func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
if err := dao.InitDao("sqlite", constants.RELATIONAL_DATASOURCE_PATH); err != nil {
|
||||
func NewServer(serverOptions *ServerOptions, config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
||||
if err := dao.InitDao(signoz.SQLStore.SQLxDB()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := preferences.InitDB(constants.RELATIONAL_DATASOURCE_PATH); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB, err := dashboards.InitDB(constants.RELATIONAL_DATASOURCE_PATH)
|
||||
explorer.InitWithDSN(constants.RELATIONAL_DATASOURCE_PATH)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB.SetMaxOpenConns(10)
|
||||
preferences.InitDB(signoz.SQLStore.SQLxDB())
|
||||
dashboards.InitDB(signoz.SQLStore.SQLxDB())
|
||||
explorer.InitWithDB(signoz.SQLStore.SQLxDB())
|
||||
|
||||
// initiate feature manager
|
||||
fm := featureManager.StartManager()
|
||||
@@ -123,7 +115,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
if storage == "clickhouse" {
|
||||
zap.L().Info("Using ClickHouse as datastore ...")
|
||||
clickhouseReader := clickhouseReader.NewReader(
|
||||
localDB,
|
||||
signoz.SQLStore.SQLxDB(),
|
||||
serverOptions.PromConfigPath,
|
||||
fm,
|
||||
serverOptions.MaxIdleConns,
|
||||
@@ -138,7 +130,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
} else {
|
||||
return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
|
||||
}
|
||||
|
||||
skipConfig := &model.SkipConfig{}
|
||||
var err error
|
||||
if serverOptions.SkipTopLvlOpsPath != "" {
|
||||
// read skip config
|
||||
skipConfig, err = model.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
|
||||
@@ -159,7 +153,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
rm, err := makeRulesManager(
|
||||
serverOptions.PromConfigPath,
|
||||
constants.GetAlertManagerApiPrefix(),
|
||||
serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema, serverOptions.UseTraceNewSchema)
|
||||
serverOptions.RuleRepoURL, signoz.SQLStore.SQLxDB(), reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema, serverOptions.UseTraceNewSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -176,13 +170,18 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
integrationsController, err := integrations.NewController(localDB)
|
||||
integrationsController, err := integrations.NewController(signoz.SQLStore.SQLxDB())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create integrations controller: %w", err)
|
||||
}
|
||||
|
||||
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore.SQLxDB())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create cloud provider integrations controller: %w", err)
|
||||
}
|
||||
|
||||
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
|
||||
localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
|
||||
signoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -200,6 +199,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
RuleManager: rm,
|
||||
FeatureFlags: fm,
|
||||
IntegrationsController: integrationsController,
|
||||
CloudIntegrationsController: cloudIntegrationsController,
|
||||
LogsParsingPipelineController: logParsingPipelineController,
|
||||
Cache: c,
|
||||
FluxInterval: fluxInterval,
|
||||
@@ -233,14 +233,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
s.privateHTTP = privateServer
|
||||
|
||||
_, err = opAmpModel.InitDB(localDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opAmpModel.InitDB(signoz.SQLStore.SQLxDB())
|
||||
|
||||
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
|
||||
DB: localDB,
|
||||
DBEngine: "sqlite",
|
||||
DB: signoz.SQLStore.SQLxDB(),
|
||||
AgentFeatures: []agentConf.AgentFeature{
|
||||
logParsingPipelineController,
|
||||
},
|
||||
@@ -310,6 +306,7 @@ func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {
|
||||
api.RegisterRoutes(r, am)
|
||||
api.RegisterLogsRoutes(r, am)
|
||||
api.RegisterIntegrationRoutes(r, am)
|
||||
api.RegisterCloudIntegrationsRoutes(r, am)
|
||||
api.RegisterQueryRangeV3Routes(r, am)
|
||||
api.RegisterInfraMetricsRoutes(r, am)
|
||||
api.RegisterWebSocketPaths(r, am)
|
||||
@@ -454,12 +451,13 @@ func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error)
|
||||
}
|
||||
|
||||
func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface{}, bool) {
|
||||
pathToExtractBodyFrom := "/api/v3/query_range"
|
||||
pathToExtractBodyFromV3 := "/api/v3/query_range"
|
||||
pathToExtractBodyFromV4 := "/api/v4/query_range"
|
||||
|
||||
data := map[string]interface{}{}
|
||||
var postData *v3.QueryRangeParamsV3
|
||||
|
||||
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
||||
if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
|
||||
if r.Body != nil {
|
||||
bodyBytes, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
@@ -477,34 +475,64 @@ func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface
|
||||
return nil, false
|
||||
}
|
||||
|
||||
signozMetricsUsed := false
|
||||
signozLogsUsed := false
|
||||
signozTracesUsed := false
|
||||
if postData != nil {
|
||||
referrer := r.Header.Get("Referer")
|
||||
|
||||
if postData.CompositeQuery != nil {
|
||||
data["queryType"] = postData.CompositeQuery.QueryType
|
||||
data["panelType"] = postData.CompositeQuery.PanelType
|
||||
|
||||
signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
|
||||
}
|
||||
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
|
||||
if err != nil {
|
||||
zap.L().Error("error while matching the referrer", zap.Error(err))
|
||||
}
|
||||
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
|
||||
if err != nil {
|
||||
zap.L().Error("error while matching the alert: ", zap.Error(err))
|
||||
}
|
||||
logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
|
||||
if err != nil {
|
||||
zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
|
||||
}
|
||||
traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
|
||||
if err != nil {
|
||||
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
|
||||
}
|
||||
|
||||
if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
|
||||
if signozMetricsUsed {
|
||||
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
|
||||
|
||||
if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
|
||||
if queryInfoResult.MetricsUsed {
|
||||
telemetry.GetInstance().AddActiveMetricsUser()
|
||||
}
|
||||
if signozLogsUsed {
|
||||
if queryInfoResult.LogsUsed {
|
||||
telemetry.GetInstance().AddActiveLogsUser()
|
||||
}
|
||||
if signozTracesUsed {
|
||||
if queryInfoResult.TracesUsed {
|
||||
telemetry.GetInstance().AddActiveTracesUser()
|
||||
}
|
||||
data["metricsUsed"] = signozMetricsUsed
|
||||
data["logsUsed"] = signozLogsUsed
|
||||
data["tracesUsed"] = signozTracesUsed
|
||||
data["metricsUsed"] = queryInfoResult.MetricsUsed
|
||||
data["logsUsed"] = queryInfoResult.LogsUsed
|
||||
data["tracesUsed"] = queryInfoResult.TracesUsed
|
||||
data["filterApplied"] = queryInfoResult.FilterApplied
|
||||
data["groupByApplied"] = queryInfoResult.GroupByApplied
|
||||
data["aggregateOperator"] = queryInfoResult.AggregateOperator
|
||||
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
|
||||
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
|
||||
data["queryType"] = queryInfoResult.QueryType
|
||||
data["panelType"] = queryInfoResult.PanelType
|
||||
|
||||
userEmail, err := auth.GetEmailFromJwt(r.Context())
|
||||
if err == nil {
|
||||
// switch case to set data["screen"] based on the referrer
|
||||
switch {
|
||||
case dashboardMatched:
|
||||
data["screen"] = "panel"
|
||||
case alertMatched:
|
||||
data["screen"] = "alert"
|
||||
case logsExplorerMatched:
|
||||
data["screen"] = "logs-explorer"
|
||||
case traceExplorerMatched:
|
||||
data["screen"] = "traces-explorer"
|
||||
default:
|
||||
data["screen"] = "unknown"
|
||||
return data, true
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,26 +1,20 @@
package dao

import (
"fmt"

"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/dao/sqlite"
)

var db ModelDao

func InitDao(engine, path string) error {
func InitDao(inputDB *sqlx.DB) error {
var err error

switch engine {
case "sqlite":
db, err = sqlite.InitDB(path)
if err != nil {
return errors.Wrap(err, "failed to initialize DB")
}
default:
return fmt.Errorf("RelationalDB type: %s is not supported in query service", engine)
db, err = sqlite.InitDB(inputDB)
if err != nil {
return errors.Wrap(err, "failed to initialize DB")
}

return nil
}


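With this change the DAO no longer opens the database itself; callers pass in an existing *sqlx.DB, which in the server now comes from signoz.SQLStore.SQLxDB(). A minimal sketch of the new call shape, assuming a sqlite-backed connection opened by the caller (the path is illustrative):

package main

import (
    "log"

    "github.com/jmoiron/sqlx"
    _ "github.com/mattn/go-sqlite3"

    "go.signoz.io/signoz/pkg/query-service/dao"
)

func main() {
    // Illustrative path; the query-service obtains this handle from its SQLStore provider instead.
    db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db")
    if err != nil {
        log.Fatalf("could not open sqlite db: %v", err)
    }
    if err := dao.InitDao(db); err != nil {
        log.Fatalf("could not init dao: %v", err)
    }
}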
@@ -2,7 +2,6 @@ package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
@@ -17,83 +16,8 @@ type ModelDaoSqlite struct {
|
||||
}
|
||||
|
||||
// InitDB sets up setting up the connection pool global variable.
|
||||
func InitDB(dataSourceName string) (*ModelDaoSqlite, error) {
|
||||
var err error
|
||||
|
||||
db, err := sqlx.Open("sqlite3", dataSourceName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to Open sqlite3 DB")
|
||||
}
|
||||
db.SetMaxOpenConns(10)
|
||||
|
||||
table_schema := `
|
||||
PRAGMA foreign_keys = ON;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS invites (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT NOT NULL,
|
||||
email TEXT NOT NULL UNIQUE,
|
||||
token TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
role TEXT NOT NULL,
|
||||
org_id TEXT NOT NULL,
|
||||
FOREIGN KEY(org_id) REFERENCES organizations(id)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS organizations (
|
||||
id TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
is_anonymous INTEGER NOT NULL DEFAULT 0 CHECK(is_anonymous IN (0,1)),
|
||||
has_opted_updates INTEGER NOT NULL DEFAULT 1 CHECK(has_opted_updates IN (0,1))
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
email TEXT NOT NULL UNIQUE,
|
||||
password TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
profile_picture_url TEXT,
|
||||
group_id TEXT NOT NULL,
|
||||
org_id TEXT NOT NULL,
|
||||
FOREIGN KEY(group_id) REFERENCES groups(id),
|
||||
FOREIGN KEY(org_id) REFERENCES organizations(id)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS groups (
|
||||
id TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL UNIQUE
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS reset_password_request (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
user_id TEXT NOT NULL,
|
||||
token TEXT NOT NULL,
|
||||
FOREIGN KEY(user_id) REFERENCES users(id)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS user_flags (
|
||||
user_id TEXT PRIMARY KEY,
|
||||
flags TEXT,
|
||||
FOREIGN KEY(user_id) REFERENCES users(id)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS apdex_settings (
|
||||
service_name TEXT PRIMARY KEY,
|
||||
threshold FLOAT NOT NULL,
|
||||
exclude_status_codes TEXT NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS ingestion_keys (
|
||||
key_id TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
ingestion_key TEXT NOT NULL,
|
||||
ingestion_url TEXT NOT NULL,
|
||||
data_region TEXT NOT NULL
|
||||
);
|
||||
`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
||||
}
|
||||
|
||||
mds := &ModelDaoSqlite{db: db}
|
||||
func InitDB(inputDB *sqlx.DB) (*ModelDaoSqlite, error) {
|
||||
mds := &ModelDaoSqlite{db: inputDB}
|
||||
|
||||
ctx := context.Background()
|
||||
if err := mds.initializeOrgPreferences(ctx); err != nil {
|
||||
|
||||
@@ -9,25 +9,19 @@ import (
|
||||
"time"
|
||||
|
||||
prommodel "github.com/prometheus/common/model"
|
||||
"go.signoz.io/signoz/pkg/config"
|
||||
"go.signoz.io/signoz/pkg/config/envprovider"
|
||||
"go.signoz.io/signoz/pkg/instrumentation"
|
||||
"go.signoz.io/signoz/pkg/query-service/app"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.signoz.io/signoz/pkg/query-service/migrate"
|
||||
"go.signoz.io/signoz/pkg/query-service/version"
|
||||
"go.signoz.io/signoz/pkg/signoz"
|
||||
pkgversion "go.signoz.io/signoz/pkg/version"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func initZapLog() *zap.Logger {
|
||||
config := zap.NewProductionConfig()
|
||||
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
|
||||
config.EncoderConfig.TimeKey = "timestamp"
|
||||
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
logger, _ := config.Build()
|
||||
return logger
|
||||
}
|
||||
|
||||
func init() {
|
||||
prommodel.NameValidationScheme = prommodel.UTF8Validation
|
||||
}
|
||||
@@ -67,13 +61,33 @@ func main() {
|
||||
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
|
||||
flag.Parse()
|
||||
|
||||
loggerMgr := initZapLog()
|
||||
zap.ReplaceGlobals(loggerMgr)
|
||||
defer loggerMgr.Sync() // flushes buffer, if any
|
||||
config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
|
||||
Uris: []string{"env:"},
|
||||
ProviderFactories: []config.ProviderFactory{
|
||||
envprovider.NewFactory(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
zap.L().Fatal("Failed to create config", zap.Error(err))
|
||||
}
|
||||
|
||||
logger := loggerMgr.Sugar()
|
||||
instrumentation, err := instrumentation.New(context.Background(), pkgversion.Build{}, config.Instrumentation)
|
||||
if err != nil {
|
||||
zap.L().Fatal("Failed to create instrumentation", zap.Error(err))
|
||||
}
|
||||
defer instrumentation.Stop(context.Background())
|
||||
|
||||
zap.ReplaceGlobals(instrumentation.Logger())
|
||||
defer instrumentation.Logger().Sync() // flushes buffer, if any
|
||||
|
||||
logger := instrumentation.Logger().Sugar()
|
||||
version.PrintVersion()
|
||||
|
||||
signoz, err := signoz.New(context.Background(), instrumentation, config, signoz.NewProviderFactories())
|
||||
if err != nil {
|
||||
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
|
||||
}
|
||||
|
||||
serverOptions := &app.ServerOptions{
|
||||
HTTPHostPort: constants.HTTPHostPort,
|
||||
PromConfigPath: promConfigPath,
|
||||
@@ -101,13 +115,7 @@ func main() {
|
||||
zap.L().Info("JWT secret key set successfully.")
|
||||
}
|
||||
|
||||
if err := migrate.Migrate(constants.RELATIONAL_DATASOURCE_PATH); err != nil {
|
||||
zap.L().Error("Failed to migrate", zap.Error(err))
|
||||
} else {
|
||||
zap.L().Info("Migration successful")
|
||||
}
|
||||
|
||||
server, err := app.NewServer(serverOptions)
|
||||
server, err := app.NewServer(serverOptions, config, signoz)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to create server", zap.Error(err))
|
||||
}
|
||||
|
||||
@@ -16,22 +16,6 @@ type DataMigration struct {
|
||||
Succeeded bool `db:"succeeded"`
|
||||
}
|
||||
|
||||
func initSchema(conn *sqlx.DB) error {
|
||||
tableSchema := `
|
||||
CREATE TABLE IF NOT EXISTS data_migrations (
|
||||
id SERIAL PRIMARY KEY,
|
||||
version VARCHAR(255) NOT NULL UNIQUE,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
succeeded BOOLEAN NOT NULL DEFAULT FALSE
|
||||
);
|
||||
`
|
||||
_, err := conn.Exec(tableSchema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMigrationVersion(conn *sqlx.DB, version string) (*DataMigration, error) {
|
||||
var migration DataMigration
|
||||
err := conn.Get(&migration, "SELECT * FROM data_migrations WHERE version = $1", version)
|
||||
@@ -44,18 +28,6 @@ func getMigrationVersion(conn *sqlx.DB, version string) (*DataMigration, error)
|
||||
return &migration, nil
|
||||
}
|
||||
|
||||
func Migrate(dsn string) error {
|
||||
conn, err := sqlx.Connect("sqlite3", dsn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := initSchema(conn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ClickHouseMigrate(conn driver.Conn, cluster string) error {
|
||||
|
||||
database := "CREATE DATABASE IF NOT EXISTS signoz_analytics ON CLUSTER %s"
|
||||
|
||||
@@ -618,6 +618,7 @@ type TagsInfo struct {
|
||||
|
||||
type AlertsInfo struct {
|
||||
TotalAlerts int `json:"totalAlerts"`
|
||||
TotalActiveAlerts int `json:"totalActiveAlerts"`
|
||||
LogsBasedAlerts int `json:"logsBasedAlerts"`
|
||||
MetricBasedAlerts int `json:"metricBasedAlerts"`
|
||||
AnomalyBasedAlerts int `json:"anomalyBasedAlerts"`
|
||||
|
||||
@@ -712,6 +712,28 @@ func GetPercentileFromOperator(operator SpaceAggregation) float64 {
|
||||
}
|
||||
}

type SecondaryAggregation string

const (
SecondaryAggregationUnspecified SecondaryAggregation = ""
SecondaryAggregationSum SecondaryAggregation = "sum"
SecondaryAggregationAvg SecondaryAggregation = "avg"
SecondaryAggregationMin SecondaryAggregation = "min"
SecondaryAggregationMax SecondaryAggregation = "max"
)

func (s SecondaryAggregation) Validate() error {
switch s {
case SecondaryAggregationSum,
SecondaryAggregationAvg,
SecondaryAggregationMin,
SecondaryAggregationMax:
return nil
default:
return fmt.Errorf("invalid series aggregation: %s", s)
}
}

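A small usage sketch for the new type introduced above: values are validated before a query is prepared, and the corresponding JSON field (seriesAggregation) is added to BuilderQuery further below in this diff.

package main

import (
    "fmt"

    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
    for _, agg := range []v3.SecondaryAggregation{v3.SecondaryAggregationMax, v3.SecondaryAggregation("median")} {
        if err := agg.Validate(); err != nil {
            // "median" is not one of sum/avg/min/max, so it is rejected.
            fmt.Printf("%q rejected: %v\n", agg, err)
            continue
        }
        fmt.Printf("%q accepted\n", agg) // "max" accepted
    }
}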
type FunctionName string
|
||||
|
||||
const (
|
||||
@@ -784,27 +806,28 @@ func (m *MetricValueFilter) Clone() *MetricValueFilter {
|
||||
}
|
||||
|
||||
type BuilderQuery struct {
|
||||
QueryName string `json:"queryName"`
|
||||
StepInterval int64 `json:"stepInterval"`
|
||||
DataSource DataSource `json:"dataSource"`
|
||||
AggregateOperator AggregateOperator `json:"aggregateOperator"`
|
||||
AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"`
|
||||
Temporality Temporality `json:"temporality,omitempty"`
|
||||
Filters *FilterSet `json:"filters,omitempty"`
|
||||
GroupBy []AttributeKey `json:"groupBy,omitempty"`
|
||||
Expression string `json:"expression"`
|
||||
Disabled bool `json:"disabled"`
|
||||
Having []Having `json:"having,omitempty"`
|
||||
Legend string `json:"legend,omitempty"`
|
||||
Limit uint64 `json:"limit"`
|
||||
Offset uint64 `json:"offset"`
|
||||
PageSize uint64 `json:"pageSize"`
|
||||
OrderBy []OrderBy `json:"orderBy,omitempty"`
|
||||
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
|
||||
SelectColumns []AttributeKey `json:"selectColumns,omitempty"`
|
||||
TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"`
|
||||
SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"`
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
QueryName string `json:"queryName"`
|
||||
StepInterval int64 `json:"stepInterval"`
|
||||
DataSource DataSource `json:"dataSource"`
|
||||
AggregateOperator AggregateOperator `json:"aggregateOperator"`
|
||||
AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"`
|
||||
Temporality Temporality `json:"temporality,omitempty"`
|
||||
Filters *FilterSet `json:"filters,omitempty"`
|
||||
GroupBy []AttributeKey `json:"groupBy,omitempty"`
|
||||
Expression string `json:"expression"`
|
||||
Disabled bool `json:"disabled"`
|
||||
Having []Having `json:"having,omitempty"`
|
||||
Legend string `json:"legend,omitempty"`
|
||||
Limit uint64 `json:"limit"`
|
||||
Offset uint64 `json:"offset"`
|
||||
PageSize uint64 `json:"pageSize"`
|
||||
OrderBy []OrderBy `json:"orderBy,omitempty"`
|
||||
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
|
||||
SelectColumns []AttributeKey `json:"selectColumns,omitempty"`
|
||||
TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"`
|
||||
SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"`
|
||||
SecondaryAggregation SecondaryAggregation `json:"seriesAggregation,omitempty"`
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
ShiftBy int64
|
||||
IsAnomaly bool
|
||||
QueriesUsedInFormula []string
|
||||
@@ -958,6 +981,12 @@ func (b *BuilderQuery) Validate(panelType PanelType) error {
// return fmt.Errorf("group by is not supported for list panel type")
// }

if panelType == PanelTypeValue && len(b.GroupBy) > 0 {
if err := b.SecondaryAggregation.Validate(); err != nil {
return fmt.Errorf("series aggregation is required for value type panel with group by: %w", err)
}
}

for _, groupBy := range b.GroupBy {
if err := groupBy.Validate(); err != nil {
return fmt.Errorf("group by is invalid %w", err)

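To illustrate the guard added above: a value-type panel that groups by an attribute must now carry a valid seriesAggregation, otherwise Validate returns an error. A hedged sketch only; other Validate checks are elided here and may fire first for an otherwise-incomplete query.

// Sketch: the specific condition the new block rejects.
q := &v3.BuilderQuery{
    QueryName:  "A",
    Expression: "A",
    DataSource: v3.DataSourceMetrics,
    GroupBy: []v3.AttributeKey{
        {Key: "state", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
    },
    // SecondaryAggregation deliberately left empty.
}
if err := q.Validate(v3.PanelTypeValue); err != nil {
    // Expected to include: "series aggregation is required for value type panel with group by"
    fmt.Println(err)
}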
@@ -616,6 +616,9 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
|
||||
}
|
||||
}
|
||||
alertsInfo.TotalAlerts = alertsInfo.TotalAlerts + 1
|
||||
if rule.PostableRule.Disabled == false {
|
||||
alertsInfo.TotalActiveAlerts = alertsInfo.TotalActiveAlerts + 1
|
||||
}
|
||||
}
|
||||
alertsInfo.AlertNames = alertNames
|
||||
|
||||
|
||||
@@ -82,6 +82,19 @@ var OSS_EVENTS_LIST = map[string]struct{}{
|
||||
TELEMETRY_LICENSE_ACT_FAILED: {},
|
||||
}
|
||||
|
||||
type QueryInfoResult struct {
|
||||
LogsUsed bool
|
||||
MetricsUsed bool
|
||||
TracesUsed bool
|
||||
FilterApplied bool
|
||||
GroupByApplied bool
|
||||
AggregateOperator v3.AggregateOperator
|
||||
AggregateAttributeKey string
|
||||
QueryType v3.QueryType
|
||||
PanelType v3.PanelType
|
||||
NumberOfQueries int
|
||||
}
|
||||
|
||||
const api_key = "9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr"
|
||||
|
||||
const IP_NOT_FOUND_PLACEHOLDER = "NA"
|
||||
@@ -107,43 +120,54 @@ func (a *Telemetry) IsSampled() bool {
|
||||
|
||||
}
|
||||
|
||||
func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3) (bool, bool, bool) {
|
||||
signozLogsUsed := false
|
||||
signozMetricsUsed := false
|
||||
signozTracesUsed := false
|
||||
func (telemetry *Telemetry) CheckQueryInfo(postData *v3.QueryRangeParamsV3) QueryInfoResult {
|
||||
queryInfoResult := QueryInfoResult{}
|
||||
if postData != nil && postData.CompositeQuery != nil {
|
||||
queryInfoResult.PanelType = postData.CompositeQuery.PanelType
|
||||
queryInfoResult.QueryType = postData.CompositeQuery.QueryType
|
||||
if postData.CompositeQuery.QueryType == v3.QueryTypeBuilder {
|
||||
queryInfoResult.NumberOfQueries = len(postData.CompositeQuery.BuilderQueries)
|
||||
for _, query := range postData.CompositeQuery.BuilderQueries {
|
||||
if query.DataSource == v3.DataSourceLogs {
|
||||
queryInfoResult.LogsUsed = true
|
||||
} else if query.DataSource == v3.DataSourceMetrics {
|
||||
queryInfoResult.MetricsUsed = true
|
||||
|
||||
if postData.CompositeQuery.QueryType == v3.QueryTypeBuilder {
|
||||
for _, query := range postData.CompositeQuery.BuilderQueries {
|
||||
if query.DataSource == v3.DataSourceLogs && query.Filters != nil && len(query.Filters.Items) > 0 {
|
||||
signozLogsUsed = true
|
||||
} else if query.DataSource == v3.DataSourceMetrics &&
|
||||
!strings.Contains(query.AggregateAttribute.Key, "signoz_") &&
|
||||
len(query.AggregateAttribute.Key) > 0 {
|
||||
signozMetricsUsed = true
|
||||
} else if query.DataSource == v3.DataSourceTraces && query.Filters != nil && len(query.Filters.Items) > 0 {
|
||||
signozTracesUsed = true
|
||||
} else if query.DataSource == v3.DataSourceTraces {
|
||||
queryInfoResult.TracesUsed = true
|
||||
}
|
||||
if query.Filters != nil && len(query.Filters.Items) > 0 {
|
||||
queryInfoResult.FilterApplied = true
|
||||
}
|
||||
if query.GroupBy != nil && len(query.GroupBy) > 0 {
|
||||
queryInfoResult.GroupByApplied = true
|
||||
}
|
||||
queryInfoResult.AggregateOperator = query.AggregateOperator
|
||||
if len(query.AggregateAttribute.Key) > 0 && !strings.Contains(query.AggregateAttribute.Key, "signoz_") {
|
||||
queryInfoResult.AggregateAttributeKey = query.AggregateAttribute.Key
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if postData.CompositeQuery.QueryType == v3.QueryTypePromQL {
|
||||
for _, query := range postData.CompositeQuery.PromQueries {
|
||||
if !strings.Contains(query.Query, "signoz_") && len(query.Query) > 0 {
|
||||
signozMetricsUsed = true
|
||||
} else if postData.CompositeQuery.QueryType == v3.QueryTypePromQL {
|
||||
for _, query := range postData.CompositeQuery.PromQueries {
|
||||
if !strings.Contains(query.Query, "signoz_") && len(query.Query) > 0 {
|
||||
queryInfoResult.MetricsUsed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if postData.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL {
|
||||
for _, query := range postData.CompositeQuery.ClickHouseQueries {
|
||||
if strings.Contains(query.Query, "signoz_metrics") && len(query.Query) > 0 {
|
||||
signozMetricsUsed = true
|
||||
}
|
||||
if strings.Contains(query.Query, "signoz_logs") && len(query.Query) > 0 {
|
||||
signozLogsUsed = true
|
||||
}
|
||||
if strings.Contains(query.Query, "signoz_traces") && len(query.Query) > 0 {
|
||||
signozTracesUsed = true
|
||||
} else if postData.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL {
|
||||
for _, query := range postData.CompositeQuery.ClickHouseQueries {
|
||||
if strings.Contains(query.Query, "signoz_metrics") && len(query.Query) > 0 {
|
||||
queryInfoResult.MetricsUsed = true
|
||||
}
|
||||
if strings.Contains(query.Query, "signoz_logs") && len(query.Query) > 0 {
|
||||
queryInfoResult.LogsUsed = true
|
||||
}
|
||||
if strings.Contains(query.Query, "signoz_traces") && len(query.Query) > 0 {
|
||||
queryInfoResult.TracesUsed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return signozLogsUsed, signozMetricsUsed, signozTracesUsed
|
||||
return queryInfoResult
|
||||
}
|
||||
|
||||
func (telemetry *Telemetry) AddActiveTracesUser() {
|
||||
@@ -350,6 +374,7 @@ func createTelemetry() {
|
||||
"dashboardWithTraceChQuery": dashboardsInfo.DashboardsWithTraceChQuery,
|
||||
"dashboardNamesWithTraceChQuery": dashboardsInfo.DashboardNamesWithTraceChQuery,
|
||||
"totalAlerts": alertsInfo.TotalAlerts,
|
||||
"totalActiveAlerts": alertsInfo.TotalActiveAlerts,
|
||||
"alertsWithTSV2": alertsInfo.AlertsWithTSV2,
|
||||
"logsBasedAlerts": alertsInfo.LogsBasedAlerts,
|
||||
"metricBasedAlerts": alertsInfo.MetricBasedAlerts,
|
||||
@@ -383,6 +408,23 @@ func createTelemetry() {
|
||||
telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, user.Email, false, false)
|
||||
}
|
||||
}
|
||||
telemetry.SendIdentityEvent(map[string]interface{}{
|
||||
"total_logs": totalLogs,
|
||||
"total_traces": totalSpans,
|
||||
"total_metrics": totalSamples,
|
||||
"total_users": userCount,
|
||||
"total_channels": alertsInfo.TotalChannels,
|
||||
"total_dashboards_with_panel": dashboardsInfo.TotalDashboardsWithPanelAndName,
|
||||
"total_saved_views": savedViewsInfo.TotalSavedViews,
|
||||
"total_active_alerts": alertsInfo.TotalActiveAlerts,
|
||||
"total_traces_based_alerts": alertsInfo.TracesBasedAlerts,
|
||||
"total_logs_based_alerts": alertsInfo.LogsBasedAlerts,
|
||||
"total_metric_based_alerts": alertsInfo.MetricBasedAlerts,
|
||||
"total_anomaly_based_alerts": alertsInfo.AnomalyBasedAlerts,
|
||||
"total_metrics_based_panels": dashboardsInfo.MetricBasedPanels,
|
||||
"total_logs_based_panels": dashboardsInfo.LogsBasedPanels,
|
||||
"total_traces_based_panels": dashboardsInfo.TracesBasedPanels,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -390,6 +432,16 @@ func createTelemetry() {
|
||||
telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "", true, false)
|
||||
}
|
||||
|
||||
if totalLogs > 0 {
|
||||
telemetry.SendIdentityEvent(map[string]interface{}{"sent_logs": true})
|
||||
}
|
||||
if totalSpans > 0 {
|
||||
telemetry.SendIdentityEvent(map[string]interface{}{"sent_traces": true})
|
||||
}
|
||||
if totalSamples > 0 {
|
||||
telemetry.SendIdentityEvent(map[string]interface{}{"sent_metrics": true})
|
||||
}
|
||||
|
||||
getDistributedInfoInLastHeartBeatInterval, _ := telemetry.reader.GetDistributedInfoInLastHeartBeatInterval(ctx)
|
||||
telemetry.SendEvent(TELEMETRY_EVENT_DISTRIBUTED, getDistributedInfoInLastHeartBeatInterval, "", true, false)
|
||||
}
|
||||
@@ -518,6 +570,42 @@ func (a *Telemetry) IdentifyUser(user *model.User) {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Telemetry) SendIdentityEvent(data map[string]interface{}) {
|
||||
|
||||
if !a.isTelemetryEnabled() || a.isTelemetryAnonymous() {
|
||||
return
|
||||
}
|
||||
traits := analytics.NewTraits()
|
||||
|
||||
for k, v := range data {
|
||||
traits.Set(k, v)
|
||||
}
|
||||
if a.saasOperator != nil {
|
||||
|
||||
a.saasOperator.Enqueue(analytics.Identify{
|
||||
UserId: a.GetUserEmail(),
|
||||
Traits: traits,
|
||||
})
|
||||
a.saasOperator.Enqueue(analytics.Group{
|
||||
UserId: a.userEmail,
|
||||
GroupId: a.getCompanyDomain(),
|
||||
Traits: traits,
|
||||
})
|
||||
}
|
||||
if a.ossOperator != nil {
|
||||
a.ossOperator.Enqueue(analytics.Identify{
|
||||
UserId: a.ipAddress,
|
||||
Traits: traits,
|
||||
})
|
||||
// Updating a groups properties
|
||||
a.ossOperator.Enqueue(analytics.Group{
|
||||
UserId: a.ipAddress,
|
||||
GroupId: a.getCompanyDomain(),
|
||||
Traits: traits,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Telemetry) SetUserEmail(email string) {
|
||||
a.userEmail = email
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ import (
|
||||
)
|
||||
|
||||
func TestLogPipelinesLifecycle(t *testing.T) {
|
||||
testbed := NewLogPipelinesTestBed(t, nil)
|
||||
testbed := NewLogPipelinesTestBed(t, utils.NewQueryServiceDBForTests(t))
|
||||
require := require.New(t)
|
||||
|
||||
getPipelinesResp := testbed.GetPipelinesFromQS()
|
||||
@@ -461,7 +461,7 @@ func NewTestbedWithoutOpamp(t *testing.T, testDB *sqlx.DB) *LogPipelinesTestBed
|
||||
}
|
||||
|
||||
controller, err := logparsingpipeline.NewLogParsingPipelinesController(
|
||||
testDB, "sqlite", ic.GetPipelinesForInstalledIntegrations,
|
||||
testDB, ic.GetPipelinesForInstalledIntegrations,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create a logparsingpipelines controller: %v", err)
|
||||
@@ -481,12 +481,10 @@ func NewTestbedWithoutOpamp(t *testing.T, testDB *sqlx.DB) *LogPipelinesTestBed
|
||||
}
|
||||
|
||||
// Mock an available opamp agent
|
||||
testDB, err = opampModel.InitDB(testDB)
|
||||
require.Nil(t, err, "failed to init opamp model")
|
||||
_ = opampModel.InitDB(testDB)
|
||||
|
||||
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
|
||||
DB: testDB,
|
||||
DBEngine: "sqlite",
|
||||
DB: testDB,
|
||||
AgentFeatures: []agentConf.AgentFeature{
|
||||
apiHandler.LogsParsingPipelineController,
|
||||
}})
|
||||
|
||||
@@ -0,0 +1,299 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.signoz.io/signoz/pkg/query-service/app"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
"go.signoz.io/signoz/pkg/query-service/dao"
|
||||
"go.signoz.io/signoz/pkg/query-service/featureManager"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
)
|
||||
|
||||
func TestAWSIntegrationLifecycle(t *testing.T) {
|
||||
// Test for happy path of connecting and managing AWS integration accounts
|
||||
|
||||
t0 := time.Now()
|
||||
require := require.New(t)
|
||||
testbed := NewCloudIntegrationsTestBed(t, nil)
|
||||
|
||||
accountsListResp := testbed.GetConnectedAccountsListFromQS("aws")
|
||||
require.Equal(len(accountsListResp.Accounts), 0,
|
||||
"No accounts should be connected at the beginning",
|
||||
)
|
||||
|
||||
// Should be able to generate a connection url from UI - initializing an integration account
|
||||
testAccountConfig := cloudintegrations.AccountConfig{
|
||||
EnabledRegions: []string{"us-east-1", "us-east-2"},
|
||||
}
|
||||
connectionUrlResp := testbed.GenerateConnectionUrlFromQS(
|
||||
"aws", cloudintegrations.GenerateConnectionUrlRequest{
|
||||
AgentConfig: cloudintegrations.SigNozAgentConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
AccountConfig: testAccountConfig,
|
||||
})
|
||||
testAccountId := connectionUrlResp.AccountId
|
||||
require.NotEmpty(testAccountId)
|
||||
connectionUrl := connectionUrlResp.ConnectionUrl
|
||||
require.NotEmpty(connectionUrl)
|
||||
|
||||
// Should be able to poll for account connection status from the UI
|
||||
accountStatusResp := testbed.GetAccountStatusFromQS("aws", testAccountId)
|
||||
require.Equal(testAccountId, accountStatusResp.Id)
|
||||
require.Nil(accountStatusResp.Status.Integration.LastHeartbeatTsMillis)
|
||||
|
||||
// The unconnected account should not show up in connected accounts list yet
|
||||
accountsListResp1 := testbed.GetConnectedAccountsListFromQS("aws")
|
||||
require.Equal(0, len(accountsListResp1.Accounts))
|
||||
|
||||
// An agent installed in user's AWS account should be able to check in for the new integration account
|
||||
tsMillisBeforeAgentCheckIn := time.Now().UnixMilli()
|
||||
testAWSAccountId := "4563215233"
|
||||
agentCheckInResp := testbed.CheckInAsAgentWithQS(
|
||||
"aws", cloudintegrations.AgentCheckInRequest{
|
||||
AccountId: testAccountId,
|
||||
CloudAccountId: testAWSAccountId,
|
||||
},
|
||||
)
|
||||
require.Equal(testAccountId, agentCheckInResp.Account.Id)
|
||||
require.Equal(testAccountConfig, *agentCheckInResp.Account.Config)
|
||||
require.Equal(testAWSAccountId, *agentCheckInResp.Account.CloudAccountId)
|
||||
require.LessOrEqual(t0.Unix(), agentCheckInResp.Account.CreatedAt.Unix())
|
||||
require.Nil(agentCheckInResp.Account.RemovedAt)
|
||||
|
||||
// Polling for connection status from UI should now return latest status
|
||||
accountStatusResp1 := testbed.GetAccountStatusFromQS("aws", testAccountId)
|
||||
require.Equal(testAccountId, accountStatusResp1.Id)
|
||||
require.NotNil(accountStatusResp1.Status.Integration.LastHeartbeatTsMillis)
|
||||
require.LessOrEqual(
|
||||
tsMillisBeforeAgentCheckIn,
|
||||
*accountStatusResp1.Status.Integration.LastHeartbeatTsMillis,
|
||||
)
|
||||
|
||||
// The account should now show up in list of connected accounts.
|
||||
accountsListResp2 := testbed.GetConnectedAccountsListFromQS("aws")
|
||||
require.Equal(len(accountsListResp2.Accounts), 1)
|
||||
require.Equal(testAccountId, accountsListResp2.Accounts[0].Id)
|
||||
require.Equal(testAWSAccountId, accountsListResp2.Accounts[0].CloudAccountId)
|
||||
|
||||
// Should be able to update account config from UI
|
||||
testAccountConfig2 := cloudintegrations.AccountConfig{
|
||||
EnabledRegions: []string{"us-east-2", "us-west-1"},
|
||||
}
|
||||
latestAccount := testbed.UpdateAccountConfigWithQS(
|
||||
"aws", testAccountId, testAccountConfig2,
|
||||
)
|
||||
require.Equal(testAccountId, latestAccount.Id)
|
||||
require.Equal(testAccountConfig2, *latestAccount.Config)
|
||||
|
||||
// The agent should now receive latest account config.
|
||||
agentCheckInResp1 := testbed.CheckInAsAgentWithQS(
|
||||
"aws", cloudintegrations.AgentCheckInRequest{
|
||||
AccountId: testAccountId,
|
||||
CloudAccountId: testAWSAccountId,
|
||||
},
|
||||
)
|
||||
require.Equal(testAccountId, agentCheckInResp1.Account.Id)
|
||||
require.Equal(testAccountConfig2, *agentCheckInResp1.Account.Config)
|
||||
require.Equal(testAWSAccountId, *agentCheckInResp1.Account.CloudAccountId)
|
||||
require.Nil(agentCheckInResp1.Account.RemovedAt)
|
||||
|
||||
// Should be able to disconnect/remove account from UI.
|
||||
tsBeforeDisconnect := time.Now()
|
||||
latestAccount = testbed.DisconnectAccountWithQS("aws", testAccountId)
|
||||
require.Equal(testAccountId, latestAccount.Id)
|
||||
require.LessOrEqual(tsBeforeDisconnect, *latestAccount.RemovedAt)
|
||||
|
||||
// The agent should receive the disconnected status in account config post disconnection
|
||||
agentCheckInResp2 := testbed.CheckInAsAgentWithQS(
|
||||
"aws", cloudintegrations.AgentCheckInRequest{
|
||||
AccountId: testAccountId,
|
||||
CloudAccountId: testAWSAccountId,
|
||||
},
|
||||
)
|
||||
require.Equal(testAccountId, agentCheckInResp2.Account.Id)
|
||||
require.Equal(testAWSAccountId, *agentCheckInResp2.Account.CloudAccountId)
|
||||
require.LessOrEqual(tsBeforeDisconnect, *agentCheckInResp2.Account.RemovedAt)
|
||||
}
|
||||
|
||||
type CloudIntegrationsTestBed struct {
|
||||
t *testing.T
|
||||
testUser *model.User
|
||||
qsHttpHandler http.Handler
|
||||
mockClickhouse mockhouse.ClickConnMockCommon
|
||||
}
|
||||
|
||||
// testDB can be injected for sharing a DB across multiple integration testbeds.
|
||||
func NewCloudIntegrationsTestBed(t *testing.T, testDB *sqlx.DB) *CloudIntegrationsTestBed {
|
||||
if testDB == nil {
|
||||
testDB = utils.NewQueryServiceDBForTests(t)
|
||||
}
|
||||
|
||||
controller, err := cloudintegrations.NewController(testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create cloud integrations controller: %v", err)
|
||||
}
|
||||
|
||||
fm := featureManager.StartManager()
|
||||
apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
|
||||
AppDao: dao.DB(),
|
||||
CloudIntegrationsController: controller,
|
||||
FeatureFlags: fm,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create a new ApiHandler: %v", err)
|
||||
}
|
||||
|
||||
router := app.NewRouter()
|
||||
am := app.NewAuthMiddleware(auth.GetUserFromRequest)
|
||||
apiHandler.RegisterRoutes(router, am)
|
||||
apiHandler.RegisterCloudIntegrationsRoutes(router, am)
|
||||
|
||||
user, apiErr := createTestUser()
|
||||
if apiErr != nil {
|
||||
t.Fatalf("could not create a test user: %v", apiErr)
|
||||
}
|
||||
|
||||
return &CloudIntegrationsTestBed{
|
||||
t: t,
|
||||
testUser: user,
|
||||
qsHttpHandler: router,
|
||||
}
|
||||
}
|
func (tb *CloudIntegrationsTestBed) GetConnectedAccountsListFromQS(
    cloudProvider string,
) *cloudintegrations.ConnectedAccountsListResponse {
    respDataJson := tb.RequestQS(fmt.Sprintf("/api/v1/cloud-integrations/%s/accounts", cloudProvider), nil)

    var resp cloudintegrations.ConnectedAccountsListResponse
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into ConnectedAccountsListResponse")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) GenerateConnectionUrlFromQS(
    cloudProvider string, req cloudintegrations.GenerateConnectionUrlRequest,
) *cloudintegrations.GenerateConnectionUrlResponse {
    respDataJson := tb.RequestQS(
        fmt.Sprintf("/api/v1/cloud-integrations/%s/accounts/generate-connection-url", cloudProvider),
        req,
    )

    var resp cloudintegrations.GenerateConnectionUrlResponse
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into GenerateConnectionUrlResponse")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) GetAccountStatusFromQS(
    cloudProvider string, accountId string,
) *cloudintegrations.AccountStatusResponse {
    respDataJson := tb.RequestQS(fmt.Sprintf(
        "/api/v1/cloud-integrations/%s/accounts/%s/status",
        cloudProvider, accountId,
    ), nil)

    var resp cloudintegrations.AccountStatusResponse
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into AccountStatusResponse")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) CheckInAsAgentWithQS(
    cloudProvider string, req cloudintegrations.AgentCheckInRequest,
) *cloudintegrations.AgentCheckInResponse {
    respDataJson := tb.RequestQS(
        fmt.Sprintf("/api/v1/cloud-integrations/%s/agent-check-in", cloudProvider), req,
    )

    var resp cloudintegrations.AgentCheckInResponse
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into AgentCheckInResponse")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(
    cloudProvider string, accountId string, newConfig cloudintegrations.AccountConfig,
) *cloudintegrations.AccountRecord {
    respDataJson := tb.RequestQS(
        fmt.Sprintf(
            "/api/v1/cloud-integrations/%s/accounts/%s/config",
            cloudProvider, accountId,
        ), cloudintegrations.UpdateAccountConfigRequest{
            Config: newConfig,
        },
    )

    var resp cloudintegrations.AccountRecord
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) DisconnectAccountWithQS(
    cloudProvider string, accountId string,
) *cloudintegrations.AccountRecord {
    respDataJson := tb.RequestQS(
        fmt.Sprintf(
            "/api/v1/cloud-integrations/%s/accounts/%s/disconnect",
            cloudProvider, accountId,
        ), map[string]any{},
    )

    var resp cloudintegrations.AccountRecord
    err := json.Unmarshal(respDataJson, &resp)
    if err != nil {
        tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")
    }

    return &resp
}

func (tb *CloudIntegrationsTestBed) RequestQS(
    path string,
    postData interface{},
) (responseDataJson []byte) {
    req, err := AuthenticatedRequestForTest(
        tb.testUser, path, postData,
    )
    if err != nil {
        tb.t.Fatalf("couldn't create authenticated test request: %v", err)
    }

    result, err := HandleTestRequest(tb.qsHttpHandler, req, 200)
    if err != nil {
        tb.t.Fatalf("test request failed: %v", err)
    }

    dataJson, err := json.Marshal(result.Data)
    if err != nil {
        tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
    }
    return dataJson
}
@@ -1,15 +1,23 @@
 package utils

 import (
+    "context"
     "os"
     "testing"

     "github.com/jmoiron/sqlx"
     _ "github.com/mattn/go-sqlite3"
+    "go.signoz.io/signoz/pkg/factory"
+    "go.signoz.io/signoz/pkg/instrumentation/instrumentationtest"
     "go.signoz.io/signoz/pkg/query-service/app/dashboards"
     "go.signoz.io/signoz/pkg/query-service/dao"
+    "go.signoz.io/signoz/pkg/sqlstore"
+    "go.signoz.io/signoz/pkg/sqlstore/sqlitesqlstore"
+    "go.signoz.io/signoz/pkg/sqlstoremigrator"
+    "go.signoz.io/signoz/pkg/sqlstoremigrator/migrations"
 )

-func NewQueryServiceDBForTests(t *testing.T) *sqlx.DB {
+func NewQueryServiceDBForTests(t *testing.T) (testDB *sqlx.DB) {
     testDBFile, err := os.CreateTemp("", "test-signoz-db-*")
     if err != nil {
         t.Fatalf("could not create temp file for test db: %v", err)
@@ -18,14 +26,45 @@ func NewQueryServiceDBForTests(t *testing.T) *sqlx.DB {
     t.Cleanup(func() { os.Remove(testDBFilePath) })
     testDBFile.Close()

-    testDB, err := sqlx.Open("sqlite3", testDBFilePath)
-    if err != nil {
-        t.Fatalf("could not open test db sqlite file: %v", err)
+    config := sqlstore.Config{
+        Provider: "sqlite",
+        Sqlite: sqlstore.SqliteConfig{
+            Path: testDBFilePath,
+        },
     }

-    // TODO(Raj): This should not require passing in the DB file path
-    dao.InitDao("sqlite", testDBFilePath)
-    dashboards.InitDB(testDBFilePath)
+    sqlStore, err := factory.NewFromFactory(context.Background(), instrumentationtest.New().ToProviderSettings(), config, factory.MustNewNamedMap(sqlitesqlstore.NewFactory()), "sqlite")
+    if err != nil {
+        t.Fatalf("could not create sqlite provider: %v", err)
+    }

-    return testDB
+    migrations, err := sqlstoremigrator.NewMigrations(context.Background(), instrumentationtest.New().ToProviderSettings(), config, factory.MustNewNamedMap(
+        migrations.NewAddDataMigrationsFactory(),
+        migrations.NewAddOrganizationFactory(),
+        migrations.NewAddPreferencesFactory(),
+        migrations.NewAddDashboardsFactory(),
+        migrations.NewAddSavedViewsFactory(),
+        migrations.NewAddAgentsFactory(),
+        migrations.NewAddPipelinesFactory(),
+        migrations.NewAddIntegrationsFactory(),
+    ))
+    if err != nil {
+        t.Fatalf("could not create migrations: %v", err)
+    }
+
+    sqlStoreMigrator := sqlstoremigrator.New(context.Background(), instrumentationtest.New().ToProviderSettings(), sqlStore, migrations, config)
+
+    err = sqlStoreMigrator.Migrate(context.Background())
+    if err != nil {
+        t.Fatalf("could not run migrations: %v", err)
+    }
+
+    err = dao.InitDao(sqlStore.SQLxDB())
+    if err != nil {
+        t.Fatalf("could not init dao: %v", err)
+    }
+
+    dashboards.InitDB(sqlStore.SQLxDB())
+
+    return sqlStore.SQLxDB()
 }
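Because the helper still hands back a plain *sqlx.DB (now backed by the sqlite provider, with migrations applied), callers can keep using it for direct SQL. A hedged sketch, assuming only the sqlx API; the fixture table and values below are illustrative and are not part of the migrations:

func TestSeedWithMigratedTestDB(t *testing.T) {
    testDB := NewQueryServiceDBForTests(t)

    // Direct SQL against the migrated sqlite file, e.g. for seeding fixtures.
    if _, err := testDB.Exec(`CREATE TABLE IF NOT EXISTS example_fixture (id TEXT PRIMARY KEY)`); err != nil {
        t.Fatalf("could not create fixture table: %v", err)
    }
    if _, err := testDB.Exec(`INSERT INTO example_fixture (id) VALUES ('fixture-1')`); err != nil {
        t.Fatalf("could not insert fixture row: %v", err)
    }
}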
@@ -8,18 +8,19 @@ import (
     "os/signal"
     "syscall"

+    "go.signoz.io/signoz/pkg/factory"
     "go.uber.org/zap"
 )

 type Registry struct {
-    services []NamedService
+    services []factory.Service
     logger   *zap.Logger
     startCh  chan error
     stopCh   chan error
 }

 // New creates a new registry of services. It needs at least one service in the input.
-func New(logger *zap.Logger, services ...NamedService) (*Registry, error) {
+func New(logger *zap.Logger, services ...factory.Service) (*Registry, error) {
     if logger == nil {
         return nil, fmt.Errorf("cannot build registry, logger is required")
     }
@@ -38,7 +39,7 @@ func New(logger *zap.Logger, services ...NamedService) (*Registry, error) {

 func (r *Registry) Start(ctx context.Context) error {
     for _, s := range r.services {
-        go func(s Service) {
+        go func(s factory.Service) {
             err := s.Start(ctx)
             r.startCh <- err
         }(s)
@@ -66,7 +67,7 @@ func (r *Registry) Wait(ctx context.Context) error {

 func (r *Registry) Stop(ctx context.Context) error {
     for _, s := range r.services {
-        go func(s Service) {
+        go func(s factory.Service) {
             err := s.Stop(ctx)
             r.stopCh <- err
         }(s)
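With the registry now operating on factory.Service, a caller can wire services end to end. A hedged sketch written as if it lived in the registry package, based only on the New/Start/Wait/Stop signatures visible in this diff; the function name is illustrative and error handling is simplified:

func runServices(ctx context.Context, logger *zap.Logger, services ...factory.Service) error {
    registry, err := New(logger, services...)
    if err != nil {
        return err
    }

    // Start launches every registered service in its own goroutine.
    if err := registry.Start(ctx); err != nil {
        return err
    }

    // Wait blocks until the registry decides the services should stop.
    if err := registry.Wait(ctx); err != nil {
        return err
    }

    // Stop asks all services to shut down and collects their errors.
    return registry.Stop(ctx)
}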
@@ -6,14 +6,15 @@ import (
     "testing"

     "github.com/stretchr/testify/require"
+    "go.signoz.io/signoz/pkg/factory/factorytest"
     "go.uber.org/zap"
 )

 func TestRegistryWith2HttpServers(t *testing.T) {
-    http1, err := newHttpService("http1")
+    http1, err := factorytest.NewHttpService("http1")
     require.NoError(t, err)

-    http2, err := newHttpService("http2")
+    http2, err := factorytest.NewHttpService("http2")
     require.NoError(t, err)

     registry, err := New(zap.NewNop(), http1, http2)
@@ -34,10 +35,10 @@ func TestRegistryWith2HttpServers(t *testing.T) {
 }

 func TestRegistryWith2HttpServersWithoutWait(t *testing.T) {
-    http1, err := newHttpService("http1")
+    http1, err := factorytest.NewHttpService("http1")
     require.NoError(t, err)

-    http2, err := newHttpService("http2")
+    http2, err := factorytest.NewHttpService("http2")
     require.NoError(t, err)

     registry, err := New(zap.NewNop(), http1, http2)
pkg/signoz/config.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package signoz

import (
    "context"

    "go.signoz.io/signoz/pkg/cache"
    "go.signoz.io/signoz/pkg/config"
    "go.signoz.io/signoz/pkg/factory"
    "go.signoz.io/signoz/pkg/instrumentation"
    "go.signoz.io/signoz/pkg/sqlstore"
    "go.signoz.io/signoz/pkg/web"
)

// Config defines the entire configuration of signoz.
type Config struct {
    Instrumentation instrumentation.Config `mapstructure:"instrumentation"`
    Web             web.Config             `mapstructure:"web"`
    Cache           cache.Config           `mapstructure:"cache"`
    SQLStore        sqlstore.Config        `mapstructure:"sqlstore"`
}

func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig) (Config, error) {
    configFactories := []factory.ConfigFactory{
        instrumentation.NewConfigFactory(),
        web.NewConfigFactory(),
        sqlstore.NewConfigFactory(),
        cache.NewConfigFactory(),
    }

    conf, err := config.New(ctx, resolverConfig, configFactories)
    if err != nil {
        return Config{}, err
    }

    var config Config
    if err := conf.Unmarshal("", &config); err != nil {
        return Config{}, err
    }

    return config, nil
}
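A hedged usage sketch for the new package: how a caller might load the aggregate Config at startup. It is written as if it lived in package signoz; the function name is illustrative, and building the config.ResolverConfig is assumed to happen elsewhere (it is not part of this diff):

func loadSignozConfig(ctx context.Context, resolverConfig config.ResolverConfig) (Config, error) {
    cfg, err := NewConfig(ctx, resolverConfig)
    if err != nil {
        return Config{}, err
    }

    // Each registered config factory contributes one typed section, e.g. the
    // sqlstore provider configured under the "sqlstore" key.
    _ = cfg.SQLStore.Provider

    return cfg, nil
}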