mirror of https://github.com/SigNoz/signoz.git
synced 2026-02-07 10:22:12 +00:00

Compare commits: issue_406 ... pkg/config
18 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | b10a9061ec |  |
|  | fae0581d83 |  |
|  | 9d6928b3e6 |  |
|  | baaffd79de |  |
|  | 59c416c222 |  |
|  | 63a7ae586b |  |
|  | f0cef2d9d5 |  |
|  | 616ada91dd |  |
|  | aab6a1c914 |  |
|  | bc708cd891 |  |
|  | c2a11960c1 |  |
|  | 6d76d56dbd |  |
|  | 1977756591 |  |
|  | 1d9c10a214 |  |
|  | 57a25bf98f |  |
|  | 35c310aa9d |  |
|  | 7c0481de7d |  |
|  | 147cf28024 |  |
@@ -1,4 +1,4 @@
-##################### SigNoz Configuration Example #####################
+##################### SigNoz Configuration Defaults #####################
 #
 # Do not modify this file
 #
@@ -47,7 +47,7 @@ cache:
 # Time-to-live for cache entries in memory. Specify the duration in ns
 ttl: 60000000000
 # The interval at which the cache will be cleaned up
-cleanupInterval: 1m
+cleanupInterval:
 # redis: Uses Redis as the caching backend.
 redis:
 # The hostname or IP address of the Redis server.
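A note on the `ttl` field above: the value is a raw nanosecond count, so 60000000000 is one minute. A minimal Go sketch (illustration only, not SigNoz code) showing the equivalence, since Go's `time.Duration` is also nanosecond-based:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The YAML value above, read as nanoseconds: time.Duration is
	// an int64 nanosecond count in Go's standard library.
	ttl := time.Duration(60000000000)
	fmt.Println(ttl) // prints "1m0s"
}
```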
@@ -32,11 +32,6 @@ has_cmd() {
     command -v "$1" > /dev/null 2>&1
 }

-# Check if docker compose plugin is present
-has_docker_compose_plugin() {
-    docker compose version > /dev/null 2>&1
-}
-
 is_mac() {
     [[ $OSTYPE == darwin* ]]
 }
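The removed `has_docker_compose_plugin` helper detects the compose plugin by checking whether `docker compose version` exits with status zero. For illustration, the same probe written in Go (a hedged sketch, not part of this repository):

```go
package main

import (
	"fmt"
	"os/exec"
)

// hasDockerComposePlugin mirrors the removed shell helper: run
// `docker compose version` and treat a zero exit status as "plugin present".
func hasDockerComposePlugin() bool {
	return exec.Command("docker", "compose", "version").Run() == nil
}

func main() {
	fmt.Println("compose plugin present:", hasDockerComposePlugin())
}
```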
@@ -188,7 +183,9 @@ install_docker() {
         $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
         echo "Installing docker"
         $yum_cmd install docker-ce docker-ce-cli containerd.io

     fi

 }

 compose_version () {
@@ -230,6 +227,12 @@ start_docker() {
         echo "Starting docker service"
         $sudo_cmd systemctl start docker.service
     fi
+    # if [[ -z $sudo_cmd ]]; then
+    #     docker ps > /dev/null && true
+    #     if [[ $? -ne 0 ]]; then
+    #         request_sudo
+    #     fi
+    # fi
     if [[ -z $sudo_cmd ]]; then
         if ! docker ps > /dev/null && true; then
             request_sudo
@@ -262,7 +265,7 @@ bye() {  # Prints a friendly good bye message and exits the script.

     echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
     echo ""
-    echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

     echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
     echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -293,6 +296,11 @@ request_sudo() {
     if (( $EUID != 0 )); then
         sudo_cmd="sudo"
        echo -e "Please enter your sudo password, if prompted."
+        # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
+        # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
+        #     echo "Need sudo privileges to proceed with the installation."
+        #     exit 1;
+        # fi
         if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
             echo "Need sudo privileges to proceed with the installation."
             exit 1;
@@ -309,7 +317,6 @@ echo -e "👋 Thank you for trying out SigNoz! "
 echo ""

 sudo_cmd=""
-docker_compose_cmd=""

 # Check sudo permissions
 if (( $EUID != 0 )); then
@@ -355,8 +362,28 @@ else
     SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
 fi

+# echo ""
+
+# echo -e "👉 ${RED}Two ways to go forward\n"
+# echo -e "${RED}1) ClickHouse as database (default)\n"
+# read -p "⚙️  Enter your preference (1/2):" choice_setup
+
+# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
+# do
+#     # echo $choice_setup
+#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
+#     read -p "⚙️  Enter your preference (1/2): " choice_setup
+#     # echo $choice_setup
+# done
+
+# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
+#     setup_type='clickhouse'
+# fi
+
 setup_type='clickhouse'

+# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
+
 # Run bye if failure happens
 trap bye EXIT
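For context on `SIGNOZ_INSTALLATION_ID` above: the script pipes system information through `$digest_cmd` and keeps the 64-character hex string, which is the length of a SHA-256 digest. A Go sketch of the same idea (assuming SHA-256; the script uses whichever digest command the host provides):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Stand-in for the script's $sysinfo; the real value comes from the host.
	sysinfo := "example-host-info"
	sum := sha256.Sum256([]byte(sysinfo))
	id := hex.EncodeToString(sum[:]) // 64 hex characters, matching the grep pattern
	fmt.Println(id)
}
```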
@@ -428,6 +455,8 @@ if [[ $desired_os -eq 0 ]]; then
     send_event "os_not_supported"
 fi

+# check_ports_occupied
+
 # Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
 if ! is_command_present docker; then
@@ -457,39 +486,27 @@ if ! is_command_present docker; then
     fi
 fi

-if has_docker_compose_plugin; then
-    echo "docker compose plugin is present, using it"
-    docker_compose_cmd="docker compose"
-else
-    docker_compose_cmd="docker-compose"
-    if ! is_command_present docker-compose; then
-        request_sudo
-        install_docker_compose
-    fi
-fi
+# Install docker-compose
+if ! is_command_present docker-compose; then
+    request_sudo
+    install_docker_compose
+fi

 start_docker

-# check for open ports, if signoz is not installed
-if is_command_present docker-compose; then
-    if $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
-        echo "SigNoz already installed, skipping the occupied ports check"
-    else
-        check_ports_occupied
-    fi
-fi
+# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true


 echo ""
 echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
-$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml pull
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull

 echo ""
 echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
 echo
-# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
+# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
 # script doesn't exit because this command looks like it failed to do it's thing.
-$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true

 wait_for_containers_start 60
 echo ""
@@ -499,7 +516,7 @@ if [[ $status_code -ne 200 ]]; then
     echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
     echo ""

-    echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

     echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
     echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -520,7 +537,7 @@ else
     echo "ℹ️  By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
     echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

-    echo "ℹ️  To bring down SigNoz and clean volumes : $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
+    echo "ℹ️  To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

     echo ""
     echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -1,16 +1,22 @@
 package app

 import (
+	"bufio"
+	"bytes"
 	"context"
+	"encoding/json"
+	"errors"
 	"fmt"
+	"io"
+	"net"
 	"net/http"
 	_ "net/http/pprof" // http profiler
 	"os"
+	"regexp"
 	"time"

 	"github.com/gorilla/handlers"
 	"github.com/gorilla/mux"
 	"github.com/jmoiron/sqlx"

 	"github.com/rs/cors"
@@ -23,13 +29,15 @@ import (
 	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
 	"go.signoz.io/signoz/ee/query-service/rules"
 	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 	"go.signoz.io/signoz/pkg/signoz"
 	"go.signoz.io/signoz/pkg/web"

 	licensepkg "go.signoz.io/signoz/ee/query-service/license"
 	"go.signoz.io/signoz/ee/query-service/usage"

 	"go.signoz.io/signoz/pkg/http/middleware"
 	"go.signoz.io/signoz/pkg/query-service/agentConf"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
 	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
@@ -56,7 +64,6 @@ import (
-const AppDbEngine = "sqlite"

 type ServerOptions struct {
 	SigNoz            *signoz.SigNoz
 	PromConfigPath    string
 	SkipTopLvlOpsPath string
 	HTTPHostPort      string
@@ -103,26 +110,15 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
 }

 // NewServer creates and initializes Server
-func NewServer(serverOptions *ServerOptions) (*Server, error) {
-
-	modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
+func NewServer(serverOptions *ServerOptions, config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
+	modelDao, err := dao.InitDao(signoz.SQLStore.SQLxDB())
 	if err != nil {
 		return nil, err
 	}

-	baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
-
-	if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
-		return nil, err
-	}
-
-	localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
-
-	if err != nil {
-		return nil, err
-	}
-
-	localDB.SetMaxOpenConns(10)
+	baseexplorer.InitWithDB(signoz.SQLStore.SQLxDB())
+	preferences.InitDB(signoz.SQLStore.SQLxDB())
+	dashboards.InitDB(signoz.SQLStore.SQLxDB())

 	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
 	if err != nil {
@@ -130,7 +126,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}

 	// initiate license manager
-	lm, err := licensepkg.StartManager("sqlite", localDB)
+	lm, err := licensepkg.StartManager("sqlite", signoz.SQLStore.SQLxDB())
 	if err != nil {
 		return nil, err
 	}
@@ -144,7 +140,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	if storage == "clickhouse" {
 		zap.L().Info("Using ClickHouse as datastore ...")
 		qb := db.NewDataConnector(
-			localDB,
+			signoz.SQLStore.SQLxDB(),
 			serverOptions.PromConfigPath,
 			lm,
 			serverOptions.MaxIdleConns,
@@ -180,7 +176,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	rm, err := makeRulesManager(serverOptions.PromConfigPath,
 		baseconst.GetAlertManagerApiPrefix(),
 		serverOptions.RuleRepoURL,
-		localDB,
+		signoz.SQLStore.SQLxDB(),
 		reader,
 		c,
 		serverOptions.DisableRules,
@@ -193,20 +189,24 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		return nil, err
 	}

-	// initiate opamp
-	_, err = opAmpModel.InitDB(localDB)
-	if err != nil {
-		return nil, err
-	}
+	go func() {
+		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
+		if err != nil {
+			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
+		}
+	}()

-	integrationsController, err := integrations.NewController(localDB)
+	// initiate opamp
+	_ = opAmpModel.InitDB(signoz.SQLStore.SQLxDB())
+
+	integrationsController, err := integrations.NewController(signoz.SQLStore.SQLxDB())
 	if err != nil {
 		return nil, fmt.Errorf(
 			"couldn't create integrations controller: %w", err,
 		)
 	}

-	cloudIntegrationsController, err := cloudintegrations.NewController(localDB)
+	cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore.SQLxDB())
 	if err != nil {
 		return nil, fmt.Errorf(
 			"couldn't create cloud provider integrations controller: %w", err,
@@ -215,7 +215,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

 	// ingestion pipelines manager
 	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-		localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
+		signoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
 	)
 	if err != nil {
 		return nil, err
@@ -223,8 +223,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

 	// initiate agent config handler
 	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-		DB:       localDB,
-		DBEngine: AppDbEngine,
+		DB:            signoz.SQLStore.SQLxDB(),
 		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
 	})
 	if err != nil {
@@ -286,7 +285,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		usageManager: usageManager,
 	}

-	httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
+	httpServer, err := s.createPublicServer(apiHandler, signoz.Web)

 	if err != nil {
 		return nil, err
@@ -312,17 +311,10 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,

 	r := baseapp.NewRouter()

-	timeoutMiddleware := middleware.NewTimeout(zap.L(), baseconst.TimeoutExcludedRoutes, 60*time.Second, 600*time.Second)
-	r.Use(timeoutMiddleware.Wrap)
-
-	analyticsMiddleware := middleware.NewAnalytics(zap.L())
-	r.Use(analyticsMiddleware.Wrap)
-
-	loggingMiddleware := middleware.NewLogging(zap.L())
-	r.Use(loggingMiddleware.Wrap)
-
-	logCommentMiddleware := middleware.NewLogComment(zap.L())
-	r.Use(logCommentMiddleware.Wrap)
+	r.Use(setTimeoutMiddleware)
+	r.Use(s.analyticsMiddleware)
+	r.Use(loggingMiddlewarePrivate)
+	r.Use(baseapp.LogCommentEnricher)

 	apiHandler.RegisterPrivateRoutes(r)

@@ -362,17 +354,10 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 	}
 	am := baseapp.NewAuthMiddleware(getUserFromRequest)

-	timeoutMiddleware := middleware.NewTimeout(zap.L(), baseconst.TimeoutExcludedRoutes, 60*time.Second, 600*time.Second)
-	r.Use(timeoutMiddleware.Wrap)
-
-	analyticsMiddleware := middleware.NewAnalytics(zap.L())
-	r.Use(analyticsMiddleware.Wrap)
-
-	loggingMiddleware := middleware.NewLogging(zap.L())
-	r.Use(loggingMiddleware.Wrap)
-
-	logCommentMiddleware := middleware.NewLogComment(zap.L())
-	r.Use(logCommentMiddleware.Wrap)
+	r.Use(setTimeoutMiddleware)
+	r.Use(s.analyticsMiddleware)
+	r.Use(loggingMiddleware)
+	r.Use(baseapp.LogCommentEnricher)

 	apiHandler.RegisterRoutes(r, am)
 	apiHandler.RegisterLogsRoutes(r, am)
@@ -404,6 +389,216 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 	}, nil
 }

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// loggingMiddleware is used for logging public api calls
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
+	})
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// loggingMiddlewarePrivate is used for logging private api calls
+// from internal services like alert manager
+func loggingMiddlewarePrivate(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+	})
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+type loggingResponseWriter struct {
+	http.ResponseWriter
+	statusCode int
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
+	// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
+	// we default to that status code.
+	return &loggingResponseWriter{w, http.StatusOK}
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+func (lrw *loggingResponseWriter) WriteHeader(code int) {
+	lrw.statusCode = code
+	lrw.ResponseWriter.WriteHeader(code)
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// Flush implements the http.Flush interface.
+func (lrw *loggingResponseWriter) Flush() {
+	lrw.ResponseWriter.(http.Flusher).Flush()
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// Support websockets
+func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	h, ok := lrw.ResponseWriter.(http.Hijacker)
+	if !ok {
+		return nil, nil, errors.New("hijack not supported")
+	}
+	return h.Hijack()
+}
+
+func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
+	pathToExtractBodyFromV3 := "/api/v3/query_range"
+	pathToExtractBodyFromV4 := "/api/v4/query_range"
+
+	data := map[string]interface{}{}
+	var postData *v3.QueryRangeParamsV3
+
+	if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
+		if r.Body != nil {
+			bodyBytes, err := io.ReadAll(r.Body)
+			if err != nil {
+				return nil, false
+			}
+			r.Body.Close() // must close
+			r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+			json.Unmarshal(bodyBytes, &postData)
+
+		} else {
+			return nil, false
+		}
+
+	} else {
+		return nil, false
+	}
+
+	referrer := r.Header.Get("Referer")
+
+	dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the referrer", zap.Error(err))
+	}
+	alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the alert: ", zap.Error(err))
+	}
+	logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
+	}
+	traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
+	}
+
+	queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
+
+	if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
+		if queryInfoResult.MetricsUsed {
+			telemetry.GetInstance().AddActiveMetricsUser()
+		}
+		if queryInfoResult.LogsUsed {
+			telemetry.GetInstance().AddActiveLogsUser()
+		}
+		if queryInfoResult.TracesUsed {
+			telemetry.GetInstance().AddActiveTracesUser()
+		}
+		data["metricsUsed"] = queryInfoResult.MetricsUsed
+		data["logsUsed"] = queryInfoResult.LogsUsed
+		data["tracesUsed"] = queryInfoResult.TracesUsed
+		data["filterApplied"] = queryInfoResult.FilterApplied
+		data["groupByApplied"] = queryInfoResult.GroupByApplied
+		data["aggregateOperator"] = queryInfoResult.AggregateOperator
+		data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
+		data["numberOfQueries"] = queryInfoResult.NumberOfQueries
+		data["queryType"] = queryInfoResult.QueryType
+		data["panelType"] = queryInfoResult.PanelType
+
+		userEmail, err := baseauth.GetEmailFromJwt(r.Context())
+		if err == nil {
+			// switch case to set data["screen"] based on the referrer
+			switch {
+			case dashboardMatched:
+				data["screen"] = "panel"
+			case alertMatched:
+				data["screen"] = "alert"
+			case logsExplorerMatched:
+				data["screen"] = "logs-explorer"
+			case traceExplorerMatched:
+				data["screen"] = "traces-explorer"
+			default:
+				data["screen"] = "unknown"
+				return data, true
+			}
+			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
+		}
+	}
+	return data, true
+}
+
+func getActiveLogs(path string, r *http.Request) {
+	// if path == "/api/v1/dashboards/{uuid}" {
+	// 	telemetry.GetInstance().AddActiveMetricsUser()
+	// }
+	if path == "/api/v1/logs" {
+		hasFilters := len(r.URL.Query().Get("q"))
+		if hasFilters > 0 {
+			telemetry.GetInstance().AddActiveLogsUser()
+		}
+
+	}
+
+}
+
+func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := baseauth.AttachJwtToContext(r.Context(), r)
+		r = r.WithContext(ctx)
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+
+		queryRangeData, metadataExists := extractQueryRangeData(path, r)
+		getActiveLogs(path, r)
+
+		lrw := NewLoggingResponseWriter(w)
+		next.ServeHTTP(lrw, r)
+
+		data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
+		if metadataExists {
+			for key, value := range queryRangeData {
+				data[key] = value
+			}
+		}
+
+		if _, ok := telemetry.EnabledPaths()[path]; ok {
+			userEmail, err := baseauth.GetEmailFromJwt(r.Context())
+			if err == nil {
+				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
+			}
+		}
+
+	})
+}
+
+// TODO(remove): Implemented at pkg/http/middleware/timeout.go
+func setTimeoutMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		var cancel context.CancelFunc
+		// check if route is not excluded
+		url := r.URL.Path
+		if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
+			ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
+			defer cancel()
+		}
+
+		r = r.WithContext(ctx)
+		next.ServeHTTP(w, r)
+	})
+}
+
 // initListeners initialises listeners of the server
 func (s *Server) initListeners() error {
 	// listen on public port
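The `loggingResponseWriter` added above captures the status code by wrapping `http.ResponseWriter`, defaulting to 200 because `WriteHeader` is never invoked for implicit OK responses. A self-contained sketch of the same wrapping technique (generic names, not the SigNoz types):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusRecorder wraps http.ResponseWriter to capture the status code.
// It defaults to 200 because handlers that never call WriteHeader
// implicitly respond with http.StatusOK.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok")) // no explicit WriteHeader call
	})
	rec := httptest.NewRecorder()
	sr := &statusRecorder{ResponseWriter: rec, status: http.StatusOK}
	h.ServeHTTP(sr, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(sr.status) // 200
}
```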
@@ -1,18 +1,11 @@
 package dao

 import (
-	"fmt"
-
+	"github.com/jmoiron/sqlx"
 	"go.signoz.io/signoz/ee/query-service/dao/sqlite"
 )

-func InitDao(engine, path string) (ModelDao, error) {
-
-	switch engine {
-	case "sqlite":
-		return sqlite.InitDB(path)
-	default:
-		return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
-	}
-
+func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
+	return sqlite.InitDB(inputDB)
 }
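This `InitDao` change is the dependency-injection move repeated throughout the diff: instead of each package opening SQLite from a DSN string, a single shared `*sqlx.DB` is created once and passed in. A minimal sketch of the pattern with hypothetical names (not the SigNoz API):

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// userStore is a hypothetical DAO; it receives a shared handle
// instead of opening its own connection from a DSN.
type userStore struct {
	db *sqlx.DB
}

func newUserStore(db *sqlx.DB) *userStore {
	return &userStore{db: db}
}

func main() {
	// One pool, created once at the composition root and injected everywhere.
	db, err := sqlx.Open("sqlite3", "app.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	store := newUserStore(db)
	_ = store
}
```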
@@ -65,8 +65,8 @@ func columnExists(db *sqlx.DB, tableName, columnName string) bool {
 }

 // InitDB creates and extends base model DB repository
-func InitDB(dataSourceName string) (*modelDao, error) {
-	dao, err := basedsql.InitDB(dataSourceName)
+func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
+	dao, err := basedsql.InitDB(inputDB)
 	if err != nil {
 		return nil, err
 	}
@@ -3,86 +3,28 @@ package main
 import (
 	"context"
 	"flag"
-	"log"
+	"fmt"
 	"os"
 	"os/signal"
-	"strconv"
 	"syscall"
-	"time"

-	"go.opentelemetry.io/otel/sdk/resource"
-	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
 	"go.signoz.io/signoz/ee/query-service/app"
-	"go.signoz.io/signoz/pkg/config"
+	signozconfig "go.signoz.io/signoz/pkg/config"
 	"go.signoz.io/signoz/pkg/config/envprovider"
 	"go.signoz.io/signoz/pkg/config/fileprovider"
+	"go.signoz.io/signoz/pkg/instrumentation"
 	"go.signoz.io/signoz/pkg/query-service/auth"
 	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
-	"go.signoz.io/signoz/pkg/query-service/migrate"
 	"go.signoz.io/signoz/pkg/query-service/version"
 	"go.signoz.io/signoz/pkg/signoz"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
+	pkgversion "go.signoz.io/signoz/pkg/version"

 	prommodel "github.com/prometheus/common/model"

-	zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
-	zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
-
 	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
 )

-func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
-	config := zap.NewProductionConfig()
-	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
-	defer stop()
-
-	config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
-	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
-	config.EncoderConfig.TimeKey = "timestamp"
-	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
-
-	otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
-	consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
-	defaultLogLevel := zapcore.InfoLevel
-
-	res := resource.NewWithAttributes(
-		semconv.SchemaURL,
-		semconv.ServiceNameKey.String("query-service"),
-	)
-
-	core := zapcore.NewTee(
-		zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
-	)
-
-	if enableQueryServiceLogOTLPExport {
-		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
-		defer cancel()
-		conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
-		if err != nil {
-			log.Fatalf("failed to establish connection: %v", err)
-		} else {
-			logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
-			if err != nil {
-				logExportBatchSizeInt = 512
-			}
-			ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
-				BatchSize:      logExportBatchSizeInt,
-				ResourceSchema: semconv.SchemaURL,
-				Resource:       res,
-			}))
-			core = zapcore.NewTee(
-				zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
-				zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
-			)
-		}
-	}
-	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
-
-	return logger
-}

 func init() {
 	prommodel.NameValidationScheme = prommodel.UTF8Validation
 }
@@ -100,7 +42,6 @@ func main() {
 	var useLogsNewSchema bool
 	var useTraceNewSchema bool
 	var cacheConfigPath, fluxInterval string
-	var enableQueryServiceLogOTLPExport bool
 	var preferSpanMetrics bool

 	var maxIdleConns int
@@ -121,37 +62,39 @@ func main() {
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
 	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
-	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
 	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
 	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
 	flag.Parse()

-	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
-
-	zap.ReplaceGlobals(loggerMgr)
-	defer loggerMgr.Sync() // flushes buffer, if any
-
 	version.PrintVersion()

-	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
+	config, err := signoz.NewConfig(context.Background(), signozconfig.ResolverConfig{
 		Uris: []string{"env:"},
 		ProviderFactories: []config.ProviderFactory{
 			envprovider.NewFactory(),
 			fileprovider.NewFactory(),
 		},
 	})
 	if err != nil {
 		zap.L().Fatal("Failed to create config", zap.Error(err))
 	}

-	signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
+	instrumentation, err := instrumentation.New(context.Background(), pkgversion.Build{}, config.Instrumentation)
 	if err != nil {
-		fmt.Println(err, err.Error())
+		zap.L().Fatal("Failed to create instrumentation", zap.Error(err))
 	}
+	defer instrumentation.Stop(context.Background())
+
+	zap.ReplaceGlobals(instrumentation.Logger())
+	defer instrumentation.Logger().Sync() // flushes buffer, if any
+
+	signoz, err := signoz.New(context.Background(), instrumentation, config, signoz.NewProviderFactories())
 	if err != nil {
 		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
 	}

 	serverOptions := &app.ServerOptions{
 		SigNoz:            signoz,
 		HTTPHostPort:      baseconst.HTTPHostPort,
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
@@ -179,13 +122,7 @@ func main() {
 		zap.L().Info("JWT secret key set successfully.")
 	}

-	if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
-		zap.L().Error("Failed to migrate", zap.Error(err))
-	} else {
-		zap.L().Info("Migration successful")
-	}
-
-	server, err := app.NewServer(serverOptions)
+	server, err := app.NewServer(serverOptions, config, signoz)
 	if err != nil {
 		zap.L().Fatal("Failed to create server", zap.Error(err))
 	}
@@ -1,4 +1,4 @@
-import axios from 'api';
+import { ApiBaseInstance } from 'api';
 import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
 import { AxiosError } from 'axios';
 import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -59,7 +59,7 @@ export const getHostLists = async (
 	headers?: Record<string, string>,
 ): Promise<SuccessResponse<HostListResponse> | ErrorResponse> => {
 	try {
-		const response = await axios.post('/hosts/list', props, {
+		const response = await ApiBaseInstance.post('/hosts/list', props, {
 			signal,
 			headers,
 		});
@@ -1,4 +1,4 @@
-import axios from 'api';
+import { ApiBaseInstance } from 'api';
 import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
 import { AxiosError } from 'axios';
 import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -47,7 +47,7 @@ export const getK8sNodesList = async (
 	headers?: Record<string, string>,
 ): Promise<SuccessResponse<K8sNodesListResponse> | ErrorResponse> => {
 	try {
-		const response = await axios.post('/nodes/list', props, {
+		const response = await ApiBaseInstance.post('/nodes/list', props, {
 			signal,
 			headers,
 		});
@@ -1,4 +1,4 @@
-import axios from 'api';
+import { ApiBaseInstance } from 'api';
 import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
 import { AxiosError } from 'axios';
 import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -75,7 +75,7 @@ export const getK8sPodsList = async (
 	headers?: Record<string, string>,
 ): Promise<SuccessResponse<K8sPodsListResponse> | ErrorResponse> => {
 	try {
-		const response = await axios.post('/pods/list', props, {
+		const response = await ApiBaseInstance.post('/pods/list', props, {
 			signal,
 			headers,
 		});
@@ -219,14 +219,12 @@ function ListLogView({
 			<LogStateIndicator type={logType} fontSize={fontSize} />
 			<div>
 				<LogContainer fontSize={fontSize}>
-					{updatedSelecedFields.some((field) => field.name === 'body') && (
-						<LogGeneralField
-							fieldKey="Log"
-							fieldValue={flattenLogData.body}
-							linesPerRow={linesPerRow}
-							fontSize={fontSize}
-						/>
-					)}
+					<LogGeneralField
+						fieldKey="Log"
+						fieldValue={flattenLogData.body}
+						linesPerRow={linesPerRow}
+						fontSize={fontSize}
+					/>
 					{flattenLogData.stream && (
 						<LogGeneralField
 							fieldKey="Stream"
@@ -234,27 +232,23 @@ function ListLogView({
 							fontSize={fontSize}
 						/>
 					)}
-					{updatedSelecedFields.some((field) => field.name === 'timestamp') && (
-						<LogGeneralField
-							fieldKey="Timestamp"
-							fieldValue={timestampValue}
-							fontSize={fontSize}
-						/>
-					)}
+					<LogGeneralField
+						fieldKey="Timestamp"
+						fieldValue={timestampValue}
+						fontSize={fontSize}
+					/>

-					{updatedSelecedFields
-						.filter((field) => !['timestamp', 'body'].includes(field.name))
-						.map((field) =>
-							isValidLogField(flattenLogData[field.name] as never) ? (
-								<LogSelectedField
-									key={field.name}
-									fieldKey={field.name}
-									fieldValue={flattenLogData[field.name] as never}
-									onAddToQuery={onAddToQuery}
-									fontSize={fontSize}
-								/>
-							) : null,
-						)}
+					{updatedSelecedFields.map((field) =>
+						isValidLogField(flattenLogData[field.name] as never) ? (
+							<LogSelectedField
+								key={field.name}
+								fieldKey={field.name}
+								fieldValue={flattenLogData[field.name] as never}
+								onAddToQuery={onAddToQuery}
+								fontSize={fontSize}
+							/>
+						) : null,
+					)}
 				</LogContainer>
 			</div>
 		</div>
@@ -73,7 +73,6 @@ function RawLogView({
 	);

 	const attributesValues = updatedSelecedFields
-		.filter((field) => !['timestamp', 'body'].includes(field.name))
 		.map((field) => flattenLogData[field.name])
 		.filter((attribute) => {
 			// loadash isEmpty doesnot work with numbers
@@ -93,40 +92,19 @@ function RawLogView({
 	const { formatTimezoneAdjustedTimestamp } = useTimezone();

 	const text = useMemo(() => {
-		const parts = [];
-
-		// Check if timestamp is selected
-		const showTimestamp = selectedFields.some(
-			(field) => field.name === 'timestamp',
-		);
-		if (showTimestamp) {
-			const date =
-				typeof data.timestamp === 'string'
-					? formatTimezoneAdjustedTimestamp(
-							data.timestamp,
-							'YYYY-MM-DD HH:mm:ss.SSS',
-					  )
-					: formatTimezoneAdjustedTimestamp(
-							data.timestamp / 1e6,
-							'YYYY-MM-DD HH:mm:ss.SSS',
-					  );
-			parts.push(date);
-		}
-
-		// Check if body is selected
-		const showBody = selectedFields.some((field) => field.name === 'body');
-		if (showBody) {
-			parts.push(`${attributesText} ${data.body}`);
-		} else {
-			parts.push(attributesText);
-		}
-
-		return parts.join(' | ');
+		const date =
+			typeof data.timestamp === 'string'
+				? formatTimezoneAdjustedTimestamp(data.timestamp, 'YYYY-MM-DD HH:mm:ss.SSS')
+				: formatTimezoneAdjustedTimestamp(
+						data.timestamp / 1e6,
+						'YYYY-MM-DD HH:mm:ss.SSS',
+				  );
+
+		return `${date} | ${attributesText} ${data.body}`;
 	}, [
-		selectedFields,
-		attributesText,
 		data.timestamp,
 		data.body,
+		attributesText,
 		formatTimezoneAdjustedTimestamp,
 	]);
@@ -48,7 +48,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {

 	const columns: ColumnsType<Record<string, unknown>> = useMemo(() => {
 		const fieldColumns: ColumnsType<Record<string, unknown>> = fields
-			.filter((e) => !['id', 'body', 'timestamp'].includes(e.name))
+			.filter((e) => e.name !== 'id')
 			.map(({ name }) => ({
 				title: name,
 				dataIndex: name,
@@ -91,67 +91,55 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
 					),
 				}),
 			},
-			...(fields.some((field) => field.name === 'timestamp')
-				? [
-						{
-							title: 'timestamp',
-							dataIndex: 'timestamp',
-							key: 'timestamp',
-							// https://github.com/ant-design/ant-design/discussions/36886
-							render: (
-								field: string | number,
-							): ColumnTypeRender<Record<string, unknown>> => {
-								const date =
-									typeof field === 'string'
-										? formatTimezoneAdjustedTimestamp(field, 'YYYY-MM-DD HH:mm:ss.SSS')
-										: formatTimezoneAdjustedTimestamp(
-												field / 1e6,
-												'YYYY-MM-DD HH:mm:ss.SSS',
-										  );
-								return {
-									children: (
-										<div className="table-timestamp">
-											<Typography.Paragraph ellipsis className={cx('text', fontSize)}>
-												{date}
-											</Typography.Paragraph>
-										</div>
-									),
-								};
-							},
-						},
-				  ]
-				: []),
+			{
+				title: 'timestamp',
+				dataIndex: 'timestamp',
+				key: 'timestamp',
+				// https://github.com/ant-design/ant-design/discussions/36886
+				render: (field): ColumnTypeRender<Record<string, unknown>> => {
+					const date =
+						typeof field === 'string'
+							? formatTimezoneAdjustedTimestamp(field, 'YYYY-MM-DD HH:mm:ss.SSS')
+							: formatTimezoneAdjustedTimestamp(
+									field / 1e6,
+									'YYYY-MM-DD HH:mm:ss.SSS',
+							  );
+					return {
+						children: (
+							<div className="table-timestamp">
+								<Typography.Paragraph ellipsis className={cx('text', fontSize)}>
+									{date}
+								</Typography.Paragraph>
+							</div>
+						),
+					};
+				},
+			},
 			...(appendTo === 'center' ? fieldColumns : []),
-			...(fields.some((field) => field.name === 'body')
-				? [
-						{
-							title: 'body',
-							dataIndex: 'body',
-							key: 'body',
-							render: (
-								field: string | number,
-							): ColumnTypeRender<Record<string, unknown>> => ({
-								props: {
-									style: defaultTableStyle,
-								},
-								children: (
-									<TableBodyContent
-										dangerouslySetInnerHTML={{
-											__html: convert.toHtml(
-												dompurify.sanitize(unescapeString(field as string), {
-													FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
-												}),
-											),
-										}}
-										fontSize={fontSize}
-										linesPerRow={linesPerRow}
-										isDarkMode={isDarkMode}
-									/>
-								),
-							}),
-						},
-				  ]
-				: []),
+			{
+				title: 'body',
+				dataIndex: 'body',
+				key: 'body',
+				render: (field): ColumnTypeRender<Record<string, unknown>> => ({
+					props: {
+						style: defaultTableStyle,
+					},
+					children: (
+						<TableBodyContent
+							dangerouslySetInnerHTML={{
+								__html: convert.toHtml(
+									dompurify.sanitize(unescapeString(field), {
+										FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
+									}),
+								),
+							}}
+							fontSize={fontSize}
+							linesPerRow={linesPerRow}
+							isDarkMode={isDarkMode}
+						/>
+					),
+				}),
+			},
 			...(appendTo === 'end' ? fieldColumns : []),
 		];
 	}, [
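The timestamp renderers above branch on the value's type and divide numbers by 1e6 because the backend emits epoch nanoseconds while the JavaScript formatter expects milliseconds. The same conversion in Go, as a small sketch (illustrative values):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	nanos := int64(1700000000123456789) // epoch nanoseconds, as the API returns

	// What the frontend does before formatting: ns -> ms (the `field / 1e6`).
	millis := nanos / int64(time.Millisecond)

	// Go can also format the nanosecond value directly.
	t := time.Unix(0, nanos).UTC()
	fmt.Println(millis, t.Format("2006-01-02 15:04:05.000"))
}
```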
@@ -1,7 +1,8 @@
 import 'uplot/dist/uPlot.min.css';
 import './AnomalyAlertEvaluationView.styles.scss';

-import { Checkbox, Input, Typography } from 'antd';
+import { Checkbox, Typography } from 'antd';
+import Search from 'antd/es/input/Search';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import useDebouncedFn from 'hooks/useDebouncedFunction';
 import { useResizeObserver } from 'hooks/useDimensions';
@@ -15,8 +16,6 @@ import uPlot from 'uplot';

 import tooltipPlugin from './tooltipPlugin';

-const { Search } = Input;
-
 function UplotChart({
 	data,
 	options,
@@ -1,146 +0,0 @@
-import ROUTES from 'constants/routes';
-import CreateAlertPage from 'pages/CreateAlert';
-import { MemoryRouter, Route } from 'react-router-dom';
-import { act, fireEvent, render } from 'tests/test-utils';
-import { AlertTypes } from 'types/api/alerts/alertTypes';
-
-import { ALERT_TYPE_TO_TITLE, ALERT_TYPE_URL_MAP } from './constants';
-
-jest.mock('react-router-dom', () => ({
-	...jest.requireActual('react-router-dom'),
-	useLocation: (): { pathname: string } => ({
-		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALERTS_NEW}`,
-	}),
-}));
-
-jest.mock('uplot', () => {
-	const paths = {
-		spline: jest.fn(),
-		bars: jest.fn(),
-	};
-	const uplotMock = jest.fn(() => ({
-		paths,
-	}));
-	return {
-		paths,
-		default: uplotMock,
-	};
-});
-
-let mockWindowOpen: jest.Mock;
-
-window.ResizeObserver =
-	window.ResizeObserver ||
-	jest.fn().mockImplementation(() => ({
-		disconnect: jest.fn(),
-		observe: jest.fn(),
-		unobserve: jest.fn(),
-	}));
-
-function findLinkForAlertType(
-	links: HTMLElement[],
-	alertType: AlertTypes,
-): HTMLElement {
-	const link = links.find(
-		(el) =>
-			el.closest('[data-testid]')?.getAttribute('data-testid') ===
-			`alert-type-card-${alertType}`,
-	);
-	expect(link).toBeTruthy();
-	return link as HTMLElement;
-}
-
-function clickLinkAndVerifyRedirect(
-	link: HTMLElement,
-	expectedUrl: string,
-): void {
-	fireEvent.click(link);
-	expect(mockWindowOpen).toHaveBeenCalledWith(expectedUrl, '_blank');
-}
-
-describe('Alert rule documentation redirection', () => {
-	let renderResult: ReturnType<typeof render>;
-
-	beforeAll(() => {
-		mockWindowOpen = jest.fn();
-		window.open = mockWindowOpen;
-	});
-
-	beforeEach(() => {
-		act(() => {
-			renderResult = render(
-				<MemoryRouter initialEntries={['/alerts/new']}>
-					<Route path={ROUTES.ALERTS_NEW}>
-						<CreateAlertPage />
-					</Route>
-				</MemoryRouter>,
-			);
-		});
-	});
-
-	it('should render alert type cards', () => {
-		const { getByText, getAllByText } = renderResult;
-
-		// Check for the heading
-		expect(getByText('choose_alert_type')).toBeInTheDocument();
-
-		// Check for alert type titles and descriptions
-		Object.values(AlertTypes).forEach((alertType) => {
-			const title = ALERT_TYPE_TO_TITLE[alertType];
-			expect(getByText(title)).toBeInTheDocument();
-			expect(getByText(`${title}_desc`)).toBeInTheDocument();
-		});
-
-		const clickHereLinks = getAllByText(
-			'Click here to see how to create a sample alert.',
-		);
-
-		expect(clickHereLinks).toHaveLength(5);
-	});
-
-	it('should redirect to correct documentation for each alert type', () => {
-		const { getAllByText } = renderResult;
-
-		const clickHereLinks = getAllByText(
-			'Click here to see how to create a sample alert.',
-		);
-		const alertTypeCount = Object.keys(AlertTypes).length;
-
-		expect(clickHereLinks).toHaveLength(alertTypeCount);
-
-		Object.values(AlertTypes).forEach((alertType) => {
-			const linkForAlertType = findLinkForAlertType(clickHereLinks, alertType);
-			const expectedUrl = ALERT_TYPE_URL_MAP[alertType];
-
-			clickLinkAndVerifyRedirect(linkForAlertType, expectedUrl.selection);
-		});
-
-		expect(mockWindowOpen).toHaveBeenCalledTimes(alertTypeCount);
-	});
-
-	Object.values(AlertTypes)
-		.filter((type) => type !== AlertTypes.ANOMALY_BASED_ALERT)
-		.forEach((alertType) => {
-			it(`should redirect to create alert page for ${alertType} and "Check an example alert" should redirect to the correct documentation`, () => {
-				const { getByTestId, getByRole } = renderResult;
-
-				const alertTypeLink = getByTestId(`alert-type-card-${alertType}`);
-
-				act(() => {
-					fireEvent.click(alertTypeLink);
-				});
-
-				act(() => {
-					fireEvent.click(
-						getByRole('button', {
-							name: /alert setup guide/i,
-						}),
-					);
-				});
-
-				expect(mockWindowOpen).toHaveBeenCalledWith(
-					ALERT_TYPE_URL_MAP[alertType].creation,
-					'_blank',
-				);
-			});
-		});
-});
@@ -1,71 +0,0 @@
-import ROUTES from 'constants/routes';
-import CreateAlertPage from 'pages/CreateAlert';
-import { MemoryRouter, Route } from 'react-router-dom';
-import { act, fireEvent, render } from 'tests/test-utils';
-import { AlertTypes } from 'types/api/alerts/alertTypes';
-
-import { ALERT_TYPE_URL_MAP } from './constants';
-
-jest.mock('react-router-dom', () => ({
-	...jest.requireActual('react-router-dom'),
-	useLocation: (): { pathname: string; search: string } => ({
-		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALERTS_NEW}`,
-		search: 'ruleType=anomaly_rule',
-	}),
-}));
-
-jest.mock('uplot', () => {
-	const paths = {
-		spline: jest.fn(),
-		bars: jest.fn(),
-	};
-	const uplotMock = jest.fn(() => ({
-		paths,
-	}));
-	return {
-		paths,
-		default: uplotMock,
-	};
-});
-
-window.ResizeObserver =
-	window.ResizeObserver ||
-	jest.fn().mockImplementation(() => ({
-		disconnect: jest.fn(),
-		observe: jest.fn(),
-		unobserve: jest.fn(),
-	}));
-
-describe('Anomaly Alert Documentation Redirection', () => {
-	let mockWindowOpen: jest.Mock;
-
-	beforeAll(() => {
-		mockWindowOpen = jest.fn();
-		window.open = mockWindowOpen;
-	});
-
-	it('should handle anomaly alert documentation redirection correctly', () => {
-		const { getByRole } = render(
-			<MemoryRouter initialEntries={['/alerts/new']}>
-				<Route path={ROUTES.ALERTS_NEW}>
-					<CreateAlertPage />
-				</Route>
-			</MemoryRouter>,
-		);
-
-		const alertType = AlertTypes.ANOMALY_BASED_ALERT;
-
-		act(() => {
-			fireEvent.click(
-				getByRole('button', {
-					name: /alert setup guide/i,
-				}),
-			);
-		});
-
-		expect(mockWindowOpen).toHaveBeenCalledWith(
-			ALERT_TYPE_URL_MAP[alertType].creation,
-			'_blank',
-		);
-	});
-});
@@ -1,47 +0,0 @@
-import { AlertTypes } from 'types/api/alerts/alertTypes';
-
-// since we don't have a card in alert creation for anomaly based alert
-
-export const ALERT_TYPE_URL_MAP: Record<
-	AlertTypes,
-	{ selection: string; creation: string }
-> = {
-	[AlertTypes.METRICS_BASED_ALERT]: {
-		selection:
-			'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
-		creation:
-			'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
-	},
-	[AlertTypes.LOGS_BASED_ALERT]: {
-		selection:
-			'https://signoz.io/docs/alerts-management/log-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
-		creation:
-			'https://signoz.io/docs/alerts-management/log-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
-	},
-	[AlertTypes.TRACES_BASED_ALERT]: {
-		selection:
-			'https://signoz.io/docs/alerts-management/trace-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
-		creation:
-			'https://signoz.io/docs/alerts-management/trace-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
-	},
-	[AlertTypes.EXCEPTIONS_BASED_ALERT]: {
-		selection:
-			'https://signoz.io/docs/alerts-management/exceptions-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
-		creation:
-			'https://signoz.io/docs/alerts-management/exceptions-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
-	},
-	[AlertTypes.ANOMALY_BASED_ALERT]: {
-		selection:
-			'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples',
-		creation:
-			'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
-	},
-};
-
-export const ALERT_TYPE_TO_TITLE: Record<AlertTypes, string> = {
-	[AlertTypes.METRICS_BASED_ALERT]: 'metric_based_alert',
-	[AlertTypes.LOGS_BASED_ALERT]: 'log_based_alert',
-	[AlertTypes.TRACES_BASED_ALERT]: 'traces_based_alert',
-	[AlertTypes.EXCEPTIONS_BASED_ALERT]: 'exceptions_based_alert',
-	[AlertTypes.ANOMALY_BASED_ALERT]: 'anomaly_based_alert',
-};
@@ -39,6 +39,10 @@
 	.ant-collapse-header {
 		border-bottom: 1px solid var(--bg-slate-400);
 		padding: 12px 8px;
+
+		&[aria-expanded='true'] {
+			background: var(--bg-ink-400);
+		}
 	}

 	.ant-collapse-content-box {
@@ -267,6 +271,8 @@

 	.group-by-label {
 		min-width: max-content;
+
+		color: var(--bg-vanilla-100, #c0c1c3);
 		font-size: 13px;
 		font-style: normal;
 		font-weight: 400;
@@ -276,6 +282,7 @@
 		border-radius: 2px 0px 0px 2px;
 		border: 1px solid var(--bg-slate-400, #1d212d);
+		border-right: none;
 		background: var(--bg-ink-100, #16181d);
 		border-top-right-radius: 0px;
 		border-bottom-right-radius: 0px;
@@ -481,7 +488,7 @@
 	.expanded-table-container {
 		border: 1px solid var(--bg-ink-400);
 		overflow-x: auto;
-		padding-left: 48px;
+		padding-left: 16px;

 		&::-webkit-scrollbar {
 			width: 0.1rem;
@@ -703,34 +710,8 @@
 	}

 	.ant-table-cell {
-		min-width: 140px !important;
-		max-width: 140px !important;
-	}
-
-	.ant-table-cell {
-		&:has(.pod-name-header) {
-			min-width: 250px !important;
-			max-width: 250px !important;
-		}
-	}
-
-	.ant-table-cell {
-		&:has(.med-col) {
-			min-width: 180px !important;
-			max-width: 180px !important;
-		}
-	}
-
-	.expanded-k8s-list-table {
-		.ant-table-cell {
-			min-width: 180px !important;
-			max-width: 180px !important;
-		}
-
-		.ant-table-row-expand-icon-cell {
-			min-width: 30px !important;
-			max-width: 30px !important;
-		}
+		min-width: 170px !important;
+		max-width: 170px !important;
 	}

 	.ant-table-row-expand-icon-cell {
@@ -827,24 +808,6 @@
 }

 .lightMode {
 	.infra-monitoring-container {
-		.k8s-list-table {
-			.ant-table-expanded-row {
-				&:hover {
-					background: var(--bg-vanilla-100) !important;
-				}
-
-				.ant-table-cell {
-					background: var(--bg-vanilla-100) !important;
-				}
-
-				.ant-table .ant-table-thead > tr > th {
-					padding: 4px 16px !important;
-				}
-			}
-		}
-
 		.event-content-container {
 			.ant-table {
 				background: var(--bg-vanilla-100);
@@ -868,11 +831,4 @@
 			}
 		}
 	}
-
-	.entity-group-header {
-		.ant-tag {
-			background-color: var(--bg-vanilla-300) !important;
-			color: var(--bg-slate-400) !important;
-		}
-	}
 }
@@ -9,7 +9,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
 import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
 import { Container, Workflow } from 'lucide-react';
 import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
-import { useState } from 'react';
+import { useCallback, useState } from 'react';
 import { Query } from 'types/api/queryBuilder/queryBuilderData';

 import {
@@ -24,7 +24,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
 	const [showFilters, setShowFilters] = useState(true);

 	const [selectedCategory, setSelectedCategory] = useState(K8sCategories.PODS);
-	const [quickFiltersLastUpdated, setQuickFiltersLastUpdated] = useState(-1);

 	const { currentQuery } = useQueryBuilder();

@@ -38,12 +37,14 @@ export default function InfraMonitoringK8s(): JSX.Element {
 		entityVersion: '',
 	});

-	const handleFilterChange = (query: Query): void => {
-		// update the current query with the new filters
-		// in infra monitoring k8s, we are using only one query, hence updating the 0th index of queryData
-		handleChangeQueryData('filters', query.builder.queryData[0].filters);
-		setQuickFiltersLastUpdated(Date.now());
-	};
+	const handleFilterChange = useCallback(
+		(query: Query): void => {
+			// update the current query with the new filters
+			// in infra monitoring k8s, we are using only one query, hence updating the 0th index of queryData
+			handleChangeQueryData('filters', query.builder.queryData[0].filters);
+		},
+		[handleChangeQueryData],
+	);

 	const items: CollapseProps['items'] = [
 		{
@@ -261,8 +262,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
 	const handleCategoryChange = (key: string | string[]): void => {
 		if (Array.isArray(key) && key.length > 0) {
 			setSelectedCategory(key[0] as string);
-			// Reset filters
-			handleChangeQueryData('filters', { items: [], op: 'and' });
 		}
 	};

@@ -303,7 +302,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
 					<K8sPodLists
 						isFiltersVisible={showFilters}
 						handleFilterVisibilityChange={handleFilterVisibilityChange}
-						quickFiltersLastUpdated={quickFiltersLastUpdated}
 					/>
 				)}

@@ -311,7 +309,6 @@ export default function InfraMonitoringK8s(): JSX.Element {
 					<K8sNodesList
 						isFiltersVisible={showFilters}
 						handleFilterVisibilityChange={handleFilterVisibilityChange}
-						quickFiltersLastUpdated={quickFiltersLastUpdated}
 					/>
 				)}
 			</div>
@@ -7,7 +7,7 @@ import { Button, Input } from 'antd';
|
||||
import { GripVertical, TableColumnsSplit, X } from 'lucide-react';
|
||||
import { useEffect, useRef, useState } from 'react';
|
||||
|
||||
import { IEntityColumn } from '../utils';
|
||||
import { IPodColumn } from '../utils';
|
||||
|
||||
function K8sFiltersSidePanel({
|
||||
defaultAddedColumns,
|
||||
@@ -17,12 +17,12 @@ function K8sFiltersSidePanel({
|
||||
onAddColumn = () => {},
|
||||
onRemoveColumn = () => {},
|
||||
}: {
|
||||
defaultAddedColumns: IEntityColumn[];
|
||||
defaultAddedColumns: IPodColumn[];
|
||||
onClose: () => void;
|
||||
addedColumns?: IEntityColumn[];
|
||||
availableColumns?: IEntityColumn[];
|
||||
onAddColumn?: (column: IEntityColumn) => void;
|
||||
onRemoveColumn?: (column: IEntityColumn) => void;
|
||||
addedColumns?: IPodColumn[];
|
||||
availableColumns?: IPodColumn[];
|
||||
onAddColumn?: (column: IPodColumn) => void;
|
||||
onRemoveColumn?: (column: IPodColumn) => void;
|
||||
}): JSX.Element {
|
||||
const [searchValue, setSearchValue] = useState('');
|
||||
const sidePanelRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
@@ -12,7 +12,7 @@ import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
|
||||
|
||||
import { K8sCategory } from './constants';
|
||||
import K8sFiltersSidePanel from './K8sFiltersSidePanel/K8sFiltersSidePanel';
|
||||
import { IEntityColumn } from './utils';
|
||||
import { IPodColumn } from './utils';
|
||||
|
||||
interface K8sHeaderProps {
|
||||
selectedGroupBy: BaseAutocompleteData[];
|
||||
@@ -20,11 +20,11 @@ interface K8sHeaderProps {
|
||||
isLoadingGroupByFilters: boolean;
|
||||
handleFiltersChange: (value: IBuilderQuery['filters']) => void;
|
||||
handleGroupByChange: (value: IBuilderQuery['groupBy']) => void;
|
||||
defaultAddedColumns: IEntityColumn[];
|
||||
addedColumns?: IEntityColumn[];
|
||||
availableColumns?: IEntityColumn[];
|
||||
onAddColumn?: (column: IEntityColumn) => void;
|
||||
onRemoveColumn?: (column: IEntityColumn) => void;
|
||||
defaultAddedColumns: IPodColumn[];
|
||||
addedColumns?: IPodColumn[];
|
||||
availableColumns?: IPodColumn[];
|
||||
onAddColumn?: (column: IPodColumn) => void;
|
||||
onRemoveColumn?: (column: IPodColumn) => void;
|
||||
handleFilterVisibilityChange: () => void;
|
||||
isFiltersVisible: boolean;
|
||||
entity: K8sCategory;
|
||||
|
||||
@@ -45,11 +45,9 @@ import {
|
||||
function K8sNodesList({
|
||||
isFiltersVisible,
|
||||
handleFilterVisibilityChange,
|
||||
quickFiltersLastUpdated,
|
||||
}: {
|
||||
isFiltersVisible: boolean;
|
||||
handleFilterVisibilityChange: () => void;
|
||||
quickFiltersLastUpdated: number;
|
||||
}): JSX.Element {
|
||||
const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
|
||||
(state) => state.globalTime,
|
||||
@@ -62,7 +60,7 @@ function K8sNodesList({
|
||||
const [orderBy, setOrderBy] = useState<{
|
||||
columnName: string;
|
||||
order: 'asc' | 'desc';
|
||||
} | null>({ columnName: 'cpu', order: 'desc' });
|
||||
} | null>(null);
|
||||
|
||||
const [selectedNodeUID, setselectedNodeUID] = useState<string | null>(null);
|
||||
|
||||
@@ -78,28 +76,12 @@ function K8sNodesList({
|
||||
{ value: string; label: string }[]
|
||||
>([]);
|
||||
|
||||
const { currentQuery } = useQueryBuilder();
|
||||
|
||||
const queryFilters = useMemo(
|
||||
() =>
|
||||
currentQuery?.builder?.queryData[0]?.filters || {
|
||||
items: [],
|
||||
op: 'and',
|
||||
},
|
||||
[currentQuery?.builder?.queryData],
|
||||
);
|
||||
|
||||
// Reset pagination every time quick filters are changed
|
||||
useEffect(() => {
|
||||
setCurrentPage(1);
|
||||
}, [quickFiltersLastUpdated]);
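The effect removed here implemented a simple trigger pattern: each filter edit wrote `Date.now()` into `quickFiltersLastUpdated`, and listing that timestamp as an effect dependency snapped the page number back to 1. A standalone sketch of the same idea:

```typescript
import { useEffect, useState } from 'react';

function usePageResetOnFilterChange(
  quickFiltersLastUpdated: number,
): [number, (page: number) => void] {
  const [currentPage, setCurrentPage] = useState(1);

  // Each Date.now() written by a filter edit changes the dependency,
  // re-running the effect and resetting pagination.
  useEffect(() => {
    setCurrentPage(1);
  }, [quickFiltersLastUpdated]);

  return [currentPage, setCurrentPage];
}
```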

const createFiltersForSelectedRowData = (
selectedRowData: K8sNodesRowData,
groupBy: IBuilderQuery['groupBy'],
): IBuilderQuery['filters'] => {
const baseFilters: IBuilderQuery['filters'] = {
items: [...queryFilters.items],
items: [],
op: 'and',
};

@@ -138,7 +120,6 @@ function K8sNodesList({
end: Math.floor(maxTime / 1000000),
orderBy,
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData, groupBy]);

const {
@@ -152,6 +133,8 @@ function K8sNodesList({
enabled: !!fetchGroupedByRowDataQuery && !!selectedRowData,
});

const { currentQuery } = useQueryBuilder();

const {
data: groupByFiltersData,
isLoading: isLoadingGroupByFilters,
@@ -170,6 +153,15 @@ function K8sNodesList({
K8sCategory.NODES,
);

const queryFilters = useMemo(
() =>
currentQuery?.builder?.queryData[0]?.filters || {
items: [],
op: 'and',
},
[currentQuery?.builder?.queryData],
);

const query = useMemo(() => {
const baseQuery = getK8sNodesListQuery();
const queryPayload = {
@@ -316,7 +308,6 @@ function K8sNodesList({
) : (
<div className="expanded-table">
<Table
className="expanded-table-view"
columns={nestedColumns as ColumnType<K8sNodesRowData>[]}
dataSource={formattedGroupedByNodesData}
pagination={false}
@@ -391,6 +382,18 @@ function K8sNodesList({
setselectedNodeUID(null);
};

const showsNodesTable =
!isError &&
!isLoading &&
!isFetching &&
!(formattedNodesData.length === 0 && queryFilters.items.length > 0);

const showNoFilteredNodesMessage =
!isFetching &&
!isLoading &&
formattedNodesData.length === 0 &&
queryFilters.items.length > 0;
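`showsNodesTable` and `showNoFilteredNodesMessage`, added above, are derived render flags rather than extra state: they are mutually exclusive and recomputed on every render. A compact sketch of the same derivation over illustrative inputs:

```typescript
// Illustrative shapes; the real component reads these from React Query state.
interface ListState {
  isError: boolean;
  isLoading: boolean;
  isFetching: boolean;
  rowCount: number;
  activeFilterCount: number;
}

function deriveRenderFlags(
  s: ListState,
): { showTable: boolean; showEmptyMessage: boolean } {
  const filteredToNothing = s.rowCount === 0 && s.activeFilterCount > 0;
  return {
    // Hide the table while loading, on error, or when filters matched nothing.
    showTable: !s.isError && !s.isLoading && !s.isFetching && !filteredToNothing,
    // Show the empty-state message only once loading has settled.
    showEmptyMessage: !s.isFetching && !s.isLoading && filteredToNothing,
  };
}
```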

const handleGroupByChange = useCallback(
(value: IBuilderQuery['groupBy']) => {
const groupBy = [];
@@ -439,53 +442,54 @@ function K8sNodesList({
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}

<Table
className="k8s-list-table nodes-list-table"
dataSource={isFetching || isLoading ? [] : formattedNodesData}
columns={columns}
pagination={{
current: currentPage,
pageSize,
total: totalCount,
showSizeChanger: false,
hideOnSinglePage: true,
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
{showNoFilteredNodesMessage && (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>

<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
</div>
),
}}
tableLayout="fixed"
onChange={handleTableChange}
onRow={(record): { onClick: () => void; className: string } => ({
onClick: (): void => handleRowClick(record),
className: 'clickable-row',
})}
expandable={{
expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
expandIcon: expandRowIconRenderer,
expandedRowKeys,
}}
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
</div>
)}

{(isFetching || isLoading) && <LoadingContainer />}

{showsNodesTable && (
<Table
className="k8s-list-table nodes-list-table"
dataSource={isFetching || isLoading ? [] : formattedNodesData}
columns={columns}
pagination={{
current: currentPage,
pageSize,
total: totalCount,
showSizeChanger: false,
hideOnSinglePage: true,
}}
scroll={{ x: true }}
loading={{
spinning: isFetching || isLoading,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
tableLayout="fixed"
onChange={handleTableChange}
onRow={(record): { onClick: () => void; className: string } => ({
onClick: (): void => handleRowClick(record),
className: 'clickable-row',
})}
expandable={{
expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
expandIcon: expandRowIconRenderer,
expandedRowKeys,
}}
/>
)}
<NodeDetails
node={selectedNodeData}
isModalTimeSelection

@@ -155,7 +155,6 @@ export default function Events({
id: event.data.id,
key: event.data.id,
resources_string: event.data.resources_string,
attributes_string: event.data.attributes_string,
}),
);

@@ -175,9 +174,7 @@ export default function Events({
}, [eventsData]);

const handleExpandRow = (record: EventDataType): JSX.Element => (
<EventContents
data={{ ...record.attributes_string, ...record.resources_string }}
/>
<EventContents data={record.resources_string} />
);

const handlePrev = (): void => {

@@ -12,7 +12,6 @@ import {
initialQueryState,
} from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { filterDuplicateFilters } from 'container/InfraMonitoringK8s/entityDetailUtils';
import {
CustomTimeType,
Time,
@@ -98,9 +97,22 @@ function NodeDetails({
op: '=',
value: node?.meta.k8s_node_name || '',
},
{
id: uuidv4(),
key: {
key: QUERY_KEYS.K8S_CLUSTER_NAME,
dataType: DataTypes.String,
type: 'resource',
isColumn: false,
isJSON: false,
id: 'k8s_node_name--string--resource--false',
},
op: '=',
value: node?.meta.k8s_cluster_name || '',
},
],
}),
[node?.meta.k8s_node_name],
[node?.meta.k8s_node_name, node?.meta.k8s_cluster_name],
);

const initialEventsFilters = useMemo(
@@ -227,13 +239,11 @@ function NodeDetails({

return {
op: 'AND',
items: filterDuplicateFilters(
[
...primaryFilters,
...newFilters,
...(paginationFilter ? [paginationFilter] : []),
].filter((item): item is TagFilterItem => item !== undefined),
),
items: [
...primaryFilters,
...newFilters,
...(paginationFilter ? [paginationFilter] : []),
].filter((item): item is TagFilterItem => item !== undefined),
};
});
},
@@ -256,14 +266,12 @@ function NodeDetails({

return {
op: 'AND',
items: filterDuplicateFilters(
[
...primaryFilters,
...value.items.filter(
(item) => item.key?.key !== QUERY_KEYS.K8S_NODE_NAME,
),
].filter((item): item is TagFilterItem => item !== undefined),
),
items: [
...primaryFilters,
...value.items.filter(
(item) => item.key?.key !== QUERY_KEYS.K8S_NODE_NAME,
),
].filter((item): item is TagFilterItem => item !== undefined),
};
});
},

@@ -64,7 +64,7 @@ export interface K8sNodesRowData {

const nodeGroupColumnConfig = {
title: (
<div className="column-header entity-group-header">
<div className="column-header node-group-header">
<Group size={14} /> NODE GROUP
</div>
),
@@ -74,7 +74,6 @@ const nodeGroupColumnConfig = {
width: 150,
align: 'left',
sorter: false,
className: 'column entity-group-header',
};

export const getK8sNodesListQuery = (): K8sNodesListPayload => ({
@@ -87,7 +86,7 @@ export const getK8sNodesListQuery = (): K8sNodesListPayload => ({

const columnsConfig = [
{
title: <div className="column-header-left name-header">Node Name</div>,
title: <div className="column-header-left">Node Name</div>,
dataIndex: 'nodeName',
key: 'nodeName',
ellipsis: true,
@@ -96,7 +95,7 @@ const columnsConfig = [
align: 'left',
},
{
title: <div className="column-header-left name-header">Cluster Name</div>,
title: <div className="column-header-left">Cluster Name</div>,
dataIndex: 'clusterName',
key: 'clusterName',
ellipsis: true,

@@ -15,7 +15,6 @@ import get from 'api/browser/localstorage/get';
import set from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import { K8sPodsListPayload } from 'api/infraMonitoring/getK8sPodsList';
import classNames from 'classnames';
import { useGetK8sPodsList } from 'hooks/infraMonitoring/useGetK8sPodsList';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -39,7 +38,7 @@ import {
formatDataForTable,
getK8sPodsListColumns,
getK8sPodsListQuery,
IEntityColumn,
IPodColumn,
K8sPodsRowData,
} from '../utils';
import PodDetails from './PodDetails/PodDetails';
@@ -48,11 +47,9 @@ import PodDetails from './PodDetails/PodDetails';
function K8sPodsList({
isFiltersVisible,
handleFilterVisibilityChange,
quickFiltersLastUpdated,
}: {
isFiltersVisible: boolean;
handleFilterVisibilityChange: () => void;
quickFiltersLastUpdated: number;
}): JSX.Element {
const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
(state) => state.globalTime,
@@ -60,9 +57,9 @@ function K8sPodsList({

const [currentPage, setCurrentPage] = useState(1);

const [addedColumns, setAddedColumns] = useState<IEntityColumn[]>([]);
const [addedColumns, setAddedColumns] = useState<IPodColumn[]>([]);

const [availableColumns, setAvailableColumns] = useState<IEntityColumn[]>(
const [availableColumns, setAvailableColumns] = useState<IPodColumn[]>(
defaultAvailableColumns,
);

@@ -107,11 +104,6 @@ function K8sPodsList({
K8sCategory.PODS, // infraMonitoringEntity
);

// Reset pagination every time quick filters are changed
useEffect(() => {
setCurrentPage(1);
}, [quickFiltersLastUpdated]);

useEffect(() => {
const addedColumns = JSON.parse(get('k8sPodsAddedColumns') ?? '[]');

@@ -132,7 +124,7 @@ function K8sPodsList({
const [orderBy, setOrderBy] = useState<{
columnName: string;
order: 'asc' | 'desc';
} | null>({ columnName: 'cpu', order: 'desc' });
} | null>(null);

const [selectedPodUID, setSelectedPodUID] = useState<string | null>(null);

@@ -170,7 +162,7 @@ function K8sPodsList({
selectedRowData: K8sPodsRowData,
): IBuilderQuery['filters'] => {
const baseFilters: IBuilderQuery['filters'] = {
items: [...query.filters.items],
items: [],
op: 'and',
};

@@ -209,7 +201,6 @@ function K8sPodsList({
end: Math.floor(maxTime / 1000000),
orderBy,
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [minTime, maxTime, orderBy, selectedRowData]);

const {
@@ -347,8 +338,20 @@ function K8sPodsList({
setSelectedPodUID(null);
};

const showPodsTable =
!isError &&
!isLoading &&
!isFetching &&
!(formattedPodsData.length === 0 && queryFilters.items.length > 0);

const showNoFilteredPodsMessage =
!isFetching &&
!isLoading &&
formattedPodsData.length === 0 &&
queryFilters.items.length > 0;

const handleAddColumn = useCallback(
(column: IEntityColumn): void => {
(column: IPodColumn): void => {
setAddedColumns((prev) => [...prev, column]);

setAvailableColumns((prev) => prev.filter((c) => c.value !== column.value));
@@ -375,7 +378,7 @@ function K8sPodsList({
}, [groupByFiltersData]);

const handleRemoveColumn = useCallback(
(column: IEntityColumn): void => {
(column: IPodColumn): void => {
setAddedColumns((prev) => prev.filter((c) => c.value !== column.value));

setAvailableColumns((prev) => [...prev, column]);
@@ -502,54 +505,54 @@ function K8sPodsList({
/>
{isError && <Typography>{data?.error || 'Something went wrong'}</Typography>}

<Table
className={classNames('k8s-list-table', {
'expanded-k8s-list-table': isGroupedByAttribute,
})}
dataSource={isFetching || isLoading ? [] : formattedPodsData}
columns={columns}
pagination={{
current: currentPage,
pageSize,
total: totalCount,
showSizeChanger: false,
hideOnSinglePage: true,
}}
loading={{
spinning: isFetching || isLoading,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText:
isFetching || isLoading ? null : (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>
{showNoFilteredPodsMessage && (
<div className="no-filtered-hosts-message-container">
<div className="no-filtered-hosts-message-content">
<img
src="/Icons/emptyState.svg"
alt="thinking-emoji"
className="empty-state-svg"
/>

<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
</div>
),
}}
scroll={{ x: true }}
tableLayout="fixed"
onChange={handleTableChange}
onRow={(record): { onClick: () => void; className: string } => ({
onClick: (): void => handleRowClick(record),
className: 'clickable-row',
})}
expandable={{
expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
expandIcon: expandRowIconRenderer,
expandedRowKeys,
}}
/>
<Typography.Text className="no-filtered-hosts-message">
This query had no results. Edit your query and try again!
</Typography.Text>
</div>
</div>
)}

{(isFetching || isLoading) && <LoadingContainer />}

{showPodsTable && (
<Table
className="k8s-list-table"
dataSource={isFetching || isLoading ? [] : formattedPodsData}
columns={columns}
pagination={{
current: currentPage,
pageSize,
total: totalCount,
showSizeChanger: false,
hideOnSinglePage: true,
}}
loading={{
spinning: isFetching || isLoading,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
scroll={{ x: true }}
tableLayout="fixed"
onChange={handleTableChange}
onRow={(record): { onClick: () => void; className: string } => ({
onClick: (): void => handleRowClick(record),
className: 'clickable-row',
})}
expandable={{
expandedRowRender: isGroupedByAttribute ? expandedRowRender : undefined,
expandIcon: expandRowIconRenderer,
expandedRowKeys,
}}
/>
)}

{selectedPodData && (
<PodDetails

@@ -155,7 +155,6 @@ export default function Events({
id: event.data.id,
key: event.data.id,
resources_string: event.data.resources_string,
attributes_string: event.data.attributes_string,
}),
);

@@ -175,9 +174,7 @@ export default function Events({
}, [eventsData]);

const handleExpandRow = (record: EventDataType): JSX.Element => (
<EventContents
data={{ ...record.attributes_string, ...record.resources_string }}
/>
<EventContents data={record.resources_string} />
);

const handlePrev = (): void => {

@@ -13,7 +13,6 @@ import {
initialQueryState,
} from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { filterDuplicateFilters } from 'container/InfraMonitoringK8s/entityDetailUtils';
import {
CustomTimeType,
Time,
@@ -51,7 +50,7 @@ import { PodDetailProps } from './PodDetail.interfaces';
import PodLogsDetailedView from './PodLogs/PodLogsDetailedView';
import PodTraces from './PodTraces/PodTraces';

const TimeRangeOffset = 1000000000;
const TimeRangeOffset = 1000000;
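The divisor drops from 1e9 to 1e6 here. Assuming the constant, like the `Math.floor(maxTime / 1000000)` calls elsewhere in this diff, divides nanosecond epoch timestamps, that is the difference between producing seconds and producing milliseconds:

```typescript
// SigNoz global time (minTime/maxTime) is an epoch timestamp in nanoseconds.
// Nanosecond epochs exceed Number.MAX_SAFE_INTEGER, so BigInt keeps this
// worked example exact; the app's Math.floor division tolerates the rounding.
const maxTimeNs = 1_700_000_000_000_000_000n;

const asMilliseconds = Number(maxTimeNs / 1_000_000n); // 1700000000000
const asSeconds = Number(maxTimeNs / 1_000_000_000n); // 1700000000

console.log(new Date(asMilliseconds).toISOString()); // 2023-11-14T22:13:20.000Z
```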

// eslint-disable-next-line sonarjs/cognitive-complexity
function PodDetails({
@@ -102,6 +101,19 @@ function PodDetails({
op: '=',
value: pod?.meta.k8s_pod_name || '',
},
{
id: uuidv4(),
key: {
key: QUERY_KEYS.K8S_CLUSTER_NAME,
dataType: DataTypes.String,
type: 'resource',
isColumn: false,
isJSON: false,
id: 'k8s_pod_name--string--resource--false',
},
op: '=',
value: pod?.meta.k8s_cluster_name || '',
},
{
id: uuidv4(),
key: {
@@ -117,7 +129,11 @@ function PodDetails({
},
],
}),
[pod?.meta.k8s_namespace_name, pod?.meta.k8s_pod_name],
[
pod?.meta.k8s_cluster_name,
pod?.meta.k8s_namespace_name,
pod?.meta.k8s_pod_name,
],
);

const initialEventsFilters = useMemo(
@@ -246,13 +262,11 @@ function PodDetails({

return {
op: 'AND',
items: filterDuplicateFilters(
[
...primaryFilters,
...newFilters,
...(paginationFilter ? [paginationFilter] : []),
].filter((item): item is TagFilterItem => item !== undefined),
),
items: [
...primaryFilters,
...newFilters,
...(paginationFilter ? [paginationFilter] : []),
].filter((item): item is TagFilterItem => item !== undefined),
};
});
},
@@ -277,14 +291,12 @@ function PodDetails({

return {
op: 'AND',
items: filterDuplicateFilters(
[
...primaryFilters,
...value.items.filter(
(item) => item.key?.key !== QUERY_KEYS.K8S_POD_NAME,
),
].filter((item): item is TagFilterItem => item !== undefined),
),
items: [
...primaryFilters,
...value.items.filter(
(item) => item.key?.key !== QUERY_KEYS.K8S_POD_NAME,
),
].filter((item): item is TagFilterItem => item !== undefined),
};
});
},

@@ -78,6 +78,8 @@ function PodTraces({
[currentQuery],
);

console.log({ updatedCurrentQuery });

const query = updatedCurrentQuery?.builder?.queryData[0] || null;

const { queryData: paginationQueryData } = useUrlQueryData<Pagination>(

@@ -100,13 +100,7 @@ export function getStrokeColorForLimitUtilization(value: number): string {
export const getProgressBarText = (percent: number): React.ReactNode =>
`${percent}%`;

export function EntityProgressBar({
value,
type,
}: {
value: number;
type: 'request' | 'limit';
}): JSX.Element {
export function EntityProgressBar({ value }: { value: number }): JSX.Element {
const percentage = Number((value * 100).toFixed(1));

return (
@@ -116,11 +110,7 @@ export function EntityProgressBar({
strokeLinecap="butt"
size="small"
status="normal"
strokeColor={
type === 'limit'
? getStrokeColorForLimitUtilization(value)
: getStrokeColorForRequestUtilization(value)
}
strokeColor={getStrokeColorForLimitUtilization(value)}
className="progress-bar"
showInfo={false}
/>
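The simplified `EntityProgressBar` above always colors by limit utilization; the actual cut-offs live in `getStrokeColorForLimitUtilization`, which this hunk references but does not show. A hypothetical threshold mapper of the same shape, with made-up colors and boundaries:

```typescript
// Hypothetical thresholds and colors, for illustration only; the real
// mapping is in getStrokeColorForLimitUtilization.
function strokeColorForUtilization(value: number): string {
  if (value >= 0.9) return '#e5484d'; // red: at or near the limit
  if (value >= 0.6) return '#f5a623'; // amber: worth watching
  return '#46a758'; // green: comfortable headroom
}

// The bar renders the ratio as a percentage with one decimal, as above:
const percentage = Number((0.8237 * 100).toFixed(1)); // 82.4
console.log(strokeColorForUtilization(0.8237), `${percentage}%`);
```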

@@ -150,8 +150,6 @@ export const PodsQuickFiltersConfig: IQuickFiltersConfig[] = [
isColumn: false,
isJSON: false,
},
aggregateOperator: 'noop',
aggregateAttribute: 'k8s_pod_cpu_utilization',
dataSource: DataSource.METRICS,
defaultOpen: false,
},

@@ -1,18 +0,0 @@
import { TagFilterItem } from 'types/api/queryBuilder/queryBuilderData';

export const filterDuplicateFilters = (
filters: TagFilterItem[],
): TagFilterItem[] => {
const uniqueFilters = [];
const seenIds = new Set();

// eslint-disable-next-line no-restricted-syntax
for (const filter of filters) {
if (!seenIds.has(filter.id)) {
seenIds.add(filter.id);
uniqueFilters.push(filter);
}
}

return uniqueFilters;
};
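The file deleted above held one helper: deduplicate `TagFilterItem`s by `id`, first occurrence winning. A self-contained sketch of the same behavior with a simplified item shape, plus a usage example:

```typescript
// Simplified shape; the real TagFilterItem carries a key object and more.
interface TagFilterItem {
  id: string;
  op: string;
  value: string;
}

// Keep the first filter seen for each id; later duplicates are dropped.
function filterDuplicateFilters(filters: TagFilterItem[]): TagFilterItem[] {
  const seenIds = new Set<string>();
  return filters.filter((filter) => {
    if (seenIds.has(filter.id)) return false;
    seenIds.add(filter.id);
    return true;
  });
}

const deduped = filterDuplicateFilters([
  { id: 'a', op: '=', value: 'pod-1' },
  { id: 'a', op: '=', value: 'pod-2' }, // dropped: same id as the first entry
  { id: 'b', op: '=', value: 'node-1' },
]);
console.log(deduped.length); // 2
```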
@@ -26,9 +26,16 @@ export interface IEntityColumn {
canRemove: boolean;
}

export interface IPodColumn {
label: string;
value: string;
id: string;
canRemove: boolean;
}

const columnProgressBarClassName = 'column-progress-bar';

export const defaultAddedColumns: IEntityColumn[] = [
export const defaultAddedColumns: IPodColumn[] = [
{
label: 'Pod name',
value: 'podName',
@@ -71,13 +78,12 @@ export const defaultAddedColumns: IEntityColumn[] = [
id: 'memory',
canRemove: false,
},
// TODO - Re-enable the column once backend issue is fixed
// {
// label: 'Restarts',
// value: 'restarts',
// id: 'restarts',
// canRemove: false,
// },
{
label: 'Restarts',
value: 'restarts',
id: 'restarts',
canRemove: false,
},
];

export const defaultAvailableColumns = [
@@ -125,7 +131,7 @@ export const getK8sPodsListQuery = (): K8sPodsListPayload => ({

const podGroupColumnConfig = {
title: (
<div className="column-header entity-group-header">
<div className="column-header pod-group-header">
<Group size={14} /> POD GROUP
</div>
),
@@ -134,7 +140,7 @@ const podGroupColumnConfig = {
ellipsis: true,
width: 180,
sorter: false,
className: 'column entity-group-header',
className: 'column column-pod-group',
};

export const dummyColumnConfig = {
@@ -154,11 +160,11 @@ const columnsConfig = [
key: 'podName',
width: 180,
ellipsis: true,
sorter: false,
sorter: true,
className: 'column column-pod-name',
},
{
title: <div className="column-header med-col">CPU Req Usage (%)</div>,
title: <div className="column-header">CPU Req Usage (%)</div>,
dataIndex: 'cpu_request',
key: 'cpu_request',
width: 180,
@@ -168,7 +174,7 @@ const columnsConfig = [
className: `column ${columnProgressBarClassName}`,
},
{
title: <div className="column-header med-col">CPU Limit Usage (%)</div>,
title: <div className="column-header">CPU Limit Usage (%)</div>,
dataIndex: 'cpu_limit',
key: 'cpu_limit',
width: 120,
@@ -186,7 +192,7 @@ const columnsConfig = [
className: `column ${columnProgressBarClassName}`,
},
{
title: <div className="column-heade med-col">Mem Req Usage (%)</div>,
title: <div className="column-header">Mem Req Usage (%)</div>,
dataIndex: 'memory_request',
key: 'memory_request',
width: 120,
@@ -195,7 +201,7 @@ const columnsConfig = [
className: `column ${columnProgressBarClassName}`,
},
{
title: <div className="column-header med-col">Mem Limit Usage (%)</div>,
title: <div className="column-header">Mem Limit Usage (%)</div>,
dataIndex: 'memory_limit',
key: 'memory_limit',
width: 120,
@@ -213,21 +219,20 @@ const columnsConfig = [
align: 'left',
className: `column ${columnProgressBarClassName}`,
},
// TODO - Re-enable the column once backend issue is fixed
// {
// title: (
// <div className="column-header">
// <Tooltip title="Container Restarts">Restarts</Tooltip>
// </div>
// ),
// dataIndex: 'restarts',
// key: 'restarts',
// width: 40,
// ellipsis: true,
// sorter: true,
// align: 'left',
// className: `column ${columnProgressBarClassName}`,
// },
{
title: (
<div className="column-header">
<Tooltip title="Container Restarts">Restarts</Tooltip>
</div>
),
dataIndex: 'restarts',
key: 'restarts',
width: 40,
ellipsis: true,
sorter: true,
align: 'left',
className: `column ${columnProgressBarClassName}`,
},
];

export const namespaceColumnConfig = {
@@ -246,7 +251,7 @@ export const nodeColumnConfig = {
dataIndex: 'node',
key: 'node',
width: 100,
sorter: false,
sorter: true,
ellipsis: true,
align: 'left',
className: 'column column-node',
@@ -257,7 +262,7 @@ export const clusterColumnConfig = {
dataIndex: 'cluster',
key: 'cluster',
width: 100,
sorter: false,
sorter: true,
ellipsis: true,
align: 'left',
className: 'column column-cluster',
@@ -270,7 +275,7 @@ export const columnConfigMap = {
};

export const getK8sPodsListColumns = (
addedColumns: IEntityColumn[],
addedColumns: IPodColumn[],
groupBy: IBuilderQuery['groupBy'],
): ColumnType<K8sPodsRowData>[] => {
const updatedColumnsConfig = [...columnsConfig];
@@ -336,7 +341,7 @@ export const formatDataForTable = (
attribute="CPU Request"
>
<div className="progress-container">
<EntityProgressBar value={pod.podCPURequest} type="request" />
<EntityProgressBar value={pod.podCPURequest} />
</div>
</ValidateColumnValueWrapper>
),
@@ -347,7 +352,7 @@ export const formatDataForTable = (
attribute="CPU Limit"
>
<div className="progress-container">
<EntityProgressBar value={pod.podCPULimit} type="limit" />
<EntityProgressBar value={pod.podCPULimit} />
</div>
</ValidateColumnValueWrapper>
),
@@ -363,7 +368,7 @@ export const formatDataForTable = (
attribute="Memory Request"
>
<div className="progress-container">
<EntityProgressBar value={pod.podMemoryRequest} type="request" />
<EntityProgressBar value={pod.podMemoryRequest} />
</div>
</ValidateColumnValueWrapper>
),
@@ -374,7 +379,7 @@ export const formatDataForTable = (
attribute="Memory Limit"
>
<div className="progress-container">
<EntityProgressBar value={pod.podMemoryLimit} type="limit" />
<EntityProgressBar value={pod.podMemoryLimit} />
</div>
</ValidateColumnValueWrapper>
),

@@ -58,11 +58,7 @@ import { useTranslation } from 'react-i18next';
import { useMutation } from 'react-query';
import { useCopyToClipboard } from 'react-use';
import { ErrorResponse } from 'types/api';
import {
AddLimitProps,
LimitProps,
UpdateLimitProps,
} from 'types/api/ingestionKeys/limits/types';
import { LimitProps } from 'types/api/ingestionKeys/limits/types';
import {
IngestionKeyProps,
PaginationProps,
@@ -73,18 +69,6 @@ const { Option } = Select;

const BYTES = 1073741824;

const COUNT_MULTIPLIER = {
thousand: 1000,
million: 1000000,
billion: 1000000000,
};

const SIGNALS_CONFIG = [
{ name: 'logs', usesSize: true, usesCount: false },
{ name: 'traces', usesSize: true, usesCount: false },
{ name: 'metrics', usesSize: false, usesCount: true },
];
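The removed `SIGNALS_CONFIG` was a capability table: logs and traces take byte-size limits, metrics take sample-count limits, and the form rendered only the inputs a signal supports. A sketch of how such a table gates fields:

```typescript
interface SignalConfig {
  name: string;
  usesSize: boolean; // byte-based daily/per-second limits
  usesCount: boolean; // sample-count-based limits
}

const signalsConfig: SignalConfig[] = [
  { name: 'logs', usesSize: true, usesCount: false },
  { name: 'traces', usesSize: true, usesCount: false },
  { name: 'metrics', usesSize: false, usesCount: true },
];

// Only offer the inputs a signal actually supports.
function fieldsFor(signal: string): string[] {
  const cfg = signalsConfig.find((c) => c.name === signal);
  if (!cfg) return [];
  return [
    ...(cfg.usesSize ? ['dailyLimit', 'secondsLimit'] : []),
    ...(cfg.usesCount ? ['dailyCount', 'secondsCount'] : []),
  ];
}

console.log(fieldsFor('metrics')); // ['dailyCount', 'secondsCount']
```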

// Using any type here because antd's DatePicker expects its own internal Dayjs type
// which conflicts with our project's Dayjs type that has additional plugins (tz, utc etc).
// eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/explicit-module-boundary-types
@@ -92,6 +76,8 @@ export const disabledDate = (current: any): boolean =>
// Disable all dates before today
current && current < dayjs().endOf('day');

const SIGNALS = ['logs', 'traces', 'metrics'];

export const showErrorNotification = (
notifications: NotificationInstance,
err: Error,
@@ -115,31 +101,6 @@ export const API_KEY_EXPIRY_OPTIONS: ExpiryOption[] = [
{ value: '0', label: 'No Expiry' },
];

const countToUnit = (count: number): { value: number; unit: string } => {
if (
count >= COUNT_MULTIPLIER.billion ||
count / COUNT_MULTIPLIER.million >= 1000
) {
return { value: count / COUNT_MULTIPLIER.billion, unit: 'billion' };
}
if (
count >= COUNT_MULTIPLIER.million ||
count / COUNT_MULTIPLIER.thousand >= 1000
) {
return { value: count / COUNT_MULTIPLIER.million, unit: 'million' };
}
if (count >= COUNT_MULTIPLIER.thousand) {
return { value: count / COUNT_MULTIPLIER.thousand, unit: 'thousand' };
}
// Default to million for small numbers
return { value: count / COUNT_MULTIPLIER.million, unit: 'million' };
};

const countFromUnit = (value: number, unit: string): number =>
value *
(COUNT_MULTIPLIER[unit as keyof typeof COUNT_MULTIPLIER] ||
COUNT_MULTIPLIER.million);
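The removed pair `countToUnit`/`countFromUnit` scaled raw sample counts to a display unit and back. A worked round trip with the same multipliers:

```typescript
const COUNT_MULTIPLIER = {
  thousand: 1_000,
  million: 1_000_000,
  billion: 1_000_000_000,
};

// 2,500,000 raw samples display as 2.5 million; converting the form value
// back multiplies by the chosen unit for the API payload.
const raw = 2_500_000;
const display = { value: raw / COUNT_MULTIPLIER.million, unit: 'million' };
const roundTripped = display.value * COUNT_MULTIPLIER.million;

console.log(display, roundTripped === raw); // { value: 2.5, unit: 'million' } true
```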

function MultiIngestionSettings(): JSX.Element {
const { user } = useAppContext();
const { notifications } = useNotifications();
@@ -220,6 +181,7 @@ function MultiIngestionSettings(): JSX.Element {

const showEditModal = (apiKey: IngestionKeyProps): void => {
setActiveAPIKey(apiKey);

handleFormReset();
setUpdatedTags(apiKey.tags || []);

@@ -462,90 +424,44 @@ function MultiIngestionSettings(): JSX.Element {
addEditLimitForm.resetFields();
};

/* eslint-disable sonarjs/cognitive-complexity */
const handleAddLimit = (
APIKey: IngestionKeyProps,
signalName: string,
): void => {
const {
dailyLimit,
secondsLimit,
dailyCount,
dailyCountUnit,
secondsCount,
secondsCountUnit,
} = addEditLimitForm.getFieldsValue();
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();

const payload: AddLimitProps = {
const payload = {
keyID: APIKey.id,
signal: signalName,
config: {},
};

const signalCfg = SIGNALS_CONFIG.find((cfg) => cfg.name === signalName);
if (!signalCfg) return;

// Only set size if usesSize is true
if (signalCfg.usesSize) {
if (!isUndefined(dailyLimit)) {
payload.config.day = {
...payload.config.day,
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
};
}
if (!isUndefined(secondsLimit)) {
payload.config.second = {
...payload.config.second,
},
};
}

if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
};
}
},
};
}

// Only set count if usesCount is true
if (signalCfg.usesCount) {
if (!isUndefined(dailyCount)) {
payload.config.day = {
...payload.config.day,
count: countFromUnit(dailyCount, dailyCountUnit || 'million'),
};
}
if (!isUndefined(secondsCount)) {
payload.config.second = {
...payload.config.second,
count: countFromUnit(secondsCount, secondsCountUnit || 'million'),
};
}
}

// If neither size nor count was given, skip
const noSizeProvided =
isUndefined(dailyLimit) && isUndefined(secondsLimit) && signalCfg.usesSize;
const noCountProvided =
isUndefined(dailyCount) && isUndefined(secondsCount) && signalCfg.usesCount;

if (
signalCfg.usesSize &&
signalCfg.usesCount &&
noSizeProvided &&
noCountProvided
) {
// Both size and count are effectively empty
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
// No need to save as no limit is provided, close the edit view and reset active signal and api key
setActiveSignal(null);
setActiveAPIKey(null);
setIsEditAddLimitOpen(false);
setUpdatedTags([]);
hideAddViewModal();
setHasCreateLimitForIngestionKeyError(false);
return;
}

if (!signalCfg.usesSize && !signalCfg.usesCount) {
// Edge case: If there's no count or size usage at all
setActiveSignal(null);
setActiveAPIKey(null);
setIsEditAddLimitOpen(false);
setUpdatedTags([]);
hideAddViewModal();
return;
}

@@ -556,73 +472,44 @@ function MultiIngestionSettings(): JSX.Element {
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
const {
dailyLimit,
secondsLimit,
dailyCount,
dailyCountUnit,
secondsCount,
secondsCountUnit,
} = addEditLimitForm.getFieldsValue();

const payload: UpdateLimitProps = {
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
limitID: signal.id,
signal: signal.signal,
config: {},
};

const signalCfg = SIGNALS_CONFIG.find((cfg) => cfg.name === signal.signal);
if (!signalCfg) return;

const noSizeProvided =
isUndefined(dailyLimit) && isUndefined(secondsLimit) && signalCfg.usesSize;
const noCountProvided =
isUndefined(dailyCount) && isUndefined(secondsCount) && signalCfg.usesCount;

// If the user cleared out all fields, remove the limit
if (noSizeProvided && noCountProvided) {
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
showDeleteLimitModal(APIKey, signal);

return;
}

if (signalCfg.usesSize) {
if (!isUndefined(dailyLimit)) {
payload.config.day = {
...payload.config.day,
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
};
}
if (!isUndefined(secondsLimit)) {
payload.config.second = {
...payload.config.second,
size: gbToBytes(secondsLimit),
};
}
},
};
}

if (signalCfg.usesCount) {
if (!isUndefined(dailyCount)) {
payload.config.day = {
...payload.config.day,
count: countFromUnit(dailyCount, dailyCountUnit || 'million'),
};
}
if (!isUndefined(secondsCount)) {
payload.config.second = {
...payload.config.second,
count: countFromUnit(secondsCount, secondsCountUnit || 'million'),
};
}
if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
},
};
}

updateLimitForIngestionKey(payload);
};
/* eslint-enable sonarjs/cognitive-complexity */

const bytesToGb = (size: number | undefined): number => {
if (!size) {
return 0;
}

return size / BYTES;
};
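`bytesToGb` divides by the `BYTES` constant declared earlier, 1073741824 = 2^30, so the "GB" in these field names are really GiB, matching the disabled GiB unit selector in the form. Worked in both directions, with a sketch of the inverse the handlers above call as `gbToBytes`:

```typescript
const BYTES = 1073741824; // 2 ** 30, one GiB in bytes

// Sketch of the inverse conversion referenced above as gbToBytes.
const gbToBytes = (gb: number): number => gb * BYTES;

console.log(gbToBytes(5)); // 5368709120 bytes sent to the API
console.log(5368709120 / BYTES); // 5 GiB shown back in the form
```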

@@ -630,12 +517,6 @@ function MultiIngestionSettings(): JSX.Element {
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
const dayCount = signal?.config?.day?.count;
const secondCount = signal?.config?.second?.count;

const dayCountConverted = countToUnit(dayCount || 0);
const secondCountConverted = countToUnit(secondCount || 0);

setActiveAPIKey(APIKey);
setActiveSignal({
...signal,
@@ -643,14 +524,11 @@ function MultiIngestionSettings(): JSX.Element {
...signal.config,
day: {
...signal.config?.day,
enabled:
!isNil(signal?.config?.day?.size) || !isNil(signal?.config?.day?.count),
enabled: !isNil(signal?.config?.day?.size),
},
second: {
...signal.config?.second,
enabled:
!isNil(signal?.config?.second?.size) ||
!isNil(signal?.config?.second?.count),
enabled: !isNil(signal?.config?.second?.size),
},
},
});
@@ -658,22 +536,15 @@ function MultiIngestionSettings(): JSX.Element {
addEditLimitForm.setFieldsValue({
dailyLimit: bytesToGb(signal?.config?.day?.size || 0),
secondsLimit: bytesToGb(signal?.config?.second?.size || 0),
enableDailyLimit:
!isNil(signal?.config?.day?.size) || !isNil(signal?.config?.day?.count),
enableSecondLimit:
!isNil(signal?.config?.second?.size) ||
!isNil(signal?.config?.second?.count),
dailyCount: dayCountConverted.value,
dailyCountUnit: dayCountConverted.unit,
secondsCount: secondCountConverted.value,
secondsCountUnit: secondCountConverted.unit,
enableDailyLimit: !isNil(signal?.config?.day?.size),
enableSecondLimit: !isNil(signal?.config?.second?.size),
});

setIsEditAddLimitOpen(true);
};

const onDeleteLimitHandler = (): void => {
if (activeSignal && activeSignal.id) {
if (activeSignal && activeSignal?.id) {
deleteLimitForKey(activeSignal.id);
}
};
@@ -701,13 +572,13 @@ function MultiIngestionSettings(): JSX.Element {
formatTimezoneAdjustedTimestamp,
);

// Convert array of limits to a dictionary for quick access
const limitsDict: Record<string, LimitProps> = {};
APIKey.limits?.forEach((limitItem: LimitProps) => {
limitsDict[limitItem.signal] = limitItem;
const limits: { [key: string]: LimitProps } = {};

APIKey.limits?.forEach((limit: LimitProps) => {
limits[limit.signal] = limit;
});
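Both sides of this hunk turn the `APIKey.limits` array into a signal-keyed dictionary so each render can test and fetch a signal's limit in constant time. The same construction in one expression, over a simplified `LimitProps`:

```typescript
// Simplified shape; the real LimitProps also carries config and metric data.
interface LimitProps {
  id: string;
  signal: string;
}

const limitList: LimitProps[] = [
  { id: 'l1', signal: 'logs' },
  { id: 'm1', signal: 'metrics' },
];

// Equivalent to the forEach above; a later entry for the same signal
// would overwrite an earlier one in both versions.
const limits: Record<string, LimitProps> = Object.fromEntries(
  limitList.map((limit) => [limit.signal, limit]),
);

const hasLimits = (signal: string): boolean => !!limits[signal];
console.log(hasLimits('logs'), hasLimits('traces')); // true false
```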
|
||||
|
||||
const hasLimits = (signalName: string): boolean => !!limitsDict[signalName];
|
||||
const hasLimits = (signal: string): boolean => !!limits[signal];
|
||||
|
||||
const items: CollapseProps['items'] = [
|
||||
{
|
||||
@@ -743,9 +614,11 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
onClick={(e): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
|
||||
showEditModal(APIKey);
|
||||
}}
|
||||
/>
|
||||
|
||||
<Button
|
||||
className="periscope-btn ghost"
|
||||
icon={<Trash2 color={Color.BG_CHERRY_500} size={14} />}
|
||||
@@ -797,23 +670,18 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
|
||||
<div className="limits-data">
|
||||
<div className="signals">
|
||||
{SIGNALS_CONFIG.map((signalCfg) => {
|
||||
const signalName = signalCfg.name;
|
||||
const limit = limitsDict[signalName];
|
||||
|
||||
const hasValidDayLimit =
|
||||
limit?.config?.day?.size !== undefined ||
|
||||
limit?.config?.day?.count !== undefined;
|
||||
const hasValidSecondLimit =
|
||||
limit?.config?.second?.size !== undefined ||
|
||||
limit?.config?.second?.count !== undefined;
|
||||
{SIGNALS.map((signal) => {
|
||||
const hasValidDayLimit = !isNil(limits[signal]?.config?.day?.size);
|
||||
const hasValidSecondLimit = !isNil(
|
||||
limits[signal]?.config?.second?.size,
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="signal" key={signalName}>
|
||||
<div className="signal" key={signal}>
|
||||
<div className="header">
|
||||
<div className="signal-name">{signalName}</div>
|
||||
<div className="signal-name">{signal}</div>
|
||||
<div className="actions">
|
||||
{hasLimits(signalName) ? (
|
||||
{hasLimits(signal) ? (
|
||||
<>
|
||||
<Button
|
||||
className="periscope-btn ghost"
|
||||
@@ -822,9 +690,10 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
onClick={(e): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
enableEditLimitMode(APIKey, limit);
|
||||
enableEditLimitMode(APIKey, limits[signal]);
|
||||
}}
|
||||
/>
|
||||
|
||||
<Button
|
||||
className="periscope-btn ghost"
|
||||
icon={<Trash2 color={Color.BG_CHERRY_500} size={14} />}
|
||||
@@ -832,7 +701,7 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
onClick={(e): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
showDeleteLimitModal(APIKey, limit);
|
||||
showDeleteLimitModal(APIKey, limits[signal]);
|
||||
}}
|
||||
/>
|
||||
</>
|
||||
@@ -843,12 +712,14 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
shape="round"
|
||||
icon={<PlusIcon size={14} />}
|
||||
disabled={!!(activeAPIKey?.id === APIKey.id && activeSignal)}
|
||||
// eslint-disable-next-line sonarjs/no-identical-functions
|
||||
onClick={(e): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
|
||||
enableEditLimitMode(APIKey, {
|
||||
id: signalName,
|
||||
signal: signalName,
|
||||
id: signal,
|
||||
signal,
|
||||
config: {},
|
||||
});
|
||||
}}
|
||||
@@ -861,7 +732,7 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
|
||||
<div className="signal-limit-values">
|
||||
{activeAPIKey?.id === APIKey.id &&
|
||||
activeSignal?.signal === signalName &&
|
||||
activeSignal?.signal === signal &&
|
||||
isEditAddLimitOpen ? (
|
||||
<Form
|
||||
name="edit-ingestion-key-limit-form"
|
||||
@@ -869,8 +740,8 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
form={addEditLimitForm}
|
||||
autoComplete="off"
|
||||
initialValues={{
|
||||
dailyLimit: bytesToGb(limit?.config?.day?.size || 0),
|
||||
secondsLimit: bytesToGb(limit?.config?.second?.size || 0),
|
||||
dailyLimit: bytesToGb(limits[signal]?.config?.day?.size),
|
||||
secondsLimit: bytesToGb(limits[signal]?.config?.second?.size),
|
||||
}}
|
||||
className="edit-ingestion-key-limit-form"
|
||||
>
|
||||
@@ -885,20 +756,16 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
size="small"
|
||||
checked={activeSignal?.config?.day?.enabled}
|
||||
onChange={(value): void => {
|
||||
setActiveSignal((prev) =>
|
||||
prev
|
||||
? {
|
||||
...prev,
|
||||
config: {
|
||||
...prev.config,
|
||||
day: {
|
||||
...prev.config?.day,
|
||||
enabled: value,
|
||||
},
|
||||
},
|
||||
}
|
||||
: null,
|
||||
);
|
||||
setActiveSignal({
|
||||
...activeSignal,
|
||||
config: {
|
||||
...activeSignal.config,
|
||||
day: {
|
||||
...activeSignal.config?.day,
|
||||
enabled: value,
|
||||
},
|
||||
},
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Form.Item>
|
||||
@@ -908,87 +775,50 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
Add a limit for data ingested daily
|
||||
</div>
|
||||
</div>
|
||||
{signalCfg.usesSize && (
|
||||
<div className="size">
|
||||
{activeSignal?.config?.day?.enabled ? (
|
||||
<Form.Item name="dailyLimit" key="dailyLimit">
|
||||
<InputNumber
|
||||
disabled={!activeSignal?.config?.day?.enabled}
|
||||
addonAfter={
|
||||
<Select defaultValue="GiB" disabled>
|
||||
<Option value="TiB">TiB</Option>
|
||||
<Option value="GiB">GiB</Option>
|
||||
<Option value="MiB">MiB</Option>
|
||||
<Option value="KiB">KiB</Option>
|
||||
</Select>
|
||||
}
|
||||
/>
|
||||
</Form.Item>
|
||||
) : (
|
||||
<div className="no-limit">
|
||||
<Infinity size={16} /> NO LIMIT
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
{signalCfg.usesCount && (
|
||||
<div className="count">
|
||||
{activeSignal?.config?.day?.enabled ? (
|
||||
<Form.Item name="dailyCount" key="dailyCount">
|
||||
<InputNumber
|
||||
placeholder="Enter max # of samples/day"
|
||||
addonAfter={
|
||||
<Form.Item
|
||||
name="dailyCountUnit"
|
||||
noStyle
|
||||
initialValue="million"
|
||||
>
|
||||
<Select
|
||||
style={{
|
||||
width: 90,
|
||||
}}
|
||||
>
|
||||
<Option value="thousand">Thousand</Option>
|
||||
<Option value="million">Million</Option>
|
||||
<Option value="billion">Billion</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
}
|
||||
/>
|
||||
</Form.Item>
|
||||
) : (
|
||||
<div className="no-limit">
|
||||
<Infinity size={16} /> NO LIMIT
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<div className="size">
|
||||
{activeSignal?.config?.day?.enabled ? (
|
||||
<Form.Item name="dailyLimit" key="dailyLimit">
|
||||
<InputNumber
|
||||
disabled={!activeSignal?.config?.day?.enabled}
|
||||
key="dailyLimit"
|
||||
addonAfter={
|
||||
<Select defaultValue="GiB" disabled>
|
||||
<Option value="TiB"> TiB</Option>
|
||||
<Option value="GiB"> GiB</Option>
|
||||
<Option value="MiB"> MiB </Option>
|
||||
<Option value="KiB"> KiB </Option>
|
||||
</Select>
|
||||
}
|
||||
/>
|
||||
</Form.Item>
|
||||
) : (
|
||||
<div className="no-limit">
|
||||
<Infinity size={16} /> NO LIMIT
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="second-limit">
|
||||
<div className="heading">
|
||||
<div className="title">
|
||||
Per Second limit
|
||||
Per Second limit{' '}
|
||||
<div className="limit-enable-disable-toggle">
|
||||
<Form.Item name="enableSecondLimit">
|
||||
<Switch
|
||||
size="small"
|
||||
checked={activeSignal?.config?.second?.enabled}
|
||||
onChange={(value): void => {
|
||||
setActiveSignal((prev) =>
|
||||
prev
|
||||
? {
|
||||
...prev,
|
||||
config: {
|
||||
...prev.config,
|
||||
second: {
|
||||
...prev.config?.second,
|
||||
enabled: value,
|
||||
},
|
||||
},
|
||||
}
|
||||
: null,
|
||||
);
|
||||
setActiveSignal({
|
||||
...activeSignal,
|
||||
config: {
|
||||
...activeSignal.config,
|
||||
second: {
|
||||
...activeSignal.config?.second,
|
||||
enabled: value,
|
||||
},
|
||||
},
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Form.Item>
|
||||
@@ -998,68 +828,37 @@ function MultiIngestionSettings(): JSX.Element {
|
||||
Add a limit for data ingested every second
|
||||
</div>
|
||||
</div>
|
||||
{signalCfg.usesSize && (
|
||||
<div className="size">
|
||||
{activeSignal?.config?.second?.enabled ? (
|
||||
<Form.Item name="secondsLimit" key="secondsLimit">
|
||||
<InputNumber
|
||||
disabled={!activeSignal?.config?.second?.enabled}
|
||||
addonAfter={
|
||||
<Select defaultValue="GiB" disabled>
|
||||
<Option value="TiB">TiB</Option>
|
||||
<Option value="GiB">GiB</Option>
|
||||
<Option value="MiB">MiB</Option>
|
||||
<Option value="KiB">KiB</Option>
|
||||
</Select>
|
||||
}
|
||||
/>
|
||||
</Form.Item>
|
||||
) : (
|
||||
<div className="no-limit">
|
||||
<Infinity size={16} /> NO LIMIT
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
{signalCfg.usesCount && (
|
||||
<div className="count">
|
||||
{activeSignal?.config?.second?.enabled ? (
|
||||
<Form.Item name="secondsCount" key="secondsCount">
|
||||
<InputNumber
|
||||
placeholder="Enter max # of samples/s"
|
||||
addonAfter={
|
||||
<Form.Item
|
||||
name="secondsCountUnit"
|
||||
noStyle
|
||||
initialValue="million"
|
||||
>
|
||||
<Select
|
||||
style={{
|
||||
width: 90,
|
||||
}}
|
||||
>
|
||||
<Option value="thousand">Thousand</Option>
|
||||
<Option value="million">Million</Option>
|
||||
<Option value="billion">Billion</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
}
|
||||
/>
|
||||
</Form.Item>
|
||||
) : (
|
||||
<div className="no-limit">
|
||||
<Infinity size={16} /> NO LIMIT
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="size">
|
||||
{activeSignal?.config?.second?.enabled ? (
|
||||
<Form.Item name="secondsLimit" key="secondsLimit">
|
||||
<InputNumber
|
||||
key="secondsLimit"
|
||||
disabled={!activeSignal?.config?.second?.enabled}
|
||||
addonAfter={
|
||||
<Select defaultValue="GiB" disabled>
|
||||
<Option value="TiB"> TiB</Option>
|
||||
<Option value="GiB"> GiB</Option>
|
||||
<Option value="MiB"> MiB </Option>
|
||||
<Option value="KiB"> KiB </Option>
|
||||
</Select>
|
||||
}
/>
</Form.Item>
) : (
<div className="no-limit">
<Infinity size={16} /> NO LIMIT
</div>
)}
</div>
</div>
</div>

{activeAPIKey?.id === APIKey.id &&
activeSignal.signal === signalName &&
activeSignal.signal === signal &&
!isLoadingLimitForKey &&
hasCreateLimitForIngestionKeyError &&
createLimitForIngestionKeyError &&
createLimitForIngestionKeyError?.error && (
<div className="error">
{createLimitForIngestionKeyError?.error}
@@ -1067,17 +866,17 @@ function MultiIngestionSettings(): JSX.Element {
)}

{activeAPIKey?.id === APIKey.id &&
activeSignal.signal === signalName &&
activeSignal.signal === signal &&
!isLoadingLimitForKey &&
hasUpdateLimitForIngestionKeyError &&
updateLimitForIngestionKeyError?.error && (
updateLimitForIngestionKeyError && (
<div className="error">
{updateLimitForIngestionKeyError?.error}
</div>
)}

{activeAPIKey?.id === APIKey.id &&
activeSignal.signal === signalName &&
activeSignal.signal === signal &&
isEditAddLimitOpen && (
<div className="signal-limit-save-discard">
<Button
@@ -1091,10 +890,10 @@ function MultiIngestionSettings(): JSX.Element {
isLoadingLimitForKey || isLoadingUpdatedLimitForKey
}
onClick={(): void => {
if (!hasLimits(signalName)) {
handleAddLimit(APIKey, signalName);
if (!hasLimits(signal)) {
handleAddLimit(APIKey, signal);
} else {
handleUpdateLimit(APIKey, limitsDict[signalName]);
handleUpdateLimit(APIKey, limits[signal]);
}
}}
>
@@ -1116,99 +915,55 @@ function MultiIngestionSettings(): JSX.Element {
</Form>
) : (
<div className="signal-limit-view-mode">
{/* DAILY limit usage/limit */}
<div className="signal-limit-value">
<div className="limit-type">
Daily <Minus size={16} />
Daily <Minus size={16} />{' '}
</div>
<div className="limit-value">
{/* Size (if usesSize) */}
{signalCfg.usesSize &&
(hasValidDayLimit &&
limit?.config?.day?.size !== undefined ? (
<>
{getYAxisFormattedValue(
(limit?.metric?.day?.size || 0).toString(),
'bytes',
)}{' '}
/{' '}
{getYAxisFormattedValue(
(limit?.config?.day?.size || 0).toString(),
'bytes',
)}
</>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
))}

{/* Count (if usesCount) */}
{signalCfg.usesCount &&
(limit?.config?.day?.count !== undefined ? (
<div style={{ marginTop: 4 }}>
{countToUnit(
limit?.metric?.day?.count || 0,
).value.toFixed(2)}{' '}
{countToUnit(limit?.metric?.day?.count || 0).unit} /{' '}
{countToUnit(
limit?.config?.day?.count || 0,
).value.toFixed(2)}{' '}
{countToUnit(limit?.config?.day?.count || 0).unit}
</div>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
))}
<div className="limit-value">
{hasValidDayLimit ? (
<>
{getYAxisFormattedValue(
(limits[signal]?.metric?.day?.size || 0).toString(),
'bytes',
)}{' '}
/{' '}
{getYAxisFormattedValue(
(limits[signal]?.config?.day?.size || 0).toString(),
'bytes',
)}
</>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
)}
</div>
</div>

{/* SECOND limit usage/limit */}
<div className="signal-limit-value">
<div className="limit-type">
Seconds <Minus size={16} />
</div>
<div className="limit-value">
{/* Size (if usesSize) */}
{signalCfg.usesSize &&
(hasValidSecondLimit &&
limit?.config?.second?.size !== undefined ? (
<>
{getYAxisFormattedValue(
(limit?.metric?.second?.size || 0).toString(),
'bytes',
)}{' '}
/{' '}
{getYAxisFormattedValue(
(limit?.config?.second?.size || 0).toString(),
'bytes',
)}
</>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
))}

{/* Count (if usesCount) */}
{signalCfg.usesCount &&
(limit?.config?.second?.count !== undefined ? (
<div style={{ marginTop: 4 }}>
{countToUnit(
limit?.metric?.second?.count || 0,
).value.toFixed(2)}{' '}
{countToUnit(limit?.metric?.second?.count || 0).unit} /{' '}
{countToUnit(
limit?.config?.second?.count || 0,
).value.toFixed(2)}{' '}
{countToUnit(limit?.config?.second?.count || 0).unit}
</div>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
))}
<div className="limit-value">
{hasValidSecondLimit ? (
<>
{getYAxisFormattedValue(
(limits[signal]?.metric?.second?.size || 0).toString(),
'bytes',
)}{' '}
/{' '}
{getYAxisFormattedValue(
(limits[signal]?.config?.second?.size || 0).toString(),
'bytes',
)}
</>
) : (
<>
<Infinity size={16} /> NO LIMIT
</>
)}
</div>
</div>
</div>
@@ -1278,6 +1033,7 @@ function MultiIngestionSettings(): JSX.Element {
className="learn-more"
rel="noreferrer"
>
{' '}
Learn more <ArrowUpRight size={14} />
</a>
</Typography.Text>

@@ -121,25 +121,23 @@ const InfinityTable = forwardRef<TableVirtuosoHandle, InfinityTableProps>(
const tableHeader = useCallback(
() => (
<tr>
{tableColumns
.filter((column) => column.key)
.map((column) => {
const isDragColumn = column.key !== 'expand';
{tableColumns.map((column) => {
const isDragColumn = column.key !== 'expand';

return (
<TableHeaderCellStyled
$isLogIndicator={column.key === 'state-indicator'}
$isDarkMode={isDarkMode}
$isDragColumn={isDragColumn}
key={column.key}
fontSize={tableViewProps?.fontSize}
// eslint-disable-next-line react/jsx-props-no-spreading
{...(isDragColumn && { className: 'dragHandler' })}
>
{(column.title as string).replace(/^\w/, (c) => c.toUpperCase())}
</TableHeaderCellStyled>
);
})}
return (
<TableHeaderCellStyled
$isLogIndicator={column.key === 'state-indicator'}
$isDarkMode={isDarkMode}
$isDragColumn={isDragColumn}
key={column.key}
fontSize={tableViewProps?.fontSize}
// eslint-disable-next-line react/jsx-props-no-spreading
{...(isDragColumn && { className: 'dragHandler' })}
>
{(column.title as string).replace(/^\w/, (c) => c.toUpperCase())}
</TableHeaderCellStyled>
);
})}
</tr>
),
[tableColumns, isDarkMode, tableViewProps?.fontSize],

@@ -29,7 +29,7 @@ export const TableCellStyled = styled.td<TableHeaderCellStyledProps>`
props.$isDarkMode ? 'inherit' : themeColors.whiteCream};

${({ $isLogIndicator }): string =>
$isLogIndicator ? 'padding: 0 0 0 8px;width: 15px;' : ''}
$isLogIndicator ? 'padding: 0 0 0 8px;' : ''}
color: ${(props): string =>
props.$isDarkMode ? themeColors.white : themeColors.bckgGrey};
`;

@@ -5,26 +5,7 @@ import { FontSize, OptionsQuery } from './types';
export const URL_OPTIONS = 'options';

export const defaultOptionsQuery: OptionsQuery = {
selectColumns: [
{
key: 'timestamp',
dataType: DataTypes.String,
type: 'tag',
isColumn: true,
isJSON: false,
id: 'timestamp--string--tag--true',
isIndexed: false,
},
{
key: 'body',
dataType: DataTypes.String,
type: 'tag',
isColumn: true,
isJSON: false,
id: 'body--string--tag--true',
isIndexed: false,
},
],
selectColumns: [],
maxLines: 2,
format: 'raw',
fontSize: FontSize.SMALL,

@@ -169,15 +169,6 @@ const useOptionsMenu = ({

const searchedAttributeKeys = useMemo(() => {
if (searchedAttributesData?.payload?.attributeKeys?.length) {
if (dataSource === DataSource.LOGS) {
// add timestamp and body to the list of attributes
return [
...defaultOptionsQuery.selectColumns,
...searchedAttributesData.payload.attributeKeys.filter(
(attribute) => attribute.key !== 'body',
),
];
}
return searchedAttributesData.payload.attributeKeys;
}
if (dataSource === DataSource.TRACES) {
@@ -207,17 +198,12 @@ const useOptionsMenu = ({
);

const optionsFromAttributeKeys = useMemo(() => {
const filteredAttributeKeys = searchedAttributeKeys.filter((item) => {
// For other data sources, only filter out 'body' if it exists
if (dataSource !== DataSource.LOGS) {
return item.key !== 'body';
}
// For LOGS, keep all keys
return true;
});
const filteredAttributeKeys = searchedAttributeKeys.filter(
(item) => item.key !== 'body',
);

return getOptionsFromKeys(filteredAttributeKeys, selectedColumnKeys);
}, [dataSource, searchedAttributeKeys, selectedColumnKeys]);
}, [searchedAttributeKeys, selectedColumnKeys]);

const handleRedirectWithOptionsData = useCallback(
(newQueryData: OptionsQuery) => {

@@ -95,7 +95,6 @@ function QueryBuilderSearch({
isMulti,
isFetching,
setSearchKey,
setSearchValue,
searchKey,
key,
exampleQueries,
@@ -146,11 +145,7 @@ function QueryBuilderSearch({

const tagEditHandler = (value: string): void => {
updateTag(value);
if (isInfraMonitoring) {
setSearchValue(value);
} else {
handleSearch(value);
}
handleSearch(value);
};

const isDisabled = !!searchValue;

@@ -153,7 +153,6 @@ export const useAutoComplete = (
isMulti,
isFetching,
setSearchKey,
setSearchValue,
searchKey,
key,
exampleQueries,
@@ -173,7 +172,6 @@ interface IAutoComplete {
isMulti: boolean;
isFetching: boolean;
setSearchKey: (value: string) => void;
setSearchValue: (value: string) => void;
searchKey: string;
key: string;
exampleQueries: TagFilter[];

@@ -8,7 +8,7 @@ import RouteTab from 'components/RouteTab';
import Spinner from 'components/Spinner';
import ROUTES from 'constants/routes';
import history from 'lib/history';
import { useEffect, useMemo } from 'react';
import { useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useLocation } from 'react-router-dom';

@@ -80,11 +80,6 @@ function AlertDetails(): JSX.Element {
alertDetailsResponse,
} = useGetAlertRuleDetails();

useEffect(() => {
const alertTitle = alertDetailsResponse?.payload?.data.alert;
document.title = alertTitle || document.title;
}, [alertDetailsResponse?.payload?.data.alert, isRefetching]);

if (
isError ||
!isValidRuleId ||

@@ -5,12 +5,12 @@ import { TabRoutes } from 'components/RouteTab/types';
import history from 'lib/history';
import { useLocation } from 'react-use';

import { Hosts, Kubernetes } from './constants';
import { Hosts } from './constants';

export default function InfrastructureMonitoringPage(): JSX.Element {
const { pathname } = useLocation();

const routes: TabRoutes[] = [Hosts, Kubernetes];
const routes: TabRoutes[] = [Hosts];

return (
<div className="infra-monitoring-module-container">

@@ -4,7 +4,6 @@ import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import NewDashboard from 'container/NewDashboard';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useEffect } from 'react';
import { ErrorType } from 'types/common';

function DashboardPage(): JSX.Element {
@@ -18,11 +17,6 @@ function DashboardPage(): JSX.Element {
(dashboardResponse?.error as AxiosError)?.response?.data?.errorType
: 'Something went wrong';

useEffect(() => {
const dashboardTitle = dashboardResponse.data?.data.title;
document.title = dashboardTitle || document.title;
}, [dashboardResponse.data?.data.title, isFetching]);

if (isError && !isFetching && errorMessage === ErrorType.NotFound) {
return <NotFound />;
}

@@ -1,14 +1,3 @@
export interface LimitConfig {
size?: number;
count?: number; // mainly used for metrics
enabled?: boolean;
}

export interface LimitSettings {
day?: LimitConfig;
second?: LimitConfig;
}

export interface LimitProps {
id: string;
signal: string;
@@ -16,20 +5,56 @@ export interface LimitProps {
key_id?: string;
created_at?: string;
updated_at?: string;
config?: LimitSettings;
metric?: LimitSettings;
config?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
metric?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
}

export interface AddLimitProps {
keyID: string;
signal: string;
config: LimitSettings;
config: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
}

export interface UpdateLimitProps {
limitID: string;
signal: string;
config: LimitSettings;
config: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
}

export interface LimitSuccessProps {

go.mod
@@ -9,8 +9,6 @@ require (
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
github.com/SigNoz/signoz-otel-collector v0.111.16
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
github.com/antonmedv/expr v1.15.3
github.com/auth0/go-jwt-middleware v1.0.1
github.com/cespare/xxhash/v2 v2.3.0
@@ -32,7 +30,7 @@ require (
github.com/knadh/koanf v1.5.0
github.com/knadh/koanf/v2 v2.1.1
github.com/mailru/easyjson v0.7.7
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/mattn/go-sqlite3 v1.14.24
github.com/oklog/oklog v0.3.2
github.com/open-telemetry/opamp-go v0.5.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0
@@ -69,7 +67,6 @@ require (
golang.org/x/net v0.33.0
golang.org/x/oauth2 v0.23.0
golang.org/x/text v0.21.0
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.34.2
gopkg.in/segmentio/analytics-go.v3 v3.1.0
gopkg.in/yaml.v2 v2.4.0
@@ -226,6 +223,7 @@ require (
google.golang.org/api v0.199.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.1 // indirect
k8s.io/client-go v0.31.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect

go.sum
@@ -70,12 +70,6 @@ github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PU
github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974/go.mod h1:fpiHtiboLJpIE5TtkQfiWx6xtnlA+uWmv+N9opETqKY=
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 h1:G2JzCrqdeOTtAn4tDFZEg5gCAEYVRXcddG3ZlrFMumo=
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974/go.mod h1:YtDal1xBRQfPRNo7iSU3W37RGT0jMW7Rnzk6EON3a4M=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -521,8 +515,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=

@@ -6,13 +6,13 @@ import (
"reflect"
"time"

go_cache "github.com/patrickmn/go-cache"
gocache "github.com/patrickmn/go-cache"
"go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory"
)

type provider struct {
cc *go_cache.Cache
type memory struct {
cc *gocache.Cache
}

func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
@@ -20,16 +20,16 @@ func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
}

func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
return &provider{cc: go_cache.New(config.Memory.TTL, config.Memory.CleanupInterval)}, nil
return &memory{cc: gocache.New(config.Memory.TTL, config.Memory.CleanupInterval)}, nil
}

// Connect does nothing
func (c *provider) Connect(_ context.Context) error {
func (c *memory) Connect(_ context.Context) error {
return nil
}

// Store stores the data in the cache
func (c *provider) Store(_ context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
func (c *memory) Store(_ context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
// check if the data being passed is a pointer and is not nil
rv := reflect.ValueOf(data)
if rv.Kind() != reflect.Pointer || rv.IsNil() {
@@ -41,7 +41,7 @@ func (c *provider) Store(_ context.Context, cacheKey string, data cache.Cacheabl
}

// Retrieve retrieves the data from the cache
func (c *provider) Retrieve(_ context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
func (c *memory) Retrieve(_ context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
// check if the destination being passed is a pointer and is not nil
dstv := reflect.ValueOf(dest)
if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
@@ -70,7 +70,7 @@ func (c *provider) Retrieve(_ context.Context, cacheKey string, dest cache.Cache
}

// SetTTL sets the TTL for the cache entry
func (c *provider) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
func (c *memory) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
item, found := c.cc.Get(cacheKey)
if !found {
return
@@ -79,23 +79,23 @@ func (c *provider) SetTTL(_ context.Context, cacheKey string, ttl time.Duration)
}

// Remove removes the cache entry
func (c *provider) Remove(_ context.Context, cacheKey string) {
func (c *memory) Remove(_ context.Context, cacheKey string) {
c.cc.Delete(cacheKey)
}

// BulkRemove removes the cache entries
func (c *provider) BulkRemove(_ context.Context, cacheKeys []string) {
func (c *memory) BulkRemove(_ context.Context, cacheKeys []string) {
for _, cacheKey := range cacheKeys {
c.cc.Delete(cacheKey)
}
}

// Close does nothing
func (c *provider) Close(_ context.Context) error {
func (c *memory) Close(_ context.Context) error {
return nil
}

// Configuration returns the cache configuration
func (c *provider) Configuration() *cache.Memory {
func (c *memory) Configuration() *cache.Memory {
return nil
}
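As an aside, a minimal sketch of wiring the renamed memory provider up through its factory, using only the identifiers visible in this diff; the memorycache import path is an assumption, not confirmed by the source:

	package main

	import (
		"context"
		"time"

		"go.signoz.io/signoz/pkg/cache"
		"go.signoz.io/signoz/pkg/factory"
		memorycache "go.signoz.io/signoz/pkg/cache/memorycache" // hypothetical import path
	)

	func main() {
		// NewFactory().New mirrors the factory.ProviderFactory signature shown above.
		c, err := memorycache.NewFactory().New(
			context.Background(),
			factory.ProviderSettings{},
			cache.Config{Provider: "memory", Memory: cache.Memory{TTL: time.Minute, CleanupInterval: time.Minute}},
		)
		if err != nil {
			panic(err)
		}
		_ = c.Connect(context.Background()) // Connect is a no-op for the in-memory provider
	}
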
@@ -8,20 +8,20 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory/providertest"
_cache "go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory"
)

// TestNew tests the New function
func TestNew(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
assert.NotNil(t, c)
assert.NotNil(t, c.(*provider).cc)
assert.NotNil(t, c.(*memory).cc)
assert.NoError(t, c.Connect(context.Background()))
}

@@ -56,11 +56,11 @@ func (dce DCacheableEntity) UnmarshalBinary(data []byte) error {
// TestStore tests the Store function
// this should fail because of nil pointer error
func TestStoreWithNilPointer(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
var storeCacheableEntity *CacheableEntity
assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
@@ -68,22 +68,22 @@ func TestStoreWithNilPointer(t *testing.T) {

// this should fail because of no pointer error
func TestStoreWithStruct(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
var storeCacheableEntity CacheableEntity
assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
}

func TestStoreWithNonNilPointer(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -95,11 +95,11 @@ func TestStoreWithNonNilPointer(t *testing.T) {

// TestRetrieve tests the Retrieve function
func TestRetrieveWithNilPointer(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -112,15 +112,15 @@ func TestRetrieveWithNilPointer(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWitNonPointer(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -133,15 +133,15 @@ func TestRetrieveWitNonPointer(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWithDifferentTypes(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -153,15 +153,11 @@ func TestRetrieveWithDifferentTypes(t *testing.T) {
retrieveCacheableEntity := new(DCacheableEntity)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.Error(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
}

func TestRetrieveWithSameTypes(t *testing.T) {
opts := cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: _cache.Memory{TTL: 10 * time.Second, CleanupInterval: 10 * time.Second}})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -173,13 +169,13 @@ func TestRetrieveWithSameTypes(t *testing.T) {
retrieveCacheableEntity := new(CacheableEntity)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
}

// TestSetTTL tests the SetTTL function
func TestSetTTL(t *testing.T) {
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second}})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: _cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second}})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -191,7 +187,7 @@ func TestSetTTL(t *testing.T) {
time.Sleep(3 * time.Second)
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)

assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
@@ -199,18 +195,17 @@ func TestSetTTL(t *testing.T) {
time.Sleep(3 * time.Second)
retrieveStatus, err = c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, retrieveCacheableEntity, storeCacheableEntity)
}

// TestRemove tests the Remove function
func TestRemove(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
Value: 1,
@@ -222,17 +217,17 @@ func TestRemove(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
}

// TestBulkRemove tests the BulkRemove function
func TestBulkRemove(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -246,22 +241,22 @@ func TestBulkRemove(t *testing.T) {

retrieveStatus, err := c.Retrieve(context.Background(), "key1", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)

retrieveStatus, err = c.Retrieve(context.Background(), "key2", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
}

// TestCache tests the cache
func TestCache(t *testing.T) {
opts := cache.Memory{
opts := _cache.Memory{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c, err := New(context.Background(), providertest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
c, err := New(context.Background(), factory.ProviderSettings{}, _cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
storeCacheableEntity := &CacheableEntity{
Key: "some-random-key",
@@ -272,7 +267,7 @@ func TestCache(t *testing.T) {
assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
c.Remove(context.Background(), "key")
}
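The fixtures in these tests imply that cache.CacheableEntity is satisfied via binary (un)marshaling — one hunk header above references a DCacheableEntity.UnmarshalBinary method. A minimal sketch of such a fixture, with JSON as an assumed encoding and only the Key/Value fields visible in the tests:

	import "encoding/json"

	type CacheableEntity struct {
		Key   string
		Value int
	}

	// MarshalBinary/UnmarshalBinary make the struct storable by both the
	// in-memory and redis providers shown in this diff.
	func (ce *CacheableEntity) MarshalBinary() ([]byte, error) {
		return json.Marshal(ce)
	}

	func (ce *CacheableEntity) UnmarshalBinary(data []byte) error {
		return json.Unmarshal(data, ce)
	}
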
@@ -7,31 +7,31 @@ import (
"time"

"github.com/go-redis/redis/v8"
"go.signoz.io/signoz/pkg/cache"
_cache "go.signoz.io/signoz/pkg/cache"
"go.signoz.io/signoz/pkg/factory"
"go.uber.org/zap"
)

type provider struct {
type cache struct {
client *redis.Client
opts cache.Redis
opts _cache.Redis
}

func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
func NewFactory() factory.ProviderFactory[_cache.Cache, _cache.Config] {
return factory.NewProviderFactory(factory.MustNewName("redis"), New)
}

func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
return &provider{opts: config.Redis}, nil
func New(ctx context.Context, settings factory.ProviderSettings, config _cache.Config) (_cache.Cache, error) {
return &cache{opts: config.Redis}, nil
}

// WithClient creates a new cache with the given client
func WithClient(client *redis.Client) *provider {
return &provider{client: client}
func WithClient(client *redis.Client) *cache {
return &cache{client: client}
}

// Connect connects to the redis server
func (c *provider) Connect(_ context.Context) error {
func (c *cache) Connect(_ context.Context) error {
c.client = redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("%s:%d", c.opts.Host, c.opts.Port),
Password: c.opts.Password,
@@ -41,24 +41,24 @@ func (c *provider) Connect(_ context.Context) error {
}

// Store stores the data in the cache
func (c *provider) Store(ctx context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
func (c *cache) Store(ctx context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
return c.client.Set(ctx, cacheKey, data, ttl).Err()
}

// Retrieve retrieves the data from the cache
func (c *provider) Retrieve(ctx context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
func (c *cache) Retrieve(ctx context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
err := c.client.Get(ctx, cacheKey).Scan(dest)
if err != nil {
if errors.Is(err, redis.Nil) {
return cache.RetrieveStatusKeyMiss, nil
return _cache.RetrieveStatusKeyMiss, nil
}
return cache.RetrieveStatusError, err
return _cache.RetrieveStatusError, err
}
return cache.RetrieveStatusHit, nil
return _cache.RetrieveStatusHit, nil
}

// SetTTL sets the TTL for the cache entry
func (c *provider) SetTTL(ctx context.Context, cacheKey string, ttl time.Duration) {
func (c *cache) SetTTL(ctx context.Context, cacheKey string, ttl time.Duration) {
err := c.client.Expire(ctx, cacheKey, ttl).Err()
if err != nil {
zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
@@ -66,34 +66,34 @@ func (c *provider) SetTTL(ctx context.Context, cacheKey string, ttl time.Duratio
}

// Remove removes the cache entry
func (c *provider) Remove(ctx context.Context, cacheKey string) {
func (c *cache) Remove(ctx context.Context, cacheKey string) {
c.BulkRemove(ctx, []string{cacheKey})
}

// BulkRemove removes the cache entries
func (c *provider) BulkRemove(ctx context.Context, cacheKeys []string) {
func (c *cache) BulkRemove(ctx context.Context, cacheKeys []string) {
if err := c.client.Del(ctx, cacheKeys...).Err(); err != nil {
zap.L().Error("error deleting cache keys", zap.Strings("cacheKeys", cacheKeys), zap.Error(err))
}
}

// Close closes the connection to the redis server
func (c *provider) Close(_ context.Context) error {
func (c *cache) Close(_ context.Context) error {
return c.client.Close()
}

// Ping pings the redis server
func (c *provider) Ping(ctx context.Context) error {
func (c *cache) Ping(ctx context.Context) error {
return c.client.Ping(ctx).Err()
}

// GetClient returns the redis client
func (c *provider) GetClient() *redis.Client {
func (c *cache) GetClient() *redis.Client {
return c.client
}

// GetTTL returns the TTL for the cache entry
func (c *provider) GetTTL(ctx context.Context, cacheKey string) time.Duration {
func (c *cache) GetTTL(ctx context.Context, cacheKey string) time.Duration {
ttl, err := c.client.TTL(ctx, cacheKey).Result()
if err != nil {
zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
@@ -102,12 +102,12 @@ func (c *provider) GetTTL(ctx context.Context, cacheKey string) time.Duration {
}

// GetKeys returns the keys matching the pattern
func (c *provider) GetKeys(ctx context.Context, pattern string) ([]string, error) {
func (c *cache) GetKeys(ctx context.Context, pattern string) ([]string, error) {
return c.client.Keys(ctx, pattern).Result()
}

// GetKeysWithTTL returns the keys matching the pattern with their TTL
func (c *provider) GetKeysWithTTL(ctx context.Context, pattern string) (map[string]time.Duration, error) {
func (c *cache) GetKeysWithTTL(ctx context.Context, pattern string) (map[string]time.Duration, error) {
keys, err := c.GetKeys(ctx, pattern)
if err != nil {
return nil, err
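For illustration, the WithClient constructor kept above lets callers inject a pre-built client instead of going through Connect, which is handy in tests; a sketch, assuming a rediscache package name and a reachable server at localhost:6379:

	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	c := rediscache.WithClient(client) // skip Connect; reuse the injected client
	if err := c.Ping(context.Background()); err != nil {
		log.Fatal(err) // server unreachable
	}
	defer c.Close(context.Background())
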
@@ -30,18 +30,6 @@ func TestGetWithStrings(t *testing.T) {
assert.Equal(t, expected, actual.All())
}

func TestGetWithNoPrefix(t *testing.T) {
t.Setenv("K1_K2", "string")
t.Setenv("K3_K4", "string")
expected := map[string]any{}

provider := New(config.ProviderConfig{})
actual, err := provider.Get(context.Background(), config.MustNewUri("env:"))
require.NoError(t, err)

assert.Equal(t, expected, actual.All())
}

func TestGetWithGoTypes(t *testing.T) {
t.Setenv("SIGNOZ_BOOL", "true")
t.Setenv("SIGNOZ_STRING", "string")

@@ -30,7 +30,7 @@ func (factory *providerFactory) New(config ProviderConfig) Provider {
// ProviderConfig is the configuration for a provider.
type ProviderConfig struct{}

// Provider is an interface that represents a configuration provider.
// Provider is an interface that represents a provider.
type Provider interface {
// Get returns the configuration for the given URI.
Get(context.Context, Uri) (*Conf, error)

@@ -21,7 +21,7 @@ type configFactory struct {
newConfigFunc NewConfigFunc
}

// Name returns the name of the factory.
// New creates a new config.
func (factory *configFactory) Name() Name {
return factory.name
}

@@ -1,29 +0,0 @@
package factory

import (
"testing"

"github.com/stretchr/testify/assert"
)

type c1 struct{}

func (c1) Validate() error {
return nil
}

func TestNewConfigFactory(t *testing.T) {
cf := NewConfigFactory(MustNewName("c1"), func() Config {
return c1{}
})
assert.Equal(t, MustNewName("c1"), cf.Name())
assert.IsType(t, c1{}, cf.New())
}

func TestNewConfigFactoryWithPointer(t *testing.T) {
cfp := NewConfigFactory(MustNewName("c1"), func() Config {
return &c1{}
})
assert.Equal(t, MustNewName("c1"), cfp.Name())
assert.IsType(t, &c1{}, cfp.New())
}
@@ -1,4 +1,4 @@
package servicetest
package factorytest

import (
"context"
@@ -1,20 +0,0 @@
package factory

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestName(t *testing.T) {
assert.Equal(t, Name{name: "c1"}, MustNewName("c1"))
}

func TestNameWithInvalidCharacters(t *testing.T) {
_, err := NewName("c1%")
assert.Error(t, err)

assert.Panics(t, func() {
MustNewName("c1%")
})
}
@@ -12,8 +12,6 @@ type NamedMap[T Named] struct {
factoriesInOrder []T
}

// NewNamedMap creates a new NamedMap from a list of factories.
// It returns an error if the factories have duplicate names.
func NewNamedMap[T Named](factories ...T) (NamedMap[T], error) {
fmap := make(map[Name]T)
for _, factory := range factories {
@@ -27,8 +25,6 @@ func NewNamedMap[T Named](factories ...T) (NamedMap[T], error) {
return NamedMap[T]{factories: fmap, factoriesInOrder: factories}, nil
}

// MustNewNamedMap creates a new NamedMap from a list of factories.
// It panics if the factories have duplicate names.
func MustNewNamedMap[T Named](factories ...T) NamedMap[T] {
nm, err := NewNamedMap(factories...)
if err != nil {
@@ -37,9 +33,7 @@ func MustNewNamedMap[T Named](factories ...T) NamedMap[T] {
return nm
}

// Get returns the factory for the given name by string.
// It returns an error if the factory is not found or the name is invalid.
func (n *NamedMap[T]) Get(namestr string) (t T, err error) {
func (n NamedMap[T]) Get(namestr string) (t T, err error) {
name, err := NewName(namestr)
if err != nil {
return
@@ -55,20 +49,16 @@ func (n *NamedMap[T]) Get(namestr string) (t T, err error) {
return
}

// Add adds a factory to the NamedMap.
// It returns an error if the factory already exists.
func (n *NamedMap[T]) Add(factory T) (err error) {
func (n NamedMap[T]) Add(factory T) (err error) {
name := factory.Name()
if _, ok := n.factories[name]; ok {
return fmt.Errorf("factory %q already exists", name)
}

n.factories[name] = factory
n.factoriesInOrder = append(n.factoriesInOrder, factory)
return nil
}

// GetInOrder returns the factories in the order they were added.
func (n *NamedMap[T]) GetInOrder() []T {
func (n NamedMap[T]) GetInOrder() []T {
return n.factoriesInOrder
}

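One consequence of moving Get, Add, and GetInOrder from pointer to value receivers is worth sketching (this is an observation, not from the source): because Go maps are reference types, the insert into n.factories inside Add stays visible to the caller, but the append to n.factoriesInOrder only updates the copy's slice header, so the caller's ordered view does not grow. A fragment, reusing the f1/f2 fixtures from the test file removed just below:

	nm := MustNewNamedMap[Named](&f1{})
	_ = nm.Add(&f2{})                 // the shared map gains "f2" ...
	_, err := nm.Get("f2")            // ... so Get("f2") succeeds,
	fmt.Println(err == nil)           // prints true
	fmt.Println(len(nm.GetInOrder())) // but this still prints 1, not 2
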
@@ -1,72 +0,0 @@
package factory

import (
"testing"

"github.com/stretchr/testify/assert"
)

type f1 struct{}

func (*f1) Name() Name {
return MustNewName("f1")
}

type f2 struct{}

func (*f2) Name() Name {
return MustNewName("f2")
}

func TestNewNamedMap(t *testing.T) {
nm, err := NewNamedMap[Named](&f1{}, &f2{})
assert.NoError(t, err)
assert.Equal(t, map[Name]Named{
MustNewName("f1"): &f1{},
MustNewName("f2"): &f2{},
}, nm.factories)
assert.Equal(t, []Named{&f1{}, &f2{}}, nm.GetInOrder())
}

func TestNewNamedMapWithDuplicateNames(t *testing.T) {
_, err := NewNamedMap[Named](&f1{}, &f1{})
assert.Error(t, err)
}

func TestMustNewNamedMap(t *testing.T) {
nm := MustNewNamedMap[Named](&f1{}, &f2{})
assert.Equal(t, map[Name]Named{
MustNewName("f1"): &f1{},
MustNewName("f2"): &f2{},
}, nm.factories)
assert.Equal(t, []Named{&f1{}, &f2{}}, nm.GetInOrder())
}

func TestMustNewNamedMapDuplicateNames(t *testing.T) {
assert.Panics(t, func() {
MustNewNamedMap[Named](&f1{}, &f1{})
})
}

func TestNamedMapGet(t *testing.T) {
nm := MustNewNamedMap[Named](&f1{}, &f2{})

nf1, err := nm.Get("f1")
assert.NoError(t, err)
assert.IsType(t, &f1{}, nf1)

_, err = nm.Get("f3")
assert.Error(t, err)
}

func TestNamedMapAdd(t *testing.T) {
nm := MustNewNamedMap[Named](&f1{})

err := nm.Add(&f2{})
assert.NoError(t, err)
assert.Equal(t, map[Name]Named{
MustNewName("f1"): &f1{},
MustNewName("f2"): &f2{},
}, nm.factories)
assert.Equal(t, []Named{&f1{}, &f2{}}, nm.GetInOrder())
}
@@ -1,8 +1,6 @@
package factory

import (
"context"
)
import "context"

type Provider = any

@@ -23,17 +21,10 @@ func (factory *providerFactory[P, C]) Name() Name {
return factory.name
}

func (factory *providerFactory[P, C]) New(ctx context.Context, settings ProviderSettings, config C) (p P, err error) {
provider, err := factory.newProviderFunc(ctx, settings, config)
if err != nil {
return
}

p = provider
return
func (factory *providerFactory[P, C]) New(ctx context.Context, settings ProviderSettings, config C) (P, error) {
return factory.newProviderFunc(ctx, settings, config)
}

// NewProviderFactory creates a new provider factory.
func NewProviderFactory[P Provider, C Config](name Name, newProviderFunc NewProviderFunc[P, C]) ProviderFactory[P, C] {
return &providerFactory[P, C]{
name: name,
@@ -41,8 +32,7 @@ func NewProviderFactory[P Provider, C Config](name Name, newProviderFunc NewProv
}
}

// NewProviderFromNamedMap creates a new provider from a factory based on the input key.
func NewProviderFromNamedMap[P Provider, C Config](ctx context.Context, settings ProviderSettings, config C, factories NamedMap[ProviderFactory[P, C]], key string) (p P, err error) {
func NewFromFactory[P Provider, C Config](ctx context.Context, settings ProviderSettings, config C, factories NamedMap[ProviderFactory[P, C]], key string) (p P, err error) {
providerFactory, err := factories.Get(key)
if err != nil {
return

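A short sketch of the renamed NewFromFactory in use, selecting a provider factory by key; the concrete cache factories and package names are assumptions based on the providers shown earlier in this diff:

	factories := factory.MustNewNamedMap(memorycache.NewFactory(), rediscache.NewFactory()) // assumed packages
	c, err := factory.NewFromFactory(ctx, factory.ProviderSettings{}, cfg, factories, cfg.Provider)
	if err != nil {
		return err
	}
	_ = c.Connect(ctx)
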
@@ -1,41 +0,0 @@
package factory

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
)

type p1 struct{}

type pc1 struct{}

func (pc1) Validate() error {
return nil
}

func TestNewProviderFactory(t *testing.T) {
pf := NewProviderFactory(MustNewName("p1"), func(ctx context.Context, settings ProviderSettings, config pc1) (p1, error) {
return p1{}, nil
})
assert.Equal(t, MustNewName("p1"), pf.Name())
p, err := pf.New(context.Background(), ProviderSettings{}, pc1{})
assert.NoError(t, err)
assert.IsType(t, p1{}, p)
}

func TestNewProviderFactoryFromFactory(t *testing.T) {
pf := NewProviderFactory(MustNewName("p1"), func(ctx context.Context, settings ProviderSettings, config pc1) (p1, error) {
return p1{}, nil
})

m := MustNewNamedMap(pf)
assert.Equal(t, MustNewName("p1"), pf.Name())
p, err := NewProviderFromNamedMap(context.Background(), ProviderSettings{}, pc1{}, m, "p1")
assert.NoError(t, err)
assert.IsType(t, p1{}, p)

_, err = NewProviderFromNamedMap(context.Background(), ProviderSettings{}, pc1{}, m, "p2")
assert.Error(t, err)
}
@@ -1,10 +0,0 @@
package providertest

import (
"go.signoz.io/signoz/pkg/factory"
"go.signoz.io/signoz/pkg/instrumentation/instrumentationtest"
)

func NewSettings() factory.ProviderSettings {
return instrumentationtest.New().ToProviderSettings()
}
@@ -1,198 +0,0 @@
package middleware

import (
"bufio"
"bytes"
"encoding/json"
"errors"
"io"
"net"
"net/http"
"regexp"

"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/auth"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)

type Analytics struct {
logger *zap.Logger
}

func NewAnalytics(logger *zap.Logger) *Analytics {
if logger == nil {
panic("cannot build analytics, logger is empty")
}

return &Analytics{
logger: logger.Named(pkgname),
}
}

func (middleware *Analytics) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := auth.AttachJwtToContext(r.Context(), r)
r = r.WithContext(ctx)
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()

queryRangeData, metadataExists := extractQueryRangeData(path, r)
getActiveLogs(path, r)

lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)

data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range queryRangeData {
data[key] = value
}
}

if _, ok := telemetry.EnabledPaths()[path]; ok {
userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
}
}

})
}

type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}

func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flush interface.
func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Support websockets
func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lrw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack not supported")
}
return h.Hijack()
}

func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}

}

}

func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range"

data := map[string]interface{}{}
var postData *v3.QueryRangeParamsV3

if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
if r.Body != nil {
bodyBytes, err := io.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)

} else {
return nil, false
}

} else {
return nil, false
}

referrer := r.Header.Get("Referer")

dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the alert: ", zap.Error(err))
}
logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
}
traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}

queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)

if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
if queryInfoResult.MetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if queryInfoResult.LogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if queryInfoResult.TracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = queryInfoResult.MetricsUsed
data["logsUsed"] = queryInfoResult.LogsUsed
data["tracesUsed"] = queryInfoResult.TracesUsed
data["filterApplied"] = queryInfoResult.FilterApplied
data["groupByApplied"] = queryInfoResult.GroupByApplied
data["aggregateOperator"] = queryInfoResult.AggregateOperator
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
data["queryType"] = queryInfoResult.QueryType
data["panelType"] = queryInfoResult.PanelType

userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
switch {
case dashboardMatched:
data["screen"] = "panel"
case alertMatched:
data["screen"] = "alert"
case logsExplorerMatched:
data["screen"] = "logs-explorer"
case traceExplorerMatched:
data["screen"] = "traces-explorer"
default:
data["screen"] = "unknown"
return data, true
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
}
}
return data, true
}
@@ -1,88 +0,0 @@
package middleware

import (
"context"
"net/http"
"net/url"
"strings"

"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.uber.org/zap"
)

type LogComment struct {
logger *zap.Logger
}

func NewLogComment(logger *zap.Logger) *LogComment {
if logger == nil {
panic("cannot build log enrichment, logger is empty")
}

return &LogComment{
logger: logger.Named(pkgname),
}
}

func (middleware *LogComment) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
referrer := r.Header.Get("Referer")

var path, dashboardID, alertID, page, client, viewName, tab string

if referrer != "" {
referrerURL, _ := url.Parse(referrer)
client = "browser"
path = referrerURL.Path

if strings.Contains(path, "/dashboard") {
// Split the path into segments
pathSegments := strings.Split(referrerURL.Path, "/")
// The dashboard ID should be the segment after "/dashboard/"
// Loop through pathSegments to find "dashboard" and then take the next segment as the ID
for i, segment := range pathSegments {
if segment == "dashboard" && i < len(pathSegments)-1 {
// Return the next segment, which should be the dashboard ID
dashboardID = pathSegments[i+1]
}
}
page = "dashboards"
} else if strings.Contains(path, "/alerts") {
urlParams := referrerURL.Query()
alertID = urlParams.Get("ruleId")
page = "alerts"
} else if strings.Contains(path, "logs") && strings.Contains(path, "explorer") {
page = "logs-explorer"
viewName = referrerURL.Query().Get("viewName")
} else if strings.Contains(path, "/trace") || strings.Contains(path, "traces-explorer") {
page = "traces-explorer"
viewName = referrerURL.Query().Get("viewName")
} else if strings.Contains(path, "/services") {
page = "services"
tab = referrerURL.Query().Get("tab")
if tab == "" {
tab = "OVER_METRICS"
}
}
} else {
client = "api"
}

email, _ := auth.GetEmailFromJwt(r.Context())

kvs := map[string]string{
"path": path,
"dashboardID": dashboardID,
"alertID": alertID,
"source": page,
"client": client,
"viewName": viewName,
"servicesTab": tab,
"email": email,
}

r = r.WithContext(context.WithValue(r.Context(), common.LogCommentKey, kvs))
next.ServeHTTP(w, r)
})
}
@@ -7,3 +7,14 @@ type Config struct {
	// See net.Dial for details of the address format.
	Address string `mapstructure:"address"`
}

func (c *Config) NewWithDefaults() *Config {
	return &Config{
		Address: "0.0.0.0:8080",
	}
}

func (c *Config) Validate() error {
	return nil
}
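For orientation, a minimal sketch of how this config might be consumed by a caller. The import alias and the override value are assumptions for illustration; only NewWithDefaults and Validate come from the hunk above.

package main

import (
	"log"

	httpconfig "go.signoz.io/signoz/pkg/http/server" // assumed import path
)

func main() {
	// Start from the package defaults, override selectively, then validate.
	cfg := (&httpconfig.Config{}).NewWithDefaults()
	cfg.Address = "127.0.0.1:9090" // hypothetical override
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid http server config: %v", err)
	}
	log.Printf("listening address: %s", cfg.Address)
}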
@@ -17,9 +17,10 @@ type Server struct {
	logger  *zap.Logger
	handler http.Handler
	cfg     Config
	name    factory.Name
}

func New(logger *zap.Logger, cfg Config, handler http.Handler) (*Server, error) {
func New(logger *zap.Logger, name factory.Name, cfg Config, handler http.Handler) (*Server, error) {
	if handler == nil {
		return nil, fmt.Errorf("cannot build http server, handler is required")
	}
@@ -41,9 +42,14 @@ func New(logger *zap.Logger, cfg Config, handler http.Handler) (*Server, error)
		logger:  logger.Named("go.signoz.io/pkg/http/server"),
		handler: handler,
		cfg:     cfg,
		name:    name,
	}, nil
}

func (server *Server) Name() factory.Name {
	return server.name
}

func (server *Server) Start(ctx context.Context) error {
	server.logger.Info("starting http server", zap.String("address", server.srv.Addr))
	if err := server.srv.ListenAndServe(); err != nil {
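A hedged sketch of the new call site shape. The diff only shows that New now takes a factory.Name so the server can identify itself via Name(); factory.MustNewName below is hypothetical, as the Name constructor is not part of this compare.

package main

import (
	"context"
	"net/http"

	"go.signoz.io/signoz/pkg/factory"
	httpserver "go.signoz.io/signoz/pkg/http/server" // assumed import path
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	mux := http.NewServeMux()
	cfg := (&httpserver.Config{}).NewWithDefaults()

	// factory.MustNewName("web") is a hypothetical constructor used only to
	// illustrate the extra parameter in the new signature.
	srv, err := httpserver.New(logger, factory.MustNewName("web"), *cfg, mux)
	if err != nil {
		logger.Fatal("couldn't build http server", zap.Error(err))
	}

	_ = srv.Start(context.Background()) // blocks on ListenAndServe
}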
@@ -1,34 +1,151 @@
package instrumentation

import (
	"os"
	"context"

	"go.opentelemetry.io/contrib/bridges/otelzap"
	contribsdkconfig "go.opentelemetry.io/contrib/config"
	sdklog "go.opentelemetry.io/otel/log"
	sdkmetric "go.opentelemetry.io/otel/metric"
	sdkresource "go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	sdktrace "go.opentelemetry.io/otel/trace"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/version"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Instrumentation provides the core components for application instrumentation.
var _ factory.Service = (*SDK)(nil)
var _ Instrumentation = (*SDK)(nil)

type Instrumentation interface {
	// LoggerProvider returns the OpenTelemetry logger provider.
	LoggerProvider() sdklog.LoggerProvider
	// Logger returns the Zap logger.
	Logger() *zap.Logger
	// MeterProvider returns the OpenTelemetry meter provider.
	MeterProvider() sdkmetric.MeterProvider
	// TracerProvider returns the OpenTelemetry tracer provider.
	TracerProvider() sdktrace.TracerProvider
	// ToProviderSettings converts instrumentation to provider settings.
	ToProviderSettings() factory.ProviderSettings
}

// Merges the input attributes with the resource attributes.
func mergeAttributes(input map[string]any, resource *sdkresource.Resource) map[string]any {
// SDK holds the core components for application instrumentation.
type SDK struct {
	sdk    contribsdkconfig.SDK
	logger *zap.Logger
}

// New creates a new Instrumentation instance with configured providers.
// It sets up logging, tracing, and metrics based on the provided configuration.
func New(ctx context.Context, build version.Build, cfg Config) (*SDK, error) {
	// Set default resource attributes if not provided
	if cfg.Resource.Attributes == nil {
		cfg.Resource.Attributes = map[string]any{
			string(semconv.ServiceNameKey):    build.Name,
			string(semconv.ServiceVersionKey): build.Version,
		}
	}

	// Create a new resource with default detectors.
	// The upstream contrib repository is not taking detectors into account.
	// We are, therefore, using some sensible defaults here.
	resource, err := sdkresource.New(
		ctx,
		sdkresource.WithContainer(),
		sdkresource.WithFromEnv(),
		sdkresource.WithHost(),
	)
	if err != nil {
		return nil, err
	}

	// Prepare the resource configuration by merging
	// resource and attributes.
	sch := semconv.SchemaURL
	configResource := contribsdkconfig.Resource{
		Attributes: attributes(cfg.Resource.Attributes, resource),
		Detectors:  nil,
		SchemaUrl:  &sch,
	}

	var loggerProvider *contribsdkconfig.LoggerProvider
	if cfg.Logs.Enabled {
		loggerProvider = &contribsdkconfig.LoggerProvider{
			Processors: []contribsdkconfig.LogRecordProcessor{
				{Batch: &cfg.Logs.Processors.Batch},
			},
		}
	}

	var tracerProvider *contribsdkconfig.TracerProvider
	if cfg.Traces.Enabled {
		tracerProvider = &contribsdkconfig.TracerProvider{
			Processors: []contribsdkconfig.SpanProcessor{
				{Batch: &cfg.Traces.Processors.Batch},
			},
			Sampler: &cfg.Traces.Sampler,
		}
	}

	var meterProvider *contribsdkconfig.MeterProvider
	if cfg.Metrics.Enabled {
		meterProvider = &contribsdkconfig.MeterProvider{
			Readers: []contribsdkconfig.MetricReader{
				{Pull: &cfg.Metrics.Readers.Pull},
			},
		}
	}

	sdk, err := contribsdkconfig.NewSDK(
		contribsdkconfig.WithContext(ctx),
		contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
			LoggerProvider: loggerProvider,
			TracerProvider: tracerProvider,
			MeterProvider:  meterProvider,
			Resource:       &configResource,
		}),
	)
	if err != nil {
		return nil, err
	}

	return &SDK{
		sdk:    sdk,
		logger: newLogger(cfg, sdk.LoggerProvider()),
	}, nil
}

func (i *SDK) Start(ctx context.Context) error {
	return nil
}

func (i *SDK) Stop(ctx context.Context) error {
	return i.sdk.Shutdown(ctx)
}

func (i *SDK) LoggerProvider() sdklog.LoggerProvider {
	return i.sdk.LoggerProvider()
}

func (i *SDK) Logger() *zap.Logger {
	return i.logger
}

func (i *SDK) MeterProvider() sdkmetric.MeterProvider {
	return i.sdk.MeterProvider()
}

func (i *SDK) TracerProvider() sdktrace.TracerProvider {
	return i.sdk.TracerProvider()
}

func (i *SDK) ToProviderSettings() factory.ProviderSettings {
	return factory.ProviderSettings{
		LoggerProvider: i.LoggerProvider(),
		ZapLogger:      i.Logger(),
		MeterProvider:  i.MeterProvider(),
		TracerProvider: i.TracerProvider(),
	}
}

// attributes merges the input attributes with the resource attributes.
func attributes(input map[string]any, resource *sdkresource.Resource) map[string]any {
	output := make(map[string]any)

	for k, v := range input {
@@ -42,14 +159,3 @@ func mergeAttributes(input map[string]any, resource *sdkresource.Resource) map[s

	return output
}

// newLogger creates a new Zap logger with the configured level and output.
// It combines a JSON encoder for stdout and an OpenTelemetry bridge.
func newLogger(cfg Config, provider sdklog.LoggerProvider) *zap.Logger {
	core := zapcore.NewTee(
		zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(os.Stdout), cfg.Logs.Level),
		otelzap.NewCore("go.signoz.io/pkg/instrumentation", otelzap.WithLoggerProvider(provider)),
	)

	return zap.New(core, zap.AddCaller(), zap.AddStacktrace(zap.ErrorLevel))
}
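A minimal lifecycle sketch for the SDK above, assuming a zero-value Config and Build; the diff shows New/Start/Stop/Logger, but the concrete Config fields a real caller would set (log level, batch processors, pull readers) are elided here.

package main

import (
	"context"

	"go.signoz.io/signoz/pkg/instrumentation" // assumed import path
	"go.signoz.io/signoz/pkg/version"
)

func main() {
	ctx := context.Background()

	// Zero values are assumptions; with nil resource attributes, New fills
	// in service.name and service.version from the build info.
	sdk, err := instrumentation.New(ctx, version.Build{}, instrumentation.Config{})
	if err != nil {
		panic(err)
	}
	defer func() { _ = sdk.Stop(ctx) }() // flushes via the contrib SDK's Shutdown

	logger := sdk.Logger() // zap logger teed to stdout JSON and the OTel bridge
	logger.Info("instrumentation ready")
}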
pkg/instrumentation/logger.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package instrumentation

import (
	"os"

	"go.opentelemetry.io/contrib/bridges/otelzap"
	sdklog "go.opentelemetry.io/otel/log"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// newLogger creates a new Zap logger with the configured level and output.
// It combines a JSON encoder for stdout and an OpenTelemetry bridge.
func newLogger(cfg Config, provider sdklog.LoggerProvider) *zap.Logger {
	core := zapcore.NewTee(
		zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(os.Stdout), cfg.Logs.Level),
		otelzap.NewCore("go.signoz.io/pkg/instrumentation", otelzap.WithLoggerProvider(provider)),
	)

	return zap.New(core, zap.AddCaller(), zap.AddStacktrace(zap.ErrorLevel))
}
@@ -1,137 +0,0 @@
package instrumentation

import (
	"context"

	contribsdkconfig "go.opentelemetry.io/contrib/config"
	sdklog "go.opentelemetry.io/otel/log"
	sdkmetric "go.opentelemetry.io/otel/metric"
	sdkresource "go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	sdktrace "go.opentelemetry.io/otel/trace"
	"go.signoz.io/signoz/pkg/factory"
	"go.signoz.io/signoz/pkg/version"
	"go.uber.org/zap"
)

var _ factory.Service = (*SDK)(nil)
var _ Instrumentation = (*SDK)(nil)

// SDK holds the core components for application instrumentation.
type SDK struct {
	sdk    contribsdkconfig.SDK
	logger *zap.Logger
}

// New creates a new Instrumentation instance with configured providers.
// It sets up logging, tracing, and metrics based on the provided configuration.
func New(ctx context.Context, build version.Build, cfg Config) (*SDK, error) {
	// Set default resource attributes if not provided
	if cfg.Resource.Attributes == nil {
		cfg.Resource.Attributes = map[string]any{
			string(semconv.ServiceNameKey):    build.Name,
			string(semconv.ServiceVersionKey): build.Version,
		}
	}

	// Create a new resource with default detectors.
	// The upstream contrib repository is not taking detectors into account.
	// We are, therefore, using some sensible defaults here.
	resource, err := sdkresource.New(
		ctx,
		sdkresource.WithContainer(),
		sdkresource.WithFromEnv(),
		sdkresource.WithHost(),
	)
	if err != nil {
		return nil, err
	}

	// Prepare the resource configuration by merging
	// resource and attributes.
	sch := semconv.SchemaURL
	configResource := contribsdkconfig.Resource{
		Attributes: mergeAttributes(cfg.Resource.Attributes, resource),
		Detectors:  nil,
		SchemaUrl:  &sch,
	}

	var loggerProvider *contribsdkconfig.LoggerProvider
	if cfg.Logs.Enabled {
		loggerProvider = &contribsdkconfig.LoggerProvider{
			Processors: []contribsdkconfig.LogRecordProcessor{
				{Batch: &cfg.Logs.Processors.Batch},
			},
		}
	}

	var tracerProvider *contribsdkconfig.TracerProvider
	if cfg.Traces.Enabled {
		tracerProvider = &contribsdkconfig.TracerProvider{
			Processors: []contribsdkconfig.SpanProcessor{
				{Batch: &cfg.Traces.Processors.Batch},
			},
			Sampler: &cfg.Traces.Sampler,
		}
	}

	var meterProvider *contribsdkconfig.MeterProvider
	if cfg.Metrics.Enabled {
		meterProvider = &contribsdkconfig.MeterProvider{
			Readers: []contribsdkconfig.MetricReader{
				{Pull: &cfg.Metrics.Readers.Pull},
			},
		}
	}

	sdk, err := contribsdkconfig.NewSDK(
		contribsdkconfig.WithContext(ctx),
		contribsdkconfig.WithOpenTelemetryConfiguration(contribsdkconfig.OpenTelemetryConfiguration{
			LoggerProvider: loggerProvider,
			TracerProvider: tracerProvider,
			MeterProvider:  meterProvider,
			Resource:       &configResource,
		}),
	)
	if err != nil {
		return nil, err
	}

	return &SDK{
		sdk:    sdk,
		logger: newLogger(cfg, sdk.LoggerProvider()),
	}, nil
}

func (i *SDK) Start(ctx context.Context) error {
	return nil
}

func (i *SDK) Stop(ctx context.Context) error {
	return i.sdk.Shutdown(ctx)
}

func (i *SDK) LoggerProvider() sdklog.LoggerProvider {
	return i.sdk.LoggerProvider()
}

func (i *SDK) Logger() *zap.Logger {
	return i.logger
}

func (i *SDK) MeterProvider() sdkmetric.MeterProvider {
	return i.sdk.MeterProvider()
}

func (i *SDK) TracerProvider() sdktrace.TracerProvider {
	return i.sdk.TracerProvider()
}

func (i *SDK) ToProviderSettings() factory.ProviderSettings {
	return factory.ProviderSettings{
		LoggerProvider: i.LoggerProvider(),
		ZapLogger:      i.Logger(),
		MeterProvider:  i.MeterProvider(),
		TracerProvider: i.TracerProvider(),
	}
}
@@ -8,7 +8,6 @@ import (
	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"go.signoz.io/signoz/pkg/query-service/agentConf/sqlite"
	"go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"
@@ -19,15 +18,6 @@ type Repo struct {
	db *sqlx.DB
}

func (r *Repo) initDB(engine string) error {
	switch engine {
	case "sqlite3", "sqlite":
		return sqlite.InitDB(r.db)
	default:
		return fmt.Errorf("unsupported db")
	}
}

func (r *Repo) GetConfigHistory(
	ctx context.Context, typ ElementTypeDef, limit int,
) ([]ConfigVersion, *model.ApiError) {

@@ -39,8 +39,7 @@ type Manager struct {
}

type ManagerOptions struct {
	DB       *sqlx.DB
	DBEngine string
	DB *sqlx.DB

	// When acting as opamp.AgentConfigProvider, agent conf recommendations are
	// applied to the base conf in the order the features have been specified here.
@@ -66,10 +65,6 @@ func Initiate(options *ManagerOptions) (*Manager, error) {
		configSubscribers: map[string]func(){},
	}

	err := m.initDB(options.DBEngine)
	if err != nil {
		return nil, errors.Wrap(err, "could not init agentConf db")
	}
	return m, nil
}
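With DBEngine and initDB gone, the caller-side wiring reduces to handing Initiate the sqlx handle. A sketch under that assumption; the other ManagerOptions fields (e.g. the agent features list) are elided, and the import path is assumed.

package example

import (
	"github.com/jmoiron/sqlx"

	"go.signoz.io/signoz/pkg/query-service/agentConf" // assumed import path
)

func setupAgentConf(db *sqlx.DB) (*agentConf.Manager, error) {
	// Schema setup is no longer Initiate's job; it only needs the DB handle.
	return agentConf.Initiate(&agentConf.ManagerOptions{DB: db})
}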
@@ -1,65 +0,0 @@
package sqlite

import (
	"fmt"

	"github.com/pkg/errors"

	"github.com/jmoiron/sqlx"
)

func InitDB(db *sqlx.DB) error {
	var err error
	if db == nil {
		return fmt.Errorf("invalid db connection")
	}

	table_schema := `CREATE TABLE IF NOT EXISTS agent_config_versions(
		id TEXT PRIMARY KEY,
		created_by TEXT,
		created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		updated_by TEXT,
		updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		version INTEGER DEFAULT 1,
		active int,
		is_valid int,
		disabled int,
		element_type VARCHAR(120) NOT NULL,
		deploy_status VARCHAR(80) NOT NULL DEFAULT 'DIRTY',
		deploy_sequence INTEGER,
		deploy_result TEXT,
		last_hash TEXT,
		last_config TEXT,
		UNIQUE(element_type, version)
	);

	CREATE UNIQUE INDEX IF NOT EXISTS agent_config_versions_u1
	ON agent_config_versions(element_type, version);

	CREATE INDEX IF NOT EXISTS agent_config_versions_nu1
	ON agent_config_versions(last_hash);

	CREATE TABLE IF NOT EXISTS agent_config_elements(
		id TEXT PRIMARY KEY,
		created_by TEXT,
		created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		updated_by TEXT,
		updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		element_id TEXT NOT NULL,
		element_type VARCHAR(120) NOT NULL,
		version_id TEXT NOT NULL
	);

	CREATE UNIQUE INDEX IF NOT EXISTS agent_config_elements_u1
	ON agent_config_elements(version_id, element_id, element_type);
	`

	_, err = db.Exec(table_schema)
	if err != nil {
		return errors.Wrap(err, "Error in creating agent config tables")
	}
	return nil
}
@@ -1,217 +0,0 @@
package cloudintegrations

import (
	"bytes"
	"embed"
	"encoding/json"
	"fmt"
	"io/fs"
	"path"
	"sort"

	koanfJson "github.com/knadh/koanf/parsers/json"
	"go.signoz.io/signoz/pkg/query-service/app/integrations"
	"go.signoz.io/signoz/pkg/query-service/model"
	"golang.org/x/exp/maps"
)

func listCloudProviderServices(
	cloudProvider string,
) ([]CloudServiceDetails, *model.ApiError) {
	cloudServices := availableServices[cloudProvider]
	if cloudServices == nil {
		return nil, model.NotFoundError(fmt.Errorf(
			"unsupported cloud provider: %s", cloudProvider,
		))
	}

	services := maps.Values(cloudServices)
	sort.Slice(services, func(i, j int) bool {
		return services[i].Id < services[j].Id
	})

	return services, nil
}

func getCloudProviderService(
	cloudProvider string, serviceId string,
) (*CloudServiceDetails, *model.ApiError) {
	cloudServices := availableServices[cloudProvider]
	if cloudServices == nil {
		return nil, model.NotFoundError(fmt.Errorf(
			"unsupported cloud provider: %s", cloudProvider,
		))
	}

	svc, exists := cloudServices[serviceId]
	if !exists {
		return nil, model.NotFoundError(fmt.Errorf(
			"%s service not found: %s", cloudProvider, serviceId,
		))
	}

	return &svc, nil
}

// End of API. Logic for reading service definition files follows

// Service details read from ./serviceDefinitions
// { "providerName": { "service_id": {...}} }
var availableServices map[string]map[string]CloudServiceDetails

func init() {
	err := readAllServiceDefinitions()
	if err != nil {
		panic(fmt.Errorf(
			"couldn't read cloud service definitions: %w", err,
		))
	}
}

//go:embed serviceDefinitions/*
var serviceDefinitionFiles embed.FS

func readAllServiceDefinitions() error {
	availableServices = map[string]map[string]CloudServiceDetails{}

	rootDirName := "serviceDefinitions"

	cloudProviderDirs, err := fs.ReadDir(serviceDefinitionFiles, rootDirName)
	if err != nil {
		return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
	}

	for _, d := range cloudProviderDirs {
		if !d.IsDir() {
			continue
		}

		cloudProviderDirPath := path.Join(rootDirName, d.Name())
		cloudServices, err := readServiceDefinitionsFromDir(cloudProviderDirPath)
		if err != nil {
			return fmt.Errorf("couldn't read %s service definitions", d.Name())
		}

		if len(cloudServices) < 1 {
			return fmt.Errorf("no %s services could be read", d.Name())
		}

		availableServices[d.Name()] = cloudServices
	}

	return nil
}

func readServiceDefinitionsFromDir(cloudProviderDirPath string) (
	map[string]CloudServiceDetails, error,
) {
	svcDefDirs, err := fs.ReadDir(serviceDefinitionFiles, cloudProviderDirPath)
	if err != nil {
		return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
	}

	svcDefs := map[string]CloudServiceDetails{}

	for _, d := range svcDefDirs {
		if !d.IsDir() {
			continue
		}

		svcDirPath := path.Join(cloudProviderDirPath, d.Name())
		s, err := readServiceDefinition(svcDirPath)
		if err != nil {
			return nil, fmt.Errorf("couldn't read svc definition for %s: %w", d.Name(), err)
		}

		_, exists := svcDefs[s.Id]
		if exists {
			return nil, fmt.Errorf(
				"duplicate service definition for id %s at %s", s.Id, d.Name(),
			)
		}
		svcDefs[s.Id] = *s
	}

	return svcDefs, nil
}

func readServiceDefinition(dirpath string) (*CloudServiceDetails, error) {
	integrationJsonPath := path.Join(dirpath, "integration.json")

	serializedSpec, err := serviceDefinitionFiles.ReadFile(integrationJsonPath)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't find integration.json in %s: %w",
			dirpath, err,
		)
	}

	integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't parse integration.json from %s: %w",
			integrationJsonPath, err,
		)
	}

	hydrated, err := integrations.HydrateFileUris(
		integrationSpec, serviceDefinitionFiles, dirpath,
	)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't hydrate files referenced in service definition %s: %w",
			integrationJsonPath, err,
		)
	}

	hydratedSpec := hydrated.(map[string]interface{})
	hydratedSpecJson, err := koanfJson.Parser().Marshal(hydratedSpec)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't serialize hydrated integration spec back to JSON %s: %w",
			integrationJsonPath, err,
		)
	}

	var serviceDef CloudServiceDetails
	decoder := json.NewDecoder(bytes.NewReader(hydratedSpecJson))
	decoder.DisallowUnknownFields()
	err = decoder.Decode(&serviceDef)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't parse hydrated JSON spec read from %s: %w",
			integrationJsonPath, err,
		)
	}

	err = validateServiceDefinition(serviceDef)
	if err != nil {
		return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
	}

	return &serviceDef, nil
}

func validateServiceDefinition(s CloudServiceDetails) error {
	// Validate dashboard data
	seenDashboardIds := map[string]interface{}{}
	for _, dd := range s.Assets.Dashboards {
		did, exists := dd["id"]
		if !exists {
			return fmt.Errorf("id is required. not specified in dashboard titled %v", dd["title"])
		}
		dashboardId, ok := did.(string)
		if !ok {
			return fmt.Errorf("id must be string in dashboard titled %v", dd["title"])
		}
		if _, seen := seenDashboardIds[dashboardId]; seen {
			return fmt.Errorf("multiple dashboards found with id %s", dashboardId)
		}
		seenDashboardIds[dashboardId] = nil
	}

	// potentially more to follow

	return nil
}
@@ -1,34 +0,0 @@
package cloudintegrations

import (
	"testing"

	"github.com/stretchr/testify/require"
	"go.signoz.io/signoz/pkg/query-service/model"
)

func TestAvailableServices(t *testing.T) {
	require := require.New(t)

	// should be able to list available services.
	_, apiErr := listCloudProviderServices("bad-cloud-provider")
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	awsSvcs, apiErr := listCloudProviderServices("aws")
	require.Nil(apiErr)
	require.Greater(len(awsSvcs), 0)

	// should be able to get details of a service
	_, apiErr = getCloudProviderService(
		"aws", "bad-service-id",
	)
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	svc, apiErr := getCloudProviderService(
		"aws", awsSvcs[0].Id,
	)
	require.Nil(apiErr)
	require.Equal(*svc, awsSvcs[0])
}
@@ -22,26 +22,19 @@ func validateCloudProviderName(name string) *model.ApiError {
}

type Controller struct {
	accountsRepo      cloudProviderAccountsRepository
	serviceConfigRepo serviceConfigRepository
	repo cloudProviderAccountsRepository
}

func NewController(db *sqlx.DB) (
	*Controller, error,
) {
	accountsRepo, err := newCloudProviderAccountsRepository(db)
	repo, err := newCloudProviderAccountsRepository(db)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
	}

	serviceConfigRepo, err := newServiceConfigRepository(db)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
	}

	return &Controller{
		accountsRepo:      accountsRepo,
		serviceConfigRepo: serviceConfigRepo,
		repo: repo,
	}, nil
}

@@ -65,7 +58,7 @@ func (c *Controller) ListConnectedAccounts(
		return nil, apiErr
	}

	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, cloudProvider)
	accountRecords, apiErr := c.repo.listConnected(ctx, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
	}
@@ -107,7 +100,7 @@ func (c *Controller) GenerateConnectionUrl(
		return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
	}

	account, apiErr := c.accountsRepo.upsert(
	account, apiErr := c.repo.upsert(
		ctx, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
	)
	if apiErr != nil {
@@ -127,9 +120,8 @@ func (c *Controller) GenerateConnectionUrl(
}

type AccountStatusResponse struct {
	Id             string        `json:"id"`
	CloudAccountId *string       `json:"cloud_account_id,omitempty"`
	Status         AccountStatus `json:"status"`
	Id     string        `json:"id"`
	Status AccountStatus `json:"status"`
}

func (c *Controller) GetAccountStatus(
@@ -141,15 +133,14 @@ func (c *Controller) GetAccountStatus(
		return nil, apiErr
	}

	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
	account, apiErr := c.repo.get(ctx, cloudProvider, accountId)
	if apiErr != nil {
		return nil, apiErr
	}

	resp := AccountStatusResponse{
		Id:             account.Id,
		CloudAccountId: account.CloudAccountId,
		Status:         account.status(),
		Id:     account.Id,
		Status: account.status(),
	}

	return &resp, nil
@@ -173,7 +164,7 @@ func (c *Controller) CheckInAsAgent(
		return nil, apiErr
	}

	existingAccount, apiErr := c.accountsRepo.get(ctx, cloudProvider, req.AccountId)
	existingAccount, apiErr := c.repo.get(ctx, cloudProvider, req.AccountId)
	if existingAccount != nil && existingAccount.CloudAccountId != nil && *existingAccount.CloudAccountId != req.CloudAccountId {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in with new %s account id %s for account %s with existing %s id %s",
@@ -181,7 +172,7 @@ func (c *Controller) CheckInAsAgent(
		))
	}

	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, cloudProvider, req.CloudAccountId)
	existingAccount, apiErr = c.repo.getConnectedCloudAccount(ctx, cloudProvider, req.CloudAccountId)
	if existingAccount != nil && existingAccount.Id != req.AccountId {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in to %s account %s with id %s. already connected with id %s",
@@ -194,7 +185,7 @@ func (c *Controller) CheckInAsAgent(
		Data: req.Data,
	}

	account, apiErr := c.accountsRepo.upsert(
	account, apiErr := c.repo.upsert(
		ctx, cloudProvider, &req.AccountId, nil, &req.CloudAccountId, &agentReport, nil,
	)
	if apiErr != nil {
@@ -220,7 +211,7 @@ func (c *Controller) UpdateAccountConfig(
		return nil, apiErr
	}

	accountRecord, apiErr := c.accountsRepo.upsert(
	accountRecord, apiErr := c.repo.upsert(
		ctx, cloudProvider, &accountId, &req.Config, nil, nil, nil,
	)
	if apiErr != nil {
@@ -239,13 +230,13 @@ func (c *Controller) DisconnectAccount(
		return nil, apiErr
	}

	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
	account, apiErr := c.repo.get(ctx, cloudProvider, accountId)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
	}

	tsNow := time.Now()
	account, apiErr = c.accountsRepo.upsert(
	account, apiErr = c.repo.upsert(
		ctx, cloudProvider, &accountId, nil, nil, nil, &tsNow,
	)
	if apiErr != nil {
@@ -254,127 +245,3 @@ func (c *Controller) DisconnectAccount(

	return account, nil
}

type ListServicesResponse struct {
	Services []CloudServiceSummary `json:"services"`
}

func (c *Controller) ListServices(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {

	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	services, apiErr := listCloudProviderServices(cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
	}

	svcConfigs := map[string]*CloudServiceConfig{}
	if cloudAccountId != nil {
		svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
			ctx, cloudProvider, *cloudAccountId,
		)
		if apiErr != nil {
			return nil, model.WrapApiError(
				apiErr, "couldn't get service configs for cloud account",
			)
		}
	}

	summaries := []CloudServiceSummary{}
	for _, s := range services {
		summary := s.CloudServiceSummary
		summary.Config = svcConfigs[summary.Id]

		summaries = append(summaries, summary)
	}

	return &ListServicesResponse{
		Services: summaries,
	}, nil
}

func (c *Controller) GetServiceDetails(
	ctx context.Context,
	cloudProvider string,
	serviceId string,
	cloudAccountId *string,
) (*CloudServiceDetails, *model.ApiError) {

	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	service, apiErr := getCloudProviderService(cloudProvider, serviceId)
	if apiErr != nil {
		return nil, apiErr
	}

	if cloudAccountId != nil {
		config, apiErr := c.serviceConfigRepo.get(
			ctx, cloudProvider, *cloudAccountId, serviceId,
		)
		if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
			return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
		}

		if config != nil {
			service.Config = config
		}
	}

	return service, nil
}

type UpdateServiceConfigRequest struct {
	CloudAccountId string             `json:"cloud_account_id"`
	Config         CloudServiceConfig `json:"config"`
}

type UpdateServiceConfigResponse struct {
	Id     string             `json:"id"`
	Config CloudServiceConfig `json:"config"`
}

func (c *Controller) UpdateServiceConfig(
	ctx context.Context,
	cloudProvider string,
	serviceId string,
	req UpdateServiceConfigRequest,
) (*UpdateServiceConfigResponse, *model.ApiError) {

	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	// can only update config for a connected cloud account id
	_, apiErr := c.accountsRepo.getConnectedCloudAccount(
		ctx, cloudProvider, req.CloudAccountId,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
	}

	// can only update config for a valid service.
	_, apiErr = getCloudProviderService(cloudProvider, serviceId)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "unsupported service")
	}

	updatedConfig, apiErr := c.serviceConfigRepo.upsert(
		ctx, cloudProvider, req.CloudAccountId, serviceId, req.Config,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't update service config")
	}

	return &UpdateServiceConfigResponse{
		Id:     serviceId,
		Config: *updatedConfig,
	}, nil
}
@@ -12,7 +12,7 @@ import (

func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
	require := require.New(t)
	testDB, _ := utils.NewTestSqliteDB(t)
	testDB := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(testDB)
	require.NoError(err)

@@ -30,7 +30,7 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
	require.NotEmpty(resp1.AccountId)

	testAccountId := resp1.AccountId
	account, apiErr := controller.accountsRepo.get(
	account, apiErr := controller.repo.get(
		context.TODO(), "aws", testAccountId,
	)
	require.Nil(apiErr)
@@ -47,7 +47,7 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
	require.Nil(apiErr)
	require.Equal(testAccountId, resp2.AccountId)

	account, apiErr = controller.accountsRepo.get(
	account, apiErr = controller.repo.get(
		context.TODO(), "aws", testAccountId,
	)
	require.Nil(apiErr)
@@ -56,7 +56,7 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {

func TestAgentCheckIns(t *testing.T) {
	require := require.New(t)
	testDB, _ := utils.NewTestSqliteDB(t)
	testDB := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(testDB)
	require.NoError(err)

@@ -89,7 +89,7 @@ func TestAgentCheckIns(t *testing.T) {
	// if another connected AccountRecord exists for same cloud account
	// i.e. there can't be 2 connected account records for the same cloud account id
	// at any point in time.
	existingConnected, apiErr := controller.accountsRepo.getConnectedCloudAccount(
	existingConnected, apiErr := controller.repo.getConnectedCloudAccount(
		context.TODO(), "aws", testCloudAccountId1,
	)
	require.Nil(apiErr)
@@ -112,7 +112,7 @@ func TestAgentCheckIns(t *testing.T) {
		context.TODO(), "aws", testAccountId1,
	)

	existingConnected, apiErr = controller.accountsRepo.getConnectedCloudAccount(
	existingConnected, apiErr = controller.repo.getConnectedCloudAccount(
		context.TODO(), "aws", testCloudAccountId1,
	)
	require.Nil(existingConnected)
@@ -139,7 +139,7 @@ func TestAgentCheckIns(t *testing.T) {

func TestCantDisconnectNonExistentAccount(t *testing.T) {
	require := require.New(t)
	testDB, _ := utils.NewTestSqliteDB(t)
	testDB := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(testDB)
	require.NoError(err)

@@ -151,120 +151,3 @@ func TestCantDisconnectNonExistentAccount(t *testing.T) {
	require.Equal(model.ErrorNotFound, apiErr.Type())
	require.Nil(account)
}

func TestConfigureService(t *testing.T) {
	require := require.New(t)
	testDB, _ := utils.NewTestSqliteDB(t)
	controller, err := NewController(testDB)
	require.NoError(err)

	testCloudAccountId := "546311234"

	// should start out without any service config
	svcListResp, apiErr := controller.ListServices(
		context.TODO(), "aws", &testCloudAccountId,
	)
	require.Nil(apiErr)

	testSvcId := svcListResp.Services[0].Id
	require.Nil(svcListResp.Services[0].Config)

	svcDetails, apiErr := controller.GetServiceDetails(
		context.TODO(), "aws", testSvcId, &testCloudAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testSvcId, svcDetails.Id)
	require.Nil(svcDetails.Config)

	// should be able to configure a service for a connected account
	testConnectedAccount := makeTestConnectedAccount(t, controller, testCloudAccountId)
	require.Nil(testConnectedAccount.RemovedAt)
	require.NotNil(testConnectedAccount.CloudAccountId)
	require.Equal(testCloudAccountId, *testConnectedAccount.CloudAccountId)

	testSvcConfig := CloudServiceConfig{
		Metrics: &CloudServiceMetricsConfig{
			Enabled: true,
		},
	}
	updateSvcConfigResp, apiErr := controller.UpdateServiceConfig(
		context.TODO(), "aws", testSvcId, UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
		},
	)
	require.Nil(apiErr)
	require.Equal(testSvcId, updateSvcConfigResp.Id)
	require.Equal(testSvcConfig, updateSvcConfigResp.Config)

	svcDetails, apiErr = controller.GetServiceDetails(
		context.TODO(), "aws", testSvcId, &testCloudAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testSvcId, svcDetails.Id)
	require.Equal(testSvcConfig, *svcDetails.Config)

	svcListResp, apiErr = controller.ListServices(
		context.TODO(), "aws", &testCloudAccountId,
	)
	require.Nil(apiErr)
	for _, svc := range svcListResp.Services {
		if svc.Id == testSvcId {
			require.Equal(testSvcConfig, *svc.Config)
		}
	}

	// should not be able to configure service after cloud account has been disconnected
	_, apiErr = controller.DisconnectAccount(
		context.TODO(), "aws", testConnectedAccount.Id,
	)
	require.Nil(apiErr)

	_, apiErr = controller.UpdateServiceConfig(
		context.TODO(), "aws", testSvcId,
		UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
		},
	)
	require.NotNil(apiErr)

	// should not be able to configure a service for a cloud account id that is not connected yet
	_, apiErr = controller.UpdateServiceConfig(
		context.TODO(), "aws", testSvcId,
		UpdateServiceConfigRequest{
			CloudAccountId: "9999999999",
			Config:         testSvcConfig,
		},
	)
	require.NotNil(apiErr)

	// should not be able to set config for an unsupported service
	_, apiErr = controller.UpdateServiceConfig(
		context.TODO(), "aws", "bad-service", UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
		},
	)
	require.NotNil(apiErr)
}

func makeTestConnectedAccount(t *testing.T, controller *Controller, cloudAccountId string) *AccountRecord {
	require := require.New(t)

	// a check in from SigNoz agent creates or updates a connected account.
	testAccountId := uuid.NewString()
	resp, apiErr := controller.CheckInAsAgent(
		context.TODO(), "aws", AgentCheckInRequest{
			AccountId:      testAccountId,
			CloudAccountId: cloudAccountId,
		},
	)
	require.Nil(apiErr)
	require.Equal(testAccountId, resp.Account.Id)
	require.Equal(cloudAccountId, *resp.Account.CloudAccountId)

	return &resp.Account
}
@@ -5,8 +5,6 @@ import (
	"encoding/json"
	"fmt"
	"time"

	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
)

// Represents a cloud provider account for cloud integrations
@@ -117,102 +115,3 @@ func (a *AccountRecord) account() Account {

	return ca
}

type CloudServiceSummary struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`

	// Present only if the service has been configured in the
	// context of a cloud provider account.
	Config *CloudServiceConfig `json:"config,omitempty"`
}

type CloudServiceDetails struct {
	CloudServiceSummary

	Overview string `json:"overview"` // markdown

	Assets CloudServiceAssets `json:"assets"`

	SupportedSignals SupportedSignals `json:"supported_signals"`

	DataCollected DataCollectedForService `json:"data_collected"`

	ConnectionStatus *CloudServiceConnectionStatus `json:"status,omitempty"`
}

type CloudServiceConfig struct {
	Logs    *CloudServiceLogsConfig    `json:"logs,omitempty"`
	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
}

// For serializing from db
func (c *CloudServiceConfig) Scan(src any) error {
	data, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("tried to scan from %T instead of bytes", src)
	}

	return json.Unmarshal(data, &c)
}

// For serializing to db
func (c *CloudServiceConfig) Value() (driver.Value, error) {
	if c == nil {
		return nil, nil
	}

	serialized, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't serialize cloud service config to JSON: %w", err,
		)
	}
	return serialized, nil
}

type CloudServiceLogsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceMetricsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceAssets struct {
	Dashboards []dashboards.Data `json:"dashboards"`
}

type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

type DataCollectedForService struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

type CloudServiceConnectionStatus struct {
	Logs    *SignalConnectionStatus `json:"logs"`
	Metrics *SignalConnectionStatus `json:"metrics"`
}

type SignalConnectionStatus struct {
	LastReceivedTsMillis int64  `json:"last_received_ts_ms"` // epoch milliseconds
	LastReceivedFrom     string `json:"last_received_from"`  // resource identifier
}
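The Scan/Value pair on CloudServiceConfig above is the standard trick for moving a struct through a TEXT column with database/sql or sqlx. A generic sketch of the same technique; the JSONColumn name is ours, and only the Scanner/Valuer pattern comes from the diff.

package example

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// JSONColumn wraps any value so it serializes to/from a JSON text column.
type JSONColumn[T any] struct{ V T }

// Scan implements sql.Scanner: decode the stored JSON bytes into V.
func (j *JSONColumn[T]) Scan(src any) error {
	data, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("tried to scan from %T instead of bytes", src)
	}
	return json.Unmarshal(data, &j.V)
}

// Value implements driver.Valuer: encode V as JSON for storage.
func (j JSONColumn[T]) Value() (driver.Value, error) {
	return json.Marshal(j.V)
}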
@@ -37,42 +37,11 @@ type cloudProviderAccountsRepository interface {
func newCloudProviderAccountsRepository(db *sqlx.DB) (
	*cloudProviderAccountsSQLRepository, error,
) {
	if err := initAccountsSqliteDBIfNeeded(db); err != nil {
		return nil, fmt.Errorf("could not init sqlite DB for cloudintegrations accounts: %w", err)
	}

	return &cloudProviderAccountsSQLRepository{
		db: db,
	}, nil
}

func initAccountsSqliteDBIfNeeded(db *sqlx.DB) error {
	if db == nil {
		return fmt.Errorf("db is required")
	}

	createTablesStatements := `
		CREATE TABLE IF NOT EXISTS cloud_integrations_accounts(
			cloud_provider TEXT NOT NULL,
			id TEXT NOT NULL,
			config_json TEXT,
			cloud_account_id TEXT,
			last_agent_report_json TEXT,
			created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
			removed_at TIMESTAMP,
			UNIQUE(cloud_provider, id)
		)
	`
	_, err := db.Exec(createTablesStatements)
	if err != nil {
		return fmt.Errorf(
			"could not ensure cloud provider accounts schema in sqlite DB: %w", err,
		)
	}

	return nil
}

type cloudProviderAccountsSQLRepository struct {
	db *sqlx.DB
}
@@ -1,198 +0,0 @@
package cloudintegrations

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/jmoiron/sqlx"
	"go.signoz.io/signoz/pkg/query-service/model"
)

type serviceConfigRepository interface {
	get(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
	) (*CloudServiceConfig, *model.ApiError)

	upsert(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
		config CloudServiceConfig,
	) (*CloudServiceConfig, *model.ApiError)

	getAllForAccount(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
	) (
		configsBySvcId map[string]*CloudServiceConfig,
		apiErr *model.ApiError,
	)
}

func newServiceConfigRepository(db *sqlx.DB) (
	*serviceConfigSQLRepository, error,
) {
	if err := initServiceConfigSqliteDBIfNeeded(db); err != nil {
		return nil, fmt.Errorf(
			"could not init sqlite DB for cloudintegrations service configs: %w", err,
		)
	}

	return &serviceConfigSQLRepository{
		db: db,
	}, nil
}

func initServiceConfigSqliteDBIfNeeded(db *sqlx.DB) error {
	if db == nil {
		return fmt.Errorf("db is required")
	}

	createTableStatement := `
		CREATE TABLE IF NOT EXISTS cloud_integrations_service_configs(
			cloud_provider TEXT NOT NULL,
			cloud_account_id TEXT NOT NULL,
			service_id TEXT NOT NULL,
			config_json TEXT,
			created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
			UNIQUE(cloud_provider, cloud_account_id, service_id)
		)
	`
	_, err := db.Exec(createTableStatement)
	if err != nil {
		return fmt.Errorf(
			"could not ensure cloud provider service configs schema in sqlite DB: %w", err,
		)
	}

	return nil
}

type serviceConfigSQLRepository struct {
	db *sqlx.DB
}

func (r *serviceConfigSQLRepository) get(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
) (*CloudServiceConfig, *model.ApiError) {
	var result CloudServiceConfig

	err := r.db.GetContext(
		ctx, &result, `
			select
				config_json
			from cloud_integrations_service_configs
			where
				cloud_provider=$1
				and cloud_account_id=$2
				and service_id=$3
		`,
		cloudProvider, cloudAccountId, serviceId,
	)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
			"couldn't find %s %s config for %s",
			cloudProvider, serviceId, cloudAccountId,
		))
	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query cloud service config: %w", err,
		))
	}

	return &result, nil
}

func (r *serviceConfigSQLRepository) upsert(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
	config CloudServiceConfig,
) (*CloudServiceConfig, *model.ApiError) {
	query := `
		INSERT INTO cloud_integrations_service_configs (
			cloud_provider,
			cloud_account_id,
			service_id,
			config_json
		) values ($1, $2, $3, $4)
		on conflict(cloud_provider, cloud_account_id, service_id)
			do update set config_json=excluded.config_json
	`
	_, dbErr := r.db.ExecContext(
		ctx, query,
		cloudProvider, cloudAccountId, serviceId, &config,
	)
	if dbErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not upsert cloud service config: %w", dbErr,
		))
	}

	upsertedConfig, apiErr := r.get(ctx, cloudProvider, cloudAccountId, serviceId)
	if apiErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't fetch upserted service config: %w", apiErr.ToError(),
		))
	}

	return upsertedConfig, nil
}

func (r *serviceConfigSQLRepository) getAllForAccount(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId string,
) (map[string]*CloudServiceConfig, *model.ApiError) {
	type ScannedServiceConfigRecord struct {
		ServiceId string             `db:"service_id"`
		Config    CloudServiceConfig `db:"config_json"`
	}

	records := []ScannedServiceConfigRecord{}

	err := r.db.SelectContext(
		ctx, &records, `
			select
				service_id,
				config_json
			from cloud_integrations_service_configs
			where
				cloud_provider=$1
				and cloud_account_id=$2
		`,
		cloudProvider, cloudAccountId,
	)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not query service configs from db: %w", err,
		))
	}

	result := map[string]*CloudServiceConfig{}

	for _, r := range records {
		result[r.ServiceId] = &r.Config
	}

	return result, nil
}
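One detail worth flagging in the deleted getAllForAccount above: before Go 1.22, the range variable r is a single reused variable, so storing &r.Config makes every map entry alias the last record scanned. A hedged sketch of the conventional fix, with the method-local types hoisted to package level purely for illustration:

package example

// Types stubbed from the diff above for illustration only.
type CloudServiceConfig struct{ /* ... */ }

type ScannedServiceConfigRecord struct {
	ServiceId string
	Config    CloudServiceConfig
}

func indexConfigs(records []ScannedServiceConfigRecord) map[string]*CloudServiceConfig {
	result := map[string]*CloudServiceConfig{}
	for _, rec := range records {
		rec := rec // pre-Go 1.22: copy before taking an address; 1.22+ makes this a no-op
		result[rec.ServiceId] = &rec.Config
	}
	return result
}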
@@ -1,11 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none">
	<path fill="#9D5025" d="M1.702 2.98L1 3.312v9.376l.702.332 2.842-4.777L1.702 2.98z" />
	<path fill="#F58536" d="M3.339 12.657l-1.637.363V2.98l1.637.353v9.324z" />
	<path fill="#9D5025" d="M2.476 2.612l.863-.406 4.096 6.216-4.096 5.372-.863-.406V2.612z" />
	<path fill="#F58536" d="M5.38 13.248l-2.041.546V2.206l2.04.548v10.494z" />
	<path fill="#9D5025" d="M4.3 1.75l1.08-.512 6.043 7.864-6.043 5.66-1.08-.511V1.749z" />
	<path fill="#F58536" d="M7.998 13.856l-2.618.906V1.238l2.618.908v11.71z" />
	<path fill="#9D5025" d="M6.602.66L7.998 0l6.538 8.453L7.998 16l-1.396-.66V.66z" />
	<path fill="#F58536" d="M15 12.686L7.998 16V0L15 3.314v9.372z" />
</svg>
(image diff — before: 805 B)
@@ -1,30 +0,0 @@
{
	"id": "ec2",
	"title": "EC2",
	"icon": "file://icon.svg",
	"overview": "file://overview.md",
	"assets": {
		"dashboards": []
	},
	"supported_signals": {
		"metrics": true,
		"logs": false
	},
	"data_collected": {
		"metrics": [
			{
				"name": "ec2_cpuutilization_average",
				"type": "Gauge",
				"unit": "number",
				"description": "CloudWatch metric CPUUtilization"
			},
			{
				"name": "ec2_cpuutilization_maximum",
				"type": "Gauge",
				"unit": "number",
				"description": "CloudWatch metric CPUUtilization"
			}
		],
		"logs": []
	}
}
@@ -1,3 +0,0 @@
### Monitor EC2 with SigNoz

Collect key EC2 metrics and view them with an out of the box dashboard.
@@ -1,21 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="80px" height="80px" viewBox="0 0 80 80" version="1.1" xmlns="http://www.w3.org/2000/svg"
	xmlns:xlink="http://www.w3.org/1999/xlink">
	<title>Icon-Architecture/64/Arch_Amazon-RDS_64</title>
	<desc>Created with Sketch.</desc>
	<defs>
		<linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="linearGradient-1">
			<stop stop-color="#2E27AD" offset="0%"></stop>
			<stop stop-color="#527FFF" offset="100%"></stop>
		</linearGradient>
	</defs>
	<g id="Icon-Architecture/64/Arch_Amazon-RDS_64" stroke="none" stroke-width="1" fill="none"
		fill-rule="evenodd">
		<g id="Icon-Architecture-BG/64/Database" fill="url(#linearGradient-1)">
			<rect id="Rectangle" x="0" y="0" width="80" height="80"></rect>
		</g>
		<path
d="M15.414,14 L24.707,23.293 L23.293,24.707 L14,15.414 L14,23 L12,23 L12,13 C12,12.448 12.447,12 13,12 L23,12 L23,14 L15.414,14 Z M68,13 L68,23 L66,23 L66,15.414 L56.707,24.707 L55.293,23.293 L64.586,14 L57,14 L57,12 L67,12 C67.553,12 68,12.448 68,13 L68,13 Z M66,57 L68,57 L68,67 C68,67.552 67.553,68 67,68 L57,68 L57,66 L64.586,66 L55.293,56.707 L56.707,55.293 L66,64.586 L66,57 Z M65.5,39.213 C65.5,35.894 61.668,32.615 55.25,30.442 L55.891,28.548 C63.268,31.045 67.5,34.932 67.5,39.213 C67.5,43.495 63.268,47.383 55.89,49.879 L55.249,47.984 C61.668,45.812 65.5,42.534 65.5,39.213 L65.5,39.213 Z M14.556,39.213 C14.556,42.393 18.143,45.585 24.152,47.753 L23.473,49.634 C16.535,47.131 12.556,43.333 12.556,39.213 C12.556,35.094 16.535,31.296 23.473,28.792 L24.152,30.673 C18.143,32.842 14.556,36.034 14.556,39.213 L14.556,39.213 Z M24.707,56.707 L15.414,66 L23,66 L23,68 L13,68 C12.447,68 12,67.552 12,67 L12,57 L14,57 L14,64.586 L23.293,55.293 L24.707,56.707 Z M40,31.286 C32.854,31.286 29,29.44 29,28.686 C29,27.931 32.854,26.086 40,26.086 C47.145,26.086 51,27.931 51,28.686 C51,29.44 47.145,31.286 40,31.286 L40,31.286 Z M40.029,39.031 C33.187,39.031 29,37.162 29,36.145 L29,31.284 C31.463,32.643 35.832,33.286 40,33.286 C44.168,33.286 48.537,32.643 51,31.284 L51,36.145 C51,37.163 46.835,39.031 40.029,39.031 L40.029,39.031 Z M40.029,46.667 C33.187,46.667 29,44.798 29,43.781 L29,38.862 C31.431,40.291 35.742,41.031 40.029,41.031 C44.292,41.031 48.578,40.292 51,38.867 L51,43.781 C51,44.799 46.835,46.667 40.029,46.667 L40.029,46.667 Z M40,53.518 C32.883,53.518 29,51.605 29,50.622 L29,46.498 C31.431,47.927 35.742,48.667 40.029,48.667 C44.292,48.667 48.578,47.929 51,46.503 L51,50.622 C51,51.605 47.117,53.518 40,53.518 L40,53.518 Z M40,24.086 C33.739,24.086 27,25.525 27,28.686 L27,50.622 C27,53.836 33.54,55.518 40,55.518 C46.46,55.518 53,53.836 53,50.622 L53,28.686 C53,25.525 46.261,24.086 40,24.086 L40,24.086 Z"
		id="Amazon-RDS_Icon_64_Squid" fill="#FFFFFF"></path>
	</g>
</svg>
(image diff — before: 2.7 KiB)
@@ -1,30 +0,0 @@
{
	"id": "rds-postgres",
	"title": "RDS Postgres",
	"icon": "file://icon.svg",
	"overview": "file://overview.md",
	"assets": {
		"dashboards": []
	},
	"supported_signals": {
		"metrics": true,
		"logs": true
	},
	"data_collected": {
		"metrics": [
			{
				"name": "rds_postgres_cpuutilization_average",
				"type": "Gauge",
				"unit": "number",
				"description": "CloudWatch metric CPUUtilization"
			},
			{
				"name": "rds_postgres_cpuutilization_maximum",
				"type": "Gauge",
				"unit": "number",
				"description": "CloudWatch metric CPUUtilization"
			}
		],
		"logs": []
	}
}
@@ -1,3 +0,0 @@
### Monitor RDS Postgres with SigNoz

Collect key RDS Postgres metrics and view them with an out of the box dashboard.
@@ -35,126 +35,10 @@ var (
)

// InitDB sets up the connection pool global variable.
func InitDB(dataSourceName string) (*sqlx.DB, error) {
    var err error

    db, err = sqlx.Open("sqlite3", dataSourceName)
    if err != nil {
        return nil, err
    }

    table_schema := `CREATE TABLE IF NOT EXISTS dashboards (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        uuid TEXT NOT NULL UNIQUE,
        created_at datetime NOT NULL,
        updated_at datetime NOT NULL,
        data TEXT NOT NULL
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return nil, fmt.Errorf("error in creating dashboard table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS rules (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        updated_at datetime NOT NULL,
        deleted INTEGER DEFAULT 0,
        data TEXT NOT NULL
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return nil, fmt.Errorf("error in creating rules table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS notification_channels (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        created_at datetime NOT NULL,
        updated_at datetime NOT NULL,
        name TEXT NOT NULL UNIQUE,
        type TEXT NOT NULL,
        deleted INTEGER DEFAULT 0,
        data TEXT NOT NULL
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return nil, fmt.Errorf("error in creating notification_channels table: %s", err.Error())
    }

    tableSchema := `CREATE TABLE IF NOT EXISTS planned_maintenance (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        description TEXT,
        alert_ids TEXT,
        schedule TEXT NOT NULL,
        created_at datetime NOT NULL,
        created_by TEXT NOT NULL,
        updated_at datetime NOT NULL,
        updated_by TEXT NOT NULL
    );`
    _, err = db.Exec(tableSchema)
    if err != nil {
        return nil, fmt.Errorf("error in creating planned_maintenance table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS ttl_status (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        transaction_id TEXT NOT NULL,
        created_at datetime NOT NULL,
        updated_at datetime NOT NULL,
        table_name TEXT NOT NULL,
        ttl INTEGER DEFAULT 0,
        cold_storage_ttl INTEGER DEFAULT 0,
        status TEXT NOT NULL
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return nil, fmt.Errorf("error in creating ttl_status table: %s", err.Error())
    }

    // sqlite does not support "IF NOT EXISTS"
    createdAt := `ALTER TABLE rules ADD COLUMN created_at datetime;`
    _, err = db.Exec(createdAt)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column created_at to rules table: %s", err.Error())
    }

    createdBy := `ALTER TABLE rules ADD COLUMN created_by TEXT;`
    _, err = db.Exec(createdBy)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column created_by to rules table: %s", err.Error())
    }

    updatedBy := `ALTER TABLE rules ADD COLUMN updated_by TEXT;`
    _, err = db.Exec(updatedBy)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column updated_by to rules table: %s", err.Error())
    }

    createdBy = `ALTER TABLE dashboards ADD COLUMN created_by TEXT;`
    _, err = db.Exec(createdBy)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column created_by to dashboards table: %s", err.Error())
    }

    updatedBy = `ALTER TABLE dashboards ADD COLUMN updated_by TEXT;`
    _, err = db.Exec(updatedBy)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column updated_by to dashboards table: %s", err.Error())
    }

    locked := `ALTER TABLE dashboards ADD COLUMN locked INTEGER DEFAULT 0;`
    _, err = db.Exec(locked)
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return nil, fmt.Errorf("error in adding column locked to dashboards table: %s", err.Error())
    }

// @deprecated
func InitDB(inputDB *sqlx.DB) {
    db = inputDB
    telemetry.GetInstance().SetDashboardsInfoCallback(GetDashboardsInfo)

    return db, nil
}

type Dashboard struct {
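The ALTER TABLE calls in the removed code rely on one pattern worth noting: SQLite has no ADD COLUMN IF NOT EXISTS, so the code runs the statement unconditionally and ignores only the "duplicate column name" error, making the migration safe to rerun on every startup. A standalone sketch of that pattern; the addColumnIfMissing helper is illustrative, and the mattn/go-sqlite3 driver is an assumption:

package main

import (
    "fmt"
    "strings"

    "github.com/jmoiron/sqlx"
    _ "github.com/mattn/go-sqlite3" // assumed driver; any sqlite3 driver works
)

// addColumnIfMissing runs ALTER TABLE unconditionally and swallows only the
// error SQLite reports for an existing column, so reruns are harmless.
func addColumnIfMissing(db *sqlx.DB, table, columnDef string) error {
    _, err := db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s;", table, columnDef))
    if err != nil && !strings.Contains(err.Error(), "duplicate column name") {
        return fmt.Errorf("error adding column to %s: %w", table, err)
    }
    return nil
}

func main() {
    db := sqlx.MustOpen("sqlite3", ":memory:")
    db.MustExec(`CREATE TABLE rules (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT);`)

    // Running the "migration" twice is a no-op the second time.
    for i := 0; i < 2; i++ {
        if err := addColumnIfMissing(db, "rules", "created_by TEXT"); err != nil {
            panic(err)
        }
    }
    fmt.Println("idempotent")
}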
@@ -33,41 +33,9 @@ type SavedView struct {
    ExtraData string `json:"extra_data" db:"extra_data"`
}

// InitWithDSN sets up the connection pool global variable.
func InitWithDSN(dataSourceName string) (*sqlx.DB, error) {
    var err error

    db, err = sqlx.Open("sqlite3", dataSourceName)
    if err != nil {
        return nil, err
    }

    tableSchema := `CREATE TABLE IF NOT EXISTS saved_views (
        uuid TEXT PRIMARY KEY,
        name TEXT NOT NULL,
        category TEXT NOT NULL,
        created_at datetime NOT NULL,
        created_by TEXT,
        updated_at datetime NOT NULL,
        updated_by TEXT,
        source_page TEXT NOT NULL,
        tags TEXT,
        data TEXT NOT NULL,
        extra_data TEXT
    );`

    _, err = db.Exec(tableSchema)
    if err != nil {
        return nil, fmt.Errorf("error in creating saved views table: %s", err.Error())
    }

    telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)

    return db, nil
}

func InitWithDB(sqlDB *sqlx.DB) {
    db = sqlDB
    telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)
}

func GetViews() ([]*v3.SavedView, error) {
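The InitWithDB variant kept by this diff swaps DSN-based initialization for injection of an already-open handle, so one process-wide pool can be shared across packages instead of each package calling sqlx.Open on its own. A minimal sketch of that shape; the store package name is illustrative:

package store

import "github.com/jmoiron/sqlx"

// package-level handle, set once at startup by the caller
var db *sqlx.DB

// InitWithDB injects an already-open connection pool instead of opening
// a new one from a DSN; every package shares the single process-wide DB.
func InitWithDB(sqlDB *sqlx.DB) {
    db = sqlDB
}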
@@ -3902,18 +3902,6 @@ func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *Au
        "/{cloudProvider}/agent-check-in", am.EditAccess(aH.CloudIntegrationsAgentCheckIn),
    ).Methods(http.MethodPost)

    subRouter.HandleFunc(
        "/{cloudProvider}/services", am.ViewAccess(aH.CloudIntegrationsListServices),
    ).Methods(http.MethodGet)

    subRouter.HandleFunc(
        "/{cloudProvider}/services/{serviceId}", am.ViewAccess(aH.CloudIntegrationsGetServiceDetails),
    ).Methods(http.MethodGet)

    subRouter.HandleFunc(
        "/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
    ).Methods(http.MethodPost)
}

func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(

@@ -4037,77 +4025,6 @@ func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
    aH.Respond(w, result)
}

func (aH *APIHandler) CloudIntegrationsListServices(
    w http.ResponseWriter, r *http.Request,
) {
    cloudProvider := mux.Vars(r)["cloudProvider"]

    var cloudAccountId *string

    cloudAccountIdQP := r.URL.Query().Get("cloud_account_id")
    if len(cloudAccountIdQP) > 0 {
        cloudAccountId = &cloudAccountIdQP
    }

    resp, apiErr := aH.CloudIntegrationsController.ListServices(
        r.Context(), cloudProvider, cloudAccountId,
    )

    if apiErr != nil {
        RespondError(w, apiErr, nil)
        return
    }
    aH.Respond(w, resp)
}

func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
    w http.ResponseWriter, r *http.Request,
) {
    cloudProvider := mux.Vars(r)["cloudProvider"]
    serviceId := mux.Vars(r)["serviceId"]

    var cloudAccountId *string

    cloudAccountIdQP := r.URL.Query().Get("cloud_account_id")
    if len(cloudAccountIdQP) > 0 {
        cloudAccountId = &cloudAccountIdQP
    }

    resp, apiErr := aH.CloudIntegrationsController.GetServiceDetails(
        r.Context(), cloudProvider, serviceId, cloudAccountId,
    )

    if apiErr != nil {
        RespondError(w, apiErr, nil)
        return
    }
    aH.Respond(w, resp)
}

func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
    w http.ResponseWriter, r *http.Request,
) {
    cloudProvider := mux.Vars(r)["cloudProvider"]
    serviceId := mux.Vars(r)["serviceId"]

    req := cloudintegrations.UpdateServiceConfigRequest{}
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    result, apiErr := aH.CloudIntegrationsController.UpdateServiceConfig(
        r.Context(), cloudProvider, serviceId, req,
    )

    if apiErr != nil {
        RespondError(w, apiErr, nil)
        return
    }

    aH.Respond(w, result)
}

// logs
func (aH *APIHandler) RegisterLogsRoutes(router *mux.Router, am *AuthMiddleware) {
    subRouter := router.PathPrefix("/api/v1/logs").Subrouter()
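The three removed handlers share one convention: the optional cloud_account_id query parameter becomes a *string that stays nil when absent, letting downstream code distinguish "not supplied" from an empty value. A self-contained sketch of that pattern with gorilla/mux; the listServices handler and the port are illustrative:

package main

import (
    "fmt"
    "net/http"

    "github.com/gorilla/mux"
)

// listServices is an illustrative handler, not the SigNoz one.
func listServices(w http.ResponseWriter, r *http.Request) {
    cloudProvider := mux.Vars(r)["cloudProvider"]

    // Optional query parameter: a nil pointer means "not supplied",
    // which callers can distinguish from an empty string.
    var cloudAccountId *string
    if qp := r.URL.Query().Get("cloud_account_id"); len(qp) > 0 {
        cloudAccountId = &qp
    }

    if cloudAccountId == nil {
        fmt.Fprintf(w, "services for all %s accounts\n", cloudProvider)
        return
    }
    fmt.Fprintf(w, "services for %s account %s\n", cloudProvider, *cloudAccountId)
}

func main() {
    router := mux.NewRouter()
    router.HandleFunc("/{cloudProvider}/services", listServices).Methods(http.MethodGet)
    http.ListenAndServe(":8080", router)
}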
@@ -338,7 +338,5 @@ func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterList
    resp.Total = len(allClusterGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -440,7 +440,5 @@ func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonS
    resp.Total = len(allDaemonSetGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -440,7 +440,5 @@ func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.Deplo
    resp.Total = len(allDeploymentGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -494,7 +494,5 @@ func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (mo
    resp.Total = len(allJobGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -341,7 +341,5 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.Namespa
    resp.Total = len(allNamespaceGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}
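Each list endpoint in these hunks assembles its records and then delegates ordering to a SortBy method on the response, keyed by the request's OrderBy. A hedged sketch of what such a method might look like; the record fields and column names are illustrative, not the query-service model:

package main

import (
    "fmt"
    "sort"
)

type ClusterRecord struct {
    Name     string
    CPUUsage float64
}

type ClusterListResponse struct {
    Records []ClusterRecord
    Total   int
}

// SortBy orders the records in place by the requested column; an unknown
// column leaves the slice in its original order.
func (r *ClusterListResponse) SortBy(orderBy string) {
    switch orderBy {
    case "cpu":
        sort.Slice(r.Records, func(i, j int) bool {
            return r.Records[i].CPUUsage > r.Records[j].CPUUsage
        })
    case "name":
        sort.Slice(r.Records, func(i, j int) bool {
            return r.Records[i].Name < r.Records[j].Name
        })
    }
}

func main() {
    resp := ClusterListResponse{
        Records: []ClusterRecord{{Name: "b", CPUUsage: 0.2}, {Name: "a", CPUUsage: 0.9}},
    }
    resp.Total = len(resp.Records)
    resp.SortBy("cpu")
    fmt.Println(resp.Records) // [{a 0.9} {b 0.2}]
}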
@@ -19,7 +19,7 @@ var (

    nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid", "k8s_cluster_name"}

    k8sNodeGroupAttrKey = "k8s_node_name"
    k8sNodeUIDAttrKey = "k8s_node_uid"

    queryNamesForNodes = map[string][]string{
        "cpu": {"A"},

@@ -125,7 +125,7 @@ func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeLis
        }
    }

    nodeUID := stringData[k8sNodeGroupAttrKey]
    nodeUID := stringData[k8sNodeUIDAttrKey]
    if _, ok := nodeAttrs[nodeUID]; !ok {
        nodeAttrs[nodeUID] = map[string]string{}
    }

@@ -220,7 +220,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
    }

    if req.GroupBy == nil {
        req.GroupBy = []v3.AttributeKey{{Key: k8sNodeGroupAttrKey}}
        req.GroupBy = []v3.AttributeKey{{Key: k8sNodeUIDAttrKey}}
        resp.Type = model.ResponseTypeList
    } else {
        resp.Type = model.ResponseTypeGroupedList

@@ -306,7 +306,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
        NodeMemoryAllocatable: -1,
    }

    if nodeUID, ok := row.Data[k8sNodeGroupAttrKey].(string); ok {
    if nodeUID, ok := row.Data[k8sNodeUIDAttrKey].(string); ok {
        record.NodeUID = nodeUID
    }

@@ -354,6 +354,5 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
    resp.Total = len(allNodeGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)
    return resp, nil
}

@@ -20,7 +20,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },

@@ -46,7 +46,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },

@@ -72,7 +72,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },

@@ -98,7 +98,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },

@@ -132,7 +132,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },

@@ -166,7 +166,7 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
    },
    GroupBy: []v3.AttributeKey{
        {
            Key: k8sNodeGroupAttrKey,
            Key: k8sNodeUIDAttrKey,
            DataType: v3.AttributeKeyDataTypeString,
            Type: v3.AttributeKeyTypeResource,
        },
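The recurring one-line change in this file swaps the default grouping key from the node's name to its UID, presumably because node names need not be unique across everything a single deployment monitors while UIDs are. A small illustrative mirror of that default; these are not the project's actual types:

package k8s

// Illustrative mirrors of the constants in this diff.
const (
    k8sNodeNameAttrKey = "k8s_node_name"
    k8sNodeUIDAttrKey  = "k8s_node_uid"
)

type AttributeKey struct{ Key string }

// defaultGroupBy mirrors the req.GroupBy == nil branch above: callers who
// do not group explicitly get one row per node UID.
func defaultGroupBy(groupBy []AttributeKey) []AttributeKey {
    if groupBy == nil {
        return []AttributeKey{{Key: k8sNodeUIDAttrKey}}
    }
    return groupBy
}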
@@ -404,7 +404,5 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (mo
    resp.Total = len(allPodGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -374,7 +374,5 @@ func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest)
    resp.Total = len(allVolumeGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}

@@ -440,7 +440,5 @@ func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.Sta
    resp.Total = len(allStatefulSetGroups)
    resp.Records = records

    resp.SortBy(req.OrderBy)

    return resp, nil
}
@@ -105,7 +105,7 @@ func readBuiltInIntegration(dirpath string) (
    )
}

    hydrated, err := HydrateFileUris(integrationSpec, integrationFiles, dirpath)
    hydrated, err := hydrateFileUris(integrationSpec, dirpath)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't hydrate files referenced in integration %s: %w", integrationJsonPath, err,

@@ -172,11 +172,11 @@ func validateIntegration(i IntegrationDetails) error {
    return nil
}

func HydrateFileUris(spec interface{}, fs embed.FS, basedir string) (interface{}, error) {
func hydrateFileUris(spec interface{}, basedir string) (interface{}, error) {
    if specMap, ok := spec.(map[string]interface{}); ok {
        result := map[string]interface{}{}
        for k, v := range specMap {
            hydrated, err := HydrateFileUris(v, fs, basedir)
            hydrated, err := hydrateFileUris(v, basedir)
            if err != nil {
                return nil, err
            }

@@ -187,7 +187,7 @@ func HydrateFileUris(spec interface{}, fs embed.FS, basedir string) (interface{}
    } else if specSlice, ok := spec.([]interface{}); ok {
        result := []interface{}{}
        for _, v := range specSlice {
            hydrated, err := HydrateFileUris(v, fs, basedir)
            hydrated, err := hydrateFileUris(v, basedir)
            if err != nil {
                return nil, err
            }

@@ -196,14 +196,14 @@ func HydrateFileUris(spec interface{}, fs embed.FS, basedir string) (interface{}
        return result, nil

    } else if maybeFileUri, ok := spec.(string); ok {
        return readFileIfUri(fs, maybeFileUri, basedir)
        return readFileIfUri(maybeFileUri, basedir)
    }

    return spec, nil

}

func readFileIfUri(fs embed.FS, maybeFileUri string, basedir string) (interface{}, error) {
func readFileIfUri(maybeFileUri string, basedir string) (interface{}, error) {
    fileUriPrefix := "file://"
    if !strings.HasPrefix(maybeFileUri, fileUriPrefix) {
        return maybeFileUri, nil

@@ -212,7 +212,7 @@ func readFileIfUri(fs embed.FS, maybeFileUri string, basedir string) (interface{
    relativePath := maybeFileUri[len(fileUriPrefix):]
    fullPath := path.Join(basedir, relativePath)

    fileContents, err := fs.ReadFile(fullPath)
    fileContents, err := integrationFiles.ReadFile(fullPath)
    if err != nil {
        return nil, fmt.Errorf("couldn't read referenced file: %w", err)
    }
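hydrateFileUris walks an arbitrary decoded-JSON value (maps, slices, strings) and replaces any file:// string with the contents of the referenced file; this diff merely binds it to the package's embedded integrationFiles instead of threading an embed.FS parameter through. A self-contained sketch of the recursion, with the file reader abstracted as a callback so it runs without an embedded filesystem (assumes Go 1.20+ for strings.CutPrefix):

package main

import (
    "fmt"
    "strings"
)

// hydrate recursively replaces "file://..." strings in a decoded-JSON value
// with whatever readFile returns for the referenced path.
func hydrate(spec interface{}, readFile func(string) (string, error)) (interface{}, error) {
    switch v := spec.(type) {
    case map[string]interface{}:
        out := map[string]interface{}{}
        for k, item := range v {
            h, err := hydrate(item, readFile)
            if err != nil {
                return nil, err
            }
            out[k] = h
        }
        return out, nil
    case []interface{}:
        out := make([]interface{}, 0, len(v))
        for _, item := range v {
            h, err := hydrate(item, readFile)
            if err != nil {
                return nil, err
            }
            out = append(out, h)
        }
        return out, nil
    case string:
        if rest, ok := strings.CutPrefix(v, "file://"); ok {
            contents, err := readFile(rest)
            if err != nil {
                return nil, err
            }
            return contents, nil
        }
        return v, nil
    default:
        return spec, nil
    }
}

func main() {
    spec := map[string]interface{}{
        "id":       "rds-postgres",
        "overview": "file://overview.md",
    }
    hydrated, _ := hydrate(spec, func(p string) (string, error) {
        return "<contents of " + p + ">", nil // stand-in for an embed.FS read
    })
    fmt.Println(hydrated)
}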
@@ -123,12 +123,7 @@ type Manager struct {
}

func NewManager(db *sqlx.DB) (*Manager, error) {
    iiRepo, err := NewInstalledIntegrationsSqliteRepo(db)
    if err != nil {
        return nil, fmt.Errorf(
            "could not init sqlite DB for installed integrations: %w", err,
        )
    }
    iiRepo := NewInstalledIntegrationsSqliteRepo(db)

    return &Manager{
        availableIntegrationsRepo: &BuiltInIntegrations{},

@@ -9,45 +9,14 @@ import (
    "go.signoz.io/signoz/pkg/query-service/model"
)

func InitSqliteDBIfNeeded(db *sqlx.DB) error {
    if db == nil {
        return fmt.Errorf("db is required")
    }

    createTablesStatements := `
        CREATE TABLE IF NOT EXISTS integrations_installed(
            integration_id TEXT PRIMARY KEY,
            config_json TEXT,
            installed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
    `
    _, err := db.Exec(createTablesStatements)
    if err != nil {
        return fmt.Errorf(
            "could not ensure integrations schema in sqlite DB: %w", err,
        )
    }

    return nil
}

type InstalledIntegrationsSqliteRepo struct {
    db *sqlx.DB
}

func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) (
    *InstalledIntegrationsSqliteRepo, error,
) {
    err := InitSqliteDBIfNeeded(db)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't ensure sqlite schema for installed integrations: %w", err,
        )
    }

func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) *InstalledIntegrationsSqliteRepo {
    return &InstalledIntegrationsSqliteRepo{
        db: db,
    }, nil
}
}

func (r *InstalledIntegrationsSqliteRepo) list(

@@ -15,11 +15,7 @@ import (

func NewTestIntegrationsManager(t *testing.T) *Manager {
    testDB := utils.NewQueryServiceDBForTests(t)

    installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB)
    if err != nil {
        t.Fatalf("could not init sqlite DB for installed integrations: %v", err)
    }
    installedIntegrationsRepo := NewInstalledIntegrationsSqliteRepo(testDB)

    return &Manager{
        availableIntegrationsRepo: &TestAvailableIntegrationsRepo{},
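With schema creation moved out of the repository (InitSqliteDBIfNeeded is gone), construction becomes infallible and the constructor drops its error return, which is what lets NewManager and the test helper shed their error handling. A before/after sketch of that API shape; Repo is an illustrative stand-in:

package repo

import "github.com/jmoiron/sqlx"

// Repo is an illustrative stand-in for InstalledIntegrationsSqliteRepo.
type Repo struct{ db *sqlx.DB }

// Before: the constructor also ensured the schema and so could fail:
//
//     func NewRepo(db *sqlx.DB) (*Repo, error)
//
// After: the schema is ensured once at startup elsewhere, so construction
// just wraps the handle and cannot fail.
func NewRepo(db *sqlx.DB) *Repo {
    return &Repo{db: db}
}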
Some files were not shown because too many files have changed in this diff.