Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-26 02:12:34 +00:00)

Compare commits: pipelinesv...move-pkg
6 commits:

- a8b28edaf2
- fdbbbaa43b
- 7605775a38
- cb1a2a8a13
- 1a5d37b25a
- bc4273f2f8
.github/workflows/integrationci.yaml (vendored): 2 changes
@@ -54,7 +54,7 @@ jobs:
         - sqlite
       clickhouse-version:
         - 25.5.6
-        - 25.10.5
+        - 25.12.5
       schema-migrator-version:
         - v0.142.0
       postgres-version:
@@ -26,7 +26,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/utils/times"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"

-	"github.com/SigNoz/signoz/pkg/query-service/formatter"
+	"github.com/SigNoz/signoz/pkg/formatter"

 	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
@@ -308,3 +308,15 @@ export const PublicDashboardPage = Loadable(
 		/* webpackChunkName: "Public Dashboard Page" */ 'pages/PublicDashboard'
 	),
 );
+
+export const AlertTypeSelectionPage = Loadable(
+	() =>
+		import(
+			/* webpackChunkName: "Alert Type Selection Page" */ 'pages/AlertTypeSelection'
+		),
+);
+
+export const MeterExplorerPage = Loadable(
+	() =>
+		import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
+);
@@ -1,12 +1,10 @@
 import { RouteProps } from 'react-router-dom';

 import ROUTES from 'constants/routes';
-import AlertTypeSelectionPage from 'pages/AlertTypeSelection';
-import MessagingQueues from 'pages/MessagingQueues';
-import MeterExplorer from 'pages/MeterExplorer';

 import {
 	AlertHistory,
 	AlertOverview,
+	AlertTypeSelectionPage,
 	AllAlertChannels,
 	AllErrors,
 	ApiMonitoring,

@@ -29,6 +27,8 @@ import {
 	LogsExplorer,
 	LogsIndexToFields,
 	LogsSaveViews,
+	MessagingQueuesMainPage,
+	MeterExplorerPage,
 	MetricsExplorer,
 	OldLogsExplorer,
 	Onboarding,
@@ -399,28 +399,28 @@ const routes: AppRoutes[] = [
 	{
 		path: ROUTES.MESSAGING_QUEUES_KAFKA,
 		exact: true,
-		component: MessagingQueues,
+		component: MessagingQueuesMainPage,
 		key: 'MESSAGING_QUEUES_KAFKA',
 		isPrivate: true,
 	},
 	{
 		path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
 		exact: true,
-		component: MessagingQueues,
+		component: MessagingQueuesMainPage,
 		key: 'MESSAGING_QUEUES_CELERY_TASK',
 		isPrivate: true,
 	},
 	{
 		path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
 		exact: true,
-		component: MessagingQueues,
+		component: MessagingQueuesMainPage,
 		key: 'MESSAGING_QUEUES_OVERVIEW',
 		isPrivate: true,
 	},
 	{
 		path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
 		exact: true,
-		component: MessagingQueues,
+		component: MessagingQueuesMainPage,
 		key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
 		isPrivate: true,
 	},

@@ -463,21 +463,21 @@ const routes: AppRoutes[] = [
 	{
 		path: ROUTES.METER,
 		exact: true,
-		component: MeterExplorer,
+		component: MeterExplorerPage,
 		key: 'METER',
 		isPrivate: true,
 	},
 	{
 		path: ROUTES.METER_EXPLORER,
 		exact: true,
-		component: MeterExplorer,
+		component: MeterExplorerPage,
 		key: 'METER_EXPLORER',
 		isPrivate: true,
 	},
 	{
 		path: ROUTES.METER_EXPLORER_VIEWS,
 		exact: true,
-		component: MeterExplorer,
+		component: MeterExplorerPage,
 		key: 'METER_EXPLORER_VIEWS',
 		isPrivate: true,
 	},
Deleted file @@ -1,12 +0,0 @@

package signozapiserver

import (
	"net/http"

	"github.com/gorilla/mux"
)

func (provider *provider) addLogspipelineRoutes(router *mux.Router) error {
	router.HandleFunc("/api/v2/pipelines", provider.logspipelineHandler.ListPipelines).Methods(http.MethodGet)
	return nil
}
@@ -14,7 +14,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/authdomain"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/fields"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline"
 	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/preference"

@@ -49,7 +48,6 @@ type provider struct {
 	authzHandler        authz.Handler
 	zeusHandler         zeus.Handler
 	querierHandler      querier.Handler
-	logspipelineHandler logspipeline.Handler
 }

 func NewFactory(

@@ -71,7 +69,6 @@ func NewFactory(
 	authzHandler authz.Handler,
 	zeusHandler zeus.Handler,
 	querierHandler querier.Handler,
-	logspipelineHandler logspipeline.Handler,
 ) factory.ProviderFactory[apiserver.APIServer, apiserver.Config] {
 	return factory.NewProviderFactory(factory.MustNewName("signoz"), func(ctx context.Context, providerSettings factory.ProviderSettings, config apiserver.Config) (apiserver.APIServer, error) {
 		return newProvider(

@@ -96,7 +93,6 @@ func NewFactory(
 			authzHandler,
 			zeusHandler,
 			querierHandler,
-			logspipelineHandler,
 		)
 	})
 }

@@ -123,7 +119,6 @@ func newProvider(
 	authzHandler authz.Handler,
 	zeusHandler zeus.Handler,
 	querierHandler querier.Handler,
-	logspipelineHandler logspipeline.Handler,
 ) (apiserver.APIServer, error) {
 	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/apiserver/signozapiserver")
 	router := mux.NewRouter().UseEncodedPath()

@@ -148,7 +143,6 @@ func newProvider(
 		authzHandler:   authzHandler,
 		zeusHandler:    zeusHandler,
 		querierHandler: querierHandler,
-		logspipelineHandler: logspipelineHandler,
 	}

 	provider.authZ = middleware.NewAuthZ(settings.Logger(), orgGetter, authz)

@@ -229,14 +223,9 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
 		return err
 	}

-	if err := provider.addLogspipelineRoutes(router); err != nil {
-		return err
-	}
-
 	return nil
 }

 func newSecuritySchemes(role types.Role) []handler.OpenAPISecurityScheme {
 	return []handler.OpenAPISecurityScheme{
 		{Name: ctxtypes.AuthTypeAPIKey.StringValue(), Scopes: []string{role.String()}},
@@ -3,7 +3,7 @@ package formatter
 import (
 	"fmt"

-	"github.com/SigNoz/signoz/pkg/query-service/converter"
+	"github.com/SigNoz/signoz/pkg/converter"
 	"github.com/dustin/go-humanize"
 )

@@ -3,7 +3,7 @@ package formatter
 import (
 	"fmt"

-	"github.com/SigNoz/signoz/pkg/query-service/converter"
+	"github.com/SigNoz/signoz/pkg/converter"
 	"github.com/dustin/go-humanize"
 )
@@ -78,9 +78,8 @@ func toFixed(value float64, decimals DecimalCount) string {
 	}

 	decimalPos := strings.Index(formatted, ".")
-	precision := 0
 	if decimalPos != -1 {
-		precision = len(formatted) - decimalPos - 1
+		precision := len(formatted) - decimalPos - 1
 		if precision < *decimals {
 			return formatted + strings.Repeat("0", *decimals-precision)
 		}

@@ -89,8 +88,8 @@ func toFixed(value float64, decimals DecimalCount) string {
 	return formatted
 }

-func toFixedScaled(value float64, decimals DecimalCount, scaleFormat string) string {
-	return toFixed(value, decimals) + scaleFormat
+func toFixedScaled(value float64, scaleFormat string) string {
+	return toFixed(value, nil) + scaleFormat
 }

 func getDecimalsForValue(value float64) int {
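Note: the refactor above is mechanical. Every call site already passed nil for decimals, so the parameter is dropped and nil is hard-coded inside toFixedScaled. A minimal standalone sketch of the new shape (toFixed here is a simplified stand-in, not the package's exact implementation):

package main

import (
	"fmt"
	"strconv"
)

// toFixed is a simplified stand-in for the formatter helper: a nil decimals
// pointer means "use the value's natural precision".
func toFixed(value float64, decimals *int) string {
	if decimals == nil {
		return strconv.FormatFloat(value, 'f', -1, 64)
	}
	return strconv.FormatFloat(value, 'f', *decimals, 64)
}

// New shape of toFixedScaled: the decimals parameter is gone because every
// caller passed nil anyway, so nil is now fixed internally.
func toFixedScaled(value float64, scaleFormat string) string {
	return toFixed(value, nil) + scaleFormat
}

func main() {
	// 2000 ns reads better as 2 µs, matching the duration formatter's comments.
	fmt.Println(toFixedScaled(2000.0/1000, " µs"))
}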
@@ -13,35 +13,35 @@ func (*throughputFormatter) Name() string {
 	return "throughput"
 }

-func simpleCountUnit(value float64, decimals *int, symbol string) string {
+func simpleCountUnit(value float64, symbol string) string {
 	units := []string{"", "K", "M", "B", "T"}
 	scaler := scaledUnits(1000, units, 0)

-	return scaler(value, decimals) + " " + symbol
+	return scaler(value, nil) + " " + symbol
 }

 func (f *throughputFormatter) Format(value float64, unit string) string {
 	switch unit {
 	case "cps", "{count}/s":
-		return simpleCountUnit(value, nil, "c/s")
+		return simpleCountUnit(value, "c/s")
 	case "ops", "{ops}/s":
-		return simpleCountUnit(value, nil, "op/s")
+		return simpleCountUnit(value, "op/s")
 	case "reqps", "{req}/s":
-		return simpleCountUnit(value, nil, "req/s")
+		return simpleCountUnit(value, "req/s")
 	case "rps", "{read}/s":
-		return simpleCountUnit(value, nil, "r/s")
+		return simpleCountUnit(value, "r/s")
 	case "wps", "{write}/s":
-		return simpleCountUnit(value, nil, "w/s")
+		return simpleCountUnit(value, "w/s")
 	case "iops", "{iops}/s":
-		return simpleCountUnit(value, nil, "iops")
+		return simpleCountUnit(value, "iops")
 	case "cpm", "{count}/min":
-		return simpleCountUnit(value, nil, "c/m")
+		return simpleCountUnit(value, "c/m")
 	case "opm", "{ops}/min":
-		return simpleCountUnit(value, nil, "op/m")
+		return simpleCountUnit(value, "op/m")
 	case "rpm", "{read}/min":
-		return simpleCountUnit(value, nil, "r/m")
+		return simpleCountUnit(value, "r/m")
 	case "wpm", "{write}/min":
-		return simpleCountUnit(value, nil, "w/m")
+		return simpleCountUnit(value, "w/m")
 	}
 	// When unit is not matched, return the value as it is.
 	return fmt.Sprintf("%v", value)
@@ -46,17 +46,17 @@ func toNanoSeconds(value float64) string {
 	if absValue < 1000 {
 		return toFixed(value, nil) + " ns"
 	} else if absValue < 1000000 { // 2000 ns is better represented as 2 µs
-		return toFixedScaled(value/1000, nil, " µs")
+		return toFixedScaled(value/1000, " µs")
 	} else if absValue < 1000000000 { // 2000000 ns is better represented as 2 ms
-		return toFixedScaled(value/1000000, nil, " ms")
+		return toFixedScaled(value/1000000, " ms")
 	} else if absValue < 60000000000 {
-		return toFixedScaled(value/1000000000, nil, " s")
+		return toFixedScaled(value/1000000000, " s")
 	} else if absValue < 3600000000000 {
-		return toFixedScaled(value/60000000000, nil, " min")
+		return toFixedScaled(value/60000000000, " min")
 	} else if absValue < 86400000000000 {
-		return toFixedScaled(value/3600000000000, nil, " hour")
+		return toFixedScaled(value/3600000000000, " hour")
 	} else {
-		return toFixedScaled(value/86400000000000, nil, " day")
+		return toFixedScaled(value/86400000000000, " day")
 	}
 }

@@ -66,9 +66,9 @@ func toMicroSeconds(value float64) string {
 	if absValue < 1000 {
 		return toFixed(value, nil) + " µs"
 	} else if absValue < 1000000 { // 2000 µs is better represented as 2 ms
-		return toFixedScaled(value/1000, nil, " ms")
+		return toFixedScaled(value/1000, " ms")
 	} else {
-		return toFixedScaled(value/1000000, nil, " s")
+		return toFixedScaled(value/1000000, " s")
 	}
 }

@@ -80,16 +80,16 @@ func toMilliSeconds(value float64) string {
 	if absValue < 1000 {
 		return toFixed(value, nil) + " ms"
 	} else if absValue < 60000 {
-		return toFixedScaled(value/1000, nil, " s")
+		return toFixedScaled(value/1000, " s")
 	} else if absValue < 3600000 {
-		return toFixedScaled(value/60000, nil, " min")
+		return toFixedScaled(value/60000, " min")
 	} else if absValue < 86400000 { // 172800000 ms is better represented as 2 day
-		return toFixedScaled(value/3600000, nil, " hour")
+		return toFixedScaled(value/3600000, " hour")
 	} else if absValue < 31536000000 {
-		return toFixedScaled(value/86400000, nil, " day")
+		return toFixedScaled(value/86400000, " day")
 	}

-	return toFixedScaled(value/31536000000, nil, " year")
+	return toFixedScaled(value/31536000000, " year")
 }

 // toSeconds returns a easy to read string representation of the given value in seconds

@@ -97,24 +97,24 @@ func toSeconds(value float64) string {
 	absValue := math.Abs(value)

 	if absValue < 0.000001 {
-		return toFixedScaled(value*1e9, nil, " ns")
+		return toFixedScaled(value*1e9, " ns")
 	} else if absValue < 0.001 {
-		return toFixedScaled(value*1e6, nil, " µs")
+		return toFixedScaled(value*1e6, " µs")
 	} else if absValue < 1 {
-		return toFixedScaled(value*1e3, nil, " ms")
+		return toFixedScaled(value*1e3, " ms")
 	} else if absValue < 60 {
 		return toFixed(value, nil) + " s"
 	} else if absValue < 3600 {
-		return toFixedScaled(value/60, nil, " min")
+		return toFixedScaled(value/60, " min")
 	} else if absValue < 86400 { // 56000 s is better represented as 15.56 hour
-		return toFixedScaled(value/3600, nil, " hour")
+		return toFixedScaled(value/3600, " hour")
 	} else if absValue < 604800 {
-		return toFixedScaled(value/86400, nil, " day")
+		return toFixedScaled(value/86400, " day")
 	} else if absValue < 31536000 {
-		return toFixedScaled(value/604800, nil, " week")
+		return toFixedScaled(value/604800, " week")
 	}

-	return toFixedScaled(value/3.15569e7, nil, " year")
+	return toFixedScaled(value/3.15569e7, " year")
 }

 // toMinutes returns a easy to read string representation of the given value in minutes

@@ -124,13 +124,13 @@ func toMinutes(value float64) string {
 	if absValue < 60 {
 		return toFixed(value, nil) + " min"
 	} else if absValue < 1440 {
-		return toFixedScaled(value/60, nil, " hour")
+		return toFixedScaled(value/60, " hour")
 	} else if absValue < 10080 {
-		return toFixedScaled(value/1440, nil, " day")
+		return toFixedScaled(value/1440, " day")
 	} else if absValue < 604800 {
-		return toFixedScaled(value/10080, nil, " week")
+		return toFixedScaled(value/10080, " week")
 	} else {
-		return toFixedScaled(value/5.25948e5, nil, " year")
+		return toFixedScaled(value/5.25948e5, " year")
 	}
 }

@@ -142,11 +142,11 @@ func toHours(value float64) string {
 	if absValue < 24 {
 		return toFixed(value, nil) + " hour"
 	} else if absValue < 168 {
-		return toFixedScaled(value/24, nil, " day")
+		return toFixedScaled(value/24, " day")
 	} else if absValue < 8760 {
-		return toFixedScaled(value/168, nil, " week")
+		return toFixedScaled(value/168, " week")
 	} else {
-		return toFixedScaled(value/8760, nil, " year")
+		return toFixedScaled(value/8760, " year")
 	}
 }

@@ -157,9 +157,9 @@ func toDays(value float64) string {
 	if absValue < 7 {
 		return toFixed(value, nil) + " day"
 	} else if absValue < 365 {
-		return toFixedScaled(value/7, nil, " week")
+		return toFixedScaled(value/7, " week")
 	} else {
-		return toFixedScaled(value/365, nil, " year")
+		return toFixedScaled(value/365, " year")
 	}
 }

@@ -170,6 +170,6 @@ func toWeeks(value float64) string {
 	if absValue < 52 {
 		return toFixed(value, nil) + " week"
 	} else {
-		return toFixedScaled(value/52, nil, " year")
+		return toFixedScaled(value/52, " year")
 	}
 }
Deleted file @@ -1,91 +0,0 @@

package impllogspipeline

import (
	"net/http"
	"strconv"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/logspipeline"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/gorilla/mux"
)

type handler struct {
	module logspipeline.Module
}

func NewHandler(module logspipeline.Module) logspipeline.Handler {
	return &handler{module: module}
}

func (h *handler) ListPipelines(w http.ResponseWriter, r *http.Request) {
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		render.Error(w, err)
		return
	}

	orgID, errv2 := valuer.NewUUID(claims.OrgID)
	if errv2 != nil {
		render.Error(w, errv2)
		return
	}

	version, err := ParseAgentConfigVersion(r)
	if err != nil {
		render.Error(w, err)
		return
	}

	if version != -1 {
		pipelines, err := h.module.ListPipelinesByVersion(r.Context(), orgID, version)
		if err != nil {
			render.Error(w, err)
			return
		}
		render.Success(w, http.StatusOK, pipelines)
		return
	}

	pipelines, err := h.module.ListPipelines(r.Context(), orgID)
	if err != nil {
		render.Error(w, err)
		return
	}

	render.Success(w, http.StatusOK, pipelines)
}

func (h *handler) GetPipeline(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) CreatePipeline(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) UpdatePipeline(w http.ResponseWriter, r *http.Request) {
}

func (h *handler) DeletePipeline(w http.ResponseWriter, r *http.Request) {
}

func ParseAgentConfigVersion(r *http.Request) (int, error) {
	versionString := mux.Vars(r)["version"]

	if versionString == "latest" {
		return -1, nil
	}

	version64, err := strconv.ParseInt(versionString, 0, 8)

	if err != nil {
		return 0, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid version number")
	}

	if version64 <= 0 {
		return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid version number")
	}

	return int(version64), nil
}
Deleted file @@ -1,322 +0,0 @@

package impllogspipeline

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/modules/logspipeline"
	"github.com/SigNoz/signoz/pkg/query-service/agentConf"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/opamptypes"
	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
	"go.uber.org/zap"
)

type module struct {
	sqlstore sqlstore.SQLStore
}

func NewModule(sqlstore sqlstore.SQLStore) logspipeline.Module {
	return &module{sqlstore: sqlstore}
}

func (m *module) ListPipelines(ctx context.Context, orgID valuer.UUID) ([]pipelinetypes.GettablePipeline, error) {
	latestVersion := -1
	// get latest agent config
	lastestConfig, err := agentConf.GetLatestVersion(ctx, orgID, opamptypes.ElementTypeLogPipelines)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, err
	}

	if lastestConfig != nil {
		latestVersion = lastestConfig.Version
	}
	return m.ListPipelinesByVersion(ctx, orgID, latestVersion)
}

func (m *module) ListPipelinesByVersion(ctx context.Context, orgID valuer.UUID, version int) ([]pipelinetypes.GettablePipeline, error) {
	var stored []pipelinetypes.StoreablePipeline
	err := m.sqlstore.BunDB().NewSelect().
		Model(&stored).
		Join("JOIN agent_config_element e ON p.id = e.element_id").
		Join("JOIN agent_config_version v ON v.id = e.version_id").
		Where("e.element_type = ?", opamptypes.ElementTypeLogPipelines.StringValue()).
		Where("v.version = ?", version).
		Where("v.org_id = ?", orgID.StringValue()).
		Order("p.order_id ASC").
		Scan(ctx)
	if err != nil {
		return nil, err
	}

	pipelines := make([]pipelinetypes.GettablePipeline, len(stored))
	if len(stored) == 0 {
		return pipelines, nil
	}

	for i := range stored {
		pipelines[i].StoreablePipeline = stored[i]
		if err := pipelines[i].ParseRawConfig(); err != nil {
			return nil, err
		}
		if err := pipelines[i].ParseFilter(); err != nil {
			return nil, err
		}
	}

	return pipelines, nil
}

func (m *module) GetPipeline(ctx context.Context, orgID valuer.UUID, id string) (*pipelinetypes.GettablePipeline, error) {
	return nil, nil
}

func (m *module) CreatePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) (*pipelinetypes.GettablePipeline, error) {
	storeable, err := pipeline.ToStoreablePipeline()
	if err != nil {
		return nil, err
	}

	// regenerate the id and set other fields
	storeable.Identifiable.ID = valuer.GenerateUUID()
	storeable.OrgID = orgID.String()
	storeable.TimeAuditable = types.TimeAuditable{
		CreatedAt: time.Now(),
	}
	storeable.UserAuditable = types.UserAuditable{
		CreatedBy: claims.Email,
	}

	_, err = m.sqlstore.BunDB().NewInsert().
		Model(&storeable).
		Exec(ctx)
	if err != nil {
		zap.L().Error("error in inserting pipeline data", zap.Error(err))
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to insert pipeline")
	}

	return &pipelinetypes.GettablePipeline{
		StoreablePipeline: *storeable,
		Filter:            pipeline.Filter,
		Config:            pipeline.Config,
	}, nil
}

func (m *module) UpdatePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) (*pipelinetypes.GettablePipeline, error) {
	if err := pipeline.IsValid(); err != nil {
		return nil, err
	}

	storeable, err := pipeline.ToStoreablePipeline()
	if err != nil {
		return nil, err
	}

	storeable.OrgID = orgID.String()
	storeable.TimeAuditable = types.TimeAuditable{
		UpdatedAt: time.Now(),
	}
	storeable.UserAuditable = types.UserAuditable{
		UpdatedBy: claims.Email,
	}

	// get id from storeable pipeline
	id := storeable.ID.StringValue()

	// depending on the order_id update the rest of the table
	// example 1: total available pipelines are 6, and order_id 5 is moved to 2, then we need to update the rest of the table
	// old: 1, 2, 3, 4, 5, 6
	//         ^        |
	//         |________|
	// So pipelines starting from 2nd position till 4th position shift to right (or increase their order_id) by 1 position
	// example 2: total available pipelines are 6, and order_id 2 is moved to 4, then we need to update the rest of the table
	// old: 1, 2, 3, 4, 5, 6
	//         |     ^
	//         |_____|
	// So pipelines starting from 3rd position till 4th position shift to left (or decrease their order_id) by 1 position
	if err := m.sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		db := m.sqlstore.BunDBCtx(ctx)

		var existing pipelinetypes.StoreablePipeline
		if err := db.NewSelect().
			Column("order_id", "enabled").
			Model(&existing).
			Where("id = ?", id).
			Where("org_id = ?", orgID.StringValue()).
			Scan(ctx); err != nil {
			return m.sqlstore.WrapNotFoundErrf(
				err,
				errors.CodeNotFound,
				"pipeline with id %s does not exist in org %s",
				id,
				orgID.StringValue(),
			)
		}

		oldOrderID := existing.OrderID
		newOrderID := storeable.OrderID

		// Reorder other pipelines if the order has changed.
		if newOrderID != oldOrderID {
			if err := reorderPipelinesInTx(ctx, db, orgID.StringValue(), oldOrderID, newOrderID); err != nil {
				return err
			}
		}

		// Preserve primary key and immutable fields.
		storeable.ID = existing.ID

		// Persist the updated pipeline (including its new order).
		if _, err := db.NewUpdate().
			Model(storeable).
			Where("id = ?", id).
			Where("org_id = ?", orgID.StringValue()).
			Exec(ctx); err != nil {
			return err
		}

		// Apply pipelines if the enabled state has changed
		if existing.Enabled != storeable.Enabled {
			if err := m.applyPipelinesInTx(ctx, db, orgID, claims); err != nil {
				return err
			}
		}

		return nil
	}); err != nil {
		return nil, err
	}

	return &pipelinetypes.GettablePipeline{
		StoreablePipeline: *storeable,
		Filter:            pipeline.Filter,
		Config:            pipeline.Config,
	}, nil
}

func (m *module) applyPipelinesInTx(ctx context.Context, tx bun.IDB, orgID valuer.UUID, claims *authtypes.Claims) error {
	// Get ids pipelines for the given org
	var pipelines []pipelinetypes.StoreablePipeline
	if err := tx.NewSelect().
		Column("id").
		Model(&pipelines).
		Where("org_id = ?", orgID.StringValue()).
		Scan(ctx); err != nil {
		return m.sqlstore.WrapNotFoundErrf(
			err,
			errors.CodeNotFound,
			"no pipelines found for org %s",
			orgID.StringValue(),
		)
	}

	// prepare config elements
	elements := make([]string, len(pipelines))
	for i, p := range pipelines {
		elements[i] = p.ID.StringValue()
	}

	cfg, err := agentConf.StartNewVersion(ctx, tx, orgID, claims, opamptypes.ElementTypeLogPipelines, elements)
	if err != nil || cfg == nil {
		return errors.WithAdditionalf(err, "failed to start new version for org %s", orgID.StringValue())
	}

	return nil
}

// reorderPipelinesInTx updates order_id of other pipelines in a transaction-aware way.
// It assumes that all pipelines for a given org have consecutive order_id values starting from 1.
// The logic is:
//   - When moving a pipeline from a higher position to a lower position (e.g., 5 -> 2),
//     all pipelines in [newOrderID, oldOrderID) are shifted right by +1.
//   - When moving from a lower position to a higher position (e.g., 2 -> 4),
//     all pipelines in (oldOrderID, newOrderID] are shifted left by -1.
func reorderPipelinesInTx(ctx context.Context, tx bun.IDB, orgID string, oldOrderID, newOrderID int) error {
	switch {
	case newOrderID < oldOrderID:
		// Move up: shift affected pipelines down (order_id + 1).
		_, err := tx.NewUpdate().
			Model((*pipelinetypes.StoreablePipeline)(nil)).
			Set("order_id = order_id + 1").
			Where("org_id = ?", orgID).
			Where("order_id >= ?", newOrderID).
			Where("order_id < ?", oldOrderID).
			Exec(ctx)
		return err
	case newOrderID > oldOrderID:
		// Move down: shift affected pipelines up (order_id - 1).
		_, err := tx.NewUpdate().
			Model((*pipelinetypes.StoreablePipeline)(nil)).
			Set("order_id = order_id - 1").
			Where("org_id = ?", orgID).
			Where("order_id > ?", oldOrderID).
			Where("order_id <= ?", newOrderID).
			Exec(ctx)
		return err
	default:
		return nil
	}
}

func (m *module) DeletePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) error {
	if err := pipeline.IsValid(); err != nil {
		return err
	}

	if err := m.sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		db := m.sqlstore.BunDBCtx(ctx)

		// Fetch existing pipeline to determine its current order_id.
		var existing pipelinetypes.StoreablePipeline
		if err := db.NewSelect().
			Model(&existing).
			Column("order_id", "enabled").
			Where("id = ?", pipeline.ID).
			Where("org_id = ?", orgID.StringValue()).
			Scan(ctx); err != nil {
			return m.sqlstore.WrapNotFoundErrf(
				err,
				errors.CodeNotFound,
				"pipeline with id %s does not exist in org %s",
				pipeline.ID,
				orgID.StringValue(),
			)
		}

		if _, err := db.NewDelete().
			Model((*pipelinetypes.StoreablePipeline)(nil)).
			Where("id = ?", pipeline.ID).
			Where("org_id = ?", orgID.StringValue()).
			Exec(ctx); err != nil {
			return err
		}

		// Set order_ids of other pipelines by collapsing the gap left by the deleted pipeline.
		if _, err := db.NewUpdate().
			Model((*pipelinetypes.StoreablePipeline)(nil)).
			Set("order_id = order_id - 1").
			Where("org_id = ?", orgID.StringValue()).
			Where("order_id > ?", existing.OrderID).
			Exec(ctx); err != nil {
			return err
		}

		// Apply pipelines if the deleted pipeline was enabled
		if existing.Enabled {
			if err := m.applyPipelinesInTx(ctx, db, orgID, claims); err != nil {
				return err
			}
		}

		return nil
	}); err != nil {
		return err
	}

	return nil
}
Deleted file @@ -1,27 +0,0 @@

package logspipeline

import (
	"context"
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type Module interface {
	ListPipelines(ctx context.Context, orgID valuer.UUID) ([]pipelinetypes.GettablePipeline, error)
	ListPipelinesByVersion(ctx context.Context, orgID valuer.UUID, version int) ([]pipelinetypes.GettablePipeline, error)
	GetPipeline(ctx context.Context, orgID valuer.UUID, id string) (*pipelinetypes.GettablePipeline, error)
	CreatePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) (*pipelinetypes.GettablePipeline, error)
	UpdatePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) (*pipelinetypes.GettablePipeline, error)
	DeletePipeline(ctx context.Context, orgID valuer.UUID, claims *authtypes.Claims, pipeline *pipelinetypes.PostablePipeline) error
}

type Handler interface {
	ListPipelines(w http.ResponseWriter, r *http.Request)
	GetPipeline(w http.ResponseWriter, r *http.Request)
	CreatePipeline(w http.ResponseWriter, r *http.Request)
	UpdatePipeline(w http.ResponseWriter, r *http.Request)
	DeletePipeline(w http.ResponseWriter, r *http.Request)
}
@@ -120,6 +120,8 @@ func FilterResponse(results []*qbtypes.QueryRangeResponse) []*qbtypes.QueryRangeResponse {
 				}
 			}
 			resultData.Rows = filteredRows
+		case *qbtypes.ScalarData:
+			resultData.Data = filterScalarDataIPs(resultData.Columns, resultData.Data)
 		}

 		filteredData = append(filteredData, result)

@@ -145,6 +147,39 @@ func shouldIncludeSeries(series *qbtypes.TimeSeries) bool {
 	return true
 }

+func filterScalarDataIPs(columns []*qbtypes.ColumnDescriptor, data [][]any) [][]any {
+	// Find column indices for server address fields
+	serverColIndices := make([]int, 0)
+	for i, col := range columns {
+		if col.Name == derivedKeyHTTPHost {
+			serverColIndices = append(serverColIndices, i)
+		}
+	}
+
+	if len(serverColIndices) == 0 {
+		return data
+	}
+
+	filtered := make([][]any, 0, len(data))
+	for _, row := range data {
+		includeRow := true
+		for _, colIdx := range serverColIndices {
+			if colIdx < len(row) {
+				if strVal, ok := row[colIdx].(string); ok {
+					if net.ParseIP(strVal) != nil {
+						includeRow = false
+						break
+					}
+				}
+			}
+		}
+		if includeRow {
+			filtered = append(filtered, row)
+		}
+	}
+	return filtered
+}
+
 func shouldIncludeRow(row *qbtypes.RawRow) bool {
 	if row.Data != nil {
 		if domainVal, ok := row.Data[derivedKeyHTTPHost]; ok {
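Note: the new filterScalarDataIPs helper relies on net.ParseIP returning non-nil for both IPv4 and IPv6 literals, so hostnames pass through untouched. A self-contained sketch of the same row-filtering idea over plain [][]any data (dropIPRows is an illustrative name, not part of the package):

package main

import (
	"fmt"
	"net"
)

// dropIPRows keeps only rows whose value in the given column does not parse
// as an IP address; hostnames like "example.com" fail net.ParseIP and stay.
func dropIPRows(rows [][]any, col int) [][]any {
	kept := make([][]any, 0, len(rows))
	for _, row := range rows {
		if col < len(row) {
			if s, ok := row[col].(string); ok && net.ParseIP(s) != nil {
				continue // IP literal (v4 or v6): drop the row
			}
		}
		kept = append(kept, row)
	}
	return kept
}

func main() {
	rows := [][]any{
		{"192.168.1.1", 10},
		{"example.com", 20},
		{"2001:db8::1", 5},
	}
	fmt.Println(dropIPRows(rows, 0)) // [[example.com 20]]
}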
@@ -117,6 +117,59 @@ func TestFilterResponse(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "should filter out IP addresses from scalar data",
+			input: []*qbtypes.QueryRangeResponse{
+				{
+					Data: qbtypes.QueryData{
+						Results: []any{
+							&qbtypes.ScalarData{
+								QueryName: "endpoints",
+								Columns: []*qbtypes.ColumnDescriptor{
+									{
+										TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
+										Type:              qbtypes.ColumnTypeGroup,
+									},
+									{
+										TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
+										Type:              qbtypes.ColumnTypeAggregation,
+									},
+								},
+								Data: [][]any{
+									{"192.168.1.1", 10},
+									{"example.com", 20},
+									{"10.0.0.1", 5},
+								},
+							},
+						},
+					},
+				},
+			},
+			expected: []*qbtypes.QueryRangeResponse{
+				{
+					Data: qbtypes.QueryData{
+						Results: []any{
+							&qbtypes.ScalarData{
+								QueryName: "endpoints",
+								Columns: []*qbtypes.ColumnDescriptor{
+									{
+										TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
+										Type:              qbtypes.ColumnTypeGroup,
+									},
+									{
+										TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
+										Type:              qbtypes.ColumnTypeAggregation,
+									},
+								},
+								Data: [][]any{
+									{"example.com", 20},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
 	}

 	for _, tt := range tests {
@@ -13,7 +13,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/opamptypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
-	"github.com/uptrace/bun"
 	"go.uber.org/zap"
 	"golang.org/x/exp/slices"
 )

@@ -89,10 +88,10 @@ func (r *Repo) GetConfigVersion(
 }

 func (r *Repo) GetLatestVersion(
-	ctx context.Context, tx bun.IDB, orgId valuer.UUID, typ opamptypes.ElementType,
+	ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType,
 ) (*opamptypes.AgentConfigVersion, error) {
 	var c opamptypes.AgentConfigVersion
-	err := tx.NewSelect().
+	err := r.store.BunDB().NewSelect().
 		Model(&c).
 		ColumnExpr("id, version, element_type, deploy_status, deploy_result, created_at").
 		ColumnExpr("COALESCE(created_by, '') as created_by").

@@ -112,8 +111,8 @@ func (r *Repo) GetLatestVersion(
 	return &c, nil
 }

-func (r *Repo) insertConfigInTx(
-	ctx context.Context, tx bun.IDB, orgId valuer.UUID, c *opamptypes.AgentConfigVersion, elements []string,
+func (r *Repo) insertConfig(
+	ctx context.Context, orgId valuer.UUID, userId valuer.UUID, c *opamptypes.AgentConfigVersion, elements []string,
 ) error {

 	if c.ElementType.StringValue() == "" {

@@ -122,6 +121,7 @@ func (r *Repo) insertConfigInTx(
 	// allowing empty elements for logs - use case is deleting all pipelines
 	if len(elements) == 0 && c.ElementType != opamptypes.ElementTypeLogPipelines {
 		zap.L().Error("insert config called with no elements ", zap.String("ElementType", c.ElementType.StringValue()))
 		return errors.NewInvalidInputf(CodeConfigElementsRequired, "config must have atleast one element")
 	}

@@ -129,11 +129,13 @@
 		// the version can not be set by the user, we want to auto-assign the versions
 		// in a monotonically increasing order starting with 1. hence, we reject insert
 		// requests with version anything other than 0. here, 0 indicates un-assigned
 		zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", c.ElementType.StringValue()))
 		return errors.NewInvalidInputf(errors.CodeInvalidInput, "user defined versions are not supported in the agent config")
 	}

-	configVersion, err := r.GetLatestVersion(ctx, tx, orgId, c.ElementType)
+	configVersion, err := r.GetLatestVersion(ctx, orgId, c.ElementType)
 	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
 		zap.L().Error("failed to fetch latest config version", zap.Error(err))
 		return err
 	}

@@ -144,10 +146,31 @@
 		c.Version = 1
 	}

-	_, dbErr := tx.NewInsert().
+	// Track whether we've successfully finished the insert operation
+	success := false
+
+	defer func() {
+		if !success {
+			// remove all the damage (invalid rows from db)
+			// Delete elements first, then version (to respect potential foreign key constraints)
+			_, delErr := r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigElement)).Where("version_id = ?", c.ID).Exec(ctx)
+			if delErr != nil {
+				zap.L().Error("failed to delete config elements during cleanup", zap.Error(delErr), zap.String("version_id", c.ID.String()))
+			}
+			_, delErr = r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigVersion)).Where("id = ?", c.ID).Where("org_id = ?", orgId).Exec(ctx)
+			if delErr != nil {
+				zap.L().Error("failed to delete config version during cleanup", zap.Error(delErr), zap.String("version_id", c.ID.String()))
+			}
+		}
+	}()
+
+	_, dbErr := r.store.
+		BunDB().
+		NewInsert().
 		Model(c).
 		Exec(ctx)
 	if dbErr != nil {
 		zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
 		return errors.WrapInternalf(dbErr, CodeConfigVersionInsertFailed, "failed to insert config version")
 	}

@@ -162,12 +185,13 @@
 			ElementType: c.ElementType.StringValue(),
 			ElementID:   e,
 		}
-		_, dbErr = tx.NewInsert().Model(agentConfigElement).Exec(ctx)
+		_, dbErr = r.store.BunDB().NewInsert().Model(agentConfigElement).Exec(ctx)
 		if dbErr != nil {
 			return errors.WrapInternalf(dbErr, CodeConfigElementInsertFailed, "failed to insert config element")
 		}
 	}

+	success = true
 	return nil
 }
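Note: with the bun.IDB transaction parameter gone, insertConfig can no longer rely on a caller-managed rollback; the success flag plus deferred deletes above emulate that atomicity by compensating for partial writes. A standalone sketch of the pattern (insert and compensate are hypothetical stand-ins for the real bun queries):

package main

import "fmt"

// runWithCompensation sketches the pattern insertConfig now uses: a success
// flag plus a deferred, best-effort compensation that removes partially
// inserted rows when any step before the flag flips fails.
func runWithCompensation(insert func() error, compensate func()) (err error) {
	success := false
	defer func() {
		if !success {
			compensate()
		}
	}()

	if err = insert(); err != nil {
		return err
	}

	success = true
	return nil
}

func main() {
	err := runWithCompensation(
		func() error { return fmt.Errorf("element insert failed") },
		func() { fmt.Println("cleanup: delete config elements, then the version row") },
	)
	fmt.Println("result:", err)
}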
@@ -14,11 +14,9 @@ import (
 	tsp "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/tailsampler"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/opamptypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/google/uuid"
-	"github.com/uptrace/bun"
 	"go.uber.org/zap"
 	yaml "gopkg.in/yaml.v3"
 )

@@ -183,7 +181,7 @@ func (m *Manager) ReportConfigDeploymentStatus(
 func GetLatestVersion(
 	ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType,
 ) (*opamptypes.AgentConfigVersion, error) {
-	return m.GetLatestVersion(ctx, m.store.BunDBCtx(ctx), orgId, elementType)
+	return m.GetLatestVersion(ctx, orgId, elementType)
 }

 func GetConfigVersion(

@@ -200,14 +198,14 @@ func GetConfigHistory(

 // StartNewVersion launches a new config version for given set of elements
 func StartNewVersion(
-	ctx context.Context, tx bun.IDB, orgId valuer.UUID, claims *authtypes.Claims, eleType opamptypes.ElementType, elementIds []string,
+	ctx context.Context, orgId valuer.UUID, userId valuer.UUID, eleType opamptypes.ElementType, elementIds []string,
 ) (*opamptypes.AgentConfigVersion, error) {

 	// create a new version
-	cfg := opamptypes.NewAgentConfigVersion(orgId, claims, eleType)
+	cfg := opamptypes.NewAgentConfigVersion(orgId, userId, eleType)

 	// insert new config and elements into database
-	err := m.insertConfigInTx(ctx, tx, orgId, cfg, elementIds)
+	err := m.insertConfig(ctx, orgId, userId, cfg, elementIds)
 	if err != nil {
 		return nil, err
 	}
@@ -9,7 +9,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/flagger"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline/impllogspipeline"
 	"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
 	"github.com/SigNoz/signoz/pkg/queryparser"

@@ -4049,6 +4048,26 @@ func (aH *APIHandler) logAggregate(w http.ResponseWriter, r *http.Request) {
 	aH.WriteJSON(w, r, model.GetLogsAggregatesResponse{})
 }

+func parseAgentConfigVersion(r *http.Request) (int, error) {
+	versionString := mux.Vars(r)["version"]
+
+	if versionString == "latest" {
+		return -1, nil
+	}
+
+	version64, err := strconv.ParseInt(versionString, 0, 8)
+
+	if err != nil {
+		return 0, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid version number")
+	}
+
+	if version64 <= 0 {
+		return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid version number")
+	}
+
+	return int(version64), nil
+}
+
 func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
 	req := logparsingpipeline.PipelinesPreviewRequest{}

@@ -4079,7 +4098,7 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	version, err := impllogspipeline.ParseAgentConfigVersion(r)
+	version, err := parseAgentConfigVersion(r)
 	if err != nil {
 		render.Error(w, err)
 		return

@@ -4162,6 +4181,11 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request) {
 		render.Error(w, errv2)
 		return
 	}
+	userID, errv2 := valuer.NewUUID(claims.UserID)
+	if errv2 != nil {
+		render.Error(w, errv2)
+		return
+	}

 	req := pipelinetypes.PostablePipelines{}

@@ -4183,7 +4207,7 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request) {
 			return nil, err
 		}

-		return aH.LogsParsingPipelineController.ApplyPipelines(ctx, orgID, &claims, postable)
+		return aH.LogsParsingPipelineController.ApplyPipelines(ctx, orgID, userID, postable)
 	}

 	res, err := createPipeline(r.Context(), req.Pipelines)
@@ -16,7 +16,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/querybuilder"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/opamptypes"
 	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
 	"github.com/SigNoz/signoz/pkg/valuer"

@@ -59,7 +58,7 @@ type PipelinesResponse struct {
 func (ic *LogParsingPipelineController) ApplyPipelines(
 	ctx context.Context,
 	orgID valuer.UUID,
-	claims *authtypes.Claims,
+	userID valuer.UUID,
 	postable []pipelinetypes.PostablePipeline,
 ) (*PipelinesResponse, error) {
 	var pipelines []pipelinetypes.GettablePipeline

@@ -90,7 +89,7 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
 		elements[i] = p.ID.StringValue()
 	}

-	cfg, err := agentConf.StartNewVersion(ctx, ic.sqlStore.BunDBCtx(ctx), orgID, claims, opamptypes.ElementTypeLogPipelines, elements)
+	cfg, err := agentConf.StartNewVersion(ctx, orgID, userID, opamptypes.ElementTypeLogPipelines, elements)
 	if err != nil || cfg == nil {
 		return nil, model.InternalError(fmt.Errorf("failed to start new version: %w", err))
 	}
@@ -9,7 +9,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/prometheus"
-	"github.com/SigNoz/signoz/pkg/query-service/formatter"
+	"github.com/SigNoz/signoz/pkg/formatter"
 	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"

@@ -33,7 +33,7 @@ import (
 	logsv3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
-	"github.com/SigNoz/signoz/pkg/query-service/formatter"
+	"github.com/SigNoz/signoz/pkg/formatter"

 	querierV5 "github.com/SigNoz/signoz/pkg/querier"
@@ -13,8 +13,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/authdomain"
 	"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline/impllogspipeline"
 	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
 	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
 	"github.com/SigNoz/signoz/pkg/modules/organization"

@@ -68,7 +66,6 @@ type Modules struct {
 	SpanPercentile  spanpercentile.Module
 	MetricsExplorer metricsexplorer.Module
 	Promote         promote.Module
-	LogsPipeline    logspipeline.Module
 }

 func NewModules(

@@ -113,6 +110,5 @@ func NewModules(
 		Services:        implservices.NewModule(querier, telemetryStore),
 		MetricsExplorer: implmetricsexplorer.NewModule(telemetryStore, telemetryMetadataStore, cache, ruleStore, dashboard, providerSettings, config.MetricsExplorer),
 		Promote:         implpromote.NewModule(telemetryMetadataStore, telemetryStore),
-		LogsPipeline:    impllogspipeline.NewModule(sqlstore),
 	}
 }
@@ -18,7 +18,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/authdomain"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/fields"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline"
 	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/preference"

@@ -60,7 +59,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumentation,
 		struct{ authz.Handler }{},
 		struct{ zeus.Handler }{},
 		struct{ querier.Handler }{},
-		struct{ logspipeline.Handler }{},
 	).New(ctx, instrumentation.ToProviderSettings(), apiserver.Config{})
 	if err != nil {
 		return nil, err
@@ -23,7 +23,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/global"
 	"github.com/SigNoz/signoz/pkg/global/signozglobal"
 	"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
-	"github.com/SigNoz/signoz/pkg/modules/logspipeline/impllogspipeline"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
 	"github.com/SigNoz/signoz/pkg/modules/preference/implpreference"

@@ -170,6 +169,7 @@ func NewSQLMigrationProviderFactories(
 		sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
 		sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
 		sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
+		sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
 	)
 }

@@ -255,7 +255,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.AuthZ,
 			handlers.AuthzHandler,
 			handlers.ZeusHandler,
 			handlers.QuerierHandler,
-			impllogspipeline.NewHandler(modules.LogsPipeline),
 		),
 	)
 }
pkg/sqlmigration/066_migrate_rules_v4_to_v5_post_deprecation.go (new file): 209 lines
New file @@ -0,0 +1,209 @@

package sqlmigration

import (
	"context"
	"database/sql"
	"encoding/json"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/transition"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type migrateRulesV4ToV5 struct {
	store          sqlstore.SQLStore
	telemetryStore telemetrystore.TelemetryStore
	logger         *slog.Logger
}

func NewMigrateRulesV4ToV5Factory(
	store sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(
		factory.MustNewName("migrate_rules_post_deprecation"),
		func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
			return &migrateRulesV4ToV5{
				store:          store,
				telemetryStore: telemetryStore,
				logger:         ps.Logger,
			}, nil
		})
}

func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}
	return nil
}

func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT name
		FROM (
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
			INTERSECT
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
		)
		ORDER BY name
	`

	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
		return nil, nil
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
			continue
		}
		keys = append(keys, key)
	}

	return keys, nil
}

func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT tagKey
		FROM signoz_traces.distributed_span_attributes_keys
		WHERE tagType IN ('tag', 'resource')
		GROUP BY tagKey
		HAVING COUNT(DISTINCT tagType) > 1
		ORDER BY tagKey
	`

	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
		return nil, nil
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
			continue
		}
		keys = append(keys, key)
	}

	return keys, nil
}

func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
	logsKeys, err := migration.getLogDuplicateKeys(ctx)
	if err != nil {
		return err
	}

	tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
	if err != nil {
		return err
	}

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	defer func() {
		_ = tx.Rollback()
	}()

	var rules []struct {
		ID   string         `bun:"id"`
		Data map[string]any `bun:"data"`
	}

	err = tx.NewSelect().
		Table("rule").
		Column("id", "data").
		Scan(ctx, &rules)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)

	count := 0

	for _, rule := range rules {
		version, _ := rule.Data["version"].(string)

		if version == "v5" {
			continue
		}

		if version == "" {
			migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
		}

		migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)

		// Check if the queries envelope already exists and is non-empty
		hasQueriesEnvelope := false
		if condition, ok := rule.Data["condition"].(map[string]any); ok {
			if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
				if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
					hasQueriesEnvelope = true
				}
			}
		}

		if hasQueriesEnvelope {
			// already has queries envelope, just bump version
			// this is because user made a mistake of choosing version
			migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
			rule.Data["version"] = "v5"
		} else {
			// old format, run full migration
			migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
			updated := alertsMigrator.Migrate(ctx, rule.Data)
			if !updated {
				migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
				continue
			}
			rule.Data["version"] = "v5"
		}

		dataJSON, err := json.Marshal(rule.Data)
		if err != nil {
			return err
		}

		_, err = tx.NewUpdate().
			Table("rule").
			Set("data = ?", string(dataJSON)).
			Where("id = ?", rule.ID).
			Exec(ctx)
		if err != nil {
			return err
		}
		count++
	}
	if count != 0 {
		migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
	}

	return tx.Commit()
}

func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
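Note: the Up migration above decides between a full rewrite and a simple version bump by probing condition.compositeQuery.queries in each rule's JSON. A standalone sketch of that probe, with field names taken from the migration (hasQueriesEnvelope here is an illustrative helper, not part of the package):

package main

import (
	"encoding/json"
	"fmt"
)

// hasQueriesEnvelope mirrors the check in the migration's Up method: a rule
// is already in the v5 shape when condition.compositeQuery.queries exists
// and is non-empty.
func hasQueriesEnvelope(data map[string]any) bool {
	condition, ok := data["condition"].(map[string]any)
	if !ok {
		return false
	}
	compositeQuery, ok := condition["compositeQuery"].(map[string]any)
	if !ok {
		return false
	}
	queries, ok := compositeQuery["queries"].([]any)
	return ok && len(queries) > 0
}

func main() {
	raw := []byte(`{"version":"v4","condition":{"compositeQuery":{"queries":[{"name":"A"}]}}}`)
	var data map[string]any
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}
	// This rule already carries the envelope, so the migration only bumps version.
	fmt.Println(hasQueriesEnvelope(data)) // true
}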
@@ -5,7 +5,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/uptrace/bun"
 )

@@ -99,13 +98,13 @@ type AgentConfigVersion struct {
 	Config string `json:"config" bun:"config,type:text"`
 }

-func NewAgentConfigVersion(orgId valuer.UUID, claims *authtypes.Claims, elementType ElementType) *AgentConfigVersion {
+func NewAgentConfigVersion(orgId valuer.UUID, userId valuer.UUID, elementType ElementType) *AgentConfigVersion {
 	return &AgentConfigVersion{
 		TimeAuditable: types.TimeAuditable{
 			CreatedAt: time.Now(),
 			UpdatedAt: time.Now(),
 		},
-		UserAuditable: types.UserAuditable{CreatedBy: claims.Email, UpdatedBy: claims.Email},
+		UserAuditable: types.UserAuditable{CreatedBy: userId.String(), UpdatedBy: userId.String()},
 		OrgID:         orgId,
 		Identifiable:  types.Identifiable{ID: valuer.GenerateUUID()},
 		ElementType:   elementType,
@@ -10,7 +10,6 @@ import (
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/queryBuilderToExpr"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/uptrace/bun"
|
||||
)

@@ -267,37 +266,6 @@ func (p *PostablePipeline) IsValid() error {
	return nil
}

func (p *PostablePipeline) ToStoreablePipeline() (*StoreablePipeline, error) {
	rawConfig, err := json.Marshal(p.Config)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to marshal postable pipeline config")
	}

	filter, err := json.Marshal(p.Filter)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to marshal postable pipeline filter")
	}
	identifier := valuer.GenerateUUID()
	if p.ID != "" {
		identifier, err = valuer.NewUUID(p.ID)
		if err != nil {
			return nil, errors.WithAdditionalf(err, "failed to parse postable pipeline id")
		}
	}
	return &StoreablePipeline{
		Identifiable: types.Identifiable{
			ID: identifier,
		},
		OrderID:      p.OrderID,
		Enabled:      p.Enabled,
		Name:         p.Name,
		Alias:        p.Alias,
		Description:  p.Description,
		FilterString: string(filter),
		ConfigJSON:   string(rawConfig),
	}, nil
}

func isValidOperator(op PipelineOperator) error {
	if op.ID == "" {
		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "PipelineOperator.ID is required")
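
The ID handling in ToStoreablePipeline above follows a keep-or-mint pattern: reuse a client-supplied UUID when it parses, otherwise generate a fresh one. A standalone sketch of the same idea, using github.com/google/uuid in place of the internal valuer package:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// resolveID keeps a caller-supplied UUID when it parses, otherwise mints a new one.
func resolveID(id string) (uuid.UUID, error) {
	if id == "" {
		return uuid.New(), nil
	}
	return uuid.Parse(id)
}

func main() {
	fresh, _ := resolveID("")
	kept, _ := resolveID("0f8fad5b-d9cb-469f-a165-70867728950e")
	fmt.Println(fresh, kept)
}
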

@@ -355,6 +355,10 @@ func (r *PostableRule) validate() error {
		errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
	}

	if r.Version != "v5" {
		errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
	}

	if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
		errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
	}
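
A minimal sketch of the collect-all-errors validation style used here, with the stdlib errors.Join standing in for the internal signozError helpers:

package main

import (
	"errors"
	"fmt"
)

type rule struct {
	Version string
	Queries []string
}

// validate accumulates every problem instead of returning on the first,
// so callers can report all invalid fields at once.
func (r rule) validate() error {
	var errs []error
	if len(r.Queries) == 0 {
		errs = append(errs, errors.New("composite query is required"))
	}
	if r.Version != "v5" {
		errs = append(errs, fmt.Errorf("only version v5 is supported, got %q", r.Version))
	}
	return errors.Join(errs...)
}

func main() {
	fmt.Println(rule{Version: "v4"}.validate())
}
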

@@ -108,6 +108,7 @@ func TestParseIntoRule(t *testing.T) {
				"ruleType": "threshold_rule",
				"evalWindow": "5m",
				"frequency": "1m",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -150,6 +151,7 @@ func TestParseIntoRule(t *testing.T) {
			content: []byte(`{
				"alert": "DefaultsRule",
				"ruleType": "threshold_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -187,6 +189,7 @@ func TestParseIntoRule(t *testing.T) {
			initRule: PostableRule{},
			content: []byte(`{
				"alert": "PromQLRule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "promql",

@@ -256,6 +259,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
			content: []byte(`{
				"alert": "SeverityLabelTest",
				"schemaVersion": "v1",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -344,6 +348,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
			content: []byte(`{
				"alert": "NoLabelsTest",
				"schemaVersion": "v1",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -384,6 +389,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
			content: []byte(`{
				"alert": "OverwriteTest",
				"schemaVersion": "v1",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -474,6 +480,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
			content: []byte(`{
				"alert": "V2Test",
				"schemaVersion": "v2",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -517,6 +524,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
			initRule: PostableRule{},
			content: []byte(`{
				"alert": "DefaultSchemaTest",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -569,6 +577,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
	content := []byte(`{
		"alert": "TestThresholds",
		"version": "v5",
		"condition": {
			"compositeQuery": {
				"queryType": "builder",

@@ -639,6 +648,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
		"schemaVersion": "v2",
		"alert": "MultiThresholdAlert",
		"ruleType": "threshold_rule",
		"version": "v5",
		"condition": {
			"compositeQuery": {
				"queryType": "builder",

@@ -732,6 +742,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -766,6 +777,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -799,6 +811,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyAboveTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -833,6 +846,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyAboveTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -866,6 +880,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowAllTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -901,6 +916,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowAllTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -935,6 +951,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "AnomalyOutOfBoundsTest",
				"ruleType": "anomaly_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -969,6 +986,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "ThresholdTest",
				"ruleType": "threshold_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -1003,6 +1021,7 @@ func TestAnomalyNegationEval(t *testing.T) {
			ruleJSON: []byte(`{
				"alert": "ThresholdTest",
				"ruleType": "threshold_rule",
				"version": "v5",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",

@@ -7,7 +7,7 @@ import (
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/query-service/converter"
	"github.com/SigNoz/signoz/pkg/converter"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
	"github.com/SigNoz/signoz/pkg/valuer"