Mirror of https://github.com/SigNoz/signoz.git (synced 2026-05-11 20:50:35 +01:00)

Compare commits: service-fg ... main (5 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 0766ab31c0 | |
| | 3787715fd8 | |
| | 0fbfff353a | |
| | ca8cc30c34 | |
| | 85a8611f2e | |
@@ -18,16 +18,19 @@ import (
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/gateway"
	"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
	"github.com/SigNoz/signoz/pkg/global"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
	"github.com/SigNoz/signoz/pkg/meterreporter"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/retention"
	"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
	"github.com/SigNoz/signoz/pkg/prometheus"
@@ -109,6 +112,9 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
		func(_ licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]] {
			return signoz.NewAuditorProviderFactories()
		},
		func(_ context.Context, _ factory.ProviderSettings, _ flagger.Flagger, _ licensing.Licensing, _ telemetrystore.TelemetryStore, _ retention.Getter, _ organization.Getter, _ zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
			return signoz.NewMeterReporterProviderFactories(), "noop"
		},
		func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
			return querier.NewHandler(ps, q, a)
		},
cmd/enterprise/meter.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package main

import (
	"github.com/SigNoz/signoz/pkg/metercollector"
	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/retentiontypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

var meterConfigs = []metercollector.Config{
	{
		Provider: metercollector.ProviderStatic,
		Static: metercollector.StaticConfig{
			Name:        zeustypes.MeterPlatformActive,
			Unit:        zeustypes.MeterUnitCount,
			Aggregation: zeustypes.MeterAggregationMax,
			Value:       1,
		},
	},
	{
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterLogSize,
			Unit:                 zeustypes.MeterUnitBytes,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrylogs.DBName,
			TableName:            telemetrylogs.LogsV2LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultLogsRetentionDays,
		},
	},
	{
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterSpanSize,
			Unit:                 zeustypes.MeterUnitBytes,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrytraces.DBName,
			TableName:            telemetrytraces.SpanIndexV3LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultTracesRetentionDays,
		},
	},
	{
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterDatapointCount,
			Unit:                 zeustypes.MeterUnitCount,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrymetrics.DBName,
			TableName:            telemetrymetrics.SamplesV4LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultMetricsRetentionDays,
		},
	},
}
@@ -18,6 +18,9 @@ import (
	"github.com/SigNoz/signoz/ee/gateway/httpgateway"
	enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	"github.com/SigNoz/signoz/ee/metercollector/staticmetercollector"
	"github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector"
	"github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter"
	"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
	"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration/implcloudprovider"
	"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
@@ -36,14 +39,17 @@ import (
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	pkgflagger "github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/gateway"
	"github.com/SigNoz/signoz/pkg/global"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/meterreporter"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/retention"
	"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
	"github.com/SigNoz/signoz/pkg/prometheus"
@@ -161,6 +167,20 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
			}
			return factories
		},
		func(ctx context.Context, providerSettings factory.ProviderSettings, flagger pkgflagger.Flagger, licensing licensing.Licensing, telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter, orgGetter organization.Getter, zeus zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
			factories := signoz.NewMeterReporterProviderFactories()

			collectorFactories := factory.MustNewNamedMap(
				staticmetercollector.NewFactory(),
				telemetrymetercollector.NewFactory(telemetryStore, retentionGetter),
			)

			if err := factories.Add(httpmeterreporter.NewFactory(collectorFactories, meterConfigs, flagger, licensing, orgGetter, zeus)); err != nil {
				panic(err)
			}

			return factories, "http"
		},
		func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
			communityHandler := querier.NewHandler(ps, q, a)
			return eequerier.NewHandler(ps, q, communityHandler)
@@ -429,3 +429,10 @@ authz:
  openfga:
    # maximum tuples allowed per openfga write operation.
    max_tuples_per_write: 300

##################### Meter Reporter #####################
meterreporter:
  # The interval between collection ticks. Minimum 5m.
  interval: 6h
  # Whether to backfill sealed days from the license creation day.
  backfill: true
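For orientation, here is a minimal sketch of the Go shape these two keys appear to decode into, based only on how provider.config.Interval and provider.config.Backfill are used in ee/meterreporter/httpmeterreporter/provider.go further down; the struct tags and the full field set of the real meterreporter.Config are assumptions, not taken from this diff.

// Sketch only; tags and completeness are assumptions.
package meterreporter

import "time"

type Config struct {
	// interval: how often the reporter ticks (fed to time.NewTicker in the http provider).
	Interval time.Duration `mapstructure:"interval"`
	// backfill: whether sealed days since the license creation day are reported.
	Backfill bool `mapstructure:"backfill"`
}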
@@ -2520,6 +2520,65 @@ components:
        enabled:
          type: boolean
      type: object
    InframonitoringtypesClusterRecord:
      properties:
        clusterCPU:
          format: double
          type: number
        clusterCPUAllocatable:
          format: double
          type: number
        clusterMemory:
          format: double
          type: number
        clusterMemoryAllocatable:
          format: double
          type: number
        clusterName:
          type: string
        meta:
          additionalProperties:
            type: string
          nullable: true
          type: object
        nodeCountsByReadiness:
          $ref: '#/components/schemas/InframonitoringtypesNodeCountsByReadiness'
        podCountsByPhase:
          $ref: '#/components/schemas/InframonitoringtypesPodCountsByPhase'
      required:
        - clusterName
        - clusterCPU
        - clusterCPUAllocatable
        - clusterMemory
        - clusterMemoryAllocatable
        - nodeCountsByReadiness
        - podCountsByPhase
        - meta
      type: object
    InframonitoringtypesClusters:
      properties:
        endTimeBeforeRetention:
          type: boolean
        records:
          items:
            $ref: '#/components/schemas/InframonitoringtypesClusterRecord'
          nullable: true
          type: array
        requiredMetricsCheck:
          $ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
        total:
          type: integer
        type:
          $ref: '#/components/schemas/InframonitoringtypesResponseType'
        warning:
          $ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
      required:
        - type
        - records
        - total
        - requiredMetricsCheck
        - endTimeBeforeRetention
      type: object
    InframonitoringtypesHostFilter:
      properties:
        expression:
@@ -2824,6 +2883,32 @@ components:
        - requiredMetricsCheck
        - endTimeBeforeRetention
      type: object
    InframonitoringtypesPostableClusters:
      properties:
        end:
          format: int64
          type: integer
        filter:
          $ref: '#/components/schemas/Querybuildertypesv5Filter'
        groupBy:
          items:
            $ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
          nullable: true
          type: array
        limit:
          type: integer
        offset:
          type: integer
        orderBy:
          $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
        start:
          format: int64
          type: integer
      required:
        - start
        - end
        - limit
      type: object
    InframonitoringtypesPostableHosts:
      properties:
        end:
@@ -11746,6 +11831,77 @@ paths:
      summary: Health check
      tags:
        - health
  /api/v2/infra_monitoring/clusters:
    post:
      deprecated: false
      description: 'Returns a paginated list of Kubernetes clusters with key aggregated
        metrics derived by summing per-node values within the group: CPU usage, CPU
        allocatable, memory working set, memory allocatable. Each row also reports
        per-group nodeCountsByReadiness ({ ready, notReady } from each node''s latest
        k8s.node.condition_ready value) and per-group podCountsByPhase ({ pending,
        running, succeeded, failed, unknown } from each pod''s latest k8s.pod.phase
        value). Each cluster includes metadata attributes (k8s.cluster.name). The
        response type is ''list'' for the default k8s.cluster.name grouping or ''grouped_list''
        for custom groupBy keys; in both modes every row aggregates nodes and pods
        in the group. Supports filtering via a filter expression, custom groupBy,
        ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination
        via offset/limit. Also reports missing required metrics and whether the requested
        time range falls before the data retention boundary. Numeric metric fields
        (clusterCPU, clusterCPUAllocatable, clusterMemory, clusterMemoryAllocatable)
        return -1 as a sentinel when no data is available for that field.'
      operationId: ListClusters
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/InframonitoringtypesPostableClusters'
      responses:
        "200":
          content:
            application/json:
              schema:
                properties:
                  data:
                    $ref: '#/components/schemas/InframonitoringtypesClusters'
                  status:
                    type: string
                required:
                  - status
                  - data
                type: object
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "401":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Unauthorized
        "403":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Forbidden
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      security:
        - api_key:
            - VIEWER
        - tokenizer:
            - VIEWER
      summary: List Clusters for Infra Monitoring
      tags:
        - inframonitoring
  /api/v2/infra_monitoring/hosts:
    post:
      deprecated: false
ee/metercollector/staticmetercollector/provider.go (new file, 61 lines)
@@ -0,0 +1,61 @@
// Package staticmetercollector emits a fixed-value meter reading per org per window.
package staticmetercollector

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/metercollector"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

var _ metercollector.MeterCollector = (*Provider)(nil)

type Provider struct {
	settings factory.ScopedProviderSettings
	config   metercollector.StaticConfig
}

func NewFactory() factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
	return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderStatic), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
		return newProvider(providerSettings, config.Static), nil
	},
	)
}

func newProvider(providerSettings factory.ProviderSettings, config metercollector.StaticConfig) *Provider {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/staticmetercollector")

	return &Provider{
		settings: settings,
		config:   config,
	}
}

func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
	return provider.config.Aggregation
}

func (provider *Provider) Origin(_ context.Context, _ valuer.UUID, license *licensetypes.License, _ time.Time) (time.Time, error) {
	if license == nil || license.CreatedAt.IsZero() {
		return time.Time{}, nil
	}

	createdAt := license.CreatedAt.UTC()
	return time.Date(createdAt.Year(), createdAt.Month(), createdAt.Day(), 0, 0, 0, 0, time.UTC), nil
}

func (provider *Provider) Collect(_ context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error) {
	if license == nil || license.Key == "" {
		return nil, nil
	}

	return []zeustypes.Meter{
		zeustypes.NewMeter(provider.config.Name, provider.config.Value, provider.config.Unit, provider.config.Aggregation, window, zeustypes.NewDimensions(zeustypes.OrganizationID.String(orgID.StringValue()))),
	}, nil
}
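A minimal in-package sketch of what this collector yields for one sealed day, reusing the "platform active" values from cmd/enterprise/meter.go above; the bare Provider literal, zero-value org ID, and license literal are illustrative assumptions, not how the provider is wired in production.

package staticmetercollector

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/metercollector"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

func ExampleProvider_Collect() {
	// Collect never touches settings, so a bare Provider is enough for the sketch.
	p := &Provider{config: metercollector.StaticConfig{
		Name:        zeustypes.MeterPlatformActive,
		Unit:        zeustypes.MeterUnitCount,
		Aggregation: zeustypes.MeterAggregationMax,
		Value:       1,
	}}

	// One sealed day: 2025-05-11T00:00Z to 2025-05-12T00:00Z.
	window, _ := zeustypes.NewMeterWindow(1746921600000, 1747008000000, true)
	var orgID valuer.UUID // zero value, illustrative only

	meters, _ := p.Collect(context.Background(), orgID, &licensetypes.License{Key: "license-key"}, window)
	fmt.Println(len(meters)) // 1: a single max-aggregated reading of 1 for the org and window
}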
ee/metercollector/telemetrymetercollector/provider.go (new file, 247 lines)
@@ -0,0 +1,247 @@
// Package telemetrymetercollector collects telemetry meters (logs, traces, metrics)
// by retention. One Provider materializes per TelemetryConfig.
package telemetrymetercollector

import (
	"context"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/huandu/go-sqlbuilder"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/metercollector"
	"github.com/SigNoz/signoz/pkg/modules/retention"
	"github.com/SigNoz/signoz/pkg/telemetrymeter"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/retentiontypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

var (
	labelKeyPattern   = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`)
	labelValuePattern = regexp.MustCompile(`^[A-Za-z0-9_.\-:]+$`)
)

var _ metercollector.MeterCollector = (*Provider)(nil)

type Provider struct {
	settings        factory.ScopedProviderSettings
	config          metercollector.TelemetryConfig
	telemetryStore  telemetrystore.TelemetryStore
	retentionGetter retention.Getter
}

func NewFactory(telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter) factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
	return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderTelemetry), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
		return newProvider(providerSettings, config.Telemetry, telemetryStore, retentionGetter), nil
	},
	)
}

func newProvider(
	providerSettings factory.ProviderSettings,
	config metercollector.TelemetryConfig,
	telemetryStore telemetrystore.TelemetryStore,
	retentionGetter retention.Getter,
) *Provider {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector")

	return &Provider{
		settings:        settings,
		config:          config,
		telemetryStore:  telemetryStore,
		retentionGetter: retentionGetter,
	}
}

func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
	return provider.config.Aggregation
}

func (provider *Provider) Origin(ctx context.Context, _ valuer.UUID, _ *licensetypes.License, todayStart time.Time) (time.Time, error) {
	query, args := buildOriginQuery(provider.config.Name.String())

	var minMs int64
	if err := provider.telemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&minMs); err != nil {
		return time.Time{}, err
	}
	if minMs == 0 {
		return todayStart, nil
	}

	minDay := time.UnixMilli(minMs).UTC()
	return time.Date(minDay.Year(), minDay.Month(), minDay.Day(), 0, 0, 0, 0, time.UTC), nil
}

func (provider *Provider) Collect(
	ctx context.Context,
	orgID valuer.UUID,
	_ *licensetypes.License,
	window zeustypes.MeterWindow,
) ([]zeustypes.Meter, error) {
	meterName := provider.config.Name.String()

	segments, err := provider.retentionGetter.GetRetentionPolicySegments(
		ctx,
		orgID,
		provider.config.DBName,
		provider.config.TableName,
		provider.config.DefaultRetentionDays,
		window.StartUnixMilli,
		window.EndUnixMilli,
	)
	if err != nil {
		return nil, err
	}

	valuesByRetentionDays := make(map[int]int64)

	for _, segment := range segments {
		query, args, err := buildQuery(meterName, segment)
		if err != nil {
			return nil, err
		}

		rows, err := provider.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
		if err != nil {
			return nil, err
		}

		if err := func() error {
			defer rows.Close()
			for rows.Next() {
				var retentionDays int32
				var value int64

				if err := rows.Scan(&retentionDays, &value); err != nil {
					return err
				}

				valuesByRetentionDays[int(retentionDays)] += value
			}
			if err := rows.Err(); err != nil {
				return err
			}
			return nil
		}(); err != nil {
			return nil, err
		}
	}

	meters := make([]zeustypes.Meter, 0, len(valuesByRetentionDays))
	for retentionDays, value := range valuesByRetentionDays {
		meters = append(meters, zeustypes.NewMeter(provider.config.Name, value, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, retentionDays)))
	}

	// Empty windows still emit a sentinel so checkpoints can advance.
	if len(meters) == 0 && len(segments) > 0 {
		meters = append(meters, zeustypes.NewMeter(provider.config.Name, 0, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, segments[len(segments)-1].DefaultDays)))
	}

	return meters, nil
}

func buildOriginQuery(meterName string) (string, []any) {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("toInt64(ifNull(min(unix_milli), 0))")
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	sb.Where(sb.Equal("metric_name", meterName))
	return sb.BuildWithFlavor(sqlbuilder.ClickHouse)
}

func buildQuery(meterName string, segment *retentiontypes.RetentionPolicySegment) (string, []any, error) {
	retentionExpr, err := buildRetentionMultiIfSQL(segment.Rules, segment.DefaultDays)
	if err != nil {
		return "", nil, err
	}

	selects := []string{
		retentionExpr + " AS retention_days",
		"toInt64(ifNull(sum(value), 0)) AS value",
	}

	sb := sqlbuilder.NewSelectBuilder()
	sb.Select(selects...)
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	sb.Where(
		sb.Equal("metric_name", meterName),
		sb.GTE("unix_milli", segment.StartMs),
		sb.LT("unix_milli", segment.EndMs),
	)
	sb.GroupBy("retention_days")
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return query, args, nil
}

func buildRetentionMultiIfSQL(rules []retentiontypes.CustomRetentionRule, defaultDays int) (string, error) {
	if defaultDays <= 0 {
		return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "non-positive default retention %d", defaultDays)
	}

	if len(rules) == 0 {
		return "toInt32(" + strconv.Itoa(defaultDays) + ")", nil
	}

	arms := make([]string, 0, 2*len(rules)+1)
	for ruleIndex, rule := range rules {
		if rule.TTLDays <= 0 {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has non-positive ttl_days %d", ruleIndex, rule.TTLDays)
		}
		conditionExpr, err := buildRuleConditionSQL(ruleIndex, rule)
		if err != nil {
			return "", err
		}

		arms = append(arms, conditionExpr)
		arms = append(arms, strconv.Itoa(rule.TTLDays))
	}
	arms = append(arms, strconv.Itoa(defaultDays))

	return "toInt32(multiIf(" + strings.Join(arms, ", ") + "))", nil
}

func buildRuleConditionSQL(ruleIndex int, rule retentiontypes.CustomRetentionRule) (string, error) {
	if len(rule.Filters) == 0 {
		return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has no filters", ruleIndex)
	}

	filterExprs := make([]string, 0, len(rule.Filters))
	for filterIndex, filter := range rule.Filters {
		if !labelKeyPattern.MatchString(filter.Key) {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has invalid key %q", ruleIndex, filterIndex, filter.Key)
		}
		if len(filter.Values) == 0 {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has no values", ruleIndex, filterIndex)
		}

		quoted := make([]string, len(filter.Values))
		for valueIndex, value := range filter.Values {
			if !labelValuePattern.MatchString(value) {
				return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d value %d is invalid %q", ruleIndex, filterIndex, valueIndex, value)
			}
			quoted[valueIndex] = "'" + value + "'"
		}

		filterExprs = append(filterExprs, fmt.Sprintf("JSONExtractString(labels, '%s') IN (%s)", filter.Key, strings.Join(quoted, ", ")))
	}

	return strings.Join(filterExprs, " AND "), nil
}

func buildDimensions(orgID valuer.UUID, retentionDays int) map[string]string {
	retentionDurationSeconds := int64(retentionDays) * 24 * 60 * 60 // seconds

	return zeustypes.NewDimensions(
		zeustypes.OrganizationID.String(orgID.StringValue()),
		zeustypes.RetentionDuration.String(strconv.FormatInt(retentionDurationSeconds, 10)),
	)
}
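To make the generated ClickHouse expression concrete, here is a small in-package sketch; it only calls buildRetentionMultiIfSQL with no custom rules, and the with-rules case is spelled out in a comment because the Go type of a rule's filter entries is not shown in this diff.

package telemetrymetercollector

import "fmt"

func ExampleRetentionExpression() {
	// No custom rules: the retention_days column is just the default.
	expr, _ := buildRetentionMultiIfSQL(nil, 30)
	fmt.Println(expr) // toInt32(30)

	// For a rule with TTLDays 15 and one filter (key "service.name", value "checkout"),
	// buildRuleConditionSQL and buildRetentionMultiIfSQL above would produce:
	//   toInt32(multiIf(JSONExtractString(labels, 'service.name') IN ('checkout'), 15, 30))
	// Collect then groups sum(value) by this retention_days expression per segment.
}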
ee/meterreporter/httpmeterreporter/provider.go (new file, 318 lines)
@@ -0,0 +1,318 @@
package httpmeterreporter

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/metercollector"
	"github.com/SigNoz/signoz/pkg/meterreporter"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/SigNoz/signoz/pkg/zeus"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

var _ factory.ServiceWithHealthy = (*Provider)(nil)

type Provider struct {
	settings         factory.ScopedProviderSettings
	config           meterreporter.Config
	collectorsByName map[zeustypes.MeterName]metercollector.MeterCollector
	flagger          flagger.Flagger
	licensing        licensing.Licensing
	orgGetter        organization.Getter
	zeus             zeus.Zeus
	healthyC         chan struct{}
	stopC            chan struct{}
	metrics          *reporterMetrics
}

func NewFactory(collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]], collectorConfigs []metercollector.Config, flagger flagger.Flagger, licensing licensing.Licensing, orgGetter organization.Getter, zeus zeus.Zeus) factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
	return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config meterreporter.Config) (meterreporter.Reporter, error) {
		return newProvider(ctx, providerSettings, config, collectorFactories, collectorConfigs, flagger, licensing, orgGetter, zeus)
	},
	)
}

func newProvider(
	ctx context.Context,
	providerSettings factory.ProviderSettings,
	config meterreporter.Config,
	collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]],
	collectorConfigs []metercollector.Config,
	flagger flagger.Flagger,
	licensing licensing.Licensing,
	orgGetter organization.Getter,
	zeus zeus.Zeus,
) (*Provider, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter")

	collectorsByName := map[zeustypes.MeterName]metercollector.MeterCollector{}
	for _, collectorConfig := range collectorConfigs {
		collector, err := factory.NewProviderFromNamedMap(ctx, providerSettings, collectorConfig, collectorFactories, collectorConfig.Provider)
		if err != nil {
			return nil, err
		}
		if _, exists := collectorsByName[collector.Name()]; exists {
			return nil, errors.Newf(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "duplicate meter collector %q", collector.Name())
		}
		collectorsByName[collector.Name()] = collector
	}

	metrics, err := newReporterMetrics(settings.Meter())
	if err != nil {
		return nil, err
	}

	return &Provider{
		settings:         settings,
		config:           config,
		collectorsByName: collectorsByName,
		flagger:          flagger,
		licensing:        licensing,
		orgGetter:        orgGetter,
		zeus:             zeus,
		healthyC:         make(chan struct{}),
		stopC:            make(chan struct{}),
		metrics:          metrics,
	}, nil
}

func (provider *Provider) Start(ctx context.Context) error {
	close(provider.healthyC)

	provider.collect(ctx)

	ticker := time.NewTicker(provider.config.Interval)
	defer ticker.Stop()

	for {
		select {
		case <-provider.stopC:
			return nil
		case <-ticker.C:
			provider.collect(ctx)
		}
	}
}

func (provider *Provider) collect(ctx context.Context) {
	ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.Collect", trace.WithAttributes(attribute.String("meterreporter.provider", "http")))
	defer span.End()

	orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
	if err != nil {
		span.RecordError(err)
		provider.settings.Logger().ErrorContext(ctx, "failed to get orgs data", errors.Attr(err))
		return
	}

	for _, org := range orgs {
		evalCtx := featuretypes.NewFlaggerEvaluationContext(org.ID)
		if !provider.flagger.BooleanOrEmpty(ctx, flagger.FeatureUseMeterReporter, evalCtx) {
			provider.settings.Logger().DebugContext(ctx, "meter reporter disabled for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
			continue
		}

		license, err := provider.licensing.GetActive(ctx, org.ID)
		if err != nil {
			if errors.Ast(err, errors.TypeNotFound) {
				provider.settings.Logger().DebugContext(ctx, "no active license found for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
				continue
			}

			span.RecordError(err)
			provider.settings.Logger().ErrorContext(ctx, "failed to fetch active license for org", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
			return
		}

		if err := provider.collectOrg(ctx, org, license); err != nil {
			span.RecordError(err)
			provider.settings.Logger().ErrorContext(ctx, "failed to collect meters", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
		}
	}
}

func (provider *Provider) Stop(ctx context.Context) error {
	close(provider.stopC)
	return nil
}

func (provider *Provider) Healthy() <-chan struct{} {
	return provider.healthyC
}

func (provider *Provider) collectOrg(ctx context.Context, org *types.Organization, license *licensetypes.License) error {
	now := time.Now().UTC()
	// Use one timestamp so a tick cannot straddle midnight.
	todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)

	if provider.config.Backfill {
		checkpointsByMeter, err := provider.checkpoints(ctx, license.Key)
		if err != nil {
			return err
		}

		nextByCollector := provider.nextDays(license, todayStart, checkpointsByMeter)

		start, end, ok := backfillRange(nextByCollector, todayStart)
		if ok {
			for day := start; !day.After(end); day = day.AddDate(0, 0, 1) {
				eligible := eligibleCollectors(provider.collectorsByName, nextByCollector, day)
				if len(eligible) == 0 {
					continue
				}

				window, err := zeustypes.NewMeterWindow(day.UnixMilli(), day.AddDate(0, 0, 1).UnixMilli(), true)
				if err != nil {
					return err
				}

				if err := provider.report(ctx, org.ID, license, window, eligible); err != nil {
					provider.settings.Logger().WarnContext(ctx, "failed to backfill for day", errors.Attr(err), slog.String("date", day.Format("2006-01-02")))
					return err
				}
			}
		}
	}

	// Today's partial window: every collector is always eligible (next <= today).
	if now.UnixMilli() > todayStart.UnixMilli() {
		todayWindow, err := zeustypes.NewMeterWindow(todayStart.UnixMilli(), now.UnixMilli(), false)
		if err != nil {
			return err
		}

		return provider.report(ctx, org.ID, license, todayWindow, provider.collectorsByName)
	}

	return nil
}

func (provider *Provider) checkpoints(ctx context.Context, licenseKey string) (map[string]time.Time, error) {
	list, err := provider.zeus.ListMeterCheckpoints(ctx, licenseKey)
	if err != nil {
		provider.metrics.checkpoints.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return nil, err
	}

	provider.metrics.checkpoints.Add(ctx, 1)

	checkpointsByMeter := make(map[string]time.Time, len(list))
	for _, checkpoint := range list {
		checkpointsByMeter[checkpoint.Name] = checkpoint.StartDate.UTC()
	}

	return checkpointsByMeter, nil
}

func (provider *Provider) nextDays(license *licensetypes.License, todayStart time.Time, checkpointsByMeter map[string]time.Time) map[zeustypes.MeterName]time.Time {
	nextByCollector := make(map[zeustypes.MeterName]time.Time, len(provider.collectorsByName))
	licenseCreatedAt := license.CreatedAt.UTC()
	licenseCreatedAtDay := time.Date(licenseCreatedAt.Year(), licenseCreatedAt.Month(), licenseCreatedAt.Day(), 0, 0, 0, 0, time.UTC)

	for _, collector := range provider.collectorsByName {
		checkpoint, hasCheckpoint := checkpointsByMeter[collector.Name().String()]
		nextByCollector[collector.Name()] = nextReportableDay(licenseCreatedAtDay, todayStart, checkpoint, hasCheckpoint)
	}

	return nextByCollector
}

func nextReportableDay(licenseCreatedAtDay time.Time, todayStart time.Time, checkpoint time.Time, hasCheckpoint bool) time.Time {
	next := licenseCreatedAtDay
	if next.IsZero() {
		next = todayStart
	}

	if hasCheckpoint {
		checkpointNext := checkpoint.AddDate(0, 0, 1)
		if checkpointNext.After(next) {
			next = checkpointNext
		}
	}

	return next
}

func (provider *Provider) report(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow, collectors map[zeustypes.MeterName]metercollector.MeterCollector) error {
	date := time.UnixMilli(window.StartUnixMilli).UTC().Format("2006-01-02")

	meters := make([]zeustypes.Meter, 0, len(collectors))
	for _, collector := range collectors {
		meterAttr := attribute.String("signoz.meter.name", collector.Name().String())
		collectedReadings, err := collector.Collect(ctx, orgID, license, window)
		if err != nil {
			provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr, errors.TypeAttr(err)))
			continue
		}

		provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr))
		meters = append(meters, collectedReadings...)
	}

	if len(meters) == 0 {
		return nil
	}

	idempotencyKey := fmt.Sprintf("meterreporter:%s", date)

	body, err := json.Marshal(meters)
	if err != nil {
		provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return err
	}

	if err := provider.zeus.PutMetersV3(ctx, license.Key, idempotencyKey, body); err != nil {
		provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return err
	}

	provider.metrics.reports.Add(ctx, 1)
	provider.metrics.meters.Add(ctx, int64(len(meters)))
	return nil
}

// backfillRange returns the inclusive sealed-day range ending at yesterday.
func backfillRange(nextByCollector map[zeustypes.MeterName]time.Time, todayStart time.Time) (start, end time.Time, ok bool) {
	yesterday := todayStart.AddDate(0, 0, -1)

	for _, next := range nextByCollector {
		if !next.Before(todayStart) {
			continue
		}
		if start.IsZero() || next.Before(start) {
			start = next
		}
	}

	if start.IsZero() || start.After(yesterday) {
		return time.Time{}, time.Time{}, false
	}

	return start, yesterday, true
}

func eligibleCollectors(collectors map[zeustypes.MeterName]metercollector.MeterCollector, nextByCollector map[zeustypes.MeterName]time.Time, day time.Time) map[zeustypes.MeterName]metercollector.MeterCollector {
	eligible := make(map[zeustypes.MeterName]metercollector.MeterCollector, len(collectors))
	for name, collector := range collectors {
		if !nextByCollector[name].After(day) {
			eligible[name] = collector
		}
	}

	return eligible
}
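A short in-package sketch of the backfill arithmetic, showing how nextReportableDay picks the first sealed day to report; the dates are arbitrary examples.

package httpmeterreporter

import (
	"fmt"
	"time"
)

func ExampleNextReportableDay() {
	today := time.Date(2026, 5, 11, 0, 0, 0, 0, time.UTC)
	licenseDay := time.Date(2026, 5, 1, 0, 0, 0, 0, time.UTC)

	// No checkpoint yet: backfill starts at the license creation day.
	fmt.Println(nextReportableDay(licenseDay, today, time.Time{}, false).Format("2006-01-02")) // 2026-05-01

	// A checkpoint at 2026-05-08 marks that day as already reported,
	// so the next reportable day is the one after it.
	checkpoint := time.Date(2026, 5, 8, 0, 0, 0, 0, time.UTC)
	fmt.Println(nextReportableDay(licenseDay, today, checkpoint, true).Format("2006-01-02")) // 2026-05-09
}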
ee/meterreporter/httpmeterreporter/telemetry.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package httpmeterreporter

import (
	"github.com/SigNoz/signoz/pkg/errors"
	"go.opentelemetry.io/otel/metric"
)

type reporterMetrics struct {
	checkpoints metric.Int64Counter
	reports     metric.Int64Counter
	collections metric.Int64Counter
	meters      metric.Int64Counter
}

func newReporterMetrics(meter metric.Meter) (*reporterMetrics, error) {
	var errs error

	checkpoints, err := meter.Int64Counter("signoz.meterreporter.checkpoints", metric.WithDescription("Zeus meter checkpoint fetches."), metric.WithUnit("{checkpoint}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}

	reports, err := meter.Int64Counter("signoz.meterreporter.reports", metric.WithDescription("Meter reports shipped to Zeus."), metric.WithUnit("{report}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}

	collections, err := meter.Int64Counter("signoz.meterreporter.collections", metric.WithDescription("Per-meter collect calls."), metric.WithUnit("{collection}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}

	meters, err := meter.Int64Counter("signoz.meterreporter.meters", metric.WithDescription("Meter readings shipped to Zeus."), metric.WithUnit("{meter}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}

	if errs != nil {
		return nil, errs
	}

	return &reporterMetrics{
		checkpoints: checkpoints,
		reports:     reports,
		collections: collections,
		meters:      meters,
	}, nil
}
@@ -150,6 +150,72 @@ func (provider *Provider) PutMetersV2(ctx context.Context, key string, data []by
	return err
}

func (provider *Provider) PutMetersV3(ctx context.Context, key string, idempotencyKey string, data []byte) error {
	headers := http.Header{}
	if idempotencyKey != "" {
		headers.Set("X-Idempotency-Key", idempotencyKey)
	}

	_, err := provider.doWithHeaders(
		ctx,
		provider.config.URL.JoinPath("/v2/meters"),
		http.MethodPost,
		key,
		data,
		headers,
	)

	return err
}

func (provider *Provider) ListMeterCheckpoints(ctx context.Context, key string) ([]zeustypes.MeterCheckpoint, error) {
	response, err := provider.do(
		ctx,
		provider.config.URL.JoinPath("/v2/meters/checkpoints"),
		http.MethodGet,
		key,
		nil,
	)
	if err != nil {
		return nil, err
	}

	checkpointValues := gjson.GetBytes(response, "data")
	if !checkpointValues.Exists() || checkpointValues.Type == gjson.Null {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints are required")
	}

	if !checkpointValues.IsArray() {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints must be an array")
	}

	checkpointResults := checkpointValues.Array()
	checkpoints := make([]zeustypes.MeterCheckpoint, 0, len(checkpointResults))
	for _, checkpointValue := range checkpointResults {
		name := checkpointValue.Get("name").String()
		if name == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint name is required")
		}

		startDateString := checkpointValue.Get("start_date").String()
		if startDateString == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint start_date is required for %q", name)
		}

		startDate, err := time.Parse("2006-01-02", startDateString)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, zeus.ErrCodeResponseMalformed, "parse meter checkpoint start_date %q for %q", startDateString, name)
		}

		checkpoints = append(checkpoints, zeustypes.MeterCheckpoint{
			Name:      name,
			StartDate: startDate,
		})
	}

	return checkpoints, nil
}

func (provider *Provider) PutProfile(ctx context.Context, key string, profile *zeustypes.PostableProfile) error {
	body, err := json.Marshal(profile)
	if err != nil {
@@ -185,12 +251,21 @@ func (provider *Provider) PutHost(ctx context.Context, key string, host *zeustyp
}

func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
	return provider.doWithHeaders(ctx, url, method, key, requestBody, nil)
}

func (provider *Provider) doWithHeaders(ctx context.Context, url *url.URL, method string, key string, requestBody []byte, extraHeaders http.Header) ([]byte, error) {
	request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
	if err != nil {
		return nil, err
	}
	request.Header.Set("X-Signoz-Cloud-Api-Key", key)
	request.Header.Set("Content-Type", "application/json")
	for k, vs := range extraHeaders {
		for _, v := range vs {
			request.Header.Add(k, v)
		}
	}

	response, err := provider.httpClient.Do(request)
	if err != nil {
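For context, this is the response shape ListMeterCheckpoints appears to expect from Zeus, inferred from the gjson lookups above; the meter names and the httpzeus package name are illustrative assumptions.

package httpzeus

// Sketch only: a payload that would pass the checks in ListMeterCheckpoints.
// Each entry needs a non-empty "name" and a "start_date" in "2006-01-02" layout.
const exampleMeterCheckpointsResponse = `{
  "data": [
    {"name": "platform.active", "start_date": "2026-05-10"},
    {"name": "log.size", "start_date": "2026-05-09"}
  ]
}`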
@@ -12,10 +12,12 @@ import type {
} from 'react-query';

import type {
	InframonitoringtypesPostableClustersDTO,
	InframonitoringtypesPostableHostsDTO,
	InframonitoringtypesPostableNamespacesDTO,
	InframonitoringtypesPostableNodesDTO,
	InframonitoringtypesPostablePodsDTO,
	ListClusters200,
	ListHosts200,
	ListNamespaces200,
	ListNodes200,
@@ -26,6 +28,90 @@ import type {
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type { ErrorType, BodyType } from '../../../generatedAPIInstance';

/**
 * Returns a paginated list of Kubernetes clusters with key aggregated metrics derived by summing per-node values within the group: CPU usage, CPU allocatable, memory working set, memory allocatable. Each row also reports per-group nodeCountsByReadiness ({ ready, notReady } from each node's latest k8s.node.condition_ready value) and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each cluster includes metadata attributes (k8s.cluster.name). The response type is 'list' for the default k8s.cluster.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates nodes and pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (clusterCPU, clusterCPUAllocatable, clusterMemory, clusterMemoryAllocatable) return -1 as a sentinel when no data is available for that field.
 * @summary List Clusters for Infra Monitoring
 */
export const listClusters = (
	inframonitoringtypesPostableClustersDTO: BodyType<InframonitoringtypesPostableClustersDTO>,
	signal?: AbortSignal,
) => {
	return GeneratedAPIInstance<ListClusters200>({
		url: `/api/v2/infra_monitoring/clusters`,
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		data: inframonitoringtypesPostableClustersDTO,
		signal,
	});
};

export const getListClustersMutationOptions = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown,
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof listClusters>>,
		TError,
		{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
		TContext
	>;
}): UseMutationOptions<
	Awaited<ReturnType<typeof listClusters>>,
	TError,
	{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
	TContext
> => {
	const mutationKey = ['listClusters'];
	const { mutation: mutationOptions } = options
		? options.mutation &&
		  'mutationKey' in options.mutation &&
		  options.mutation.mutationKey
			? options
			: { ...options, mutation: { ...options.mutation, mutationKey } }
		: { mutation: { mutationKey } };

	const mutationFn: MutationFunction<
		Awaited<ReturnType<typeof listClusters>>,
		{ data: BodyType<InframonitoringtypesPostableClustersDTO> }
	> = (props) => {
		const { data } = props ?? {};

		return listClusters(data);
	};

	return { mutationFn, ...mutationOptions };
};

export type ListClustersMutationResult = NonNullable<
	Awaited<ReturnType<typeof listClusters>>
>;
export type ListClustersMutationBody =
	BodyType<InframonitoringtypesPostableClustersDTO>;
export type ListClustersMutationError = ErrorType<RenderErrorResponseDTO>;

/**
 * @summary List Clusters for Infra Monitoring
 */
export const useListClusters = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown,
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof listClusters>>,
		TError,
		{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
		TContext
	>;
}): UseMutationResult<
	Awaited<ReturnType<typeof listClusters>>,
	TError,
	{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
	TContext
> => {
	const mutationOptions = getListClustersMutationOptions(options);

	return useMutation(mutationOptions);
};
/**
 * Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field.
 * @summary List Hosts for Infra Monitoring
@@ -4568,6 +4568,66 @@ export interface GlobaltypesTokenizerConfigDTO {
	enabled?: boolean;
}

/**
 * @nullable
 */
export type InframonitoringtypesClusterRecordDTOMeta = {
	[key: string]: string;
} | null;

export interface InframonitoringtypesClusterRecordDTO {
	/**
	 * @type number
	 * @format double
	 */
	clusterCPU: number;
	/**
	 * @type number
	 * @format double
	 */
	clusterCPUAllocatable: number;
	/**
	 * @type number
	 * @format double
	 */
	clusterMemory: number;
	/**
	 * @type number
	 * @format double
	 */
	clusterMemoryAllocatable: number;
	/**
	 * @type string
	 */
	clusterName: string;
	/**
	 * @type object
	 * @nullable true
	 */
	meta: InframonitoringtypesClusterRecordDTOMeta;
	nodeCountsByReadiness: InframonitoringtypesNodeCountsByReadinessDTO;
	podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
}

export interface InframonitoringtypesClustersDTO {
	/**
	 * @type boolean
	 */
	endTimeBeforeRetention: boolean;
	/**
	 * @type array
	 * @nullable true
	 */
	records: InframonitoringtypesClusterRecordDTO[] | null;
	requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
	/**
	 * @type integer
	 */
	total: number;
	type: InframonitoringtypesResponseTypeDTO;
	warning?: Querybuildertypesv5QueryWarnDataDTO;
}

export interface InframonitoringtypesHostFilterDTO {
	/**
	 * @type string
@@ -4885,6 +4945,34 @@ export interface InframonitoringtypesPodsDTO {
	warning?: Querybuildertypesv5QueryWarnDataDTO;
}

export interface InframonitoringtypesPostableClustersDTO {
	/**
	 * @type integer
	 * @format int64
	 */
	end: number;
	filter?: Querybuildertypesv5FilterDTO;
	/**
	 * @type array
	 * @nullable true
	 */
	groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
	/**
	 * @type integer
	 */
	limit: number;
	/**
	 * @type integer
	 */
	offset?: number;
	orderBy?: Querybuildertypesv5OrderByDTO;
	/**
	 * @type integer
	 * @format int64
	 */
	start: number;
}

export interface InframonitoringtypesPostableHostsDTO {
	/**
	 * @type integer
@@ -9324,6 +9412,14 @@ export type Healthz503 = {
	status: string;
};

export type ListClusters200 = {
	data: InframonitoringtypesClustersDTO;
	/**
	 * @type string
	 */
	status: string;
};

export type ListHosts200 = {
	data: InframonitoringtypesHostsDTO;
	/**
@@ -2,8 +2,9 @@ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useMutation } from 'react-query';
// eslint-disable-next-line no-restricted-imports
import { useSelector } from 'react-redux';
import { LoadingOutlined, SearchOutlined } from '@ant-design/icons';
import { SearchOutlined } from '@ant-design/icons';
import { Color } from '@signozhq/design-tokens';
import { Loader } from '@signozhq/icons';
import {
	Button,
	Input,
@@ -516,7 +517,9 @@ export default function CeleryOverviewTable({
			bordered={false}
			loading={{
				spinning: isLoading,
				indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
				indicator: (
					<Spin indicator={<Loader size={14} className="animate-spin" />} />
				),
			}}
			locale={{
				emptyText: isLoading ? null : <Typography.Text>No data</Typography.Text>,
@@ -1,7 +1,8 @@
import { Typography } from '@signozhq/ui/typography';
import { ReactNode, useEffect, useState } from 'react';
import { useHistory } from 'react-router-dom';
import { CaretDownOutlined, LoadingOutlined } from '@ant-design/icons';
import { CaretDownOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Modal, Select, Spin, Tooltip, Tree, TreeDataNode } from 'antd';
import { OnboardingStatusResponse } from 'api/messagingQueues/onboarding/getOnboardingStatus';
import { QueryParams } from 'constants/query';
@@ -222,7 +223,7 @@ function AttributeCheckList({
		>
			{loading ? (
				<div className="loader-container">
					<Spin indicator={<LoadingOutlined spin />} size="large" />
					<Spin indicator={<Loader className="animate-spin" />} size="large" />
				</div>
			) : (
				<div className="modal-content">
@@ -7,12 +7,9 @@ import React, {
	useState,
} from 'react';
import { Virtuoso } from 'react-virtuoso';
import {
	DownOutlined,
	LoadingOutlined,
	ReloadOutlined,
} from '@ant-design/icons';
import { DownOutlined, ReloadOutlined } from '@ant-design/icons';
import { Color } from '@signozhq/design-tokens';
import { Loader } from '@signozhq/icons';
import { Button, Checkbox, Select } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import cx from 'classnames';
@@ -1700,7 +1697,7 @@ const CustomMultiSelect: React.FC<CustomMultiSelectProps> = ({
				{loading && (
					<div className="navigation-loading">
						<div className="navigation-icons">
							<LoadingOutlined />
							<Loader className="animate-spin" />
						</div>
						<div className="navigation-text">Refreshing values...</div>
					</div>
@@ -1708,7 +1705,7 @@ const CustomMultiSelect: React.FC<CustomMultiSelectProps> = ({
				{!loading && waitingMessage && (
					<div className="navigation-loading">
						<div className="navigation-icons">
							<LoadingOutlined />
							<Loader className="animate-spin" />
						</div>
						<div className="navigation-text" title={waitingMessage}>
							{waitingMessage}
@@ -6,13 +6,9 @@ import React, {
	useRef,
	useState,
} from 'react';
import {
	CloseOutlined,
	DownOutlined,
	LoadingOutlined,
	ReloadOutlined,
} from '@ant-design/icons';
import { CloseOutlined, DownOutlined, ReloadOutlined } from '@ant-design/icons';
import { Color } from '@signozhq/design-tokens';
import { Loader } from '@signozhq/icons';
import { Select } from 'antd';
import cx from 'classnames';
import TextToolTip from 'components/TextToolTip';
@@ -581,7 +577,7 @@ const CustomSelect: React.FC<CustomSelectProps> = ({
				{loading && (
					<div className="navigation-loading">
						<div className="navigation-icons">
							<LoadingOutlined />
							<Loader className="animate-spin" />
						</div>
						<div className="navigation-text">Refreshing values...</div>
					</div>
@@ -589,7 +585,7 @@ const CustomSelect: React.FC<CustomSelectProps> = ({
				{!loading && waitingMessage && (
					<div className="navigation-loading">
						<div className="navigation-icons">
							<LoadingOutlined />
							<Loader className="animate-spin" />
						</div>
						<div className="navigation-text" title={waitingMessage}>
							{waitingMessage}
@@ -16,8 +16,8 @@ interface OverviewTabProps {
account: ServiceAccountRow;
localName: string;
onNameChange: (v: string) => void;
localRole: string;
onRoleChange: (v: string | undefined) => void;
localRoles: string[];
onRolesChange: (v: string[]) => void;
isDisabled: boolean;
availableRoles: AuthtypesRoleDTO[];
rolesLoading?: boolean;
@@ -31,8 +31,8 @@ function OverviewTab({
account,
localName,
onNameChange,
localRole,
onRoleChange,
localRoles,
onRolesChange,
isDisabled,
availableRoles,
rolesLoading,
@@ -95,10 +95,15 @@ function OverviewTab({
{isDisabled ? (
<div className="sa-drawer__input-wrapper sa-drawer__input-wrapper--disabled">
<div className="sa-drawer__disabled-roles">
{localRole ? (
<Badge color="vanilla">
{availableRoles.find((r) => r.id === localRole)?.name ?? localRole}
</Badge>
{localRoles.length > 0 ? (
localRoles.map((roleId) => {
const role = availableRoles.find((r) => r.id === roleId);
return (
<Badge key={roleId} color="vanilla">
{role?.name ?? roleId}
</Badge>
);
})
) : (
<span className="sa-drawer__input-text">—</span>
)}
@@ -108,14 +113,15 @@ function OverviewTab({
) : (
<RolesSelect
id="sa-roles"
mode="multiple"
roles={availableRoles}
loading={rolesLoading}
isError={rolesError}
error={rolesErrorObj}
onRefetch={onRefetchRoles}
value={localRole}
onChange={onRoleChange}
placeholder="Select role"
value={localRoles}
onChange={onRolesChange}
placeholder="Select roles"
/>
)}
</div>
@@ -8,9 +8,7 @@ import { ToggleGroup, ToggleGroupItem } from '@signozhq/ui/toggle-group';
import { Pagination, Skeleton } from 'antd';
import { convertToApiError } from 'api/ErrorResponseHandlerForGeneratedAPIs';
import {
getGetServiceAccountRolesQueryKey,
getListServiceAccountsQueryKey,
useDeleteServiceAccountRole,
useGetServiceAccount,
useListServiceAccountKeys,
useUpdateServiceAccount,
@@ -37,7 +35,7 @@ import {
useQueryState,
} from 'nuqs';
import APIError from 'types/api/error';
import { retryOn429, toAPIError } from 'utils/errorUtils';
import { toAPIError } from 'utils/errorUtils';

import AddKeyModal from './AddKeyModal';
import DeleteAccountModal from './DeleteAccountModal';
@@ -92,7 +90,7 @@ function ServiceAccountDrawer({
parseAsBoolean.withDefault(false),
);
const [localName, setLocalName] = useState('');
const [localRole, setLocalRole] = useState('');
const [localRoles, setLocalRoles] = useState<string[]>([]);
const [isSaving, setIsSaving] = useState(false);
const [saveErrors, setSaveErrors] = useState<SaveError[]>([]);

@@ -140,7 +138,7 @@ function ServiceAccountDrawer({
if (!account?.id) {
roleSessionRef.current = null;
} else if (account.id !== roleSessionRef.current && !isRolesLoading) {
setLocalRole(currentRoles[0]?.id ?? '');
setLocalRoles(currentRoles.map((r) => r.id).filter(Boolean) as string[]);
roleSessionRef.current = account.id;
}
}, [account?.id, currentRoles, isRolesLoading]);
@@ -151,7 +149,13 @@ function ServiceAccountDrawer({
const isDirty =
account !== null &&
(localName !== (account.name ?? '') ||
localRole !== (currentRoles[0]?.id ?? ''));
JSON.stringify([...localRoles].sort()) !==
JSON.stringify(
currentRoles
.map((r) => r.id)
.filter(Boolean)
.sort(),
));

const {
roles: availableRoles,
@@ -179,27 +183,6 @@ function ServiceAccountDrawer({

// the retry for this mutation is safe due to the api being idempotent on backend
const { mutateAsync: updateMutateAsync } = useUpdateServiceAccount();
const { mutateAsync: deleteRole } = useDeleteServiceAccountRole({
mutation: {
retry: retryOn429,
},
});

const executeRolesOperation = useCallback(
async (accountId: string): Promise<RoleUpdateFailure[]> => {
if (localRole === '' && currentRoles[0]?.id) {
await deleteRole({
pathParams: { id: accountId, rid: currentRoles[0].id },
});
await queryClient.invalidateQueries(
getGetServiceAccountRolesQueryKey({ id: accountId }),
);
return [];
}
return applyDiff([localRole].filter(Boolean), availableRoles);
},
[localRole, currentRoles, availableRoles, applyDiff, deleteRole, queryClient],
);
const retryNameUpdate = useCallback(async (): Promise<void> => {
if (!account) {
@@ -267,7 +250,7 @@ function ServiceAccountDrawer({

const retryRolesUpdate = useCallback(async (): Promise<void> => {
try {
const failures = await executeRolesOperation(selectedAccountId ?? '');
const failures = await applyDiff([...localRoles], availableRoles);
if (failures.length === 0) {
setSaveErrors((prev) => prev.filter((e) => e.context !== 'Roles update'));
} else {
@@ -283,7 +266,7 @@ function ServiceAccountDrawer({
),
);
}
}, [selectedAccountId, executeRolesOperation, failuresToSaveErrors]);
}, [localRoles, availableRoles, applyDiff, failuresToSaveErrors]);

const handleSave = useCallback(async (): Promise<void> => {
if (!account || !isDirty) {
@@ -302,7 +285,7 @@ function ServiceAccountDrawer({

const [nameResult, rolesResult] = await Promise.allSettled([
namePromise,
executeRolesOperation(account.id),
applyDiff([...localRoles], availableRoles),
]);

const errors: SaveError[] = [];
@@ -343,8 +326,10 @@ function ServiceAccountDrawer({
account,
isDirty,
localName,
localRoles,
availableRoles,
updateMutateAsync,
executeRolesOperation,
applyDiff,
refetchAccount,
onSuccess,
queryClient,
@@ -443,9 +428,9 @@ function ServiceAccountDrawer({
account={account}
localName={localName}
onNameChange={handleNameChange}
localRole={localRole}
onRoleChange={(role): void => {
setLocalRole(role ?? '');
localRoles={localRoles}
onRolesChange={(roles): void => {
setLocalRoles(roles);
clearRoleErrors();
}}
isDisabled={isDeleted}
@@ -151,7 +151,7 @@ describe('ServiceAccountDrawer', () => {
});
});

it('changing roles enables Save; clicking Save sends role add request without delete', async () => {
it('adding a role fires POST for the new role and no DELETE for existing roles', async () => {
const roleSpy = jest.fn();
const deleteSpy = jest.fn();
const user = userEvent.setup({ pointerEventsCheck: 0 });
@@ -171,6 +171,7 @@ describe('ServiceAccountDrawer', () => {

await screen.findByDisplayValue('CI Bot');

// Add signoz-viewer while keeping signoz-admin selected
await user.click(screen.getByLabelText('Roles'));
await user.click(await screen.findByTitle('signoz-viewer'));

@@ -188,6 +189,43 @@ describe('ServiceAccountDrawer', () => {
});
});

it('removing a role fires DELETE for the removed role and no POST', async () => {
const roleSpy = jest.fn();
const deleteSpy = jest.fn();
const user = userEvent.setup({ pointerEventsCheck: 0 });

server.use(
rest.post(SA_ROLES_ENDPOINT, async (req, res, ctx) => {
roleSpy(await req.json());
return res(ctx.status(200), ctx.json({ status: 'success', data: {} }));
}),
rest.delete(SA_ROLE_DELETE_ENDPOINT, (_, res, ctx) => {
deleteSpy();
return res(ctx.status(200), ctx.json({ status: 'success', data: {} }));
}),
);

renderDrawer();

await screen.findByDisplayValue('CI Bot');

// Remove the signoz-admin tag from the multi-select
const adminTag = await screen.findByTitle('signoz-admin');
const removeBtn = adminTag.querySelector(
'.ant-select-selection-item-remove',
) as Element;
await user.click(removeBtn);

const saveBtn = screen.getByRole('button', { name: /Save Changes/i });
await waitFor(() => expect(saveBtn).not.toBeDisabled());
await user.click(saveBtn);

await waitFor(() => {
expect(deleteSpy).toHaveBeenCalled();
expect(roleSpy).not.toHaveBeenCalled();
});
});

it('"Delete Service Account" opens confirm dialog; confirming sends delete request', async () => {
const deleteSpy = jest.fn();
const user = userEvent.setup({ pointerEventsCheck: 0 });
@@ -1,5 +1,5 @@
import { CSSProperties } from 'react';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Spin, SpinProps } from 'antd';

import { SpinerStyle } from './styles';
@@ -7,7 +7,14 @@ import { SpinerStyle } from './styles';
function Spinner({ size, tip, height, style }: SpinnerProps): JSX.Element {
return (
<SpinerStyle height={height} style={style}>
<Spin spinning size={size} tip={tip} indicator={<LoadingOutlined spin />} />
<Spin
spinning
size={size}
tip={tip}
indicator={
<Loader className="animate-spin" role="img" aria-label="loading" />
}
/>
</SpinerStyle>
);
}

@@ -10,7 +10,7 @@ import {
} from 'react';
import type { TableComponents } from 'react-virtuoso';
import { TableVirtuoso, TableVirtuosoHandle } from 'react-virtuoso';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { DndContext, pointerWithin } from '@dnd-kit/core';
import {
horizontalListSortingStrategy,
@@ -580,7 +580,10 @@ function TanStackTableInner<TData>(
className={viewStyles.tanstackLoadingOverlay}
data-testid="tanstack-infinite-loader"
>
<Spin indicator={<LoadingOutlined spin />} tip="Loading more..." />
<Spin
indicator={<Loader className="animate-spin" />}
tip="Loading more..."
/>
</div>
)}
{showPagination && pagination && (

@@ -1,6 +1,6 @@
import { useMemo, useState } from 'react';
import { QueryFunctionContext, useQueries, useQuery } from 'react-query';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Spin, Switch, Table, Tooltip } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import { getQueryRangeV5 } from 'api/v5/queryRange/getQueryRange';
@@ -202,7 +202,9 @@ function TopErrors({
columns={topErrorsColumnsConfig}
loading={{
spinning: isLoading || isRefetching,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
indicator: (
<Spin indicator={<Loader size={14} className="animate-spin" />} />
),
}}
dataSource={isLoading || isRefetching ? [] : formattedTopErrorsData}
locale={{
@@ -2,7 +2,7 @@ import { useCallback, useEffect, useMemo, useState } from 'react';
import { useQueryClient } from 'react-query';
// eslint-disable-next-line no-restricted-imports
import { useSelector } from 'react-redux';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Spin, Table } from 'antd';
import logEvent from 'api/common/logEvent';
import emptyStateUrl from 'assets/Icons/emptyState.svg';
@@ -205,7 +205,9 @@ function DomainList(): JSX.Element {
columns={columnsConfig}
loading={{
spinning: isFetching || isLoading,
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
indicator: (
<Spin indicator={<Loader size={14} className="animate-spin" />} />
),
}}
scroll={{ x: true }}
tableLayout="fixed"

@@ -2,9 +2,8 @@ import { Fragment, useCallback, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { UseQueryResult } from 'react-query';
import { useInterval } from 'react-use';
import { LoadingOutlined } from '@ant-design/icons';
import { Compass, ScrollText } from '@signozhq/icons';
import { Button } from '@signozhq/ui/button';
import { Button } from '@signozhq/ui';
import { Compass, Loader, ScrollText } from '@signozhq/icons';
import { Modal, Spin } from 'antd';
import setRetentionApi from 'api/settings/setRetention';
import setRetentionApiV2 from 'api/settings/setRetentionV2';
@@ -480,7 +479,11 @@ function GeneralSettings({
saveButtonText:
metricsTtlValuesPayload.status === 'pending' ? (
<span>
<Spin spinning size="small" indicator={<LoadingOutlined spin />} />{' '}
<Spin
spinning
size="small"
indicator={<Loader className="animate-spin" />}
/>{' '}
{t('retention_save_button.pending', { name: 'metrics' })}
</span>
) : (
@@ -523,7 +526,11 @@ function GeneralSettings({
saveButtonText:
tracesTtlValuesPayload.status === 'pending' ? (
<span>
<Spin spinning size="small" indicator={<LoadingOutlined spin />} />{' '}
<Spin
spinning
size="small"
indicator={<Loader className="animate-spin" />}
/>{' '}
{t('retention_save_button.pending', { name: 'traces' })}
</span>
) : (
@@ -565,7 +572,11 @@
saveButtonText:
logsTtlValuesPayload.status === 'pending' ? (
<span>
<Spin spinning size="small" indicator={<LoadingOutlined spin />} />{' '}
<Spin
spinning
size="small"
indicator={<Loader className="animate-spin" />}
/>{' '}
{t('retention_save_button.pending', { name: 'logs' })}
</span>
) : (
@@ -9,11 +9,8 @@ import React, {
import { useQueryClient } from 'react-query';
// eslint-disable-next-line no-restricted-imports
import { useSelector } from 'react-redux'; // old code, TODO: fix this correctly
import {
LoadingOutlined,
SearchOutlined,
SyncOutlined,
} from '@ant-design/icons';
import { SearchOutlined, SyncOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Button, Input, Spin } from 'antd';
import cx from 'classnames';
import { ToggleGraphProps } from 'components/Graph/types';
@@ -338,7 +335,7 @@ function FullView({
)}
<div className="time-container">
{response.isFetching && (
<Spin spinning indicator={<LoadingOutlined spin />} />
<Spin spinning indicator={<Loader className="animate-spin" />} />
)}
<TimePreference
selectedTime={selectedTime}

@@ -1,6 +1,6 @@
import { useState } from 'react';
import { useTranslation } from 'react-i18next';
import { LoadingOutlined } from '@ant-design/icons';
import { LoaderCircle } from '@signozhq/icons';
import { Button, Input, Space } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import logEvent from 'api/common/logEvent';
@@ -79,7 +79,7 @@ export function RequestDashboardBtn(): JSX.Element {
className="periscope-btn primary"
icon={
isSubmittingRequestForDashboard ? (
<LoadingOutlined />
<LoaderCircle className="animate-spin" size={12} />
) : (
<Check size={12} />
)

@@ -1,6 +1,6 @@
import React, { useMemo } from 'react';
import { LoadingOutlined } from '@ant-design/icons';
import { Color } from '@signozhq/design-tokens';
import { LoaderCircle } from '@signozhq/icons';
import { Spin } from 'antd';
import { CircleCheck } from 'lucide-react';

@@ -21,7 +21,13 @@ export default function QueryStatus(

const content = useMemo((): React.ReactElement => {
if (loading) {
return <Spin spinning size="small" indicator={<LoadingOutlined spin />} />;
return (
<Spin
spinning
size="small"
indicator={<LoaderCircle className="animate-spin" />}
/>
);
}
if (error) {
return (

@@ -1,5 +1,5 @@
import { ReactNode, useCallback, useMemo, useState } from 'react';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Button, Popover, Spin } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import { useIsDarkMode } from 'hooks/useDarkMode';
@@ -32,7 +32,13 @@ function FieldItem({

const renderContent = useMemo(() => {
if (isLoading) {
return <Spin spinning size="small" indicator={<LoadingOutlined spin />} />;
return (
<Spin
spinning
size="small"
indicator={<Loader className="animate-spin" />}
/>
);
}

if (isHovered) {
@@ -1,5 +1,5 @@
import { useCallback } from 'react';
import { LoadingOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Spin, Table, TablePaginationConfig, TableProps, Tooltip } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import type { SorterResult } from 'antd/es/table/interface';
@@ -79,7 +79,7 @@ function MetricsTable({
indicator: (
<Spin
data-testid="metrics-table-loading-state"
indicator={<LoadingOutlined size={14} spin />}
indicator={<Loader size={14} className="animate-spin" />}
/>
),
}}

@@ -1,11 +1,8 @@
import { useEffect, useMemo, useState } from 'react';
// eslint-disable-next-line no-restricted-imports
import { useDispatch, useSelector } from 'react-redux';
import {
CheckCircleTwoTone,
CloseCircleTwoTone,
LoadingOutlined,
} from '@ant-design/icons';
import { CheckCircleTwoTone, CloseCircleTwoTone } from '@ant-design/icons';
import { LoaderCircle } from '@signozhq/icons';
import logEvent from 'api/common/logEvent';
import MessagingQueueHealthCheck from 'components/MessagingQueueHealthCheck/MessagingQueueHealthCheck';
import { QueryParams } from 'constants/query';
@@ -341,7 +338,7 @@ export default function ConnectionStatus(): JSX.Element {
<div className="label"> Status </div>

<div className="status">
{isQueryServiceLoading && <LoadingOutlined />}
{isQueryServiceLoading && <LoaderCircle className="animate-spin" />}
{!isQueryServiceLoading &&
isReceivingData &&
(getStartedSource !== 'kafka' ? (

@@ -1,6 +1,6 @@
import React, { useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { LoadingOutlined } from '@ant-design/icons';
import { LoaderCircle } from '@signozhq/icons';
import { Button, Card, Form, Input, Select, Space } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import logEvent from 'api/common/logEvent';
@@ -272,7 +272,7 @@ export default function DataSource(): JSX.Element {
className="periscope-btn primary"
icon={
isSubmittingRequestForDataSource ? (
<LoadingOutlined />
<LoaderCircle className="animate-spin" />
) : (
<Check size={12} />
)

@@ -1,6 +1,6 @@
import { useState } from 'react';
import { useTranslation } from 'react-i18next';
import { LoadingOutlined } from '@ant-design/icons';
import { LoaderCircle } from '@signozhq/icons';
import { Button, Card, Form, Input, Space } from 'antd';
import { Typography } from '@signozhq/ui/typography';
import logEvent from 'api/common/logEvent';
@@ -185,7 +185,7 @@ export default function EnvironmentDetails(): JSX.Element {
className="periscope-btn primary"
icon={
isSubmittingRequestForEnvironment ? (
<LoadingOutlined />
<LoaderCircle className="animate-spin" />
) : (
<Check size={12} />
)
@@ -1,9 +1,6 @@
import { useEffect, useState } from 'react';
import {
CheckCircleTwoTone,
CloseCircleTwoTone,
LoadingOutlined,
} from '@ant-design/icons';
import { CheckCircleTwoTone, CloseCircleTwoTone } from '@ant-design/icons';
import { LoaderCircle } from '@signozhq/icons';
import logEvent from 'api/common/logEvent';
import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { PANEL_TYPES } from 'constants/queryBuilder';
@@ -245,7 +242,7 @@ export default function LogsConnectionStatus(): JSX.Element {
<div className="label"> Status </div>

<div className="status">
{(loading || isFetching) && <LoadingOutlined />}
{(loading || isFetching) && <LoaderCircle className="animate-spin" />}
{!(loading || isFetching) && isReceivingData && (
<>
<CheckCircleTwoTone twoToneColor="#52c41a" />

@@ -2,9 +2,9 @@ import {
CheckCircleFilled,
CloseCircleFilled,
ExclamationCircleFilled,
LoadingOutlined,
MinusCircleFilled,
} from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import { Spin } from 'antd';

export function getDeploymentStage(value: string): string {
@@ -28,7 +28,17 @@ export function getDeploymentStageIcon(value: string): JSX.Element {
switch (value) {
case 'in_progress':
return (
<Spin indicator={<LoadingOutlined style={{ fontSize: 15 }} spin />} />
<Spin
indicator={
<Loader
size="large"
className="animate-spin"
role="img"
aria-label="loading"
data-icon="loading"
/>
}
/>
);
case 'deployed':
return <CheckCircleFilled />;

@@ -1,6 +1,7 @@
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useLocation } from 'react-router-dom';
import { LinkOutlined, LoadingOutlined } from '@ant-design/icons';
import { LinkOutlined } from '@ant-design/icons';
import { Loader } from '@signozhq/icons';
import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
@@ -230,7 +231,7 @@ const useBaseAggregateOptions = ({
key={key}
icon={
isLoading ? (
<LoadingOutlined spin />
<Loader className="animate-spin" />
) : (
<span style={{ color: aggregateData?.seriesColor }}>{icon}</span>
)
@@ -29,6 +29,18 @@
|
||||
border-bottom: 1px solid var(--l1-border);
|
||||
}
|
||||
|
||||
&__close {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
padding: 0;
|
||||
color: var(--foreground);
|
||||
flex-shrink: 0;
|
||||
|
||||
&:hover {
|
||||
color: var(--l1-foreground);
|
||||
}
|
||||
}
|
||||
|
||||
&__header-divider {
|
||||
display: block;
|
||||
width: 1px;
|
||||
|
||||
@@ -78,50 +78,39 @@ function ResourceRow({
|
||||
<RadioGroupLabel htmlFor={`${resource.id}-all`}>All</RadioGroupLabel>
|
||||
</div>
|
||||
|
||||
{resource.type === 'metaresources' ? (
|
||||
<div className="psp-resource__radio-item">
|
||||
<RadioGroupItem
|
||||
value={PermissionScope.ONLY_SELECTED}
|
||||
id={`${resource.id}-none`}
|
||||
/>
|
||||
<RadioGroupLabel htmlFor={`${resource.id}-none`}>None</RadioGroupLabel>
|
||||
</div>
|
||||
) : (
|
||||
<div className="psp-resource__radio-item">
|
||||
<RadioGroupItem
|
||||
value={PermissionScope.ONLY_SELECTED}
|
||||
id={`${resource.id}-only-selected`}
|
||||
/>
|
||||
<RadioGroupLabel htmlFor={`${resource.id}-only-selected`}>
|
||||
Only selected
|
||||
</RadioGroupLabel>
|
||||
</div>
|
||||
)}
|
||||
<div className="psp-resource__radio-item">
|
||||
<RadioGroupItem
|
||||
value={PermissionScope.ONLY_SELECTED}
|
||||
id={`${resource.id}-only-selected`}
|
||||
/>
|
||||
<RadioGroupLabel htmlFor={`${resource.id}-only-selected`}>
|
||||
Only selected
|
||||
</RadioGroupLabel>
|
||||
</div>
|
||||
</RadioGroup>
|
||||
|
||||
{config.scope === PermissionScope.ONLY_SELECTED &&
|
||||
resource.type !== 'metaresources' && (
|
||||
<div className="psp-resource__select-wrapper">
|
||||
{/* TODO: right now made to only accept user input, we need to give it proper resource based value fetching from APIs */}
|
||||
<Select
|
||||
mode="tags"
|
||||
value={config.selectedIds}
|
||||
onChange={(vals: string[]): void =>
|
||||
onSelectedIdsChange(resource.id, vals)
|
||||
}
|
||||
options={resource.options ?? []}
|
||||
placeholder="Select resources..."
|
||||
className="psp-resource__select"
|
||||
popupClassName="psp-resource__select-popup"
|
||||
showSearch
|
||||
filterOption={(input, option): boolean =>
|
||||
String(option?.label ?? '')
|
||||
.toLowerCase()
|
||||
.includes(input.toLowerCase())
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{config.scope === PermissionScope.ONLY_SELECTED && (
|
||||
<div className="psp-resource__select-wrapper">
|
||||
{/* TODO: right now made to only accept user input, we need to give it proper resource based value fetching from APIs */}
|
||||
<Select
|
||||
mode="tags"
|
||||
value={config.selectedIds}
|
||||
onChange={(vals: string[]): void =>
|
||||
onSelectedIdsChange(resource.id, vals)
|
||||
}
|
||||
options={resource.options ?? []}
|
||||
placeholder="Select resources..."
|
||||
className="psp-resource__select"
|
||||
popupClassName="psp-resource__select-popup"
|
||||
showSearch
|
||||
filterOption={(input, option): boolean =>
|
||||
String(option?.label ?? '')
|
||||
.toLowerCase()
|
||||
.includes(input.toLowerCase())
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
@@ -224,13 +213,13 @@ function PermissionSidePanel({
|
||||
<div className="permission-side-panel">
|
||||
<div className="permission-side-panel__header">
|
||||
<Button
|
||||
variant="link"
|
||||
color="secondary"
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="permission-side-panel__close"
|
||||
onClick={onClose}
|
||||
aria-label="Close panel"
|
||||
>
|
||||
<X size={14} />
|
||||
<X size={16} />
|
||||
</Button>
|
||||
<span className="permission-side-panel__header-divider" />
|
||||
<span className="permission-side-panel__title">
|
||||
|
||||
@@ -5,8 +5,6 @@ export interface ResourceOption {
|
||||
|
||||
export interface ResourceDefinition {
|
||||
id: string;
|
||||
kind: string;
|
||||
type: string;
|
||||
label: string;
|
||||
options?: ResourceOption[];
|
||||
}
|
||||
|
||||
@@ -282,6 +282,30 @@
|
||||
}
|
||||
}
|
||||
|
||||
.role-details-delete-action-btn {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
min-width: 32px;
|
||||
border: none;
|
||||
border-radius: 2px;
|
||||
background: transparent;
|
||||
color: var(--destructive);
|
||||
opacity: 0.6;
|
||||
padding: 0;
|
||||
transition:
|
||||
background-color 0.2s,
|
||||
opacity 0.2s;
|
||||
box-shadow: none;
|
||||
|
||||
&:hover {
|
||||
background: color-mix(in srgb, var(--danger-background) 10%, transparent);
|
||||
opacity: 0.9;
|
||||
}
|
||||
}
|
||||
|
||||
.role-details-delete-modal {
|
||||
width: calc(100% - 30px) !important;
|
||||
max-width: 384px;
|
||||
|
||||
@@ -110,6 +110,7 @@ function RoleDetailsPage(): JSX.Element {
|
||||
getGetObjectsQueryKey({ id: roleId, relation: activePermission }),
|
||||
);
|
||||
}
|
||||
setActivePermission(null);
|
||||
};
|
||||
|
||||
const { mutate: patchObjects, isLoading: isSaving } = usePatchObjects({
|
||||
@@ -212,16 +213,18 @@ function RoleDetailsPage(): JSX.Element {
|
||||
{!isManaged && (
|
||||
<div className="role-details-actions">
|
||||
<Button
|
||||
variant="link"
|
||||
variant="ghost"
|
||||
color="destructive"
|
||||
className="role-details-delete-action-btn"
|
||||
onClick={(): void => setIsDeleteModalOpen(true)}
|
||||
aria-label="Delete role"
|
||||
>
|
||||
<Trash2 size={12} />
|
||||
<Trash2 size={14} />
|
||||
</Button>
|
||||
<Button
|
||||
variant="solid"
|
||||
color="secondary"
|
||||
size="sm"
|
||||
onClick={(): void => setIsEditModalOpen(true)}
|
||||
>
|
||||
Edit Role Details
|
||||
|
||||
@@ -27,8 +27,9 @@ function DeleteRoleModal({
|
||||
<Button
|
||||
key="cancel"
|
||||
className="cancel-btn"
|
||||
prefix={<X size={14} />}
|
||||
prefix={<X size={16} />}
|
||||
onClick={onCancel}
|
||||
size="sm"
|
||||
variant="solid"
|
||||
color="secondary"
|
||||
>
|
||||
@@ -37,9 +38,10 @@ function DeleteRoleModal({
|
||||
<Button
|
||||
key="delete"
|
||||
className="delete-btn"
|
||||
prefix={<Trash2 size={14} />}
|
||||
prefix={<Trash2 size={16} />}
|
||||
onClick={onConfirm}
|
||||
loading={isDeleting}
|
||||
size="sm"
|
||||
variant="solid"
|
||||
color="destructive"
|
||||
>
|
||||
|
||||
@@ -285,19 +285,12 @@
|
||||
}
|
||||
}
|
||||
|
||||
input {
|
||||
&::placeholder {
|
||||
opacity: 0.4;
|
||||
}
|
||||
}
|
||||
|
||||
// todo: https://github.com/SigNoz/components/issues/116
|
||||
input,
|
||||
textarea {
|
||||
width: 100%;
|
||||
box-sizing: border-box;
|
||||
min-height: 100px;
|
||||
resize: vertical;
|
||||
background: var(--input-background, transparent);
|
||||
border: 1px solid var(--border);
|
||||
background: var(--l3-background);
|
||||
border: 1px solid var(--l1-border);
|
||||
border-radius: 2px;
|
||||
padding: 6px 8px;
|
||||
font-family: Inter;
|
||||
@@ -310,7 +303,7 @@
|
||||
box-shadow: none;
|
||||
|
||||
&::placeholder {
|
||||
color: var(--muted-foreground);
|
||||
color: var(--l3-foreground);
|
||||
opacity: 0.4;
|
||||
}
|
||||
|
||||
@@ -320,6 +313,25 @@
|
||||
box-shadow: none;
|
||||
}
|
||||
}
|
||||
|
||||
input {
|
||||
height: 32px;
|
||||
}
|
||||
|
||||
input:disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
|
||||
&:hover {
|
||||
border-color: var(--l1-border);
|
||||
box-shadow: none;
|
||||
}
|
||||
}
|
||||
|
||||
textarea {
|
||||
min-height: 100px;
|
||||
resize: vertical;
|
||||
}
|
||||
}
|
||||
|
||||
.ant-modal-footer {
|
||||
|
||||
@@ -58,18 +58,8 @@ const baseAuthzResources: AuthzResources = {
|
||||
};
|
||||
|
||||
const resourceDefs: ResourceDefinition[] = [
|
||||
{
|
||||
id: 'metaresource:dashboard',
|
||||
kind: 'dashboard',
|
||||
type: 'metaresource',
|
||||
label: 'Dashboard',
|
||||
},
|
||||
{
|
||||
id: 'metaresource:alert',
|
||||
kind: 'alert',
|
||||
type: 'metaresource',
|
||||
label: 'Alert',
|
||||
},
|
||||
{ id: 'dashboard', label: 'Dashboard' },
|
||||
{ id: 'alert', label: 'Alert' },
|
||||
];
|
||||
|
||||
const ID_A = 'aaaaaaaa-0000-0000-0000-000000000001';
|
||||
@@ -79,24 +69,15 @@ const ID_C = 'cccccccc-0000-0000-0000-000000000003';
|
||||
describe('buildPatchPayload', () => {
|
||||
it('sends only the added selector as an addition', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [ID_A] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -114,24 +95,18 @@ describe('buildPatchPayload', () => {
|
||||
|
||||
it('sends only the removed selector as a deletion', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B, ID_C],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_C],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -149,24 +124,18 @@ describe('buildPatchPayload', () => {
|
||||
|
||||
it('treats selector order as irrelevant — produces no payload when IDs are identical', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_B, ID_A],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -182,21 +151,15 @@ describe('buildPatchPayload', () => {
|
||||
|
||||
it('replaces wildcard with specific IDs when switching all → only_selected', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -216,21 +179,12 @@ describe('buildPatchPayload', () => {
|
||||
|
||||
it('only deletes wildcard when switching all → only_selected with empty selector list', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -248,18 +202,12 @@ describe('buildPatchPayload', () => {
|
||||
|
||||
it('only includes resources that actually changed', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [ID_A] },
|
||||
};
|
||||
const newConfig: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] }, // unchanged
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
}, // added ID_B
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] }, // unchanged
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [ID_A, ID_B] }, // added ID_B
|
||||
};
|
||||
|
||||
const result = buildPatchPayload({
|
||||
@@ -284,7 +232,7 @@ describe('objectsToPermissionConfig', () => {
|
||||
|
||||
const result = objectsToPermissionConfig(objects, resourceDefs);
|
||||
|
||||
expect(result['metaresource:dashboard']).toStrictEqual({
|
||||
expect(result.dashboard).toStrictEqual({
|
||||
scope: PermissionScope.ALL,
|
||||
selectedIds: [],
|
||||
});
|
||||
@@ -297,7 +245,7 @@ describe('objectsToPermissionConfig', () => {
|
||||
|
||||
const result = objectsToPermissionConfig(objects, resourceDefs);
|
||||
|
||||
expect(result['metaresource:dashboard']).toStrictEqual({
|
||||
expect(result.dashboard).toStrictEqual({
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
});
|
||||
@@ -306,11 +254,11 @@ describe('objectsToPermissionConfig', () => {
|
||||
it('defaults to ONLY_SELECTED with empty selectedIds when resource is absent from API response', () => {
|
||||
const result = objectsToPermissionConfig([], resourceDefs);
|
||||
|
||||
expect(result['metaresource:dashboard']).toStrictEqual({
|
||||
expect(result.dashboard).toStrictEqual({
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
});
|
||||
expect(result['metaresource:alert']).toStrictEqual({
|
||||
expect(result.alert).toStrictEqual({
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
});
|
||||
@@ -320,11 +268,8 @@ describe('objectsToPermissionConfig', () => {
|
||||
describe('configsEqual', () => {
|
||||
it('returns true for identical configs', () => {
|
||||
const config: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
'metaresource:alert': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
alert: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [ID_A] },
|
||||
};
|
||||
|
||||
expect(configsEqual(config, { ...config })).toBe(true);
|
||||
@@ -332,25 +277,22 @@ describe('configsEqual', () => {
|
||||
|
||||
it('returns false when configs differ', () => {
|
||||
const a: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
};
|
||||
const b: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [],
|
||||
},
|
||||
dashboard: { scope: PermissionScope.ONLY_SELECTED, selectedIds: [] },
|
||||
};
|
||||
|
||||
expect(configsEqual(a, b)).toBe(false);
|
||||
|
||||
const c: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_C, ID_B],
|
||||
},
|
||||
};
|
||||
const d: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
},
|
||||
@@ -361,13 +303,13 @@ describe('configsEqual', () => {
|
||||
|
||||
it('returns true when selectedIds are the same but in different order', () => {
|
||||
const a: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_A, ID_B],
|
||||
},
|
||||
};
|
||||
const b: PermissionConfig = {
|
||||
'metaresource:dashboard': {
|
||||
dashboard: {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
selectedIds: [ID_B, ID_A],
|
||||
},
|
||||
@@ -380,25 +322,23 @@ describe('configsEqual', () => {
|
||||
describe('buildConfig', () => {
|
||||
it('uses initial values when provided and defaults for resources not in initial', () => {
|
||||
const initial: PermissionConfig = {
|
||||
'metaresource:dashboard': { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
dashboard: { scope: PermissionScope.ALL, selectedIds: [] },
|
||||
};
|
||||
|
||||
const result = buildConfig(resourceDefs, initial);
|
||||
|
||||
expect(result['metaresource:dashboard']).toStrictEqual({
|
||||
expect(result.dashboard).toStrictEqual({
|
||||
scope: PermissionScope.ALL,
|
||||
selectedIds: [],
|
||||
});
|
||||
expect(result['metaresource:alert']).toStrictEqual(DEFAULT_RESOURCE_CONFIG);
|
||||
expect(result.alert).toStrictEqual(DEFAULT_RESOURCE_CONFIG);
|
||||
});
|
||||
|
||||
it('applies DEFAULT_RESOURCE_CONFIG to all resources when no initial is provided', () => {
|
||||
const result = buildConfig(resourceDefs);
|
||||
|
||||
expect(result['metaresource:dashboard']).toStrictEqual(
|
||||
DEFAULT_RESOURCE_CONFIG,
|
||||
);
|
||||
expect(result['metaresource:alert']).toStrictEqual(DEFAULT_RESOURCE_CONFIG);
|
||||
expect(result.dashboard).toStrictEqual(DEFAULT_RESOURCE_CONFIG);
|
||||
expect(result.alert).toStrictEqual(DEFAULT_RESOURCE_CONFIG);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -435,10 +375,7 @@ describe('deriveResourcesForRelation', () => {
|
||||
const result = deriveResourcesForRelation(baseAuthzResources, 'create');
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result.map((r) => r.id)).toStrictEqual([
|
||||
'metaresource:dashboard',
|
||||
'metaresource:alert',
|
||||
]);
|
||||
expect(result.map((r) => r.id)).toStrictEqual(['dashboard', 'alert']);
|
||||
});
|
||||
|
||||
it('returns an empty array when authzResources is null', () => {
|
||||
|
||||
@@ -1 +1 @@
|
||||
export const IS_ROLE_DETAILS_AND_CRUD_ENABLED = true;
|
||||
export const IS_ROLE_DETAILS_AND_CRUD_ENABLED = false;
|
||||
|
||||
@@ -70,9 +70,7 @@ export function deriveResourcesForRelation(
|
||||
return authzResources.resources
|
||||
.filter((r) => supportedTypes.includes(r.type))
|
||||
.map((r) => ({
|
||||
id: `${r.type}:${r.kind}`,
|
||||
kind: r.kind,
|
||||
type: r.type,
|
||||
id: r.kind,
|
||||
label: capitalize(r.kind).replaceAll('_', ' '),
|
||||
options: [],
|
||||
}));
|
||||
@@ -84,9 +82,7 @@ export function objectsToPermissionConfig(
|
||||
): PermissionConfig {
|
||||
const config: PermissionConfig = {};
|
||||
for (const res of resources) {
|
||||
const obj = objects.find(
|
||||
(o) => o.resource.kind === res.kind && o.resource.type === res.type,
|
||||
);
|
||||
const obj = objects.find((o) => o.resource.kind === res.id);
|
||||
if (!obj) {
|
||||
config[res.id] = {
|
||||
scope: PermissionScope.ONLY_SELECTED,
|
||||
@@ -122,9 +118,7 @@ export function buildPatchPayload({
|
||||
for (const res of resources) {
|
||||
const initial = initialConfig[res.id];
|
||||
const current = newConfig[res.id];
|
||||
const resourceDef = authzRes.resources.find(
|
||||
(r) => r.kind === res.kind && r.type === res.type,
|
||||
);
|
||||
const resourceDef = authzRes.resources.find((r) => r.kind === res.id);
|
||||
if (!resourceDef) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { ChangeEvent, useEffect, useMemo, useState } from 'react';
|
||||
import { LoadingOutlined } from '@ant-design/icons';
|
||||
import { Loader } from '@signozhq/icons';
|
||||
import { Button, Input, Spin } from 'antd';
|
||||
import cx from 'classnames';
|
||||
import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
|
||||
@@ -215,7 +215,7 @@ function AddSpanToFunnelModal({
|
||||
<Spin
|
||||
className="add-span-to-funnel-modal__loading-spinner"
|
||||
spinning={isFunnelDetailsLoading || isFunnelDetailsFetching}
|
||||
indicator={<LoadingOutlined spin />}
|
||||
indicator={<Loader className="animate-spin" />}
|
||||
>
|
||||
{selectedFunnelId && funnelDetails?.payload && (
|
||||
<FunnelProvider
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { useCallback, useState } from 'react';
|
||||
import { useHistory, useLocation } from 'react-router-dom';
|
||||
import { InfoCircleOutlined, LoadingOutlined } from '@ant-design/icons';
|
||||
import { InfoCircleOutlined } from '@ant-design/icons';
|
||||
import { Loader } from '@signozhq/icons';
|
||||
import { Button, Spin, Tooltip } from 'antd';
|
||||
import { Typography } from '@signozhq/ui/typography';
|
||||
import { AxiosError } from 'axios';
|
||||
@@ -188,7 +189,9 @@ function Filters({
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{isFetching && <Spin indicator={<LoadingOutlined spin />} size="small" />}
|
||||
{isFetching && (
|
||||
<Spin indicator={<Loader className="animate-spin" />} size="small" />
|
||||
)}
|
||||
{error && (
|
||||
<Tooltip title={(error as AxiosError)?.message || 'Something went wrong'}>
|
||||
<InfoCircleOutlined size={14} />
|
||||
|
||||
@@ -3,6 +3,7 @@ import { useQueryClient } from 'react-query';
|
||||
import {
|
||||
getGetServiceAccountRolesQueryKey,
|
||||
useCreateServiceAccountRole,
|
||||
useDeleteServiceAccountRole,
|
||||
useGetServiceAccountRoles,
|
||||
} from 'api/generated/services/serviceaccount';
|
||||
import type { AuthtypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
|
||||
@@ -44,6 +45,9 @@ export function useServiceAccountRoleManager(
|
||||
const { mutateAsync: createRole } = useCreateServiceAccountRole({
|
||||
mutation: { retry: retryOn429 },
|
||||
});
|
||||
const { mutateAsync: deleteRole } = useDeleteServiceAccountRole({
|
||||
mutation: { retry: retryOn429 },
|
||||
});
|
||||
|
||||
const invalidateRoles = useCallback(
|
||||
() =>
|
||||
@@ -68,14 +72,21 @@ export function useServiceAccountRoleManager(
|
||||
const addedRoles = availableRoles.filter(
|
||||
(r) => r.id && desiredRoleIds.has(r.id) && !currentRoleIds.has(r.id),
|
||||
);
|
||||
const removedRoles = currentRoles.filter(
|
||||
(r) => r.id && !desiredRoleIds.has(r.id),
|
||||
);
|
||||
|
||||
// TODO: re-enable deletes once BE for this is streamlined
|
||||
const allOperations = [
|
||||
...addedRoles.map((role) => ({
|
||||
role,
|
||||
run: (): ReturnType<typeof createRole> =>
|
||||
createRole({ pathParams: { id: accountId }, data: { id: role.id } }),
|
||||
})),
|
||||
...removedRoles.map((role) => ({
|
||||
role,
|
||||
run: (): ReturnType<typeof deleteRole> =>
|
||||
deleteRole({ pathParams: { id: accountId, rid: role.id ?? '' } }),
|
||||
})),
|
||||
];
|
||||
|
||||
const results = await Promise.allSettled(
|
||||
@@ -106,7 +117,7 @@ export function useServiceAccountRoleManager(
|
||||
|
||||
return failures;
|
||||
},
|
||||
[accountId, currentRoles, createRole, invalidateRoles],
|
||||
[accountId, currentRoles, createRole, deleteRole, invalidateRoles],
|
||||
);
|
||||
|
||||
return {
|
||||
|
||||
@@ -25,7 +25,7 @@ describe('computeVisualLayout', () => {
|
||||
it('should handle empty input', () => {
|
||||
const layout = computeVisualLayout([]);
|
||||
expect(layout.totalVisualRows).toBe(0);
|
||||
expect(layout.visualRows).toEqual([]);
|
||||
expect(layout.visualRows).toStrictEqual([]);
|
||||
});
|
||||
|
||||
it('should handle single root, no children — 1 visual row', () => {
|
||||
@@ -36,7 +36,7 @@ describe('computeVisualLayout', () => {
|
||||
});
|
||||
const layout = computeVisualLayout([[root]]);
|
||||
expect(layout.totalVisualRows).toBe(1);
|
||||
expect(layout.visualRows[0]).toEqual([root]);
|
||||
expect(layout.visualRows[0]).toStrictEqual([root]);
|
||||
expect(layout.spanToVisualRow['root']).toBe(0);
|
||||
});
|
||||
|
||||
|
||||
@@ -44,12 +44,12 @@ function makeSpan(
|
||||
|
||||
describe('getVisibleSpans', () => {
|
||||
it('returns empty array for empty input', () => {
|
||||
expect(getVisibleSpans([], new Set())).toEqual([]);
|
||||
expect(getVisibleSpans([], new Set())).toStrictEqual([]);
|
||||
});
|
||||
|
||||
it('returns single root span with no children', () => {
|
||||
const spans = [makeSpan({ span_id: 'root', level: 0 })];
|
||||
expect(getVisibleSpans(spans, new Set())).toEqual(spans);
|
||||
expect(getVisibleSpans(spans, new Set())).toStrictEqual(spans);
|
||||
});
|
||||
|
||||
it('returns all spans for flat tree (all level 0, no children)', () => {
|
||||
@@ -58,7 +58,7 @@ describe('getVisibleSpans', () => {
|
||||
makeSpan({ span_id: 'b', level: 0 }),
|
||||
makeSpan({ span_id: 'c', level: 0 }),
|
||||
];
|
||||
expect(getVisibleSpans(spans, new Set())).toEqual(spans);
|
||||
expect(getVisibleSpans(spans, new Set())).toStrictEqual(spans);
|
||||
});
|
||||
|
||||
it('returns all spans when all parents are expanded', () => {
|
||||
@@ -68,7 +68,7 @@ describe('getVisibleSpans', () => {
|
||||
makeSpan({ span_id: 'b', level: 1 }),
|
||||
];
|
||||
const uncollapsed = new Set(['root']);
|
||||
expect(getVisibleSpans(spans, uncollapsed)).toEqual(spans);
|
||||
expect(getVisibleSpans(spans, uncollapsed)).toStrictEqual(spans);
|
||||
});
|
||||
|
||||
it('hides children when root is collapsed', () => {
|
||||
@@ -99,7 +99,7 @@ describe('getVisibleSpans', () => {
|
||||
// root and A expanded, but A is collapsed
|
||||
const uncollapsed = new Set(['root']); // A not in set → collapsed
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result.map((s) => s.span_id)).toEqual(['root', 'A']);
|
||||
expect(result.map((s) => s.span_id)).toStrictEqual(['root', 'A']);
|
||||
});
|
||||
|
||||
it('collapses one subtree while sibling subtree stays visible', () => {
|
||||
@@ -116,7 +116,7 @@ describe('getVisibleSpans', () => {
|
||||
// root and childB expanded, childA collapsed
|
||||
const uncollapsed = new Set(['root', 'childB']);
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result.map((s) => s.span_id)).toEqual([
|
||||
expect(result.map((s) => s.span_id)).toStrictEqual([
|
||||
'root',
|
||||
'childA', // visible but collapsed
|
||||
'childB', // visible and expanded
|
||||
@@ -134,11 +134,11 @@ describe('getVisibleSpans', () => {
|
||||
|
||||
// Collapsed
|
||||
const collapsed = getVisibleSpans(spans, new Set());
|
||||
expect(collapsed.map((s) => s.span_id)).toEqual(['root']);
|
||||
expect(collapsed.map((s) => s.span_id)).toStrictEqual(['root']);
|
||||
|
||||
// Uncollapsed
|
||||
const uncollapsed = getVisibleSpans(spans, new Set(['root']));
|
||||
expect(uncollapsed.map((s) => s.span_id)).toEqual(['root', 'a', 'b']);
|
||||
expect(uncollapsed.map((s) => s.span_id)).toStrictEqual(['root', 'a', 'b']);
|
||||
});
|
||||
|
||||
it('hides all levels below collapsed span in deeply nested tree', () => {
|
||||
@@ -154,7 +154,7 @@ describe('getVisibleSpans', () => {
|
||||
// Expand L0 and L1, collapse L2
|
||||
const uncollapsed = new Set(['L0', 'L1']);
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result.map((s) => s.span_id)).toEqual(['L0', 'L1', 'L2']);
|
||||
expect(result.map((s) => s.span_id)).toStrictEqual(['L0', 'L1', 'L2']);
|
||||
});
|
||||
|
||||
it('shows only root-level spans when all parents are collapsed', () => {
|
||||
@@ -166,7 +166,7 @@ describe('getVisibleSpans', () => {
|
||||
];
|
||||
const uncollapsed = new Set<string>(); // nothing expanded
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result.map((s) => s.span_id)).toEqual(['root1', 'root2']);
|
||||
expect(result.map((s) => s.span_id)).toStrictEqual(['root1', 'root2']);
|
||||
});
|
||||
|
||||
it('leaf span in uncollapsed set has no effect', () => {
|
||||
@@ -177,7 +177,7 @@ describe('getVisibleSpans', () => {
|
||||
// leaf is in uncollapsed set but has_children=false, should make no difference
|
||||
const uncollapsed = new Set(['root', 'leaf']);
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result).toEqual(spans);
|
||||
expect(result).toStrictEqual(spans);
|
||||
});
|
||||
|
||||
it('handles 10k spans within 50ms', () => {
|
||||
@@ -218,7 +218,13 @@ describe('getVisibleSpans', () => {
|
||||
// root and C expanded; A and B collapsed
|
||||
const uncollapsed = new Set(['root', 'C']);
|
||||
const result = getVisibleSpans(spans, uncollapsed);
|
||||
expect(result.map((s) => s.span_id)).toEqual(['root', 'A', 'B', 'C', 'C1']);
|
||||
expect(result.map((s) => s.span_id)).toStrictEqual([
|
||||
'root',
|
||||
'A',
|
||||
'B',
|
||||
'C',
|
||||
'C1',
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -234,7 +240,7 @@ describe('getAncestorSpanIds', () => {
|
||||
makeSpan({ span_id: 'child', level: 1, parent_span_id: 'root' }),
|
||||
];
|
||||
const ancestors = getAncestorSpanIds(spans, 'child');
|
||||
expect(ancestors).toEqual(new Set(['root']));
|
||||
expect(ancestors).toStrictEqual(new Set(['root']));
|
||||
});
|
||||
|
||||
it('returns all ancestors for deeply nested span', () => {
|
||||
@@ -255,7 +261,7 @@ describe('getAncestorSpanIds', () => {
|
||||
makeSpan({ span_id: 'L3', level: 3, parent_span_id: 'L2' }),
|
||||
];
|
||||
const ancestors = getAncestorSpanIds(spans, 'L3');
|
||||
expect(ancestors).toEqual(new Set(['L2', 'L1', 'L0']));
|
||||
expect(ancestors).toStrictEqual(new Set(['L2', 'L1', 'L0']));
|
||||
});
|
||||
|
||||
it('returns empty set for unknown span', () => {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useCallback, useMemo, useState } from 'react';
|
||||
import { LoadingOutlined } from '@ant-design/icons';
|
||||
import { Loader } from '@signozhq/icons';
|
||||
import { Empty, Spin } from 'antd';
|
||||
import {
|
||||
BarController,
|
||||
@@ -97,7 +97,7 @@ function FunnelGraph(): JSX.Element {
|
||||
}
|
||||
|
||||
return (
|
||||
<Spin spinning={isFetching} indicator={<LoadingOutlined spin />}>
|
||||
<Spin spinning={isFetching} indicator={<Loader className="animate-spin" />}>
|
||||
<div className={cx('funnel-graph', `funnel-graph--${totalSteps}-columns`)}>
|
||||
<div className="funnel-graph__chart-container">
|
||||
<canvas ref={canvasRef} />
|
||||
|
||||
@@ -54,9 +54,23 @@ describe('globalTimeStore', () => {
|
||||
expect(result.current.lastRefreshTimestamp).toBe(0);
|
||||
});
|
||||
|
||||
it('should have lastComputedMinMax with default values', () => {
|
||||
it('should have lastComputedMinMax computed from initial selectedTime', () => {
|
||||
const { result } = renderHook(() => useGlobalTimeStore());
|
||||
expect(result.current.lastComputedMinMax).toStrictEqual({
|
||||
// Now computes min/max on store creation, no longer starts with 0
|
||||
expect(result.current.lastComputedMinMax.minTime).toBeGreaterThan(0);
|
||||
expect(result.current.lastComputedMinMax.maxTime).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should not crash with invalid relative time format', () => {
|
||||
// Invalid time formats should not crash store creation
|
||||
const store = createGlobalTimeStore({
|
||||
selectedTime: 'invalid_time' as GlobalTimeSelectedTime,
|
||||
});
|
||||
|
||||
// Store should be created successfully
|
||||
expect(store.getState().selectedTime).toBe('invalid_time');
|
||||
// Should fallback to {0, 0} when parsing fails
|
||||
expect(store.getState().lastComputedMinMax).toStrictEqual({
|
||||
minTime: 0,
|
||||
maxTime: 0,
|
||||
});
|
||||
@@ -724,8 +738,8 @@ describe('globalTimeStore', () => {
|
||||
const wrapper = createIsolatedWrapper();
|
||||
const { result } = renderHook(() => useGlobalTime(), { wrapper });
|
||||
|
||||
// Initial state has 0 values
|
||||
expect(result.current.lastComputedMinMax.maxTime).toBe(0);
|
||||
// Initial state now has computed values (no longer 0)
|
||||
expect(result.current.lastComputedMinMax.maxTime).toBeGreaterThan(0);
|
||||
|
||||
act(() => {
|
||||
result.current.setSelectedTime('15m');
|
||||
|
||||
@@ -134,17 +134,17 @@ describe('useComputedMinMaxSync', () => {
|
||||
jest.useRealTimers();
|
||||
});
|
||||
|
||||
it('should compute min/max on mount when store has zero values', () => {
|
||||
it('should have computed min/max on store creation (no longer needs mount sync)', () => {
|
||||
const contextStore = createGlobalTimeStore({ selectedTime: '15m' });
|
||||
|
||||
expect(contextStore.getState().lastComputedMinMax).toStrictEqual({
|
||||
minTime: 0,
|
||||
maxTime: 0,
|
||||
});
|
||||
// Store now computes min/max on creation, not on mount
|
||||
expect(contextStore.getState().lastComputedMinMax.minTime).toBeGreaterThan(0);
|
||||
expect(contextStore.getState().lastComputedMinMax.maxTime).toBeGreaterThan(0);
|
||||
|
||||
// Hook still works but is a no-op when values already exist
|
||||
renderHook(() => useComputedMinMaxSync(contextStore));
|
||||
|
||||
// Should have computed values now
|
||||
// Values remain computed
|
||||
expect(contextStore.getState().lastComputedMinMax.maxTime).toBeGreaterThan(0);
|
||||
expect(contextStore.getState().lastComputedMinMax.minTime).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
@@ -12,6 +12,7 @@ import {
	computeRounded5sMinMax,
	isCustomTimeRange,
	parseSelectedTime,
	safeParseSelectedTime,
} from './utils';

export type GlobalTimeStoreApi = StoreApi<GlobalTimeStore>;
@@ -40,7 +41,7 @@ export function createGlobalTimeStore(
	refreshInterval,
	isRefreshEnabled: computeIsRefreshEnabled(selectedTime, refreshInterval),
	lastRefreshTimestamp: 0,
	lastComputedMinMax: { minTime: 0, maxTime: 0 },
	lastComputedMinMax: safeParseSelectedTime(selectedTime),

	setSelectedTime: (
		time: GlobalTimeSelectedTime,
@@ -61,6 +61,8 @@ const fallbackDurationInNanoSeconds = 30 * 1000 * NANO_SECOND_MULTIPLIER; // 30s
 * Parse the selectedTime string to get min/max time values.
 * For relative times, computes fresh values based on Date.now().
 * For custom times, extracts the stored min/max values.
 *
 * @throws Error - When selectedTime is a relative time and it is invalid
 */
export function parseSelectedTime(selectedTime: string): ParsedTimeRange {
	if (isCustomTimeRange(selectedTime)) {
@@ -78,6 +80,18 @@ export function parseSelectedTime(selectedTime: string): ParsedTimeRange {
	return getMinMaxForSelectedTime(selectedTime as Time, 0, 0);
}

/**
 * {@link parseSelectedTime} can throw; this wrapper catches and falls back to
 * { minTime: 0, maxTime: 0 } when an invalid selected time is provided.
 */
export function safeParseSelectedTime(selectedTime: string): ParsedTimeRange {
	try {
		return parseSelectedTime(selectedTime);
	} catch (e) {
		console.error('Error parsing selected time:', e);
		return { minTime: 0, maxTime: 0 };
	}
}

/**
 * @deprecated Use store.getAutoRefreshQueryKey() instead.
 * Access via: const getAutoRefreshQueryKey = useGlobalTime((s) => s.getAutoRefreshQueryKey);
@@ -86,5 +86,24 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := router.Handle("/api/v2/infra_monitoring/clusters", handler.New(
|
||||
provider.authzMiddleware.ViewAccess(provider.infraMonitoringHandler.ListClusters),
|
||||
handler.OpenAPIDef{
|
||||
ID: "ListClusters",
|
||||
Tags: []string{"inframonitoring"},
|
||||
Summary: "List Clusters for Infra Monitoring",
|
||||
Description: "Returns a paginated list of Kubernetes clusters with key aggregated metrics derived by summing per-node values within the group: CPU usage, CPU allocatable, memory working set, memory allocatable. Each row also reports per-group nodeCountsByReadiness ({ ready, notReady } from each node's latest k8s.node.condition_ready value) and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each cluster includes metadata attributes (k8s.cluster.name). The response type is 'list' for the default k8s.cluster.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates nodes and pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (clusterCPU, clusterCPUAllocatable, clusterMemory, clusterMemoryAllocatable) return -1 as a sentinel when no data is available for that field.",
|
||||
Request: new(inframonitoringtypes.PostableClusters),
|
||||
RequestContentType: "application/json",
|
||||
Response: new(inframonitoringtypes.Clusters),
|
||||
ResponseContentType: "application/json",
|
||||
SuccessStatusCode: http.StatusOK,
|
||||
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
|
||||
Deprecated: false,
|
||||
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
|
||||
})).Methods(http.MethodPost).GetError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
	"errors" //nolint:depguard
	"fmt"
	"log/slog"

	"go.opentelemetry.io/otel/attribute"
)

// base is the fundamental struct that implements the error interface.
@@ -253,3 +255,10 @@ func NewTimeoutf(code Code, format string, args ...any) *base {
func Attr(err error) slog.Attr {
	return slog.Any("exception", err)
}

// TypeAttr returns an OTel attribute.KeyValue with the "error.type" semconv key
// set to the error's type string.
func TypeAttr(err error) attribute.KeyValue {
	t, _, _, _, _, _ := Unwrapb(err)
	return attribute.String("error.type", t.String())
}
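Note: a minimal, hedged usage sketch of the two helpers above, assuming the standard OpenTelemetry Go trace API and a context carrying an active span (not part of this changeset):

	import (
		"context"
		"log/slog"

		"go.opentelemetry.io/otel/trace"

		"github.com/SigNoz/signoz/pkg/errors"
	)

	// recordFailure annotates the active span with the error's type via TypeAttr
	// and logs the same error with Attr, so traces and logs stay correlated.
	func recordFailure(ctx context.Context, err error) {
		if err == nil {
			return
		}
		span := trace.SpanFromContext(ctx)
		span.RecordError(err)
		span.SetAttributes(errors.TypeAttr(err))
		slog.ErrorContext(ctx, "operation failed", errors.Attr(err))
	}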
@@ -8,6 +8,7 @@ var (
	FeatureHideRootUser      = featuretypes.MustNewName("hide_root_user")
	FeatureGetMetersFromZeus = featuretypes.MustNewName("get_meters_from_zeus")
	FeaturePutMetersInZeus   = featuretypes.MustNewName("put_meters_in_zeus")
	FeatureUseMeterReporter  = featuretypes.MustNewName("use_meter_reporter")
	FeatureUseJSONBody       = featuretypes.MustNewName("use_json_body")
)

@@ -53,6 +54,14 @@ func MustNewRegistry() featuretypes.Registry {
		DefaultVariant: featuretypes.MustNewName("disabled"),
		Variants:       featuretypes.NewBooleanVariants(),
	},
	&featuretypes.Feature{
		Name:           FeatureUseMeterReporter,
		Kind:           featuretypes.KindBoolean,
		Stage:          featuretypes.StageExperimental,
		Description:    "Controls whether the enterprise meter reporter runs instead of the noop reporter",
		DefaultVariant: featuretypes.MustNewName("disabled"),
		Variants:       featuretypes.NewBooleanVariants(),
	},
	&featuretypes.Feature{
		Name: FeatureUseJSONBody,
		Kind: featuretypes.KindBoolean,
pkg/metercollector/config.go (new file, 68 lines)
@@ -0,0 +1,68 @@
package metercollector

import (
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

const (
	ProviderStatic    = "static"
	ProviderTelemetry = "telemetry"
)

type Config struct {
	Provider  string          `mapstructure:"provider"`
	Telemetry TelemetryConfig `mapstructure:"telemetry"`
	Static    StaticConfig    `mapstructure:"static"`
}

func (c Config) Validate() error {
	switch c.Provider {
	case ProviderStatic:
		return c.Static.Validate()
	case ProviderTelemetry:
		return c.Telemetry.Validate()
	default:
		return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidConfig, "meter collector: unknown provider %q", c.Provider)
	}
}

type TelemetryConfig struct {
	Name                 zeustypes.MeterName
	Unit                 zeustypes.MeterUnit
	Aggregation          zeustypes.MeterAggregation
	DBName               string
	TableName            string
	DefaultRetentionDays int
}

type StaticConfig struct {
	Name        zeustypes.MeterName
	Unit        zeustypes.MeterUnit
	Aggregation zeustypes.MeterAggregation
	Value       int64
}

func (c StaticConfig) Validate() error {
	if c.Name.IsZero() {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "static meter collector: name must be set")
	}

	return nil
}

func (c TelemetryConfig) Validate() error {
	if c.Name.IsZero() {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: name must be set")
	}

	if c.DBName == "" || c.TableName == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: db_name and table_name are required")
	}

	if c.DefaultRetentionDays <= 0 {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: default_retention_days must be positive")
	}

	return nil
}
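Note: a small, hedged sketch of how Validate dispatches per provider; the db/table values below are placeholders for illustration, not shipped defaults (not part of this changeset):

	import (
		"fmt"

		"github.com/SigNoz/signoz/pkg/metercollector"
	)

	func validateExamples() {
		// Unknown provider: rejected with an invalid-input error.
		unknown := metercollector.Config{Provider: "s3"}
		fmt.Println(unknown.Validate())

		// Telemetry provider: db_name, table_name and a positive
		// default_retention_days are required in addition to the meter name.
		telemetry := metercollector.Config{
			Provider: metercollector.ProviderTelemetry,
			Telemetry: metercollector.TelemetryConfig{
				DBName:               "signoz_logs", // placeholder
				TableName:            "logs_v2",     // placeholder
				DefaultRetentionDays: 15,
			},
		}
		// Still fails: Telemetry.Name is the zero MeterName.
		fmt.Println(telemetry.Validate())
	}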
pkg/metercollector/metercollector.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package metercollector

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

var (
	ErrCodeMeterCollectorCollectFailed              = errors.MustNewCode("meter_collector_collect_failed")
	ErrCodeMeterCollectorInvalidCustomRetentionRule = errors.MustNewCode("meter_collector_invalid_custom_retention_rule")
	ErrCodeInvalidConfig                            = errors.MustNewCode("meter_collector_invalid_config")
)

type MeterCollector interface {
	Name() zeustypes.MeterName
	Unit() zeustypes.MeterUnit
	Aggregation() zeustypes.MeterAggregation
	Origin(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, todayStart time.Time) (time.Time, error)
	Collect(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error)
}
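Note: a hedged, hypothetical sketch of the interface contract — a static collector reporting a fixed value. The zeustypes.Meter construction is deliberately omitted because its fields are not shown in this changeset:

	import (
		"context"
		"time"

		"github.com/SigNoz/signoz/pkg/metercollector"
		"github.com/SigNoz/signoz/pkg/types/licensetypes"
		"github.com/SigNoz/signoz/pkg/types/zeustypes"
		"github.com/SigNoz/signoz/pkg/valuer"
	)

	// staticCollector exists only to illustrate the MeterCollector shape.
	type staticCollector struct {
		cfg metercollector.StaticConfig
	}

	var _ metercollector.MeterCollector = (*staticCollector)(nil)

	func (s *staticCollector) Name() zeustypes.MeterName               { return s.cfg.Name }
	func (s *staticCollector) Unit() zeustypes.MeterUnit               { return s.cfg.Unit }
	func (s *staticCollector) Aggregation() zeustypes.MeterAggregation { return s.cfg.Aggregation }

	// Origin: a static meter is assumed to have data from the first day asked about.
	func (s *staticCollector) Origin(_ context.Context, _ valuer.UUID, _ *licensetypes.License, todayStart time.Time) (time.Time, error) {
		return todayStart, nil
	}

	// Collect: a real implementation would emit one zeustypes.Meter per window.
	func (s *staticCollector) Collect(_ context.Context, _ valuer.UUID, _ *licensetypes.License, _ zeustypes.MeterWindow) ([]zeustypes.Meter, error) {
		return []zeustypes.Meter{}, nil
	}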
pkg/meterreporter/config.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package meterreporter

import (
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
)

var _ factory.Config = (*Config)(nil)

type Config struct {
	// Interval is how often the reporter collects and ships meters.
	Interval time.Duration `mapstructure:"interval"`

	// Backfill enables sealed-day catch-up from the license creation day.
	Backfill bool `mapstructure:"backfill"`
}

func newConfig() factory.Config {
	return Config{
		Interval: 6 * time.Hour,
		Backfill: true,
	}
}

func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("meterreporter"), newConfig)
}

func (c Config) Validate() error {
	if c.Interval < 5*time.Minute || c.Interval > 24*time.Hour {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::interval must be between 5m and 24h")
	}

	return nil
}
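Note: a quick, hedged sketch of the interval guard above (not part of this changeset):

	import (
		"fmt"
		"time"

		"github.com/SigNoz/signoz/pkg/meterreporter"
	)

	func intervalExamples() {
		cfg := meterreporter.Config{Interval: time.Minute, Backfill: true}
		fmt.Println(cfg.Validate()) // error: interval must be between 5m and 24h

		cfg.Interval = 6 * time.Hour // matches the default from newConfig
		fmt.Println(cfg.Validate()) // <nil>
	}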
pkg/meterreporter/meterreporter.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package meterreporter

import (
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
)

var (
	ErrCodeInvalidInput = errors.MustNewCode("meterreporter_invalid_input")
)

type Reporter interface {
	factory.ServiceWithHealthy
}
pkg/meterreporter/noopmeterreporter/provider.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package noopmeterreporter

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/meterreporter"
)

type provider struct {
	healthyC chan struct{}
	stopC    chan struct{}
}

func NewFactory() factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
	return factory.NewProviderFactory(factory.MustNewName("noop"), New)
}

func New(_ context.Context, _ factory.ProviderSettings, _ meterreporter.Config) (meterreporter.Reporter, error) {
	return &provider{
		healthyC: make(chan struct{}),
		stopC:    make(chan struct{}),
	}, nil
}

func (p *provider) Start(_ context.Context) error {
	close(p.healthyC)
	<-p.stopC
	return nil
}

func (p *provider) Stop(_ context.Context) error {
	close(p.stopC)
	return nil
}

func (p *provider) Healthy() <-chan struct{} {
	return p.healthyC
}
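Note: Start blocks on stopC until Stop is called, and Healthy is signalled by closing healthyC, so callers typically run Start in its own goroutine. A hedged sketch, assuming a zero-valued factory.ProviderSettings is acceptable for the noop case (not part of this changeset):

	import (
		"context"

		"github.com/SigNoz/signoz/pkg/factory"
		"github.com/SigNoz/signoz/pkg/meterreporter"
		"github.com/SigNoz/signoz/pkg/meterreporter/noopmeterreporter"
	)

	func runNoopReporter() error {
		rep, err := noopmeterreporter.New(context.Background(), factory.ProviderSettings{}, meterreporter.Config{})
		if err != nil {
			return err
		}

		go func() { _ = rep.Start(context.Background()) }() // blocks until Stop
		<-rep.Healthy()                                     // closed as soon as Start runs
		return rep.Stop(context.Background())               // closes stopC, unblocking Start
	}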
pkg/modules/inframonitoring/implinframonitoring/clusters.go (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
package implinframonitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
// buildClusterRecords assembles the page records. Node condition counts and
|
||||
// pod phase counts come from the respective per-group maps in both modes;
|
||||
// every row is a group of nodes+pods, so there's no per-row "current state"
|
||||
// concept (analogous to namespaces).
|
||||
func buildClusterRecords(
|
||||
resp *qbtypes.QueryRangeResponse,
|
||||
pageGroups []map[string]string,
|
||||
groupBy []qbtypes.GroupByKey,
|
||||
metadataMap map[string]map[string]string,
|
||||
nodeConditionCountsMap map[string]nodeConditionCounts,
|
||||
podPhaseCountsMap map[string]podPhaseCounts,
|
||||
) []inframonitoringtypes.ClusterRecord {
|
||||
metricsMap := parseFullQueryResponse(resp, groupBy)
|
||||
|
||||
records := make([]inframonitoringtypes.ClusterRecord, 0, len(pageGroups))
|
||||
for _, labels := range pageGroups {
|
||||
compositeKey := compositeKeyFromLabels(labels, groupBy)
|
||||
clusterName := labels[clusterNameAttrKey]
|
||||
|
||||
record := inframonitoringtypes.ClusterRecord{ // initialize with default values
|
||||
ClusterName: clusterName,
|
||||
ClusterCPU: -1,
|
||||
ClusterCPUAllocatable: -1,
|
||||
ClusterMemory: -1,
|
||||
ClusterMemoryAllocatable: -1,
|
||||
Meta: map[string]string{},
|
||||
}
|
||||
|
||||
if metrics, ok := metricsMap[compositeKey]; ok {
|
||||
if v, exists := metrics["A"]; exists {
|
||||
record.ClusterCPU = v
|
||||
}
|
||||
if v, exists := metrics["B"]; exists {
|
||||
record.ClusterCPUAllocatable = v
|
||||
}
|
||||
if v, exists := metrics["C"]; exists {
|
||||
record.ClusterMemory = v
|
||||
}
|
||||
if v, exists := metrics["D"]; exists {
|
||||
record.ClusterMemoryAllocatable = v
|
||||
}
|
||||
}
|
||||
|
||||
if conditionCountsForGroup, ok := nodeConditionCountsMap[compositeKey]; ok {
|
||||
record.NodeCountsByReadiness = inframonitoringtypes.NodeCountsByReadiness{
|
||||
Ready: conditionCountsForGroup.Ready,
|
||||
NotReady: conditionCountsForGroup.NotReady,
|
||||
}
|
||||
}
|
||||
|
||||
if phaseCountsForGroup, ok := podPhaseCountsMap[compositeKey]; ok {
|
||||
record.PodCountsByPhase = inframonitoringtypes.PodCountsByPhase{
|
||||
Pending: phaseCountsForGroup.Pending,
|
||||
Running: phaseCountsForGroup.Running,
|
||||
Succeeded: phaseCountsForGroup.Succeeded,
|
||||
Failed: phaseCountsForGroup.Failed,
|
||||
Unknown: phaseCountsForGroup.Unknown,
|
||||
}
|
||||
}
|
||||
|
||||
if attrs, ok := metadataMap[compositeKey]; ok {
|
||||
for k, v := range attrs {
|
||||
record.Meta[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
records = append(records, record)
|
||||
}
|
||||
return records
|
||||
}
|
||||
|
||||
func (m *module) getTopClusterGroups(
|
||||
ctx context.Context,
|
||||
orgID valuer.UUID,
|
||||
req *inframonitoringtypes.PostableClusters,
|
||||
metadataMap map[string]map[string]string,
|
||||
) ([]map[string]string, error) {
|
||||
orderByKey := req.OrderBy.Key.Name
|
||||
queryNamesForOrderBy := orderByToClustersQueryNames[orderByKey]
|
||||
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
|
||||
|
||||
topReq := &qbtypes.QueryRangeRequest{
|
||||
Start: uint64(req.Start),
|
||||
End: uint64(req.End),
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
|
||||
},
|
||||
}
|
||||
|
||||
for _, envelope := range m.newClustersTableListQuery().CompositeQuery.Queries {
|
||||
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
|
||||
continue
|
||||
}
|
||||
copied := envelope
|
||||
if copied.Type == qbtypes.QueryTypeBuilder {
|
||||
existingExpr := ""
|
||||
if f := copied.GetFilter(); f != nil {
|
||||
existingExpr = f.Expression
|
||||
}
|
||||
reqFilterExpr := ""
|
||||
if req.Filter != nil {
|
||||
reqFilterExpr = req.Filter.Expression
|
||||
}
|
||||
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
|
||||
copied.SetFilter(&qbtypes.Filter{Expression: merged})
|
||||
copied.SetGroupBy(req.GroupBy)
|
||||
}
|
||||
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
|
||||
}
|
||||
|
||||
resp, err := m.querier.QueryRange(ctx, orgID, topReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
|
||||
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
|
||||
}
|
||||
|
||||
func (m *module) getClustersTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableClusters) (map[string]map[string]string, error) {
|
||||
var nonGroupByAttrs []string
|
||||
for _, key := range clusterAttrKeysForMetadata {
|
||||
if !isKeyInGroupByAttrs(req.GroupBy, key) {
|
||||
nonGroupByAttrs = append(nonGroupByAttrs, key)
|
||||
}
|
||||
}
|
||||
return m.getMetadata(ctx, clustersTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
|
||||
}
|
||||
@@ -0,0 +1,140 @@
|
||||
package implinframonitoring
|
||||
|
||||
import (
|
||||
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
// TODO(nikhilmantri0902): change to k8s.cluster.uid after showing the missing
|
||||
// data banner. Carried forward from v1 (see k8sClusterUIDAttrKey in
|
||||
// pkg/query-service/app/inframetrics/clusters.go).
|
||||
const clusterNameAttrKey = "k8s.cluster.name"
|
||||
|
||||
var clusterNameGroupByKey = qbtypes.GroupByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: clusterNameAttrKey,
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
}
|
||||
|
||||
// clustersTableMetricNamesList drives the existence/retention check.
|
||||
// Includes k8s.node.condition_ready and k8s.pod.phase so the response
|
||||
// short-circuits cleanly when a cluster doesn't ship those metrics — even
|
||||
// though they aren't part of the QB composite query (they're queried separately
|
||||
// via getPerGroupNodeConditionCounts and getPerGroupPodPhaseCounts).
|
||||
var clustersTableMetricNamesList = []string{
|
||||
"k8s.node.cpu.usage",
|
||||
"k8s.node.allocatable_cpu",
|
||||
"k8s.node.memory.working_set",
|
||||
"k8s.node.allocatable_memory",
|
||||
"k8s.node.condition_ready", //TODO(nikhilmantri0902): should these metrics be used to count groups k8s.node.condition_ready and k8s.pod.phase
|
||||
"k8s.pod.phase",
|
||||
}
|
||||
|
||||
var clusterAttrKeysForMetadata = []string{
|
||||
"k8s.cluster.name",
|
||||
}
|
||||
|
||||
var orderByToClustersQueryNames = map[string][]string{
|
||||
inframonitoringtypes.ClustersOrderByCPU: {"A"},
|
||||
inframonitoringtypes.ClustersOrderByCPUAllocatable: {"B"},
|
||||
inframonitoringtypes.ClustersOrderByMemory: {"C"},
|
||||
inframonitoringtypes.ClustersOrderByMemoryAllocatable: {"D"},
|
||||
}
|
||||
|
||||
// newClustersTableListQuery builds the composite QB v5 request for the clusters list.
|
||||
// Cluster-scope metrics are derived by summing per-node metrics within the
|
||||
// group (default group: k8s.cluster.name). Node condition counts and pod phase
|
||||
// counts are derived separately via getPerGroupNodeConditionCounts and
|
||||
// getPerGroupPodPhaseCounts respectively (works for both list and grouped_list
|
||||
// modes), so neither is included here. Query letters A/B/C/D mirror the v1
|
||||
// implementation and the v2 nodes list.
|
||||
func (m *module) newClustersTableListQuery() *qbtypes.QueryRangeRequest {
|
||||
queries := []qbtypes.QueryEnvelope{
|
||||
// Query A: CPU usage — sum of node CPU within the group.
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "k8s.node.cpu.usage",
|
||||
TimeAggregation: metrictypes.TimeAggregationAvg,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
ReduceTo: qbtypes.ReduceToAvg,
|
||||
},
|
||||
},
|
||||
GroupBy: []qbtypes.GroupByKey{clusterNameGroupByKey},
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
// Query B: CPU allocatable — sum of node allocatable CPU within the group.
|
||||
// TimeAggregationLatest is the closest v5 equivalent of v1's AnyLast;
|
||||
// allocatable values change rarely so divergence in practice is negligible.
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "k8s.node.allocatable_cpu",
|
||||
TimeAggregation: metrictypes.TimeAggregationLatest,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
ReduceTo: qbtypes.ReduceToAvg,
|
||||
},
|
||||
},
|
||||
GroupBy: []qbtypes.GroupByKey{clusterNameGroupByKey},
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
// Query C: Memory working set — sum of node memory within the group.
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "C",
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "k8s.node.memory.working_set",
|
||||
TimeAggregation: metrictypes.TimeAggregationAvg,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
ReduceTo: qbtypes.ReduceToAvg,
|
||||
},
|
||||
},
|
||||
GroupBy: []qbtypes.GroupByKey{clusterNameGroupByKey},
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
// Query D: Memory allocatable — sum of node allocatable memory within the group.
|
||||
// Same Latest caveat as Query B.
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "D",
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "k8s.node.allocatable_memory",
|
||||
TimeAggregation: metrictypes.TimeAggregationLatest,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
ReduceTo: qbtypes.ReduceToAvg,
|
||||
},
|
||||
},
|
||||
GroupBy: []qbtypes.GroupByKey{clusterNameGroupByKey},
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: queries,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -117,3 +117,27 @@ func (h *handler) ListNamespaces(rw http.ResponseWriter, req *http.Request) {
|
||||
|
||||
render.Success(rw, http.StatusOK, result)
|
||||
}
|
||||
|
||||
func (h *handler) ListClusters(rw http.ResponseWriter, req *http.Request) {
|
||||
claims, err := authtypes.ClaimsFromContext(req.Context())
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID := valuer.MustNewUUID(claims.OrgID)
|
||||
|
||||
var parsedReq inframonitoringtypes.PostableClusters
|
||||
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.module.ListClusters(req.Context(), orgID, &parsedReq)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusOK, result)
|
||||
}
|
||||
|
||||
@@ -426,3 +426,105 @@ func (m *module) ListNamespaces(ctx context.Context, orgID valuer.UUID, req *inf
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *module) ListClusters(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableClusters) (*inframonitoringtypes.Clusters, error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &inframonitoringtypes.Clusters{}
|
||||
|
||||
if req.OrderBy == nil {
|
||||
req.OrderBy = &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: inframonitoringtypes.ClustersOrderByCPU,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
}
|
||||
}
|
||||
|
||||
if len(req.GroupBy) == 0 {
|
||||
req.GroupBy = []qbtypes.GroupByKey{clusterNameGroupByKey}
|
||||
resp.Type = inframonitoringtypes.ResponseTypeList
|
||||
} else {
|
||||
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
|
||||
}
|
||||
|
||||
missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, clustersTableMetricNamesList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(missingMetrics) > 0 {
|
||||
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
|
||||
resp.Records = []inframonitoringtypes.ClusterRecord{}
|
||||
resp.Total = 0
|
||||
return resp, nil
|
||||
}
|
||||
if req.End < int64(minFirstReportedUnixMilli) {
|
||||
resp.EndTimeBeforeRetention = true
|
||||
resp.Records = []inframonitoringtypes.ClusterRecord{}
|
||||
resp.Total = 0
|
||||
return resp, nil
|
||||
}
|
||||
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
|
||||
|
||||
metadataMap, err := m.getClustersTableMetadata(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.Total = len(metadataMap)
|
||||
|
||||
pageGroups, err := m.getTopClusterGroups(ctx, orgID, req, metadataMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pageGroups) == 0 {
|
||||
resp.Records = []inframonitoringtypes.ClusterRecord{}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
filterExpr := ""
|
||||
if req.Filter != nil {
|
||||
filterExpr = req.Filter.Expression
|
||||
}
|
||||
|
||||
fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newClustersTableListQuery())
|
||||
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Reuse the nodes condition-counts CTE function via a temp struct — it reads only
|
||||
// Start/End/Filter/GroupBy from PostableNodes. With default groupBy
|
||||
// [k8s.cluster.name], counts are bucketed per cluster; with a custom groupBy,
|
||||
// they aggregate across clusters in that group.
|
||||
nodeConditionCountsMap, err := m.getPerGroupNodeConditionCounts(ctx, &inframonitoringtypes.PostableNodes{
|
||||
Start: req.Start,
|
||||
End: req.End,
|
||||
Filter: req.Filter,
|
||||
GroupBy: req.GroupBy,
|
||||
}, pageGroups)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Same pattern for pod phase counts via PostablePods shim.
|
||||
podPhaseCountsMap, err := m.getPerGroupPodPhaseCounts(ctx, &inframonitoringtypes.PostablePods{
|
||||
Start: req.Start,
|
||||
End: req.End,
|
||||
Filter: req.Filter,
|
||||
GroupBy: req.GroupBy,
|
||||
}, pageGroups)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.Records = buildClusterRecords(queryResp, pageGroups, req.GroupBy, metadataMap, nodeConditionCountsMap, podPhaseCountsMap)
|
||||
resp.Warning = queryResp.Warning
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ type Handler interface {
	ListPods(http.ResponseWriter, *http.Request)
	ListNodes(http.ResponseWriter, *http.Request)
	ListNamespaces(http.ResponseWriter, *http.Request)
	ListClusters(http.ResponseWriter, *http.Request)
}

type Module interface {
@@ -20,4 +21,5 @@ type Module interface {
	ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
	ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error)
	ListNamespaces(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNamespaces) (*inframonitoringtypes.Namespaces, error)
	ListClusters(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableClusters) (*inframonitoringtypes.Clusters, error)
}
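Note: a hedged fragment showing a direct call to the new module method. Only Start and End are set, so the defaults from ListClusters apply (OrderBy falls back to CPU descending, GroupBy to k8s.cluster.name, response type "list"); field names follow the usage visible in this diff, and the wiring that produces mod, ctx and orgID is assumed:

	req := &inframonitoringtypes.PostableClusters{
		Start: time.Now().Add(-30 * time.Minute).UnixMilli(), // milliseconds, as compared against minFirstReportedUnixMilli
		End:   time.Now().UnixMilli(),
	}
	clusters, err := mod.ListClusters(ctx, orgID, req)
	if err != nil {
		return err
	}
	for _, rec := range clusters.Records {
		// -1 is the sentinel for "no data" on the numeric metric fields.
		fmt.Println(rec.ClusterName, rec.ClusterCPU, rec.ClusterMemory)
	}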
pkg/modules/retention/implretention/getter.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package implretention

import (
	"context"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/modules/retention"
	"github.com/SigNoz/signoz/pkg/types/retentiontypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type getter struct {
	store retentiontypes.Store
}

// NewGetter creates a retention getter backed by the retention store.
func NewGetter(store retentiontypes.Store) retention.Getter {
	return &getter{
		store: store,
	}
}

// GetRetentionPolicySegments loads successful TTL changes and converts them into retention policy segments.
func (getter *getter) GetRetentionPolicySegments(
	ctx context.Context,
	orgID valuer.UUID,
	dbName string,
	tableName string,
	fallbackDefaultDays int,
	startMs int64,
	endMs int64,
) ([]*retentiontypes.RetentionPolicySegment, error) {
	if startMs >= endMs {
		return nil, nil
	}
	if dbName == "" {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dbName is empty")
	}
	if tableName == "" {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "tableName is empty")
	}
	if fallbackDefaultDays <= 0 {
		return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "non-positive fallbackDefaultDays %d", fallbackDefaultDays)
	}

	rows, err := getter.store.ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx, orgID, dbName+"."+tableName, endMs)
	if err != nil {
		return nil, err
	}

	return retentiontypes.BuildRetentionPolicySegmentsFromRows(rows, fallbackDefaultDays, startMs, endMs)
}
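Note: a hedged fragment showing how a telemetry meter collector might consume the getter over a one-day window (ctx, orgID, dayStart and dayEnd are assumed from the caller). The guards above return (nil, nil) for a non-positive window and reject empty names or a non-positive fallback:

	segments, err := getter.GetRetentionPolicySegments(
		ctx,
		orgID,
		telemetrylogs.DBName, // the store key is derived as dbName+"."+tableName
		telemetrylogs.LogsV2LocalTableName,
		retentiontypes.DefaultLogsRetentionDays,
		dayStart.UnixMilli(),
		dayEnd.UnixMilli(),
	)
	if err != nil {
		return err
	}
	// Each segment carries the retention policy active for a sub-range of the window.
	_ = segments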
pkg/modules/retention/implretention/store.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package implretention

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/retentiontypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type store struct {
	sqlstore sqlstore.SQLStore
}

// NewStore creates a SQL-backed retention store.
func NewStore(sqlstore sqlstore.SQLStore) retentiontypes.Store {
	return &store{sqlstore: sqlstore}
}

// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
func (store *store) ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*retentiontypes.TTLSetting, error) {
	rows := []*retentiontypes.TTLSetting{}
	err := store.
		sqlstore.
		BunDB().
		NewSelect().
		Model(&rows).
		Where("table_name = ?", tableName).
		Where("org_id = ?", orgID.StringValue()).
		Where("status = ?", retentiontypes.TTLSettingStatusSuccess).
		Where("created_at < ?", time.UnixMilli(beforeMs).UTC()).
		OrderExpr("created_at ASC").
		Scan(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "load ttl_setting rows for org %q table %q", orgID.StringValue(), tableName)
	}

	return rows, nil
}
pkg/modules/retention/retention.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package retention

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/retentiontypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// Getter resolves retention data and expressions for read paths.
type Getter interface {
	// GetRetentionPolicySegments returns retention policy segments active over a half-open meter window.
	GetRetentionPolicySegments(ctx context.Context, orgID valuer.UUID, dbName string, tableName string, fallbackDefaultDays int, startMs int64, endMs int64) ([]*retentiontypes.RetentionPolicySegment, error)
}
@@ -377,7 +377,7 @@ func (module *module) getOrGetSetIdentity(ctx context.Context, serviceAccountID
|
||||
}
|
||||
|
||||
func (module *module) setRole(ctx context.Context, orgID valuer.UUID, id valuer.UUID, role *authtypes.Role) error {
|
||||
serviceAccount, err := module.GetWithRoles(ctx, orgID, id)
|
||||
serviceAccount, err := module.Get(ctx, orgID, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -387,24 +387,12 @@ func (module *module) setRole(ctx context.Context, orgID valuer.UUID, id valuer.
|
||||
return err
|
||||
}
|
||||
|
||||
err = module.authz.ModifyGrant(ctx, orgID, serviceAccount.RoleNames(), []string{role.Name}, authtypes.MustNewSubject(coretypes.NewResourceServiceAccount(), id.String(), orgID, nil))
|
||||
err = module.authz.Grant(ctx, orgID, []string{role.Name}, authtypes.MustNewSubject(coretypes.NewResourceServiceAccount(), id.String(), orgID, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = module.store.RunInTx(ctx, func(ctx context.Context) error {
|
||||
err = module.store.DeleteServiceAccountRoles(ctx, serviceAccount.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = module.store.CreateServiceAccountRole(ctx, serviceAccountRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
err = module.store.CreateServiceAccountRole(ctx, serviceAccountRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -207,21 +207,6 @@ func (store *store) CreateServiceAccountRole(ctx context.Context, serviceAccount
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *store) DeleteServiceAccountRoles(ctx context.Context, serviceAccountID valuer.UUID) error {
|
||||
_, err := store.
|
||||
sqlstore.
|
||||
BunDBCtx(ctx).
|
||||
NewDelete().
|
||||
Model(new(serviceaccounttypes.ServiceAccountRole)).
|
||||
Where("service_account_id = ?", serviceAccountID).
|
||||
Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *store) DeleteServiceAccountRole(ctx context.Context, serviceAccountID valuer.UUID, roleID valuer.UUID) error {
|
||||
_, err := store.
|
||||
sqlstore.
|
||||
|
||||
@@ -1343,7 +1343,7 @@ func getLocalTableName(tableName string) string {
|
||||
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
|
||||
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
|
||||
instrumentationtypes.CodeNamespace: "clickhouse-reader",
|
||||
@@ -1378,7 +1378,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
if apiErr != nil {
|
||||
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
|
||||
}
|
||||
if statusItem.Status == constants.StatusPending {
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
|
||||
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
|
||||
}
|
||||
}
|
||||
@@ -1437,7 +1437,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
TransactionID: uuid,
|
||||
TableName: tableName,
|
||||
TTL: int(params.DelDuration),
|
||||
Status: constants.StatusPending,
|
||||
Status: retentiontypes.TTLSettingStatusPending,
|
||||
ColdStorageTTL: coldStorageDuration,
|
||||
OrgID: orgID,
|
||||
}
|
||||
@@ -1463,7 +1463,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1483,7 +1483,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1498,7 +1498,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusSuccess).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1508,10 +1508,10 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
|
||||
}
|
||||
|
||||
}(ttlPayload)
|
||||
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
|
||||
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalTraces.StringValue(),
|
||||
instrumentationtypes.CodeNamespace: "clickhouse-reader",
|
||||
@@ -1541,7 +1541,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
if apiErr != nil {
|
||||
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
|
||||
}
|
||||
if statusItem.Status == constants.StatusPending {
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
|
||||
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
|
||||
}
|
||||
}
|
||||
@@ -1575,7 +1575,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
TransactionID: uuid,
|
||||
TableName: tableName,
|
||||
TTL: int(params.DelDuration),
|
||||
Status: constants.StatusPending,
|
||||
Status: retentiontypes.TTLSettingStatusPending,
|
||||
ColdStorageTTL: coldStorageDuration,
|
||||
OrgID: orgID,
|
||||
}
|
||||
@@ -1613,7 +1613,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1634,7 +1634,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1649,7 +1649,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusSuccess).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -1658,7 +1658,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
|
||||
}
|
||||
}(distributedTableName)
|
||||
}
|
||||
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool, error) {
|
||||
@@ -1687,7 +1687,7 @@ func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool,
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error) {
|
||||
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error) {
|
||||
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
|
||||
@@ -1702,7 +1702,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
if !hasCustomRetention {
|
||||
r.logger.Info("Custom retention not supported, falling back to standard TTL method", "orgID", orgID)
|
||||
|
||||
ttlParams := &model.TTLParams{
|
||||
ttlParams := &retentiontypes.TTLParams{
|
||||
Type: params.Type,
|
||||
DelDuration: int64(params.DefaultTTLDays * 24 * 3600),
|
||||
}
|
||||
@@ -1723,7 +1723,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
return nil, errorsV2.Wrapf(apiErr.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to set standard TTL")
|
||||
}
|
||||
|
||||
return &model.CustomRetentionTTLResponse{
|
||||
return &retentiontypes.CustomRetentionTTLResponse{
|
||||
Message: fmt.Sprintf("Custom retention not supported, applied standard TTL of %d days. %s", params.DefaultTTLDays, ttlResult.Message),
|
||||
}, nil
|
||||
}
|
||||
@@ -1734,7 +1734,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
uuidWithHyphen := valuer.GenerateUUID()
|
||||
uuid := strings.Replace(uuidWithHyphen.String(), "-", "", -1)
|
||||
|
||||
if params.Type != constants.LogsTTL {
|
||||
if params.Type != retentiontypes.LogsTTL {
|
||||
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL only supported for logs")
|
||||
}
|
||||
|
||||
@@ -1765,7 +1765,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
if apiErr != nil {
|
||||
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
|
||||
}
|
||||
if statusItem.Status == constants.StatusPending {
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
|
||||
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL is already running")
|
||||
}
|
||||
}
|
||||
@@ -1851,7 +1851,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
TableName: tableName,
|
||||
TTL: params.DefaultTTLDays,
|
||||
Condition: string(ttlConditionsJSON),
|
||||
Status: constants.StatusPending,
|
||||
Status: retentiontypes.TTLSettingStatusPending,
|
||||
ColdStorageTTL: coldStorageDuration,
|
||||
OrgID: orgID,
|
||||
}
|
||||
@@ -1867,7 +1867,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
|
||||
if err != nil {
|
||||
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
|
||||
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
|
||||
}
|
||||
}
|
||||
@@ -1876,21 +1876,21 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
|
||||
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
|
||||
if err := r.db.Exec(ctx, query); err != nil {
|
||||
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
|
||||
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
|
||||
}
|
||||
}
|
||||
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusSuccess)
|
||||
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusSuccess)
|
||||
}
|
||||
|
||||
return &model.CustomRetentionTTLResponse{
|
||||
return &retentiontypes.CustomRetentionTTLResponse{
|
||||
Message: "custom retention TTL has been successfully set up",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// New method to build multiIf expressions with support for multiple AND conditions
|
||||
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []model.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
|
||||
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
|
||||
var conditions []string
|
||||
|
||||
for i, rule := range ttlConditions {
|
||||
@@ -1962,7 +1962,7 @@ func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []model.CustomRe
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error) {
|
||||
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error) {
|
||||
// Check if V2 (custom retention) is supported
|
||||
hasCustomRetention, err := r.hasCustomRetentionColumn(ctx)
|
||||
if err != nil {
|
||||
@@ -1971,7 +1971,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
|
||||
hasCustomRetention = false
|
||||
}
|
||||
|
||||
response := &model.GetCustomRetentionTTLResponse{}
|
||||
response := &retentiontypes.GetCustomRetentionTTLResponse{}
|
||||
|
||||
if hasCustomRetention {
|
||||
// V2 - Custom retention is supported
|
||||
@@ -1994,19 +1994,19 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
// No V2 configuration found, return defaults
|
||||
response.DefaultTTLDays = 15
|
||||
response.TTLConditions = []model.CustomRetentionRule{}
|
||||
response.Status = constants.StatusSuccess
|
||||
response.DefaultTTLDays = retentiontypes.DefaultLogsRetentionDays
|
||||
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
|
||||
response.Status = retentiontypes.TTLSettingStatusSuccess
|
||||
response.ColdStorageTTLDays = -1
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// Parse TTL conditions from Condition
|
||||
var ttlConditions []model.CustomRetentionRule
|
||||
var ttlConditions []retentiontypes.CustomRetentionRule
|
||||
if customTTL.Condition != "" {
|
||||
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
|
||||
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
|
||||
ttlConditions = []model.CustomRetentionRule{}
|
||||
ttlConditions = []retentiontypes.CustomRetentionRule{}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2020,8 +2020,8 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
|
||||
response.Version = "v1"
|
||||
|
||||
// Get V1 TTL configuration
|
||||
ttlParams := &model.GetTTLParams{
|
||||
Type: constants.LogsTTL,
|
||||
ttlParams := &retentiontypes.GetTTLParams{
|
||||
Type: retentiontypes.LogsTTL,
|
||||
}
|
||||
|
||||
ttlResult, apiErr := r.GetTTL(ctx, orgID, ttlParams)
|
||||
@@ -2041,7 +2041,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
|
||||
}
|
||||
|
||||
// For V1, we don't have TTL conditions
|
||||
response.TTLConditions = []model.CustomRetentionRule{}
|
||||
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
|
||||
}
|
||||
|
||||
return response, nil
|
||||
@@ -2081,7 +2081,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
|
||||
}
|
||||
|
||||
// Enhanced validation function with duplicate detection and efficient key validation
|
||||
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []model.CustomRetentionRule) error {
|
||||
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []retentiontypes.CustomRetentionRule) error {
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.CodeNamespace: "clickhouse-reader",
|
||||
instrumentationtypes.CodeFunctionName: "validateTTLConditions",
|
||||
@@ -2185,16 +2185,16 @@ func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditi
|
||||
// SetTTL sets the TTL for traces or metrics or logs tables.
|
||||
// This is an async API which creates goroutines to set TTL.
|
||||
// Status of TTL update is tracked with ttl_status table in sqlite db.
|
||||
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
|
||||
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
|
||||
// Keep only latest 100 transactions/requests
|
||||
r.deleteTtlTransactions(ctx, orgID, 100)
|
||||
|
||||
switch params.Type {
|
||||
case constants.TraceTTL:
|
||||
case retentiontypes.TraceTTL:
|
||||
return r.setTTLTraces(ctx, orgID, params)
|
||||
case constants.MetricsTTL:
|
||||
case retentiontypes.MetricsTTL:
|
||||
return r.setTTLMetrics(ctx, orgID, params)
|
||||
case constants.LogsTTL:
|
||||
case retentiontypes.LogsTTL:
|
||||
return r.setTTLLogs(ctx, orgID, params)
|
||||
default:
|
||||
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting ttl. ttl type should be <metrics|traces>, got %v", params.Type)}
|
||||
@@ -2202,7 +2202,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *mod
|
||||
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
|
||||
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalMetrics.StringValue(),
|
||||
instrumentationtypes.CodeNamespace: "clickhouse-reader",
|
||||
@@ -2231,7 +2231,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
if apiErr != nil {
|
||||
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
|
||||
}
|
||||
if statusItem.Status == constants.StatusPending {
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
|
||||
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
|
||||
}
|
||||
}
|
||||
@@ -2247,7 +2247,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
TransactionID: uuid,
|
||||
TableName: tableName,
|
||||
TTL: int(params.DelDuration),
|
||||
Status: constants.StatusPending,
|
||||
Status: retentiontypes.TTLSettingStatusPending,
|
||||
ColdStorageTTL: coldStorageDuration,
|
||||
OrgID: orgID,
|
||||
}
|
||||
@@ -2285,7 +2285,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -2306,7 +2306,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusFailed).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -2321,7 +2321,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
NewUpdate().
|
||||
Model(new(retentiontypes.TTLSetting)).
|
||||
Set("updated_at = ?", time.Now()).
|
||||
Set("status = ?", constants.StatusSuccess).
|
||||
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
|
||||
Where("id = ?", statusItem.ID.StringValue()).
|
||||
Exec(ctx)
|
||||
if dbErr != nil {
|
||||
@@ -2332,7 +2332,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
|
||||
for _, tableName := range tableNames {
|
||||
go metricTTL(tableName)
|
||||
}
|
||||
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID string, numberOfTransactionsStore int) {
|
||||
@@ -2389,7 +2389,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
|
||||
// getTTLQueryStatus fetches ttl_status table status from DB
|
||||
func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
|
||||
failFlag := false
|
||||
status := constants.StatusSuccess
|
||||
status := retentiontypes.TTLSettingStatusSuccess
|
||||
for _, tableName := range tableNameArray {
|
||||
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
|
||||
emptyStatusStruct := new(retentiontypes.TTLSetting)
|
||||
@@ -2399,16 +2399,16 @@ func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string,
|
||||
if apiErr != nil {
|
||||
return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
|
||||
}
|
||||
if statusItem.Status == constants.StatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
|
||||
status = constants.StatusPending
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
|
||||
status = retentiontypes.TTLSettingStatusPending
|
||||
return status, nil
|
||||
}
|
||||
if statusItem.Status == constants.StatusFailed {
|
||||
if statusItem.Status == retentiontypes.TTLSettingStatusFailed {
|
||||
failFlag = true
|
||||
}
|
||||
}
|
||||
if failFlag {
|
||||
status = constants.StatusFailed
|
||||
status = retentiontypes.TTLSettingStatusFailed
|
||||
}
|
||||
|
||||
return status, nil
|
||||
@@ -2461,7 +2461,7 @@ func getLocalTableNameArray(tableNames []string) []string {
|
||||
}
|
||||
|
||||
// GetTTL returns current ttl, expected ttl and past setTTL status for metrics/traces.
|
||||
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) {
|
||||
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError) {
|
||||
|
||||
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
|
||||
instrumentationtypes.CodeNamespace: "clickhouse-reader",
|
||||
@@ -2496,8 +2496,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
return delTTL, moveTTL
|
||||
}
|
||||
|
||||
getMetricsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []model.DBResponseTTL
|
||||
getMetricsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []retentiontypes.DBResponseTTL
|
||||
|
||||
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v'", signozSampleLocalTableName)
|
||||
|
||||
@@ -2514,8 +2514,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
}
|
||||
|
||||
getTracesTTL := func() (*model.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []model.DBResponseTTL
|
||||
getTracesTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []retentiontypes.DBResponseTTL
|
||||
|
||||
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.traceLocalTableName, signozTraceDBName)
|
||||
|
||||
@@ -2532,8 +2532,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
}
|
||||
|
||||
getLogsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []model.DBResponseTTL
|
||||
getLogsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
|
||||
var dbResp []retentiontypes.DBResponseTTL
|
||||
|
||||
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.logsLocalTableName, r.logsDB)
|
||||
|
||||
@@ -2551,7 +2551,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
|
||||
switch ttlParams.Type {
|
||||
case constants.TraceTTL:
|
||||
case retentiontypes.TraceTTL:
|
||||
tableNameArray := []string{
|
||||
r.TraceDB + "." + r.traceTableName,
|
||||
r.TraceDB + "." + r.traceResourceTableV3,
|
||||
@@ -2579,9 +2579,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
|
||||
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
|
||||
return &model.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
return &retentiontypes.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
|
||||
case constants.MetricsTTL:
|
||||
case retentiontypes.MetricsTTL:
|
||||
tableNameArray := []string{signozMetricDBName + "." + signozSampleTableName}
|
||||
tableNameArray = getLocalTableNameArray(tableNameArray)
|
||||
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
|
||||
@@ -2602,9 +2602,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
|
||||
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
|
||||
return &model.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
return &retentiontypes.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
|
||||
case constants.LogsTTL:
|
||||
case retentiontypes.LogsTTL:
|
||||
tableNameArray := []string{r.logsDB + "." + r.logsTableName}
|
||||
tableNameArray = getLocalTableNameArray(tableNameArray)
|
||||
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
|
||||
@@ -2625,7 +2625,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
|
||||
}
|
||||
|
||||
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
|
||||
return &model.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
return &retentiontypes.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
|
||||
|
||||
default:
|
||||
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. ttl type should be metrics|traces, got %v",
|
||||
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||
"github.com/SigNoz/signoz/pkg/signoz"
|
||||
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
@@ -1677,7 +1678,7 @@ func (aH *APIHandler) setCustomRetentionTTL(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
var params model.CustomRetentionTTLParams
|
||||
var params retentiontypes.CustomRetentionTTLParams
|
||||
if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
|
||||
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "Invalid data"))
|
||||
return
|
||||
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
querytemplate "github.com/SigNoz/signoz/pkg/query-service/utils/queryTemplate"
|
||||
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
|
||||
chVariables "github.com/SigNoz/signoz/pkg/variables/clickhouse"
|
||||
)
|
||||
|
||||
@@ -419,7 +420,7 @@ func parseTime(param string, r *http.Request) (*time.Time, error) {
|
||||
|
||||
}
|
||||
|
||||
func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
|
||||
func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
|
||||
|
||||
// make sure either of the query params are present
|
||||
typeTTL := r.URL.Query().Get("type")
|
||||
@@ -432,7 +433,7 @@ func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
|
||||
}
|
||||
|
||||
// Validate the type parameter
|
||||
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
|
||||
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
|
||||
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
|
||||
}
|
||||
|
||||
@@ -455,7 +456,7 @@ func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return &model.TTLParams{
|
||||
return &retentiontypes.TTLParams{
|
||||
Type: typeTTL,
|
||||
DelDuration: int64(durationParsed.Seconds()),
|
||||
ColdStorageVolume: coldStorage,
|
||||
@@ -463,7 +464,7 @@ func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseGetTTL(r *http.Request) (*model.GetTTLParams, error) {
|
||||
func parseGetTTL(r *http.Request) (*retentiontypes.GetTTLParams, error) {
|
||||
|
||||
typeTTL := r.URL.Query().Get("type")
|
||||
|
||||
@@ -471,12 +472,12 @@ func parseGetTTL(r *http.Request) (*model.GetTTLParams, error) {
|
||||
return nil, fmt.Errorf("type param cannot be empty from the query")
|
||||
} else {
|
||||
// Validate the type parameter
|
||||
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
|
||||
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
|
||||
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
|
||||
}
|
||||
}
|
||||
|
||||
return &model.GetTTLParams{Type: typeTTL}, nil
|
||||
return &retentiontypes.GetTTLParams{Type: typeTTL}, nil
|
||||
}
|
||||
|
||||
func parseAggregateAttributeRequest(r *http.Request) (*v3.AggregateAttributeRequest, error) {
|
||||
|
||||
@@ -19,10 +19,6 @@ const (
|
||||
|
||||
const MaxAllowedPointsInTimeSeries = 300
|
||||
|
||||
const TraceTTL = "traces"
|
||||
const MetricsTTL = "metrics"
|
||||
const LogsTTL = "logs"
|
||||
|
||||
const SpanSearchScopeRoot = "isroot"
|
||||
const SpanSearchScopeEntryPoint = "isentrypoint"
|
||||
const OrderBySpanCount = "span_count"
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/querycache"
|
||||
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
@@ -23,8 +24,8 @@ type Reader interface {
|
||||
GetServicesList(ctx context.Context) (*[]string, error)
|
||||
GetDependencyGraph(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
|
||||
|
||||
GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError)
|
||||
GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error)
|
||||
GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError)
|
||||
GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error)
|
||||
|
||||
// GetDisks returns a list of disks configured in the underlying DB. It is supported by
|
||||
// clickhouse only.
|
||||
@@ -46,8 +47,8 @@ type Reader interface {
|
||||
GetFlamegraphSpansForTrace(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, error)
|
||||
|
||||
// Setter Interfaces
|
||||
SetTTL(ctx context.Context, orgID string, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
|
||||
SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error)
|
||||
SetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError)
|
||||
SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error)
|
||||
|
||||
FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error)
|
||||
GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error)
|
||||
|
||||
@@ -404,56 +404,6 @@ type TagKey struct {
|
||||
Type TagDataType `json:"type"`
|
||||
}
|
||||
|
||||
type TTLParams struct {
|
||||
Type string // It can be one of {traces, metrics}.
|
||||
ColdStorageVolume string // Name of the cold storage volume.
|
||||
ToColdStorageDuration int64 // Seconds after which data will be moved to cold storage.
|
||||
DelDuration int64 // Seconds after which data will be deleted.
|
||||
}
|
||||
|
||||
type CustomRetentionTTLParams struct {
|
||||
Type string `json:"type"`
|
||||
DefaultTTLDays int `json:"defaultTTLDays"`
|
||||
TTLConditions []CustomRetentionRule `json:"ttlConditions"`
|
||||
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
|
||||
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
|
||||
}
|
||||
|
||||
type CustomRetentionRule struct {
|
||||
Filters []FilterCondition `json:"conditions"`
|
||||
TTLDays int `json:"ttlDays"`
|
||||
}
|
||||
|
||||
type FilterCondition struct {
|
||||
Key string `json:"key"`
|
||||
Values []string `json:"values"`
|
||||
}
|
||||
|
||||
type GetCustomRetentionTTLResponse struct {
|
||||
Version string `json:"version"`
|
||||
Status string `json:"status"`
|
||||
|
||||
// V1 fields
|
||||
// LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
|
||||
// LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
|
||||
|
||||
// V2 fields
|
||||
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
|
||||
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
|
||||
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
|
||||
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
|
||||
}
|
||||
|
||||
type CustomRetentionTTLResponse struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type GetTTLParams struct {
|
||||
Type string
|
||||
}
|
||||
|
||||
type ListErrorsParams struct {
|
||||
StartStr string `json:"start"`
|
||||
EndStr string `json:"end"`
|
||||
|
||||
@@ -150,16 +150,6 @@ type RuleResponseItem struct {
|
||||
Data string `json:"data" db:"data"`
|
||||
}
|
||||
|
||||
type TTLStatusItem struct {
|
||||
Id int `json:"id" db:"id"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
TableName string `json:"table_name" db:"table_name"`
|
||||
TTL int `json:"ttl" db:"ttl"`
|
||||
Status string `json:"status" db:"status"`
|
||||
ColdStorageTtl int `json:"cold_storage_ttl" db:"cold_storage_ttl"`
|
||||
}
|
||||
|
||||
type ChannelItem struct {
|
||||
Id int `json:"id" db:"id"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
@@ -462,35 +452,11 @@ type SpanAggregatesDBResponseItem struct {
|
||||
GroupBy string `ch:"groupBy"`
|
||||
}
|
||||
|
||||
type SetTTLResponseItem struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type DiskItem struct {
|
||||
Name string `json:"name,omitempty" ch:"name"`
|
||||
Type string `json:"type,omitempty" ch:"type"`
|
||||
}
|
||||
|
||||
type DBResponseTTL struct {
|
||||
EngineFull string `ch:"engine_full"`
|
||||
}
|
||||
|
||||
type GetTTLResponseItem struct {
|
||||
MetricsTime int `json:"metrics_ttl_duration_hrs,omitempty"`
|
||||
MetricsMoveTime int `json:"metrics_move_ttl_duration_hrs,omitempty"`
|
||||
TracesTime int `json:"traces_ttl_duration_hrs,omitempty"`
|
||||
TracesMoveTime int `json:"traces_move_ttl_duration_hrs,omitempty"`
|
||||
LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
|
||||
LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedMetricsTime int `json:"expected_metrics_ttl_duration_hrs,omitempty"`
|
||||
ExpectedMetricsMoveTime int `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedTracesTime int `json:"expected_traces_ttl_duration_hrs,omitempty"`
|
||||
ExpectedTracesMoveTime int `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
type DBResponseServiceName struct {
|
||||
ServiceName string `ch:"serviceName"`
|
||||
Count uint64 `ch:"count"`
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/global"
|
||||
"github.com/SigNoz/signoz/pkg/identn"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation"
|
||||
"github.com/SigNoz/signoz/pkg/meterreporter"
|
||||
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
|
||||
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
|
||||
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
|
||||
@@ -135,6 +136,9 @@ type Config struct {
|
||||
// Auditor config
|
||||
Auditor auditor.Config `mapstructure:"auditor"`
|
||||
|
||||
// MeterReporter config
|
||||
MeterReporter meterreporter.Config `mapstructure:"meterreporter"`
|
||||
|
||||
// CloudIntegration config
|
||||
CloudIntegration cloudintegration.Config `mapstructure:"cloudintegration"`
|
||||
|
||||
@@ -175,6 +179,7 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
|
||||
identn.NewConfigFactory(),
|
||||
serviceaccount.NewConfigFactory(),
|
||||
auditor.NewConfigFactory(),
|
||||
meterreporter.NewConfigFactory(),
|
||||
cloudintegration.NewConfigFactory(),
|
||||
tracedetail.NewConfigFactory(),
|
||||
authz.NewConfigFactory(),
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
"github.com/SigNoz/signoz/pkg/querier"
|
||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||
@@ -52,7 +53,8 @@ func TestNewHandlers(t *testing.T) {
|
||||
userRoleStore := impluser.NewUserRoleStore(sqlstore, providerSettings)
|
||||
|
||||
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), userRoleStore, flagger)
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, flagger)
|
||||
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, retentionGetter, flagger)
|
||||
|
||||
querierHandler := querier.NewHandler(providerSettings, nil, nil)
|
||||
registryHandler := factory.NewHandler(nil)
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/modules/quickfilter/implquickfilter"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rawdataexport/implrawdataexport"
|
||||
"github.com/SigNoz/signoz/pkg/modules/retention"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory/implrulestatehistory"
|
||||
"github.com/SigNoz/signoz/pkg/modules/savedview"
|
||||
@@ -63,6 +64,7 @@ type Modules struct {
|
||||
Preference preference.Module
|
||||
UserSetter user.Setter
|
||||
UserGetter user.Getter
|
||||
RetentionGetter retention.Getter
|
||||
SavedView savedview.Module
|
||||
Apdex apdex.Module
|
||||
Dashboard dashboard.Module
|
||||
@@ -103,6 +105,7 @@ func NewModules(
|
||||
userRoleStore authtypes.UserRoleStore,
|
||||
serviceAccount serviceaccount.Module,
|
||||
cloudIntegrationModule cloudintegration.Module,
|
||||
retentionGetter retention.Getter,
|
||||
fl flagger.Flagger,
|
||||
) Modules {
|
||||
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
|
||||
@@ -119,6 +122,7 @@ func NewModules(
|
||||
Dashboard: dashboard,
|
||||
UserSetter: userSetter,
|
||||
UserGetter: userGetter,
|
||||
RetentionGetter: retentionGetter,
|
||||
QuickFilter: quickfilter,
|
||||
TraceFunnel: impltracefunnel.NewModule(impltracefunnel.NewStore(sqlstore)),
|
||||
RawDataExport: implrawdataexport.NewModule(querier),
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
|
||||
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
|
||||
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
|
||||
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
@@ -56,7 +57,8 @@ func TestNewModules(t *testing.T) {
|
||||
|
||||
serviceAccount := implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), nil, nil, nil, providerSettings, serviceaccount.Config{})
|
||||
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), flagger)
|
||||
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), retentionGetter, flagger)
|
||||
|
||||
reflectVal := reflect.ValueOf(modules)
|
||||
for i := 0; i < reflectVal.NumField(); i++ {
|
||||
|
||||
@@ -28,6 +28,8 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
|
||||
"github.com/SigNoz/signoz/pkg/identn/impersonationidentn"
|
||||
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
|
||||
"github.com/SigNoz/signoz/pkg/meterreporter"
|
||||
"github.com/SigNoz/signoz/pkg/meterreporter/noopmeterreporter"
|
||||
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
@@ -319,6 +321,12 @@ func NewAuditorProviderFactories() factory.NamedMap[factory.ProviderFactory[audi
|
||||
)
|
||||
}
|
||||
|
||||
func NewMeterReporterProviderFactories() factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
|
||||
return factory.MustNewNamedMap(
|
||||
noopmeterreporter.NewFactory(),
|
||||
)
|
||||
}
|
||||
|
||||
func NewFlaggerProviderFactories(registry featuretypes.Registry) factory.NamedMap[factory.ProviderFactory[flagger.FlaggerProvider, flagger.Config]] {
|
||||
return factory.MustNewNamedMap(
|
||||
configflagger.NewFactory(registry),
|
||||
|
||||
@@ -22,10 +22,13 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/identn"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation"
|
||||
"github.com/SigNoz/signoz/pkg/licensing"
|
||||
"github.com/SigNoz/signoz/pkg/meterreporter"
|
||||
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
|
||||
"github.com/SigNoz/signoz/pkg/modules/dashboard"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/retention"
|
||||
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
|
||||
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
|
||||
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
|
||||
@@ -84,6 +87,7 @@ type SigNoz struct {
|
||||
Flagger flagger.Flagger
|
||||
Gateway gateway.Gateway
|
||||
Auditor auditor.Auditor
|
||||
MeterReporter meterreporter.Reporter
|
||||
}
|
||||
|
||||
func New(
|
||||
@@ -104,6 +108,7 @@ func New(
|
||||
dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
|
||||
gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
|
||||
auditorProviderFactories func(licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]],
|
||||
meterReporterProviderFactories func(context.Context, factory.ProviderSettings, flagger.Flagger, licensing.Licensing, telemetrystore.TelemetryStore, retention.Getter, organization.Getter, zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string),
|
||||
querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
|
||||
cloudIntegrationCallback func(sqlstore.SQLStore, global.Global, zeus.Zeus, gateway.Gateway, licensing.Licensing, serviceaccount.Module, cloudintegration.Config) (cloudintegration.Module, error),
|
||||
rulerProviderFactories func(cache.Cache, alertmanager.Alertmanager, sqlstore.SQLStore, telemetrystore.TelemetryStore, telemetrytypes.MetadataStore, prometheus.Prometheus, organization.Getter, rulestatehistory.Module, querier.Querier, queryparser.QueryParser) factory.NamedMap[factory.ProviderFactory[ruler.Ruler, ruler.Config]],
|
||||
@@ -228,6 +233,8 @@ func New(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
|
||||
|
||||
// Initialize prometheus from the available prometheus provider factories
|
||||
prometheus, err := factory.NewProviderFromNamedMap(
|
||||
ctx,
|
||||
@@ -386,6 +393,13 @@ func New(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize meter reporter from the variant-specific provider factories
|
||||
meterReporterFactories, meterReporterProvider := meterReporterProviderFactories(ctx, providerSettings, flagger, licensing, telemetrystore, retentionGetter, orgGetter, zeus)
|
||||
meterReporter, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.MeterReporter, meterReporterFactories, meterReporterProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize authns
|
||||
store := sqlauthnstore.NewStore(sqlstore)
|
||||
authNs, err := authNsCallback(ctx, providerSettings, store, licensing)
|
||||
@@ -441,7 +455,7 @@ func New(
|
||||
}
|
||||
|
||||
// Initialize all modules
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, flagger)
|
||||
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, retentionGetter, flagger)
|
||||
|
||||
// Initialize ruler from the variant-specific provider factories
|
||||
rulerInstance, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.Ruler, rulerProviderFactories(cache, alertmanager, sqlstore, telemetrystore, telemetryMetadataStore, prometheus, orgGetter, modules.RuleStateHistory, querier, queryParser), "signoz")
|
||||
@@ -501,6 +515,7 @@ func New(
|
||||
factory.NewNamedService(factory.MustNewName("authz"), authz),
|
||||
factory.NewNamedService(factory.MustNewName("user"), userService, factory.MustNewName("authz")),
|
||||
factory.NewNamedService(factory.MustNewName("auditor"), auditor),
|
||||
factory.NewNamedService(factory.MustNewName("meterreporter"), meterReporter, factory.MustNewName("licensing")),
|
||||
factory.NewNamedService(factory.MustNewName("ruler"), rulerInstance),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -550,5 +565,6 @@ func New(
|
||||
Flagger: flagger,
|
||||
Gateway: gateway,
|
||||
Auditor: auditor,
|
||||
MeterReporter: meterReporter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
105
pkg/types/inframonitoringtypes/clusters.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package inframonitoringtypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
type Clusters struct {
|
||||
Type ResponseType `json:"type" required:"true"`
|
||||
Records []ClusterRecord `json:"records" required:"true"`
|
||||
Total int `json:"total" required:"true"`
|
||||
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
|
||||
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
|
||||
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
|
||||
}
|
||||
|
||||
type ClusterRecord struct {
|
||||
// TODO(nikhilmantri0902): once the underlying attr key is migrated to
|
||||
// k8s.cluster.uid (see clusterNameAttrKey TODO in implinframonitoring),
|
||||
// surface ClusterUID alongside (or replace) ClusterName.
|
||||
ClusterName string `json:"clusterName" required:"true"`
|
||||
ClusterCPU float64 `json:"clusterCPU" required:"true"`
|
||||
ClusterCPUAllocatable float64 `json:"clusterCPUAllocatable" required:"true"`
|
||||
ClusterMemory float64 `json:"clusterMemory" required:"true"`
|
||||
ClusterMemoryAllocatable float64 `json:"clusterMemoryAllocatable" required:"true"`
|
||||
NodeCountsByReadiness NodeCountsByReadiness `json:"nodeCountsByReadiness" required:"true"`
|
||||
PodCountsByPhase PodCountsByPhase `json:"podCountsByPhase" required:"true"`
|
||||
Meta map[string]string `json:"meta" required:"true"`
|
||||
}
|
||||
|
||||
// PostableClusters is the request body for the v2 clusters list API.
|
||||
type PostableClusters struct {
|
||||
Start int64 `json:"start" required:"true"`
|
||||
End int64 `json:"end" required:"true"`
|
||||
Filter *qbtypes.Filter `json:"filter"`
|
||||
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
|
||||
OrderBy *qbtypes.OrderBy `json:"orderBy"`
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit" required:"true"`
|
||||
}
|
||||
|
||||
// Validate ensures PostableClusters contains acceptable values.
|
||||
func (req *PostableClusters) Validate() error {
|
||||
if req == nil {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
|
||||
}
|
||||
|
||||
if req.Start <= 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid start time %d: start must be greater than 0",
|
||||
req.Start,
|
||||
)
|
||||
}
|
||||
|
||||
if req.End <= 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid end time %d: end must be greater than 0",
|
||||
req.End,
|
||||
)
|
||||
}
|
||||
|
||||
if req.Start >= req.End {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time range: start (%d) must be less than end (%d)",
|
||||
req.Start,
|
||||
req.End,
|
||||
)
|
||||
}
|
||||
|
||||
if req.Limit < 1 || req.Limit > 5000 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
|
||||
}
|
||||
|
||||
if req.Offset < 0 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
|
||||
}
|
||||
|
||||
if req.OrderBy != nil {
|
||||
if !slices.Contains(ClustersValidOrderByKeys, req.OrderBy.Key.Name) {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
|
||||
}
|
||||
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON validates input immediately after decoding.
|
||||
func (req *PostableClusters) UnmarshalJSON(data []byte) error {
|
||||
type raw PostableClusters
|
||||
var decoded raw
|
||||
if err := json.Unmarshal(data, &decoded); err != nil {
|
||||
return err
|
||||
}
|
||||
*req = PostableClusters(decoded)
|
||||
return req.Validate()
|
||||
}
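
Since UnmarshalJSON calls Validate as soon as the payload is decoded, a caller only needs to decode the request body and surface the error. A minimal usage sketch, assuming an illustrative handler that is not part of this diff:

package main

import (
	"encoding/json"
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
)

// handleListClusters is a hypothetical handler: decoding the body is enough to
// reject invalid requests, because PostableClusters.UnmarshalJSON runs Validate.
func handleListClusters(w http.ResponseWriter, r *http.Request) {
	var req inframonitoringtypes.PostableClusters
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		// Validation errors (bad time range, limit, order-by key) surface here.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// req has a positive, ordered time range and a limit in [1, 5000] at this point.
	_ = req
}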
|
||||
15
pkg/types/inframonitoringtypes/clusters_constants.go
Normal file
@@ -0,0 +1,15 @@
package inframonitoringtypes

const (
	ClustersOrderByCPU               = "cpu"
	ClustersOrderByCPUAllocatable    = "cpu_allocatable"
	ClustersOrderByMemory            = "memory"
	ClustersOrderByMemoryAllocatable = "memory_allocatable"
)

var ClustersValidOrderByKeys = []string{
	ClustersOrderByCPU,
	ClustersOrderByCPUAllocatable,
	ClustersOrderByMemory,
	ClustersOrderByMemoryAllocatable,
}
291
pkg/types/inframonitoringtypes/clusters_test.go
Normal file
@@ -0,0 +1,291 @@
|
||||
package inframonitoringtypes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPostableClusters_Validate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
req *PostableClusters
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid request",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "nil request",
|
||||
req: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "start time zero",
|
||||
req: &PostableClusters{
|
||||
Start: 0,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "start time negative",
|
||||
req: &PostableClusters{
|
||||
Start: -1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "end time zero",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 0,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "start time greater than end time",
|
||||
req: &PostableClusters{
|
||||
Start: 2000,
|
||||
End: 1000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "start time equal to end time",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 1000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "limit zero",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 0,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "limit negative",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: -10,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "limit exceeds max",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 5001,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "offset negative",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: -5,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "orderBy nil is valid",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "orderBy with valid key cpu and direction asc",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: ClustersOrderByCPU,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "orderBy with valid key cpu_allocatable and direction desc",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: ClustersOrderByCPUAllocatable,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "orderBy with valid key memory and direction desc",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: ClustersOrderByMemory,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "orderBy with valid key memory_allocatable and direction asc",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: ClustersOrderByMemoryAllocatable,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "orderBy with condition key is rejected",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "condition",
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "orderBy with pod_phase key is rejected",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "pod_phase",
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "orderBy with invalid key",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "unknown",
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "orderBy with valid key but invalid direction",
|
||||
req: &PostableClusters{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
OrderBy: &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: ClustersOrderByMemory,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.req.Validate()
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
12
pkg/types/retentiontypes/store.go
Normal file
@@ -0,0 +1,12 @@
package retentiontypes

import (
	"context"

	"github.com/SigNoz/signoz/pkg/valuer"
)

type Store interface {
	// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
	ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*TTLSetting, error)
}
@@ -1,10 +1,141 @@
|
||||
package retentiontypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
const secondsPerDay = 24 * 60 * 60
|
||||
|
||||
const (
|
||||
DefaultLogsRetentionDays = 15
|
||||
DefaultMetricsRetentionDays = 30
|
||||
DefaultTracesRetentionDays = 15
|
||||
)
|
||||
|
||||
const (
|
||||
TraceTTL = "traces"
|
||||
MetricsTTL = "metrics"
|
||||
LogsTTL = "logs"
|
||||
)
|
||||
|
||||
const (
|
||||
TTLSettingStatusPending = "pending"
|
||||
TTLSettingStatusFailed = "failed"
|
||||
TTLSettingStatusSuccess = "success"
|
||||
)
|
||||
|
||||
// RetentionPolicySegment is a half-open time range using one retention policy.
|
||||
type RetentionPolicySegment struct {
|
||||
StartMs int64
|
||||
EndMs int64
|
||||
Rules []CustomRetentionRule
|
||||
DefaultDays int
|
||||
}
|
||||
|
||||
// NewRetentionPolicySegment creates a retention policy segment for a half-open time range.
|
||||
func NewRetentionPolicySegment(startMs int64, endMs int64, rules []CustomRetentionRule, defaultDays int) *RetentionPolicySegment {
|
||||
return &RetentionPolicySegment{
|
||||
StartMs: startMs,
|
||||
EndMs: endMs,
|
||||
Rules: rules,
|
||||
DefaultDays: defaultDays,
|
||||
}
|
||||
}
|
||||
|
||||
// BuildRetentionPolicySegmentsFromRows converts successful TTL settings into retention policy segments.
|
||||
func BuildRetentionPolicySegmentsFromRows(rows []*TTLSetting, fallbackDefaultDays int, startMs, endMs int64) ([]*RetentionPolicySegment, error) {
|
||||
if startMs >= endMs {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var activeAtStart *TTLSetting
|
||||
inWindow := make([]*TTLSetting, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
rowMs := row.CreatedAt.UnixMilli()
|
||||
if rowMs <= startMs {
|
||||
activeAtStart = row
|
||||
continue
|
||||
}
|
||||
if rowMs >= endMs {
|
||||
continue
|
||||
}
|
||||
inWindow = append(inWindow, row)
|
||||
}
|
||||
|
||||
activeRules, activeDefault, err := parseTTLSetting(activeAtStart, fallbackDefaultDays)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
segments := make([]*RetentionPolicySegment, 0, len(inWindow)+1)
|
||||
cursor := startMs
|
||||
for _, row := range inWindow {
|
||||
rowMs := row.CreatedAt.UnixMilli()
|
||||
if rowMs <= cursor {
|
||||
activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
segments = append(segments, NewRetentionPolicySegment(cursor, rowMs, activeRules, activeDefault))
|
||||
cursor = rowMs
|
||||
activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if cursor < endMs {
|
||||
segments = append(segments, NewRetentionPolicySegment(cursor, endMs, activeRules, activeDefault))
|
||||
}
|
||||
|
||||
return segments, nil
|
||||
}
|
||||
|
||||
func parseTTLSetting(row *TTLSetting, fallbackDefaultDays int) ([]CustomRetentionRule, int, error) {
|
||||
if row == nil {
|
||||
return nil, fallbackDefaultDays, nil
|
||||
}
|
||||
|
||||
defaultDays := row.TTL
|
||||
if row.Condition == "" {
|
||||
defaultDays = (row.TTL + secondsPerDay - 1) / secondsPerDay
|
||||
}
|
||||
if defaultDays <= 0 {
|
||||
defaultDays = fallbackDefaultDays
|
||||
}
|
||||
|
||||
if row.Condition == "" {
|
||||
return nil, defaultDays, nil
|
||||
}
|
||||
|
||||
var rules []CustomRetentionRule
|
||||
if err := json.Unmarshal([]byte(row.Condition), &rules); err != nil {
|
||||
return nil, 0, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "parse ttl_setting condition for row %q", row.ID.StringValue())
|
||||
}
|
||||
|
||||
return rules, defaultDays, nil
|
||||
}
|
||||
|
||||
// CustomRetentionRule is one custom retention rule as stored in ttl_setting.condition.
|
||||
// Rules are evaluated in declaration order; the first matching rule wins.
|
||||
type CustomRetentionRule struct {
|
||||
Filters []FilterCondition `json:"conditions"`
|
||||
TTLDays int `json:"ttlDays"`
|
||||
}
|
||||
|
||||
// FilterCondition is one label-key, allowed-values condition inside a retention rule.
|
||||
type FilterCondition struct {
|
||||
Key string `json:"key"`
|
||||
Values []string `json:"values"`
|
||||
}
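
The comment above states the matching semantics (rules evaluated in declaration order, first match wins), but the evaluator itself is not part of this diff. A hedged sketch of what such an evaluation could look like; the function name and the resource-labels map are assumptions for illustration, and it relies only on the standard library slices package:

// effectiveTTLDays is an illustrative helper, not code from this change: it walks
// the rules in order and returns the TTL of the first rule whose conditions all match,
// falling back to the default when nothing matches.
func effectiveTTLDays(rules []CustomRetentionRule, labels map[string]string, defaultDays int) int {
	for _, rule := range rules {
		matched := true
		for _, cond := range rule.Filters {
			// A condition matches when the label value for cond.Key is one of the allowed values.
			if !slices.Contains(cond.Values, labels[cond.Key]) {
				matched = false
				break
			}
		}
		if matched {
			return rule.TTLDays
		}
	}
	return defaultDays
}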
|
||||
|
||||
type TTLSetting struct {
|
||||
bun.BaseModel `bun:"table:ttl_setting"`
|
||||
types.Identifiable
|
||||
@@ -17,3 +148,73 @@ type TTLSetting struct {
|
||||
OrgID string `json:"-" bun:"org_id,notnull"`
|
||||
Condition string `bun:"condition,type:text"`
|
||||
}
|
||||
|
||||
type TTLParams struct {
|
||||
Type string
|
||||
ColdStorageVolume string
|
||||
ToColdStorageDuration int64
|
||||
DelDuration int64
|
||||
}
|
||||
|
||||
type CustomRetentionTTLParams struct {
|
||||
Type string `json:"type"`
|
||||
DefaultTTLDays int `json:"defaultTTLDays"`
|
||||
TTLConditions []CustomRetentionRule `json:"ttlConditions"`
|
||||
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
|
||||
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
|
||||
}
|
||||
|
||||
type GetTTLParams struct {
|
||||
Type string
|
||||
}
|
||||
|
||||
type GetCustomRetentionTTLResponse struct {
|
||||
Version string `json:"version"`
|
||||
Status string `json:"status"`
|
||||
|
||||
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
|
||||
|
||||
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
|
||||
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
|
||||
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
|
||||
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
|
||||
}
|
||||
|
||||
type CustomRetentionTTLResponse struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type TTLStatusItem struct {
|
||||
Id int `json:"id" db:"id"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
TableName string `json:"table_name" db:"table_name"`
|
||||
TTL int `json:"ttl" db:"ttl"`
|
||||
Status string `json:"status" db:"status"`
|
||||
ColdStorageTtl int `json:"cold_storage_ttl" db:"cold_storage_ttl"`
|
||||
}
|
||||
|
||||
type SetTTLResponseItem struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type DBResponseTTL struct {
|
||||
EngineFull string `ch:"engine_full"`
|
||||
}
|
||||
|
||||
type GetTTLResponseItem struct {
|
||||
MetricsTime int `json:"metrics_ttl_duration_hrs,omitempty"`
|
||||
MetricsMoveTime int `json:"metrics_move_ttl_duration_hrs,omitempty"`
|
||||
TracesTime int `json:"traces_ttl_duration_hrs,omitempty"`
|
||||
TracesMoveTime int `json:"traces_move_ttl_duration_hrs,omitempty"`
|
||||
LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
|
||||
LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedMetricsTime int `json:"expected_metrics_ttl_duration_hrs,omitempty"`
|
||||
ExpectedMetricsMoveTime int `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedTracesTime int `json:"expected_traces_ttl_duration_hrs,omitempty"`
|
||||
ExpectedTracesMoveTime int `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
|
||||
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
83
pkg/types/retentiontypes/ttl_test.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package retentiontypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuildRetentionPolicySegmentsFromRows(t *testing.T) {
|
||||
start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
|
||||
end := start.AddDate(0, 0, 1)
|
||||
|
||||
ruleA := CustomRetentionRule{
|
||||
Filters: []FilterCondition{{Key: "service.name", Values: []string{"api"}}},
|
||||
TTLDays: 7,
|
||||
}
|
||||
ruleB := CustomRetentionRule{
|
||||
Filters: []FilterCondition{{Key: "env", Values: []string{"prod"}}},
|
||||
TTLDays: 15,
|
||||
}
|
||||
|
||||
t.Run("row before window is active at start", func(t *testing.T) {
|
||||
segments, err := BuildRetentionPolicySegmentsFromRows(
|
||||
[]*TTLSetting{
|
||||
ttlSetting(t, start.Add(-time.Hour), 45, []CustomRetentionRule{ruleA}),
|
||||
},
|
||||
30,
|
||||
start.UnixMilli(),
|
||||
end.UnixMilli(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []*RetentionPolicySegment{
|
||||
NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleA}, 45),
|
||||
}, segments)
|
||||
})
|
||||
|
||||
t.Run("row inside window splits segments", func(t *testing.T) {
|
||||
firstChange := start.Add(6 * time.Hour)
|
||||
secondChange := start.Add(18 * time.Hour)
|
||||
|
||||
segments, err := BuildRetentionPolicySegmentsFromRows(
|
||||
[]*TTLSetting{
|
||||
ttlSetting(t, firstChange, 21, []CustomRetentionRule{ruleA}),
|
||||
ttlSetting(t, secondChange, 14, []CustomRetentionRule{ruleB}),
|
||||
},
|
||||
30,
|
||||
start.UnixMilli(),
|
||||
end.UnixMilli(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []*RetentionPolicySegment{
|
||||
NewRetentionPolicySegment(start.UnixMilli(), firstChange.UnixMilli(), nil, 30),
|
||||
NewRetentionPolicySegment(firstChange.UnixMilli(), secondChange.UnixMilli(), []CustomRetentionRule{ruleA}, 21),
|
||||
NewRetentionPolicySegment(secondChange.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleB}, 14),
|
||||
}, segments)
|
||||
})
|
||||
|
||||
t.Run("no rows uses fallback", func(t *testing.T) {
|
||||
segments, err := BuildRetentionPolicySegmentsFromRows(nil, 30, start.UnixMilli(), end.UnixMilli())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []*RetentionPolicySegment{
|
||||
NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), nil, 30),
|
||||
}, segments)
|
||||
})
|
||||
}
|
||||
|
||||
func ttlSetting(t *testing.T, createdAt time.Time, ttlDays int, rules []CustomRetentionRule) *TTLSetting {
|
||||
t.Helper()
|
||||
|
||||
condition, err := json.Marshal(rules)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &TTLSetting{
|
||||
TimeAuditable: types.TimeAuditable{
|
||||
CreatedAt: createdAt,
|
||||
},
|
||||
TTL: ttlDays,
|
||||
Condition: string(condition),
|
||||
}
|
||||
}
|
||||
@@ -245,7 +245,6 @@ type Store interface {
|
||||
|
||||
// Service Account Role
|
||||
CreateServiceAccountRole(context.Context, *ServiceAccountRole) error
|
||||
DeleteServiceAccountRoles(context.Context, valuer.UUID) error
|
||||
DeleteServiceAccountRole(context.Context, valuer.UUID, valuer.UUID) error
|
||||
|
||||
// Service Account Factor API Key
|
||||
|
||||
20
pkg/types/zeustypes/attributes.go
Normal file
@@ -0,0 +1,20 @@
package zeustypes

import "go.opentelemetry.io/otel/attribute"

var (
	// Identifies the organization.
	OrganizationID = attribute.Key("signoz.organization.id")

	// Identifies the retention bucket a meter belongs to.
	RetentionDuration = attribute.Key("signoz.retention.duration")
)

func NewDimensions(kvs ...attribute.KeyValue) map[string]string {
	dimensions := map[string]string{}
	for _, kv := range kvs {
		dimensions[string(kv.Key)] = kv.Value.AsString()
	}

	return dimensions
}
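
For reference, a small usage sketch of NewDimensions with the two attribute keys above; the concrete values are placeholders, not data from this change:

// Example only: builds the dimensions map reported alongside a meter.
dims := zeustypes.NewDimensions(
	zeustypes.OrganizationID.String("0194fdc2-fa2f-4cc0-81d3-ff12045b73c8"), // placeholder org ID
	zeustypes.RetentionDuration.String("720h"),                              // placeholder retention bucket
)
// dims == map[string]string{"signoz.organization.id": "...", "signoz.retention.duration": "720h"}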
29
pkg/types/zeustypes/deployment.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package zeustypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type GettableDeployment struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Cluster struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Region struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
DNS string `json:"dns"`
|
||||
} `json:"region"`
|
||||
} `json:"cluster"`
|
||||
}
|
||||
|
||||
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
|
||||
deployment := new(GettableDeployment)
|
||||
err := json.Unmarshal(data, deployment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return deployment, nil
|
||||
}
|
||||
46
pkg/types/zeustypes/host.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package zeustypes
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
type Host struct {
|
||||
Name string `json:"name" required:"true"`
|
||||
IsDefault bool `json:"is_default" required:"true"`
|
||||
URL string `json:"url" required:"true"`
|
||||
}
|
||||
|
||||
type GettableHost struct {
|
||||
Name string `json:"name" required:"true"`
|
||||
State string `json:"state" required:"true"`
|
||||
Tier string `json:"tier" required:"true"`
|
||||
Hosts []Host `json:"hosts" required:"true"`
|
||||
}
|
||||
|
||||
type PostableHost struct {
|
||||
Name string `json:"name" required:"true"`
|
||||
}
|
||||
|
||||
func NewGettableHost(data []byte) *GettableHost {
|
||||
parsed := gjson.ParseBytes(data)
|
||||
dns := parsed.Get("cluster.region.dns").String()
|
||||
|
||||
hostResults := parsed.Get("hosts").Array()
|
||||
hosts := make([]Host, len(hostResults))
|
||||
|
||||
for i, h := range hostResults {
|
||||
name := h.Get("name").String()
|
||||
hosts[i].Name = name
|
||||
hosts[i].IsDefault = h.Get("is_default").Bool()
|
||||
hosts[i].URL = (&url.URL{Scheme: "https", Host: name + "." + dns}).String()
|
||||
}
|
||||
|
||||
return &GettableHost{
|
||||
Name: parsed.Get("name").String(),
|
||||
State: parsed.Get("state").String(),
|
||||
Tier: parsed.Get("tier").String(),
|
||||
Hosts: hosts,
|
||||
}
|
||||
}
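
A short sketch of how NewGettableHost turns a raw deployment payload into per-host URLs; the JSON below is illustrative, and only the field paths it uses (name, state, tier, hosts, cluster.region.dns) come from the parser above:

// Example only: parse a hypothetical payload and read the derived host URL.
payload := []byte(`{
	"name": "acme",
	"state": "ACTIVE",
	"tier": "standard",
	"cluster": {"region": {"dns": "signoz.cloud"}},
	"hosts": [{"name": "acme", "is_default": true}]
}`)
host := zeustypes.NewGettableHost(payload)
// host.Hosts[0].URL == "https://acme.signoz.cloud"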
|
||||
54
pkg/types/zeustypes/meter.go
Normal file
@@ -0,0 +1,54 @@
package zeustypes

import "time"

type MeterCheckpoint struct {
	Name      string
	StartDate time.Time
}

type Meter struct {
	// MeterName is the fully-qualified meter identifier.
	MeterName MeterName `json:"name"`

	// Value is the aggregated integer scalar for this meter over the reporting window.
	Value int64 `json:"value"`

	// Unit is the metric unit for this meter.
	Unit MeterUnit `json:"unit"`

	// Aggregation names the aggregation applied to produce Value.
	Aggregation MeterAggregation `json:"aggregation"`

	// StartUnixMilli is the inclusive window start in epoch milliseconds.
	StartUnixMilli int64 `json:"start_unix_milli"`

	// EndUnixMilli is the exclusive window end in epoch milliseconds.
	EndUnixMilli int64 `json:"end_unix_milli"`

	// IsCompleted is false for the current day's partial value.
	IsCompleted bool `json:"is_completed"`

	// Dimensions is the per-meter label set.
	Dimensions map[string]string `json:"dimensions"`
}

func NewMeter(
	name MeterName,
	value int64,
	unit MeterUnit,
	aggregation MeterAggregation,
	window MeterWindow,
	dimensions map[string]string,
) Meter {
	return Meter{
		MeterName:      name,
		Value:          value,
		Unit:           unit,
		Aggregation:    aggregation,
		StartUnixMilli: window.StartUnixMilli,
		EndUnixMilli:   window.EndUnixMilli,
		IsCompleted:    window.IsCompleted,
		Dimensions:     dimensions,
	}
}
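A hedged sketch of assembling a Meter for a completed one-day window. MustNewMeterWindow's signature is taken from the package tests further down; MeterUnitBytes is assumed to be a constant defined in a file not shown in this diff:

package main

import (
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

func main() {
	start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
	// Signature inferred from the tests below: (startUnixMilli, endUnixMilli, isCompleted).
	window := zeustypes.MustNewMeterWindow(start.UnixMilli(), start.AddDate(0, 0, 1).UnixMilli(), true)

	meter := zeustypes.NewMeter(
		zeustypes.MeterLogSize,
		1024,
		zeustypes.MeterUnitBytes, // assumed constant; MeterUnit is defined in a file not shown here
		zeustypes.MeterAggregationSum,
		window,
		zeustypes.NewDimensions(zeustypes.RetentionDuration.String("30d")),
	)

	fmt.Println(meter.MeterName, meter.Value, meter.IsCompleted) // signoz.meter.log.size 1024 true
}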
12
pkg/types/zeustypes/meter_aggregation.go
Normal file
12
pkg/types/zeustypes/meter_aggregation.go
Normal file
@@ -0,0 +1,12 @@
package zeustypes

import "github.com/SigNoz/signoz/pkg/valuer"

type MeterAggregation struct {
	valuer.String
}

var (
	MeterAggregationSum = MeterAggregation{valuer.NewString("sum")}
	MeterAggregationMax = MeterAggregation{valuer.NewString("max")}
)
66
pkg/types/zeustypes/meter_name.go
Normal file
66
pkg/types/zeustypes/meter_name.go
Normal file
@@ -0,0 +1,66 @@
package zeustypes

import (
	"encoding/json"
	"regexp"

	"github.com/SigNoz/signoz/pkg/errors"
)

var (
	MeterSpanSize       = MustNewMeterName("signoz.meter.span.size")
	MeterLogSize        = MustNewMeterName("signoz.meter.log.size")
	MeterDatapointCount = MustNewMeterName("signoz.meter.metric.datapoint.count")
	MeterPlatformActive = MustNewMeterName("signoz.meter.platform.active")
)

var meterNameRegex = regexp.MustCompile(`^[a-z][a-z0-9_.]+$`)

// MeterName is a validated dotted Zeus meter name.
type MeterName struct {
	s string
}

func NewMeterName(s string) (MeterName, error) {
	if !meterNameRegex.MatchString(s) {
		return MeterName{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid meter name: %s", s)
	}

	return MeterName{s: s}, nil
}

func MustNewMeterName(s string) MeterName {
	name, err := NewMeterName(s)
	if err != nil {
		panic(err)
	}

	return name
}

func (n MeterName) String() string {
	return n.s
}

func (n MeterName) IsZero() bool {
	return n.s == ""
}

func (n MeterName) MarshalJSON() ([]byte, error) {
	return json.Marshal(n.s)
}

func (n *MeterName) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}

	name, err := NewMeterName(s)
	if err != nil {
		return err
	}

	*n = name
	return nil
}
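A short sketch of the validation and JSON behaviour of MeterName; the rejected name is an arbitrary example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

func main() {
	// Valid: lowercase and dotted, so it matches ^[a-z][a-z0-9_.]+$.
	name, err := zeustypes.NewMeterName("signoz.meter.span.size")
	fmt.Println(name, err == nil) // signoz.meter.span.size true

	// Invalid: uppercase letters and a hyphen are rejected by the regex.
	_, err = zeustypes.NewMeterName("Invalid-Name")
	fmt.Println(err != nil) // true

	// MarshalJSON emits the plain string; UnmarshalJSON re-validates on the way in.
	out, _ := json.Marshal(name)
	fmt.Println(string(out)) // "signoz.meter.span.size"
}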
30
pkg/types/zeustypes/meter_test.go
Normal file
30
pkg/types/zeustypes/meter_test.go
Normal file
@@ -0,0 +1,30 @@
package zeustypes

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestNewMeterWindow(t *testing.T) {
	start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)

	window, err := NewMeterWindow(start.UnixMilli(), start.AddDate(0, 0, 1).UnixMilli(), true)
	require.NoError(t, err)
	require.Equal(t, start.UnixMilli(), window.StartUnixMilli)
	require.Equal(t, start.AddDate(0, 0, 1).UnixMilli(), window.EndUnixMilli)
	require.True(t, window.IsCompleted)

	_, err = NewMeterWindow(0, start.UnixMilli(), true)
	require.Error(t, err)

	_, err = NewMeterWindow(start.UnixMilli(), start.UnixMilli(), false)
	require.Error(t, err)
}

func TestMustNewMeterWindowPanicsForInvalidWindow(t *testing.T) {
	require.Panics(t, func() {
		MustNewMeterWindow(0, 0, true)
	})
}
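The MeterWindow constructor itself lives in a file not shown in this diff view; based only on the invariants the tests above assert (a positive start and an end strictly after it), a hypothetical sketch could look like the following. The field names match what NewMeter reads, but everything else here is an assumption, not the actual implementation:

package zeustypes

import "github.com/SigNoz/signoz/pkg/errors"

// MeterWindow as sketched here is a hypothetical reconstruction for illustration only.
type MeterWindow struct {
	StartUnixMilli int64
	EndUnixMilli   int64
	IsCompleted    bool
}

func NewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) (MeterWindow, error) {
	// The tests require a positive start and an end strictly greater than the start.
	if startUnixMilli <= 0 || endUnixMilli <= startUnixMilli {
		return MeterWindow{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid meter window: start %d, end %d", startUnixMilli, endUnixMilli)
	}

	return MeterWindow{StartUnixMilli: startUnixMilli, EndUnixMilli: endUnixMilli, IsCompleted: isCompleted}, nil
}

func MustNewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) MeterWindow {
	window, err := NewMeterWindow(startUnixMilli, endUnixMilli, isCompleted)
	if err != nil {
		panic(err)
	}

	return window
}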
Some files were not shown because too many files have changed in this diff.