Compare commits


5 Commits

Author SHA1 Message Date
srikanthccv
5ad672aa63 chore: update tests 2026-02-24 21:52:07 +05:30
srikanthccv
5be4c5d536 chore: remove support for non v5 version in rules 2026-02-24 21:35:36 +05:30
Vinicius Lourenço
cb1a2a8a13 perf(bundle-size): lazy load pages to reduce main bundle size (#10230)
2026-02-24 10:41:40 +00:00
Nikhil Soni
1a5d37b25a fix: add missing filtering for ip address for scalar data (#10264)
* fix: add missing filtering for ip address for scalar data

In the domain listing API for external API monitoring, there is an
option to filter out IP addresses, but it only handled timeseries and
raw type data, while the domain list handler returns scalar data
(see the sketch after the commit list).

* fix: switch to new derived attributes for ip filtering

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2026-02-24 10:26:10 +00:00
Piyush Singariya
bc4273f2f8 chore: test clickhouse version 25.12.5 (#10402) 2026-02-24 14:55:51 +05:30
19 changed files with 395 additions and 358 deletions
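
The fourth commit (1a5d37b25a) extends the existing IP filtering to scalar data. A minimal, self-contained sketch of the idea, assuming rows shaped like the ScalarData payload in the diff below; the helper names isIPAddress and dropIPRows are hypothetical, not the repository's actual functions (the real implementation, filterScalarDataIPs, appears later in the diff):

package main

import (
    "fmt"
    "net"
)

// isIPAddress reports whether a column value is a literal IPv4/IPv6
// address rather than a hostname; net.ParseIP returns nil for anything
// that is not a valid textual IP.
func isIPAddress(v string) bool {
    return net.ParseIP(v) != nil
}

// dropIPRows removes scalar rows whose value at serverCol parses as an IP,
// mirroring the filtering already applied to timeseries and raw data.
// (Illustrative helper, not the repository's code.)
func dropIPRows(rows [][]any, serverCol int) [][]any {
    kept := make([][]any, 0, len(rows))
    for _, row := range rows {
        if serverCol < len(row) {
            if s, ok := row[serverCol].(string); ok && isIPAddress(s) {
                continue
            }
        }
        kept = append(kept, row)
    }
    return kept
}

func main() {
    rows := [][]any{{"192.168.1.1", 10}, {"example.com", 20}, {"10.0.0.1", 5}}
    fmt.Println(dropIPRows(rows, 0)) // keeps only the "example.com" row
}
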

View File

@@ -54,7 +54,7 @@ jobs:
- sqlite
clickhouse-version:
- 25.5.6
- 25.10.5
- 25.12.5
schema-migrator-version:
- v0.142.0
postgres-version:

View File

@@ -308,3 +308,15 @@ export const PublicDashboardPage = Loadable(
/* webpackChunkName: "Public Dashboard Page" */ 'pages/PublicDashboard'
),
);
export const AlertTypeSelectionPage = Loadable(
() =>
import(
/* webpackChunkName: "Alert Type Selection Page" */ 'pages/AlertTypeSelection'
),
);
export const MeterExplorerPage = Loadable(
() =>
import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
);

View File

@@ -1,12 +1,10 @@
import { RouteProps } from 'react-router-dom';
import ROUTES from 'constants/routes';
import AlertTypeSelectionPage from 'pages/AlertTypeSelection';
import MessagingQueues from 'pages/MessagingQueues';
import MeterExplorer from 'pages/MeterExplorer';
import {
AlertHistory,
AlertOverview,
AlertTypeSelectionPage,
AllAlertChannels,
AllErrors,
ApiMonitoring,
@@ -29,6 +27,8 @@ import {
LogsExplorer,
LogsIndexToFields,
LogsSaveViews,
MessagingQueuesMainPage,
MeterExplorerPage,
MetricsExplorer,
OldLogsExplorer,
Onboarding,
@@ -399,28 +399,28 @@ const routes: AppRoutes[] = [
{
path: ROUTES.MESSAGING_QUEUES_KAFKA,
exact: true,
component: MessagingQueues,
component: MessagingQueuesMainPage,
key: 'MESSAGING_QUEUES_KAFKA',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
exact: true,
component: MessagingQueues,
component: MessagingQueuesMainPage,
key: 'MESSAGING_QUEUES_CELERY_TASK',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
exact: true,
component: MessagingQueues,
component: MessagingQueuesMainPage,
key: 'MESSAGING_QUEUES_OVERVIEW',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
exact: true,
component: MessagingQueues,
component: MessagingQueuesMainPage,
key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
isPrivate: true,
},
@@ -463,21 +463,21 @@ const routes: AppRoutes[] = [
{
path: ROUTES.METER,
exact: true,
component: MeterExplorer,
component: MeterExplorerPage,
key: 'METER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER,
exact: true,
component: MeterExplorer,
component: MeterExplorerPage,
key: 'METER_EXPLORER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER_VIEWS,
exact: true,
component: MeterExplorer,
component: MeterExplorerPage,
key: 'METER_EXPLORER_VIEWS',
isPrivate: true,
},

View File

@@ -39,7 +39,6 @@ export interface HostData {
waitTimeSeries: TimeSeries;
load15: number;
load15TimeSeries: TimeSeries;
filesystem: number;
}
export interface HostListResponse {

View File

@@ -48,7 +48,7 @@
.labels-row,
.values-row {
display: grid;
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr 1.5fr;
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr;
gap: 30px;
align-items: center;
}

View File

@@ -426,12 +426,6 @@ function HostMetricsDetails({
>
MEMORY USAGE
</Typography.Text>
<Typography.Text
type="secondary"
className="host-details-metadata-label"
>
DISK USAGE
</Typography.Text>
</div>
<div className="values-row">
@@ -484,23 +478,6 @@ function HostMetricsDetails({
className="progress-bar"
/>
</div>
<div className="progress-container">
<Progress
percent={Number((host.filesystem * 100).toFixed(1))}
size="small"
strokeColor={((): string => {
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
if (filesystemPercent >= 90) {
return Color.BG_CHERRY_500;
}
if (filesystemPercent >= 60) {
return Color.BG_AMBER_500;
}
return Color.BG_FOREST_500;
})()}
className="progress-bar"
/>
</div>
</div>
</div>
</div>

View File

@@ -24,7 +24,6 @@ describe('InfraMonitoringHosts utils', () => {
active: true,
cpu: 0.95,
memory: 0.85,
filesystem: 0.65,
wait: 0.05,
load15: 2.5,
os: 'linux',
@@ -68,7 +67,6 @@ describe('InfraMonitoringHosts utils', () => {
active: false,
cpu: 0.3,
memory: 0.4,
filesystem: 0.2,
wait: 0.02,
load15: 1.2,
os: 'linux',
@@ -93,7 +91,6 @@ describe('InfraMonitoringHosts utils', () => {
active: true,
cpu: 0.5,
memory: 0.4,
filesystem: 0.5,
wait: 0.01,
load15: 1.0,
os: 'linux',

View File

@@ -29,7 +29,6 @@ export interface HostRowData {
hostName: string;
cpu: React.ReactNode;
memory: React.ReactNode;
filesystem: React.ReactNode;
wait: string;
load15: number;
active: React.ReactNode;
@@ -164,14 +163,6 @@ export const getHostsListColumns = (): ColumnType<HostRowData>[] => [
sorter: true,
align: 'right',
},
{
title: <div className="column-header-right">Disk Usage</div>,
dataIndex: 'filesystem',
key: 'filesystem',
width: 100,
sorter: true,
align: 'right',
},
{
title: <div className="column-header-right">IOWait</div>,
dataIndex: 'wait',
@@ -242,26 +233,6 @@ export const formatDataForTable = (data: HostData[]): HostRowData[] =>
/>
</div>
),
filesystem: (
<div className="progress-container">
<Progress
percent={Number((host.filesystem * 100).toFixed(1))}
strokeLinecap="butt"
size="small"
strokeColor={((): string => {
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
if (filesystemPercent >= 90) {
return Color.BG_CHERRY_500;
}
if (filesystemPercent >= 60) {
return Color.BG_AMBER_500;
}
return Color.BG_FOREST_500;
})()}
className="progress-bar"
/>
</div>
),
wait: `${Number((host.wait * 100).toFixed(1))}%`,
load15: host.load15,
}));

View File

@@ -1623,9 +1623,6 @@ export const getHostQueryPayload = (
const diskPendingKey = dotMetricsEnabled
? 'system.disk.pending_operations'
: 'system_disk_pending_operations';
const fsUsageKey = dotMetricsEnabled
? 'system.filesystem.usage'
: 'system_filesystem_usage';
return [
{
@@ -2486,143 +2483,6 @@ export const getHostQueryPayload = (
start,
end,
},
{
selectedTime: 'GLOBAL_TIME',
graphType: PANEL_TYPES.TIME_SERIES,
query: {
builder: {
queryData: [
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'system_filesystem_usage--float64--Gauge--true',
key: fsUsageKey,
type: 'Gauge',
},
aggregateOperator: 'avg',
dataSource: DataSource.METRICS,
disabled: true,
expression: 'A',
filters: {
items: [
{
id: 'fs_f1',
key: {
dataType: DataTypes.String,
id: 'host_name--string--tag--false',
key: hostNameKey,
type: 'tag',
},
op: '=',
value: hostName,
},
{
id: 'fs_f2',
key: {
dataType: DataTypes.String,
id: 'state--string--tag--false',
key: 'state',
type: 'tag',
},
op: '=',
value: 'used',
},
],
op: 'AND',
},
functions: [],
groupBy: [
{
dataType: DataTypes.String,
id: 'mountpoint--string--tag--false',
key: 'mountpoint',
type: 'tag',
},
],
having: [],
legend: '{{mountpoint}}',
limit: null,
orderBy: [],
queryName: 'A',
reduceTo: ReduceOperators.AVG,
spaceAggregation: 'sum',
stepInterval: 60,
timeAggregation: 'avg',
},
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'system_filesystem_usage--float64--Gauge--true',
key: fsUsageKey,
type: 'Gauge',
},
aggregateOperator: 'avg',
dataSource: DataSource.METRICS,
disabled: true,
expression: 'B',
filters: {
items: [
{
id: 'fs_f3',
key: {
dataType: DataTypes.String,
id: 'host_name--string--tag--false',
key: hostNameKey,
type: 'tag',
},
op: '=',
value: hostName,
},
],
op: 'AND',
},
functions: [],
groupBy: [
{
dataType: DataTypes.String,
id: 'mountpoint--string--tag--false',
key: 'mountpoint',
type: 'tag',
},
],
having: [],
legend: '{{mountpoint}}',
limit: null,
orderBy: [],
queryName: 'B',
reduceTo: ReduceOperators.AVG,
spaceAggregation: 'sum',
stepInterval: 60,
timeAggregation: 'avg',
},
],
queryFormulas: [
{
disabled: false,
expression: 'A/B',
legend: '{{mountpoint}}',
queryName: 'F1',
},
],
queryTraceOperator: [],
},
clickhouse_sql: [{ disabled: false, legend: '', name: 'A', query: '' }],
id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
promql: [{ disabled: false, legend: '', name: 'A', query: '' }],
queryType: EQueryType.QUERY_BUILDER,
},
variables: {},
formatForWeb: false,
start,
end,
},
{
selectedTime: 'GLOBAL_TIME',
graphType: PANEL_TYPES.TIME_SERIES,
@@ -2771,6 +2631,6 @@ export const hostWidgetInfo = [
{ title: 'Network connections', yAxisUnit: 'short' },
{ title: 'System disk io (bytes transferred)', yAxisUnit: 'bytes' },
{ title: 'System disk operations/s', yAxisUnit: 'short' },
{ title: 'Disk Usage (%) by mountpoint', yAxisUnit: 'percentunit' },
{ title: 'Queue size', yAxisUnit: 'short' },
{ title: 'Disk operations time', yAxisUnit: 's' },
];

View File

@@ -120,6 +120,8 @@ func FilterResponse(results []*qbtypes.QueryRangeResponse) []*qbtypes.QueryRange
}
}
resultData.Rows = filteredRows
case *qbtypes.ScalarData:
resultData.Data = filterScalarDataIPs(resultData.Columns, resultData.Data)
}
filteredData = append(filteredData, result)
@@ -145,6 +147,39 @@ func shouldIncludeSeries(series *qbtypes.TimeSeries) bool {
return true
}
func filterScalarDataIPs(columns []*qbtypes.ColumnDescriptor, data [][]any) [][]any {
// Find column indices for server address fields
serverColIndices := make([]int, 0)
for i, col := range columns {
if col.Name == derivedKeyHTTPHost {
serverColIndices = append(serverColIndices, i)
}
}
if len(serverColIndices) == 0 {
return data
}
filtered := make([][]any, 0, len(data))
for _, row := range data {
includeRow := true
for _, colIdx := range serverColIndices {
if colIdx < len(row) {
if strVal, ok := row[colIdx].(string); ok {
if net.ParseIP(strVal) != nil {
includeRow = false
break
}
}
}
}
if includeRow {
filtered = append(filtered, row)
}
}
return filtered
}
func shouldIncludeRow(row *qbtypes.RawRow) bool {
if row.Data != nil {
if domainVal, ok := row.Data[derivedKeyHTTPHost]; ok {

View File

@@ -117,6 +117,59 @@ func TestFilterResponse(t *testing.T) {
},
},
},
{
name: "should filter out IP addresses from scalar data",
input: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"192.168.1.1", 10},
{"example.com", 20},
{"10.0.0.1", 5},
},
},
},
},
},
},
expected: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"example.com", 20},
},
},
},
},
},
},
},
}
for _, tt := range tests {

View File

@@ -10,7 +10,6 @@ import (
)
var dotMetricMap = map[string]string{
"system_filesystem_usage": "system.filesystem.usage",
"system_cpu_time": "system.cpu.time",
"system_memory_usage": "system.memory.usage",
"system_cpu_load_average_15m": "system.cpu.load_average.15m",

View File

@@ -53,11 +53,10 @@ var (
}
queryNamesForTopHosts = map[string][]string{
"cpu": {"A", "B", "F1"},
"memory": {"C", "D", "F2"},
"wait": {"E", "F", "F3"},
"load15": {"G"},
"filesystem": {"H", "I", "F4"},
"cpu": {"A", "B", "F1"},
"memory": {"C", "D", "F2"},
"wait": {"E", "F", "F3"},
"load15": {"G"},
}
// TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric
@@ -68,11 +67,10 @@ var (
GetDotMetrics("os_type"),
}
metricNamesForHosts = map[string]string{
"cpu": GetDotMetrics("system_cpu_time"),
"memory": GetDotMetrics("system_memory_usage"),
"load15": GetDotMetrics("system_cpu_load_average_15m"),
"wait": GetDotMetrics("system_cpu_time"),
"filesystem": GetDotMetrics("system_filesystem_usage"),
"cpu": GetDotMetrics("system_cpu_time"),
"memory": GetDotMetrics("system_memory_usage"),
"load15": GetDotMetrics("system_cpu_load_average_15m"),
"wait": GetDotMetrics("system_cpu_time"),
}
)
@@ -496,11 +494,10 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.HostListRecord{
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
Filesystem: -1,
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
}
if hostName, ok := row.Data[hostNameAttrKey].(string); ok {
@@ -519,9 +516,6 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
if load15, ok := row.Data["G"].(float64); ok {
record.Load15 = load15
}
if filesystem, ok := row.Data["F4"].(float64); ok {
record.Filesystem = filesystem
}
record.Meta = map[string]string{}
if _, ok := hostAttrs[record.HostName]; ok {
record.Meta = hostAttrs[record.HostName]

View File

@@ -269,130 +269,42 @@ var HostsTableListQuery = v3.QueryRangeParamsV3{
Items: []v3.FilterItem{},
},
},
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["load15"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["load15"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "G",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
Legend: "CPU Load Average (15m)",
},
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["filesystem"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
GroupBy: []v3.AttributeKey{
{
Key: v3.AttributeKey{
Key: "state",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
},
Operator: v3.FilterOperatorEqual,
Value: "used",
},
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "G",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
Legend: "CPU Load Average (15m)",
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "H",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: true,
},
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["filesystem"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "I",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: true,
},
"F4": {
QueryName: "F4",
Expression: "H/I",
Legend: "Disk Usage (%)",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
},
},
PanelType: v3.PanelTypeTable,
QueryType: v3.QueryTypeBuilder,

View File

@@ -26,15 +26,14 @@ type HostListRequest struct {
}
type HostListRecord struct {
HostName string `json:"hostName"`
Active bool `json:"active"`
OS string `json:"os"`
CPU float64 `json:"cpu"`
Memory float64 `json:"memory"`
Wait float64 `json:"wait"`
Load15 float64 `json:"load15"`
Filesystem float64 `json:"filesystem"`
Meta map[string]string `json:"meta"`
HostName string `json:"hostName"`
Active bool `json:"active"`
OS string `json:"os"`
CPU float64 `json:"cpu"`
Memory float64 `json:"memory"`
Wait float64 `json:"wait"`
Load15 float64 `json:"load15"`
Meta map[string]string `json:"meta"`
}
type HostListResponse struct {
@@ -65,10 +64,6 @@ func (r *HostListResponse) SortBy(orderBy *v3.OrderBy) {
sort.Slice(r.Records, func(i, j int) bool {
return r.Records[i].Wait > r.Records[j].Wait
})
case "filesystem":
sort.Slice(r.Records, func(i, j int) bool {
return r.Records[i].Filesystem > r.Records[j].Filesystem
})
}
// the default is descending
if orderBy.Order == v3.DirectionAsc {

View File

@@ -169,6 +169,7 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
)
}

View File

@@ -0,0 +1,209 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type migrateRulesV4ToV5 struct {
store sqlstore.SQLStore
telemetryStore telemetrystore.TelemetryStore
logger *slog.Logger
}
func NewMigrateRulesV4ToV5Factory(
store sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(
factory.MustNewName("migrate_rules_post_deprecation"),
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return &migrateRulesV4ToV5{
store: store,
telemetryStore: telemetryStore,
logger: ps.Logger,
}, nil
})
}
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT name
FROM (
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
INTERSECT
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
)
ORDER BY name
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT tagKey
FROM signoz_traces.distributed_span_attributes_keys
WHERE tagType IN ('tag', 'resource')
GROUP BY tagKey
HAVING COUNT(DISTINCT tagType) > 1
ORDER BY tagKey
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
logsKeys, err := migration.getLogDuplicateKeys(ctx)
if err != nil {
return err
}
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
if err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
var rules []struct {
ID string `bun:"id"`
Data map[string]any `bun:"data"`
}
err = tx.NewSelect().
Table("rule").
Column("id", "data").
Scan(ctx, &rules)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
count := 0
for _, rule := range rules {
version, _ := rule.Data["version"].(string)
if version == "v5" {
continue
}
if version == "" {
migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
}
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
// Check if the queries envelope already exists and is non-empty
hasQueriesEnvelope := false
if condition, ok := rule.Data["condition"].(map[string]any); ok {
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
hasQueriesEnvelope = true
}
}
}
if hasQueriesEnvelope {
// already has queries envelope, just bump version
// this happens when the user picked the wrong version value
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
rule.Data["version"] = "v5"
} else {
// old format, run full migration
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
updated := alertsMigrator.Migrate(ctx, rule.Data)
if !updated {
migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
continue
}
rule.Data["version"] = "v5"
}
dataJSON, err := json.Marshal(rule.Data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("rule").
Set("data = ?", string(dataJSON)).
Where("id = ?", rule.ID).
Exec(ctx)
if err != nil {
return err
}
count++
}
if count != 0 {
migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
}
return tx.Commit()
}
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -355,6 +355,10 @@ func (r *PostableRule) validate() error {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
}
if r.Version != "v5" {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
}
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
}

View File

@@ -108,6 +108,7 @@ func TestParseIntoRule(t *testing.T) {
"ruleType": "threshold_rule",
"evalWindow": "5m",
"frequency": "1m",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -150,6 +151,7 @@ func TestParseIntoRule(t *testing.T) {
content: []byte(`{
"alert": "DefaultsRule",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -187,6 +189,7 @@ func TestParseIntoRule(t *testing.T) {
initRule: PostableRule{},
content: []byte(`{
"alert": "PromQLRule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "promql",
@@ -256,6 +259,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "SeverityLabelTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -344,6 +348,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "NoLabelsTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -384,6 +389,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "OverwriteTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -474,6 +480,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "V2Test",
"schemaVersion": "v2",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -517,6 +524,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
initRule: PostableRule{},
content: []byte(`{
"alert": "DefaultSchemaTest",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -569,6 +577,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
content := []byte(`{
"alert": "TestThresholds",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -639,6 +648,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
"schemaVersion": "v2",
"alert": "MultiThresholdAlert",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -732,6 +742,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -766,6 +777,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -799,6 +811,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -833,6 +846,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -866,6 +880,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -901,6 +916,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -935,6 +951,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyOutOfBoundsTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -969,6 +986,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "ThresholdTest",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -1003,6 +1021,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "ThresholdTest",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",