Compare commits


11 Commits

Author            SHA1        Message                                         Date
Nikhil Mantri     5a9f2b29ce  Merge branch 'main' into feat/infra_disk_usage  2026-02-24 14:42:55 +05:30
nikhilmantri0902  1340ce78e0  fix: lint and workflow                          2026-02-24 14:12:32 +05:30
Nikhil Mantri     a7497b450c  Merge branch 'main' into feat/infra_disk_usage  2026-02-24 13:59:00 +05:30
Nikhil Mantri     c1b83c2bb6  Merge branch 'main' into feat/infra_disk_usage  2026-02-23 14:38:56 +05:30
Nikhil Mantri     449c8b4b8d  Merge branch 'main' into feat/infra_disk_usage  2026-02-16 12:37:11 +05:30
Nikhil Mantri     dba06754ce  Merge branch 'main' into feat/infra_disk_usage  2026-02-12 13:39:32 +05:30
Nikhil Mantri     7f693afce2  Merge branch 'main' into feat/infra_disk_usage  2026-02-11 14:40:39 +05:30
Nikhil Mantri     89dcb5da97  Merge branch 'main' into feat/infra_disk_usage  2026-02-10 14:17:41 +05:30
nikhilmantri0902  feef6515bf  chore: rearrange widgets                        2026-02-10 14:14:25 +05:30
nikhilmantri0902  e20eca9b62  chore: removed log                              2026-02-10 12:29:29 +05:30
nikhilmantri0902  1332098b7d  chore: added disk usage                         2026-02-08 17:55:22 +05:30
32 changed files with 400 additions and 676 deletions

View File

@@ -54,7 +54,7 @@ jobs:
- sqlite
clickhouse-version:
- 25.5.6
- 25.12.5
- 25.10.5
schema-migrator-version:
- v0.142.0
postgres-version:

View File

@@ -82,12 +82,6 @@ exporters:
timeout: 45s
sending_queue:
enabled: false
metadataexporter:
cache:
provider: in_memory
dsn: tcp://clickhouse:9000/signoz_metadata
enabled: true
timeout: 45s
service:
telemetry:
logs:
@@ -99,19 +93,19 @@ service:
traces:
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces, metadataexporter, signozmeter]
exporters: [clickhousetraces, signozmeter]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
exporters: [signozclickhousemetrics, signozmeter]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
exporters: [signozclickhousemetrics, signozmeter]
logs:
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
exporters: [clickhouselogsexporter, signozmeter]
metrics/meter:
receivers: [signozmeter]
processors: [batch/meter]

View File

@@ -82,12 +82,6 @@ exporters:
timeout: 45s
sending_queue:
enabled: false
metadataexporter:
cache:
provider: in_memory
dsn: tcp://clickhouse:9000/signoz_metadata
enabled: true
timeout: 45s
service:
telemetry:
logs:
@@ -99,19 +93,19 @@ service:
traces:
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces, metadataexporter, signozmeter]
exporters: [clickhousetraces, signozmeter]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
exporters: [signozclickhousemetrics, signozmeter]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
exporters: [signozclickhousemetrics, signozmeter]
logs:
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
exporters: [clickhouselogsexporter, signozmeter]
metrics/meter:
receivers: [signozmeter]
processors: [batch/meter]

View File

@@ -308,15 +308,3 @@ export const PublicDashboardPage = Loadable(
/* webpackChunkName: "Public Dashboard Page" */ 'pages/PublicDashboard'
),
);
export const AlertTypeSelectionPage = Loadable(
() =>
import(
/* webpackChunkName: "Alert Type Selection Page" */ 'pages/AlertTypeSelection'
),
);
export const MeterExplorerPage = Loadable(
() =>
import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
);

View File

@@ -1,10 +1,12 @@
import { RouteProps } from 'react-router-dom';
import ROUTES from 'constants/routes';
import AlertTypeSelectionPage from 'pages/AlertTypeSelection';
import MessagingQueues from 'pages/MessagingQueues';
import MeterExplorer from 'pages/MeterExplorer';
import {
AlertHistory,
AlertOverview,
AlertTypeSelectionPage,
AllAlertChannels,
AllErrors,
ApiMonitoring,
@@ -27,8 +29,6 @@ import {
LogsExplorer,
LogsIndexToFields,
LogsSaveViews,
MessagingQueuesMainPage,
MeterExplorerPage,
MetricsExplorer,
OldLogsExplorer,
Onboarding,
@@ -399,28 +399,28 @@ const routes: AppRoutes[] = [
{
path: ROUTES.MESSAGING_QUEUES_KAFKA,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_KAFKA',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_CELERY_TASK',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_OVERVIEW',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
isPrivate: true,
},
@@ -463,21 +463,21 @@ const routes: AppRoutes[] = [
{
path: ROUTES.METER,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER_EXPLORER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER_VIEWS,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER_EXPLORER_VIEWS',
isPrivate: true,
},
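For reference, the shape of the route entries being rewired above, inferred only from the fields visible in this diff — a sketch, not the project's actual AppRoutes definition, which may carry more fields:

import { ComponentType } from 'react';

// Sketch of one route record as used in the entries above.
interface AppRoute {
	path: string;
	exact: boolean;
	component: ComponentType;
	key: string;
	isPrivate: boolean;
}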

View File

@@ -39,6 +39,7 @@ export interface HostData {
waitTimeSeries: TimeSeries;
load15: number;
load15TimeSeries: TimeSeries;
filesystem: number;
}
export interface HostListResponse {

View File

@@ -48,7 +48,7 @@
.labels-row,
.values-row {
display: grid;
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr;
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr 1.5fr;
gap: 30px;
align-items: center;
}

View File

@@ -426,6 +426,12 @@ function HostMetricsDetails({
>
MEMORY USAGE
</Typography.Text>
<Typography.Text
type="secondary"
className="host-details-metadata-label"
>
DISK USAGE
</Typography.Text>
</div>
<div className="values-row">
@@ -478,6 +484,23 @@ function HostMetricsDetails({
className="progress-bar"
/>
</div>
<div className="progress-container">
<Progress
percent={Number((host.filesystem * 100).toFixed(1))}
size="small"
strokeColor={((): string => {
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
if (filesystemPercent >= 90) {
return Color.BG_CHERRY_500;
}
if (filesystemPercent >= 60) {
return Color.BG_AMBER_500;
}
return Color.BG_FOREST_500;
})()}
className="progress-bar"
/>
</div>
</div>
</div>
</div>
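The stroke-color IIFE above duplicates the threshold logic that appears again in the hosts table further down. A minimal sketch of a shared helper (hypothetical name getUsageStrokeColor), assuming the same design-token constants used in the diff:

import { Color } from '@signozhq/design-tokens';

// Hypothetical shared helper mirroring the inline thresholds:
// >= 90% critical (cherry), >= 60% warning (amber), otherwise healthy (forest).
export function getUsageStrokeColor(fraction: number): string {
	const percent = Number((fraction * 100).toFixed(1));
	if (percent >= 90) {
		return Color.BG_CHERRY_500;
	}
	if (percent >= 60) {
		return Color.BG_AMBER_500;
	}
	return Color.BG_FOREST_500;
}

With a helper like this, both call sites reduce to strokeColor={getUsageStrokeColor(host.filesystem)}.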

View File

@@ -21,7 +21,7 @@ export function getDefaultCellStyle(isDarkMode?: boolean): CSSProperties {
export const defaultTableStyle: CSSProperties = {
minWidth: '40rem',
maxWidth: '90rem',
maxWidth: '60rem',
};
export const defaultListViewPanelStyle: CSSProperties = {

View File

@@ -24,6 +24,7 @@ describe('InfraMonitoringHosts utils', () => {
active: true,
cpu: 0.95,
memory: 0.85,
filesystem: 0.65,
wait: 0.05,
load15: 2.5,
os: 'linux',
@@ -67,6 +68,7 @@ describe('InfraMonitoringHosts utils', () => {
active: false,
cpu: 0.3,
memory: 0.4,
filesystem: 0.2,
wait: 0.02,
load15: 1.2,
os: 'linux',
@@ -91,6 +93,7 @@ describe('InfraMonitoringHosts utils', () => {
active: true,
cpu: 0.5,
memory: 0.4,
filesystem: 0.5,
wait: 0.01,
load15: 1.0,
os: 'linux',

View File

@@ -29,6 +29,7 @@ export interface HostRowData {
hostName: string;
cpu: React.ReactNode;
memory: React.ReactNode;
filesystem: React.ReactNode;
wait: string;
load15: number;
active: React.ReactNode;
@@ -163,6 +164,14 @@ export const getHostsListColumns = (): ColumnType<HostRowData>[] => [
sorter: true,
align: 'right',
},
{
title: <div className="column-header-right">Disk Usage</div>,
dataIndex: 'filesystem',
key: 'filesystem',
width: 100,
sorter: true,
align: 'right',
},
{
title: <div className="column-header-right">IOWait</div>,
dataIndex: 'wait',
@@ -233,6 +242,26 @@ export const formatDataForTable = (data: HostData[]): HostRowData[] =>
/>
</div>
),
filesystem: (
<div className="progress-container">
<Progress
percent={Number((host.filesystem * 100).toFixed(1))}
strokeLinecap="butt"
size="small"
strokeColor={((): string => {
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
if (filesystemPercent >= 90) {
return Color.BG_CHERRY_500;
}
if (filesystemPercent >= 60) {
return Color.BG_AMBER_500;
}
return Color.BG_FOREST_500;
})()}
className="progress-bar"
/>
</div>
),
wait: `${Number((host.wait * 100).toFixed(1))}%`,
load15: host.load15,
}));
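The Number((x * 100).toFixed(1)) pattern recurs throughout this file for cpu, memory, filesystem, and wait. A small hypothetical helper would keep the rounding in one place:

// Hypothetical: convert a 0-1 fraction to a percentage rounded to one decimal.
function toPercent(fraction: number): number {
	return Number((fraction * 100).toFixed(1));
}

// toPercent(0.654) === 65.4 — feeds Progress `percent` and the "65.4%" labels.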

View File

@@ -1623,6 +1623,9 @@ export const getHostQueryPayload = (
const diskPendingKey = dotMetricsEnabled
? 'system.disk.pending_operations'
: 'system_disk_pending_operations';
const fsUsageKey = dotMetricsEnabled
? 'system.filesystem.usage'
: 'system_filesystem_usage';
return [
{
@@ -2483,6 +2486,143 @@ export const getHostQueryPayload = (
start,
end,
},
{
selectedTime: 'GLOBAL_TIME',
graphType: PANEL_TYPES.TIME_SERIES,
query: {
builder: {
queryData: [
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'system_filesystem_usage--float64--Gauge--true',
key: fsUsageKey,
type: 'Gauge',
},
aggregateOperator: 'avg',
dataSource: DataSource.METRICS,
disabled: true,
expression: 'A',
filters: {
items: [
{
id: 'fs_f1',
key: {
dataType: DataTypes.String,
id: 'host_name--string--tag--false',
key: hostNameKey,
type: 'tag',
},
op: '=',
value: hostName,
},
{
id: 'fs_f2',
key: {
dataType: DataTypes.String,
id: 'state--string--tag--false',
key: 'state',
type: 'tag',
},
op: '=',
value: 'used',
},
],
op: 'AND',
},
functions: [],
groupBy: [
{
dataType: DataTypes.String,
id: 'mountpoint--string--tag--false',
key: 'mountpoint',
type: 'tag',
},
],
having: [],
legend: '{{mountpoint}}',
limit: null,
orderBy: [],
queryName: 'A',
reduceTo: ReduceOperators.AVG,
spaceAggregation: 'sum',
stepInterval: 60,
timeAggregation: 'avg',
},
{
aggregateAttribute: {
dataType: DataTypes.Float64,
id: 'system_filesystem_usage--float64--Gauge--true',
key: fsUsageKey,
type: 'Gauge',
},
aggregateOperator: 'avg',
dataSource: DataSource.METRICS,
disabled: true,
expression: 'B',
filters: {
items: [
{
id: 'fs_f3',
key: {
dataType: DataTypes.String,
id: 'host_name--string--tag--false',
key: hostNameKey,
type: 'tag',
},
op: '=',
value: hostName,
},
],
op: 'AND',
},
functions: [],
groupBy: [
{
dataType: DataTypes.String,
id: 'mountpoint--string--tag--false',
key: 'mountpoint',
type: 'tag',
},
],
having: [],
legend: '{{mountpoint}}',
limit: null,
orderBy: [],
queryName: 'B',
reduceTo: ReduceOperators.AVG,
spaceAggregation: 'sum',
stepInterval: 60,
timeAggregation: 'avg',
},
],
queryFormulas: [
{
disabled: false,
expression: 'A/B',
legend: '{{mountpoint}}',
queryName: 'F1',
},
],
queryTraceOperator: [],
},
clickhouse_sql: [{ disabled: false, legend: '', name: 'A', query: '' }],
id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
promql: [{ disabled: false, legend: '', name: 'A', query: '' }],
queryType: EQueryType.QUERY_BUILDER,
},
variables: {},
formatForWeb: false,
start,
end,
},
{
selectedTime: 'GLOBAL_TIME',
graphType: PANEL_TYPES.TIME_SERIES,
@@ -2631,6 +2771,6 @@ export const hostWidgetInfo = [
{ title: 'Network connections', yAxisUnit: 'short' },
{ title: 'System disk io (bytes transferred)', yAxisUnit: 'bytes' },
{ title: 'System disk operations/s', yAxisUnit: 'short' },
{ title: 'Disk Usage (%) by mountpoint', yAxisUnit: 'percentunit' },
{ title: 'Queue size', yAxisUnit: 'short' },
{ title: 'Disk operations time', yAxisUnit: 's' },
];
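The new panel derives disk usage as a formula over two hidden queries: A averages system.filesystem.usage filtered to state = "used", B averages it with no state filter (i.e. the total across states), and F1 = A/B yields a 0-1 fraction per mountpoint for the percentunit axis. A sketch of the arithmetic, with illustrative byte counts:

// Illustrative only: how F1 = A/B resolves to a usage fraction for one
// mountpoint, given bytes per filesystem state.
function diskUsageFraction(bytesByState: Record<string, number>): number {
	const used = bytesByState.used ?? 0; // query A: state = "used"
	const total = Object.values(bytesByState).reduce((sum, v) => sum + v, 0); // query B: all states
	return total > 0 ? used / total : 0;
}

// diskUsageFraction({ used: 60e9, free: 40e9 }) === 0.6 -> rendered as 60%.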

View File

@@ -1,7 +1,6 @@
import { ReactNode, useCallback, useEffect, useRef, useState } from 'react';
import { ReactNode, useState } from 'react';
import MEditor, { EditorProps, Monaco } from '@monaco-editor/react';
import { Color } from '@signozhq/design-tokens';
import type { InputRef } from 'antd';
import {
Button,
Collapse,
@@ -47,23 +46,12 @@ function Overview({
handleChangeSelectedView,
}: Props): JSX.Element {
const [isWrapWord, setIsWrapWord] = useState<boolean>(true);
const [isSearchVisible, setIsSearchVisible] = useState<boolean>(true);
const [isSearchVisible, setIsSearchVisible] = useState<boolean>(false);
const [isAttributesExpanded, setIsAttributesExpanded] = useState<boolean>(
true,
);
const [fieldSearchInput, setFieldSearchInput] = useState<string>('');
const focusTimerRef = useRef<ReturnType<typeof setTimeout>>();
const searchInputRef = useCallback((node: InputRef | null) => {
clearTimeout(focusTimerRef.current);
if (node) {
focusTimerRef.current = setTimeout(() => node.focus(), 100);
}
}, []);
useEffect(() => (): void => clearTimeout(focusTimerRef.current), []);
const isDarkMode = useIsDarkMode();
const options: EditorProps['options'] = {
@@ -208,7 +196,7 @@ function Overview({
<>
{isSearchVisible && (
<Input
ref={searchInputRef}
autoFocus
placeholder="Search for a field..."
className="search-input"
value={fieldSearchInput}

View File

@@ -1,34 +0,0 @@
import { Color } from '@signozhq/design-tokens';
import { getColorsForSeverityLabels, isRedLike } from '../utils';
describe('getColorsForSeverityLabels', () => {
it('should return slate for blank labels', () => {
expect(getColorsForSeverityLabels('', 0)).toBe(Color.BG_SLATE_300);
expect(getColorsForSeverityLabels(' ', 0)).toBe(Color.BG_SLATE_300);
});
it('should return correct colors for known severity variants', () => {
expect(getColorsForSeverityLabels('INFO', 0)).toBe(Color.BG_ROBIN_600);
expect(getColorsForSeverityLabels('ERROR', 0)).toBe(Color.BG_CHERRY_600);
expect(getColorsForSeverityLabels('WARN', 0)).toBe(Color.BG_AMBER_600);
expect(getColorsForSeverityLabels('DEBUG', 0)).toBe(Color.BG_AQUA_600);
expect(getColorsForSeverityLabels('TRACE', 0)).toBe(Color.BG_FOREST_600);
expect(getColorsForSeverityLabels('FATAL', 0)).toBe(Color.BG_SAKURA_600);
});
it('should return non-red colors for unrecognized labels at any index', () => {
for (let i = 0; i < 30; i++) {
const color = getColorsForSeverityLabels('4', i);
expect(isRedLike(color)).toBe(false);
}
});
it('should return non-red colors for numeric severity text', () => {
const numericLabels = ['1', '2', '4', '9', '13', '17', '21'];
numericLabels.forEach((label) => {
const color = getColorsForSeverityLabels(label, 0);
expect(isRedLike(color)).toBe(false);
});
});
});

View File

@@ -1,16 +1,7 @@
import { Color } from '@signozhq/design-tokens';
import { themeColors } from 'constants/theme';
import { colors } from 'lib/getRandomColor';
// Function to determine if a color is "red-like" based on its RGB values
export function isRedLike(hex: string): boolean {
const r = parseInt(hex.slice(1, 3), 16);
const g = parseInt(hex.slice(3, 5), 16);
const b = parseInt(hex.slice(5, 7), 16);
return r > 180 && r > g * 1.4 && r > b * 1.4;
}
const SAFE_FALLBACK_COLORS = colors.filter((c) => !isRedLike(c));
const SEVERITY_VARIANT_COLORS: Record<string, string> = {
TRACE: Color.BG_FOREST_600,
Trace: Color.BG_FOREST_500,
@@ -76,13 +67,8 @@ export function getColorsForSeverityLabels(
label: string,
index: number,
): string {
const trimmed = label.trim();
if (!trimmed) {
return Color.BG_SLATE_300;
}
const variantColor = SEVERITY_VARIANT_COLORS[trimmed];
// Check if we have a direct mapping for this severity variant
const variantColor = SEVERITY_VARIANT_COLORS[label.trim()];
if (variantColor) {
return variantColor;
}
@@ -117,8 +103,5 @@ export function getColorsForSeverityLabels(
return Color.BG_SAKURA_500;
}
return (
SAFE_FALLBACK_COLORS[index % SAFE_FALLBACK_COLORS.length] ||
Color.BG_SLATE_400
);
return colors[index % colors.length] || themeColors.red;
}

View File

@@ -111,19 +111,23 @@ const InfinityTable = forwardRef<TableVirtuosoHandle, InfinityTableProps>(
);
const itemContent = useCallback(
(index: number, log: Record<string, unknown>): JSX.Element => (
<TableRow
tableColumns={tableColumns}
index={index}
log={log}
logs={tableViewProps.logs}
hasActions
fontSize={tableViewProps.fontSize}
onShowLogDetails={onSetActiveLog}
isActiveLog={activeLog?.id === log.id}
onClearActiveLog={onCloseActiveLog}
/>
),
(index: number, log: Record<string, unknown>): JSX.Element => {
return (
<div key={log.id as string}>
<TableRow
tableColumns={tableColumns}
index={index}
log={log}
logs={tableViewProps.logs}
hasActions
fontSize={tableViewProps.fontSize}
onShowLogDetails={onSetActiveLog}
isActiveLog={activeLog?.id === log.id}
onClearActiveLog={onCloseActiveLog}
/>
</div>
);
},
[
tableColumns,
onSetActiveLog,
@@ -139,8 +143,7 @@ const InfinityTable = forwardRef<TableVirtuosoHandle, InfinityTableProps>(
{tableColumns
.filter((column) => column.key)
.map((column) => {
const isDragColumn =
column.key !== 'expand' && column.key !== 'state-indicator';
const isDragColumn = column.key !== 'expand';
return (
<TableHeaderCellStyled
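The itemContent change above wraps each virtualized row in a keyed element. A minimal standalone sketch of the same pattern with react-virtuoso, using the plain Virtuoso list for brevity and a simplified log shape; the real table renders TableRow instead of text:

import { Virtuoso } from 'react-virtuoso';

type LogLike = { id: string; body: string };

// Sketch: stable keys for virtualized rows via a keyed wrapper element.
function LogsList({ logs }: { logs: LogLike[] }): JSX.Element {
	return (
		<Virtuoso
			data={logs}
			itemContent={(_index, log): JSX.Element => (
				<div key={log.id}>{log.body}</div>
			)}
		/>
	);
}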

View File

@@ -1,94 +0,0 @@
import { act, renderHook } from '@testing-library/react';
import { useActiveLog } from 'hooks/logs/useActiveLog';
import { useIsTextSelected } from 'hooks/useIsTextSelected';
import { ILog } from 'types/api/logs/log';
import useLogDetailHandlers from '../useLogDetailHandlers';
jest.mock('hooks/logs/useActiveLog');
jest.mock('hooks/useIsTextSelected');
const mockOnSetActiveLog = jest.fn();
const mockOnClearActiveLog = jest.fn();
const mockOnAddToQuery = jest.fn();
const mockOnGroupByAttribute = jest.fn();
const mockIsTextSelected = jest.fn();
const mockLog: ILog = {
id: 'log-1',
timestamp: '2024-01-01T00:00:00Z',
date: '2024-01-01',
body: 'test log body',
severityText: 'INFO',
severityNumber: 9,
traceFlags: 0,
traceId: '',
spanID: '',
attributesString: {},
attributesInt: {},
attributesFloat: {},
resources_string: {},
scope_string: {},
attributes_string: {},
severity_text: '',
severity_number: 0,
};
beforeEach(() => {
jest.clearAllMocks();
jest.mocked(useIsTextSelected).mockReturnValue(mockIsTextSelected);
jest.mocked(useActiveLog).mockReturnValue({
activeLog: null,
onSetActiveLog: mockOnSetActiveLog,
onClearActiveLog: mockOnClearActiveLog,
onAddToQuery: mockOnAddToQuery,
onGroupByAttribute: mockOnGroupByAttribute,
});
});
it('should not open log detail when text is selected', () => {
mockIsTextSelected.mockReturnValue(true);
const { result } = renderHook(() => useLogDetailHandlers());
act(() => {
result.current.handleSetActiveLog(mockLog);
});
expect(mockOnSetActiveLog).not.toHaveBeenCalled();
});
it('should open log detail when no text is selected', () => {
mockIsTextSelected.mockReturnValue(false);
const { result } = renderHook(() => useLogDetailHandlers());
act(() => {
result.current.handleSetActiveLog(mockLog);
});
expect(mockOnSetActiveLog).toHaveBeenCalledWith(mockLog);
});
it('should toggle off when clicking the same active log', () => {
mockIsTextSelected.mockReturnValue(false);
jest.mocked(useActiveLog).mockReturnValue({
activeLog: mockLog,
onSetActiveLog: mockOnSetActiveLog,
onClearActiveLog: mockOnClearActiveLog,
onAddToQuery: mockOnAddToQuery,
onGroupByAttribute: mockOnGroupByAttribute,
});
const { result } = renderHook(() => useLogDetailHandlers());
act(() => {
result.current.handleSetActiveLog(mockLog);
});
expect(mockOnClearActiveLog).toHaveBeenCalled();
expect(mockOnSetActiveLog).not.toHaveBeenCalled();
});

View File

@@ -2,7 +2,6 @@ import { useCallback, useState } from 'react';
import { VIEW_TYPES } from 'components/LogDetail/constants';
import type { UseActiveLog } from 'hooks/logs/types';
import { useActiveLog } from 'hooks/logs/useActiveLog';
import { useIsTextSelected } from 'hooks/useIsTextSelected';
import { ILog } from 'types/api/logs/log';
type SelectedTab = typeof VIEW_TYPES[keyof typeof VIEW_TYPES] | undefined;
@@ -29,13 +28,9 @@ function useLogDetailHandlers({
onAddToQuery,
} = useActiveLog();
const [selectedTab, setSelectedTab] = useState<SelectedTab>(defaultTab);
const isTextSelected = useIsTextSelected();
const handleSetActiveLog = useCallback(
(log: ILog, nextTab: SelectedTab = defaultTab): void => {
if (isTextSelected()) {
return;
}
if (activeLog?.id === log.id) {
onClearActiveLog();
setSelectedTab(undefined);
@@ -44,7 +39,7 @@ function useLogDetailHandlers({
onSetActiveLog(log);
setSelectedTab(nextTab ?? defaultTab);
},
[activeLog?.id, defaultTab, onClearActiveLog, onSetActiveLog, isTextSelected],
[activeLog?.id, defaultTab, onClearActiveLog, onSetActiveLog],
);
const handleCloseLogDetail = useCallback((): void => {

View File

@@ -1,10 +0,0 @@
import { useCallback } from 'react';
export function useIsTextSelected(): () => boolean {
return useCallback((): boolean => {
const selection = window.getSelection();
return (
!!selection && !selection.isCollapsed && selection.toString().length > 0
);
}, []);
}


View File

@@ -120,8 +120,6 @@ func FilterResponse(results []*qbtypes.QueryRangeResponse) []*qbtypes.QueryRange
}
}
resultData.Rows = filteredRows
case *qbtypes.ScalarData:
resultData.Data = filterScalarDataIPs(resultData.Columns, resultData.Data)
}
filteredData = append(filteredData, result)
@@ -147,39 +145,6 @@ func shouldIncludeSeries(series *qbtypes.TimeSeries) bool {
return true
}
func filterScalarDataIPs(columns []*qbtypes.ColumnDescriptor, data [][]any) [][]any {
// Find column indices for server address fields
serverColIndices := make([]int, 0)
for i, col := range columns {
if col.Name == derivedKeyHTTPHost {
serverColIndices = append(serverColIndices, i)
}
}
if len(serverColIndices) == 0 {
return data
}
filtered := make([][]any, 0, len(data))
for _, row := range data {
includeRow := true
for _, colIdx := range serverColIndices {
if colIdx < len(row) {
if strVal, ok := row[colIdx].(string); ok {
if net.ParseIP(strVal) != nil {
includeRow = false
break
}
}
}
}
if includeRow {
filtered = append(filtered, row)
}
}
return filtered
}
func shouldIncludeRow(row *qbtypes.RawRow) bool {
if row.Data != nil {
if domainVal, ok := row.Data[derivedKeyHTTPHost]; ok {

View File

@@ -117,59 +117,6 @@ func TestFilterResponse(t *testing.T) {
},
},
},
{
name: "should filter out IP addresses from scalar data",
input: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"192.168.1.1", 10},
{"example.com", 20},
{"10.0.0.1", 5},
},
},
},
},
},
},
expected: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"example.com", 20},
},
},
},
},
},
},
},
}
for _, tt := range tests {

View File

@@ -22,8 +22,7 @@ type RootConfig struct {
}
type OrgConfig struct {
ID valuer.UUID `mapstructure:"id"`
Name string `mapstructure:"name"`
Name string `mapstructure:"name"`
}
type PasswordConfig struct {

View File

@@ -78,48 +78,6 @@ func (s *service) Stop(ctx context.Context) error {
}
func (s *service) reconcile(ctx context.Context) error {
if !s.config.Org.ID.IsZero() {
return s.reconcileWithOrgID(ctx)
}
return s.reconcileByName(ctx)
}
func (s *service) reconcileWithOrgID(ctx context.Context) error {
org, err := s.orgGetter.Get(ctx, s.config.Org.ID)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
return err // something really went wrong
}
// org was not found using id
// check if we can find an org using name
existingOrgByName, nameErr := s.orgGetter.GetByName(ctx, s.config.Org.Name)
if nameErr != nil && !errors.Ast(nameErr, errors.TypeNotFound) {
return nameErr // something really went wrong
}
// we found an org using name
if existingOrgByName != nil {
// the existing org has the same name as config but org id is different
// inform user with actionable message
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput,
"organization with name %q already exists with a different ID %s (expected %s)",
s.config.Org.Name, existingOrgByName.ID.StringValue(), s.config.Org.ID.StringValue(),
)
}
// default - we did not find any org using either id or name - create a new org
newOrg := types.NewOrganizationWithID(s.config.Org.ID, s.config.Org.Name, s.config.Org.Name)
_, err = s.module.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
return err
}
return s.reconcileRootUser(ctx, org.ID)
}
func (s *service) reconcileByName(ctx context.Context) error {
org, err := s.orgGetter.GetByName(ctx, s.config.Org.Name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {

View File

@@ -10,6 +10,7 @@ import (
)
var dotMetricMap = map[string]string{
"system_filesystem_usage": "system.filesystem.usage",
"system_cpu_time": "system.cpu.time",
"system_memory_usage": "system.memory.usage",
"system_cpu_load_average_15m": "system.cpu.load_average.15m",

View File

@@ -53,10 +53,11 @@ var (
}
queryNamesForTopHosts = map[string][]string{
"cpu": {"A", "B", "F1"},
"memory": {"C", "D", "F2"},
"wait": {"E", "F", "F3"},
"load15": {"G"},
"cpu": {"A", "B", "F1"},
"memory": {"C", "D", "F2"},
"wait": {"E", "F", "F3"},
"load15": {"G"},
"filesystem": {"H", "I", "F4"},
}
// TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric
@@ -67,10 +68,11 @@ var (
GetDotMetrics("os_type"),
}
metricNamesForHosts = map[string]string{
"cpu": GetDotMetrics("system_cpu_time"),
"memory": GetDotMetrics("system_memory_usage"),
"load15": GetDotMetrics("system_cpu_load_average_15m"),
"wait": GetDotMetrics("system_cpu_time"),
"cpu": GetDotMetrics("system_cpu_time"),
"memory": GetDotMetrics("system_memory_usage"),
"load15": GetDotMetrics("system_cpu_load_average_15m"),
"wait": GetDotMetrics("system_cpu_time"),
"filesystem": GetDotMetrics("system_filesystem_usage"),
}
)
@@ -494,10 +496,11 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.HostListRecord{
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
Filesystem: -1,
}
if hostName, ok := row.Data[hostNameAttrKey].(string); ok {
@@ -516,6 +519,9 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
if load15, ok := row.Data["G"].(float64); ok {
record.Load15 = load15
}
if filesystem, ok := row.Data["F4"].(float64); ok {
record.Filesystem = filesystem
}
record.Meta = map[string]string{}
if _, ok := hostAttrs[record.HostName]; ok {
record.Meta = hostAttrs[record.HostName]
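Each list column is backed by the final query name in queryNamesForTopHosts — the formula result, e.g. F4 = H/I for filesystem — read from the row with -1 as the "missing" sentinel. The same extraction, sketched in TypeScript:

// Sketch of the row -> record mapping above: formula results are keyed by
// query name ("F4" for filesystem); -1 marks a metric that was not returned.
function readMetric(rowData: Record<string, unknown>, queryName: string): number {
	const value = rowData[queryName];
	return typeof value === 'number' ? value : -1;
}

// readMetric(row, 'F4') -> disk usage fraction, or -1 if the host has no data.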

View File

@@ -269,42 +269,130 @@ var HostsTableListQuery = v3.QueryRangeParamsV3{
Items: []v3.FilterItem{},
},
},
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["load15"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "G",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
Legend: "CPU Load Average (15m)",
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["load15"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "G",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
Legend: "CPU Load Average (15m)",
},
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["filesystem"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: "state",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
},
Operator: v3.FilterOperatorEqual,
Value: "used",
},
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "H",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: true,
},
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForHosts["filesystem"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
},
Expression: "I",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: true,
},
"F4": {
QueryName: "F4",
Expression: "H/I",
Legend: "Disk Usage (%)",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
},
},
PanelType: v3.PanelTypeTable,
QueryType: v3.QueryTypeBuilder,

View File

@@ -26,14 +26,15 @@ type HostListRequest struct {
}
type HostListRecord struct {
HostName string `json:"hostName"`
Active bool `json:"active"`
OS string `json:"os"`
CPU float64 `json:"cpu"`
Memory float64 `json:"memory"`
Wait float64 `json:"wait"`
Load15 float64 `json:"load15"`
Meta map[string]string `json:"meta"`
HostName string `json:"hostName"`
Active bool `json:"active"`
OS string `json:"os"`
CPU float64 `json:"cpu"`
Memory float64 `json:"memory"`
Wait float64 `json:"wait"`
Load15 float64 `json:"load15"`
Filesystem float64 `json:"filesystem"`
Meta map[string]string `json:"meta"`
}
type HostListResponse struct {
@@ -64,6 +65,10 @@ func (r *HostListResponse) SortBy(orderBy *v3.OrderBy) {
sort.Slice(r.Records, func(i, j int) bool {
return r.Records[i].Wait > r.Records[j].Wait
})
case "filesystem":
sort.Slice(r.Records, func(i, j int) bool {
return r.Records[i].Filesystem > r.Records[j].Filesystem
})
}
// the default is descending
if orderBy.Order == v3.DirectionAsc {
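Serialized through the struct tags above, a host record reaches the frontend roughly as below. Values are hypothetical; the -1 sentinels initialized in GetHostList mean the metric was unavailable:

// Hypothetical sample of one HostListRecord as JSON-decoded on the frontend;
// filesystem is the 0-1 used/total fraction produced by the F4 = H/I formula.
const record = {
	hostName: 'web-01',
	active: true,
	os: 'linux',
	cpu: 0.42,
	memory: 0.67,
	wait: 0.02,
	load15: 1.3,
	filesystem: 0.58,
	meta: {},
};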

View File

@@ -169,7 +169,6 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
)
}

View File

@@ -1,209 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type migrateRulesV4ToV5 struct {
store sqlstore.SQLStore
telemetryStore telemetrystore.TelemetryStore
logger *slog.Logger
}
func NewMigrateRulesV4ToV5Factory(
store sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(
factory.MustNewName("migrate_rules_post_deprecation"),
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return &migrateRulesV4ToV5{
store: store,
telemetryStore: telemetryStore,
logger: ps.Logger,
}, nil
})
}
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT name
FROM (
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
INTERSECT
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
)
ORDER BY name
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT tagKey
FROM signoz_traces.distributed_span_attributes_keys
WHERE tagType IN ('tag', 'resource')
GROUP BY tagKey
HAVING COUNT(DISTINCT tagType) > 1
ORDER BY tagKey
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
logsKeys, err := migration.getLogDuplicateKeys(ctx)
if err != nil {
return err
}
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
if err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
var rules []struct {
ID string `bun:"id"`
Data map[string]any `bun:"data"`
}
err = tx.NewSelect().
Table("rule").
Column("id", "data").
Scan(ctx, &rules)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
count := 0
for _, rule := range rules {
version, _ := rule.Data["version"].(string)
if version == "v5" {
continue
}
if version == "" {
migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
}
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
// Check if the queries envelope already exists and is non-empty
hasQueriesEnvelope := false
if condition, ok := rule.Data["condition"].(map[string]any); ok {
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
hasQueriesEnvelope = true
}
}
}
if hasQueriesEnvelope {
// already has queries envelope, just bump version
// this is because the user chose the wrong version
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
rule.Data["version"] = "v5"
} else {
// old format, run full migration
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
updated := alertsMigrator.Migrate(ctx, rule.Data)
if !updated {
migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
continue
}
rule.Data["version"] = "v5"
}
dataJSON, err := json.Marshal(rule.Data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("rule").
Set("data = ?", string(dataJSON)).
Where("id = ?", rule.ID).
Exec(ctx)
if err != nil {
return err
}
count++
}
if count != 0 {
migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
}
return tx.Commit()
}
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -41,21 +41,6 @@ func NewOrganization(displayName string, name string) *Organization {
}
}
func NewOrganizationWithID(id valuer.UUID, displayName string, name string) *Organization {
return &Organization{
Identifiable: Identifiable{
ID: id,
},
TimeAuditable: TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Name: name,
DisplayName: displayName,
Key: NewOrganizationKey(id),
}
}
func NewOrganizationKey(orgID valuer.UUID) uint32 {
hasher := fnv.New32a()

View File

@@ -355,10 +355,6 @@ func (r *PostableRule) validate() error {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
}
if r.Version != "v5" {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
}
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
}

View File

@@ -108,7 +108,6 @@ func TestParseIntoRule(t *testing.T) {
"ruleType": "threshold_rule",
"evalWindow": "5m",
"frequency": "1m",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -151,7 +150,6 @@ func TestParseIntoRule(t *testing.T) {
content: []byte(`{
"alert": "DefaultsRule",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -189,7 +187,6 @@ func TestParseIntoRule(t *testing.T) {
initRule: PostableRule{},
content: []byte(`{
"alert": "PromQLRule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "promql",
@@ -259,7 +256,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "SeverityLabelTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -348,7 +344,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "NoLabelsTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -389,7 +384,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "OverwriteTest",
"schemaVersion": "v1",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -480,7 +474,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{
"alert": "V2Test",
"schemaVersion": "v2",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -524,7 +517,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
initRule: PostableRule{},
content: []byte(`{
"alert": "DefaultSchemaTest",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -577,7 +569,6 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
content := []byte(`{
"alert": "TestThresholds",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -648,7 +639,6 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
"schemaVersion": "v2",
"alert": "MultiThresholdAlert",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -742,7 +732,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -777,7 +766,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -811,7 +799,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -846,7 +833,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -880,7 +866,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -916,7 +901,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -951,7 +935,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "AnomalyOutOfBoundsTest",
"ruleType": "anomaly_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -986,7 +969,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "ThresholdTest",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",
@@ -1021,7 +1003,6 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{
"alert": "ThresholdTest",
"ruleType": "threshold_rule",
"version": "v5",
"condition": {
"compositeQuery": {
"queryType": "builder",