Mirror of https://github.com/SigNoz/signoz.git (synced 2026-03-18 10:42:14 +00:00)

Compare commits: 18 commits, `nv/6204` ... `fix/null-g`
| Author | SHA1 | Date |
|---|---|---|
|  | e7dc4e1a0a |  |
|  | 5d789ca38e |  |
|  | 8e28899626 |  |
|  | dc4a735e7f |  |
|  | 50d88fd783 |  |
|  | b473f55444 |  |
|  | 7233af57aa |  |
|  | 30ce22511a |  |
|  | dd8d236647 |  |
|  | 68e4a2c5de |  |
|  | 4affdeda56 |  |
|  | 99944cc1de |  |
|  | d1bd36e88a |  |
|  | d26d4ebd31 |  |
|  | 771e5bd287 |  |
|  | bd33304912 |  |
|  | ec35ef86cf |  |
|  | ca1cc0a4ac |  |
```diff
@@ -308,9 +308,6 @@ user:
 allow_self: true
 # The duration within which a user can reset their password.
 max_token_lifetime: 6h
 invite:
 # The duration within which a user can accept their invite.
 max_token_lifetime: 48h
 root:
 # Whether to enable the root user. When enabled, a root user is provisioned
 # on startup using the email and password below. The root user cannot be
```
```diff
@@ -24,7 +24,8 @@ const config: Config.InitialOptions = {
   '<rootDir>/node_modules/@signozhq/icons/dist/index.esm.js',
  '^react-syntax-highlighter/dist/esm/(.*)$':
   '<rootDir>/node_modules/react-syntax-highlighter/dist/cjs/$1',
- '^@signozhq/([^/]+)$': '<rootDir>/node_modules/@signozhq/$1/dist/$1.js',
+ '^@signozhq/(?!ui$)([^/]+)$':
+  '<rootDir>/node_modules/@signozhq/$1/dist/$1.js',
 },
 extensionsToTreatAsEsm: ['.ts'],
 testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
```
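For reference, the effect of the new negative lookahead in the mapper key can be checked in isolation; this is an illustrative sanity check, not part of the diff:

```ts
// Illustrative check of the new moduleNameMapper pattern (not part of the diff).
const pattern = new RegExp('^@signozhq/(?!ui$)([^/]+)$');

pattern.test('@signozhq/table'); // true  -> still remapped to dist/table.js
pattern.test('@signozhq/ui'); // false -> '@signozhq/ui' now resolves normally instead of via the dist mapping
pattern.test('@signozhq/ui-kit'); // true  -> only the exact package name 'ui' is excluded
```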
```diff
@@ -67,6 +67,7 @@
 "@signozhq/table": "0.3.7",
 "@signozhq/toggle-group": "0.0.1",
 "@signozhq/tooltip": "0.0.2",
+"@signozhq/ui": "0.0.4",
 "@tanstack/react-table": "8.20.6",
 "@tanstack/react-virtual": "3.11.2",
 "@uiw/codemirror-theme-copilot": "4.23.11",
```
frontend/src/auto-import-registry.d.ts (vendored, 1 line changed)

```diff
@@ -30,3 +30,4 @@ import '@signozhq/switch';
 import '@signozhq/table';
 import '@signozhq/toggle-group';
 import '@signozhq/tooltip';
+import '@signozhq/ui';
```
```diff
@@ -123,7 +123,7 @@ export const prepareUPlotConfig = ({
 drawStyle: hasSingleValidPoint ? DrawStyle.Points : DrawStyle.Line,
 label: label,
 colorMapping: widget.customLegendColors ?? {},
-spanGaps: true,
+spanGaps: widget.spanGaps ?? true,
 lineStyle: widget.lineStyle || LineStyle.Solid,
 lineInterpolation: widget.lineInterpolation || LineInterpolation.Spline,
 showPoints:
```
```diff
@@ -30,6 +30,17 @@
   width: 100%;
 }
 
+.section-heading {
+  font-family: 'Space Mono';
+  color: var(--bg-vanilla-400);
+  font-size: 13px;
+  font-style: normal;
+  font-weight: 400;
+  line-height: 18px; /* 138.462% */
+  letter-spacing: 0.52px;
+  text-transform: uppercase;
+}
+
 .header {
   display: flex;
   padding: 14px 14px 14px 12px;
```
```diff
@@ -7,9 +7,10 @@ import {
 } from 'lib/uPlotV2/config/types';
 import { Paintbrush } from 'lucide-react';
 
-import { FillModeSelector } from '../../components/FillModeSelector/FillModeSelector';
-import { LineInterpolationSelector } from '../../components/LineInterpolationSelector/LineInterpolationSelector';
-import { LineStyleSelector } from '../../components/LineStyleSelector/LineStyleSelector';
+import DisconnectValuesSelector from '../../components/DisconnectValuesSelector/DisconnectValuesSelector';
+import FillModeSelector from '../../components/FillModeSelector/FillModeSelector';
+import LineInterpolationSelector from '../../components/LineInterpolationSelector/LineInterpolationSelector';
+import LineStyleSelector from '../../components/LineStyleSelector/LineStyleSelector';
 import SettingsSection from '../../components/SettingsSection/SettingsSection';
 
 interface ChartAppearanceSectionProps {
@@ -21,10 +22,14 @@ interface ChartAppearanceSectionProps {
   setLineInterpolation: Dispatch<SetStateAction<LineInterpolation>>;
   showPoints: boolean;
   setShowPoints: Dispatch<SetStateAction<boolean>>;
+  spanGaps: boolean | number;
+  setSpanGaps: Dispatch<SetStateAction<boolean | number>>;
   allowFillMode: boolean;
   allowLineStyle: boolean;
   allowLineInterpolation: boolean;
   allowShowPoints: boolean;
+  allowSpanGaps: boolean;
+  stepInterval: number;
 }
 
 export default function ChartAppearanceSection({
@@ -36,10 +41,14 @@ export default function ChartAppearanceSection({
   setLineInterpolation,
   showPoints,
   setShowPoints,
+  spanGaps,
+  setSpanGaps,
   allowFillMode,
   allowLineStyle,
   allowLineInterpolation,
   allowShowPoints,
+  allowSpanGaps,
+  stepInterval,
 }: ChartAppearanceSectionProps): JSX.Element {
   return (
     <SettingsSection title="Chart Appearance" icon={<Paintbrush size={14} />}>
@@ -66,6 +75,13 @@ export default function ChartAppearanceSection({
         <Switch size="small" checked={showPoints} onChange={setShowPoints} />
       </section>
     )}
+    {allowSpanGaps && (
+      <DisconnectValuesSelector
+        value={spanGaps}
+        minValue={stepInterval}
+        onChange={setSpanGaps}
+      />
+    )}
     </SettingsSection>
   );
 }
```
```diff
@@ -178,6 +178,8 @@ describe('RightContainer - Alerts Section', () => {
   setLineStyle: jest.fn(),
   showPoints: false,
   setShowPoints: jest.fn(),
+  spanGaps: false,
+  setSpanGaps: jest.fn(),
 };
 
 beforeEach(() => {
```
```diff
@@ -0,0 +1,44 @@
+import { ToggleGroup, ToggleGroupItem } from '@signozhq/toggle-group';
+import { Typography } from 'antd';
+import { DisconnectedValuesMode } from 'lib/uPlotV2/config/types';
+
+interface DisconnectValuesModeToggleProps {
+  value: DisconnectedValuesMode;
+  onChange: (value: DisconnectedValuesMode) => void;
+}
+
+export default function DisconnectValuesModeToggle({
+  value,
+  onChange,
+}: DisconnectValuesModeToggleProps): JSX.Element {
+  return (
+    <ToggleGroup
+      type="single"
+      value={value}
+      variant="outline"
+      size="lg"
+      onValueChange={(newValue): void => {
+        if (newValue) {
+          onChange(newValue as DisconnectedValuesMode);
+        }
+      }}
+    >
+      <ToggleGroupItem
+        value={DisconnectedValuesMode.Never}
+        aria-label="Never"
+        title="Never"
+      >
+        <Typography.Text className="section-heading-small">Never</Typography.Text>
+      </ToggleGroupItem>
+      <ToggleGroupItem
+        value={DisconnectedValuesMode.Threshold}
+        aria-label="Threshold"
+        title="Threshold"
+      >
+        <Typography.Text className="section-heading-small">
+          Threshold
+        </Typography.Text>
+      </ToggleGroupItem>
+    </ToggleGroup>
+  );
+}
```
```diff
@@ -0,0 +1,21 @@
+.disconnect-values-selector {
+  .disconnect-values-input-wrapper {
+    display: flex;
+    flex-direction: column;
+    gap: 16px;
+
+    .disconnect-values-threshold-wrapper {
+      display: flex;
+      flex-direction: column;
+      gap: 8px;
+      .disconnect-values-threshold-input {
+        max-width: 160px;
+        height: auto;
+        .disconnect-values-threshold-prefix {
+          padding: 0 8px;
+          font-size: 20px;
+        }
+      }
+    }
+  }
+}
```
```diff
@@ -0,0 +1,91 @@
+import { useEffect, useState } from 'react';
+import { Typography } from 'antd';
+import { DisconnectedValuesMode } from 'lib/uPlotV2/config/types';
+
+import DisconnectValuesModeToggle from './DisconnectValuesModeToggle';
+import DisconnectValuesThresholdInput from './DisconnectValuesThresholdInput';
+
+import './DisconnectValuesSelector.styles.scss';
+
+const DEFAULT_THRESHOLD_SECONDS = 60;
+
+interface DisconnectValuesSelectorProps {
+  value: boolean | number;
+  minValue?: number;
+  onChange: (value: boolean | number) => void;
+}
+
+export default function DisconnectValuesSelector({
+  value,
+  minValue,
+  onChange,
+}: DisconnectValuesSelectorProps): JSX.Element {
+  const [mode, setMode] = useState<DisconnectedValuesMode>(() => {
+    if (typeof value === 'number') {
+      return DisconnectedValuesMode.Threshold;
+    }
+    return DisconnectedValuesMode.Never;
+  });
+  const [thresholdSeconds, setThresholdSeconds] = useState<number>(
+    typeof value === 'number' ? value : minValue ?? DEFAULT_THRESHOLD_SECONDS,
+  );
+
+  useEffect(() => {
+    if (typeof value === 'boolean') {
+      setMode(DisconnectedValuesMode.Never);
+    } else if (typeof value === 'number') {
+      setMode(DisconnectedValuesMode.Threshold);
+      setThresholdSeconds(value);
+    }
+  }, [value]);
+
+  useEffect(() => {
+    if (minValue !== undefined) {
+      setThresholdSeconds(minValue);
+      if (mode === DisconnectedValuesMode.Threshold) {
+        onChange(minValue);
+      }
+    }
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [minValue]);
+
+  const handleModeChange = (newMode: DisconnectedValuesMode): void => {
+    setMode(newMode);
+    switch (newMode) {
+      case DisconnectedValuesMode.Never:
+        onChange(true);
+        break;
+      case DisconnectedValuesMode.Threshold:
+        onChange(thresholdSeconds);
+        break;
+    }
+  };
+
+  const handleThresholdChange = (seconds: number): void => {
+    setThresholdSeconds(seconds);
+    onChange(seconds);
+  };
+
+  return (
+    <section className="disconnect-values-selector control-container">
+      <Typography.Text className="section-heading">
+        Disconnect values
+      </Typography.Text>
+      <div className="disconnect-values-input-wrapper">
+        <DisconnectValuesModeToggle value={mode} onChange={handleModeChange} />
+        {mode === DisconnectedValuesMode.Threshold && (
+          <section className="control-container">
+            <Typography.Text className="section-heading">
+              Threshold Value
+            </Typography.Text>
+            <DisconnectValuesThresholdInput
+              value={thresholdSeconds}
+              minValue={minValue}
+              onChange={handleThresholdChange}
+            />
+          </section>
+        )}
+      </div>
+    </section>
+  );
+}
```
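A note on the mapping above: the "Never" mode stores `spanGaps` as `true` (gaps are always bridged, i.e. values are never disconnected), while "Threshold" stores the raw seconds value. A minimal sketch of that round-trip, reusing the enum from this diff:

```ts
import { DisconnectedValuesMode } from 'lib/uPlotV2/config/types';

// Sketch of the mapping implemented by DisconnectValuesSelector above.
function modeFromValue(value: boolean | number): DisconnectedValuesMode {
	return typeof value === 'number'
		? DisconnectedValuesMode.Threshold
		: DisconnectedValuesMode.Never;
}

function valueFromMode(
	mode: DisconnectedValuesMode,
	thresholdSeconds: number,
): boolean | number {
	// 'Never' disconnect -> spanGaps: true (always bridge gaps);
	// 'Threshold' -> spanGaps: <seconds> (break only at large gaps).
	return mode === DisconnectedValuesMode.Never ? true : thresholdSeconds;
}

modeFromValue(true); // DisconnectedValuesMode.Never
modeFromValue(120); // DisconnectedValuesMode.Threshold
valueFromMode(DisconnectedValuesMode.Threshold, 120); // 120
```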
```diff
@@ -0,0 +1,87 @@
+import { useEffect, useState } from 'react';
+import { rangeUtil } from '@grafana/data';
+import { Callout } from '@signozhq/callout';
+import { Input } from '@signozhq/ui';
+interface DisconnectValuesThresholdInputProps {
+  value: number;
+  onChange: (seconds: number) => void;
+  minValue?: number;
+}
+
+export default function DisconnectValuesThresholdInput({
+  value,
+  onChange,
+  minValue,
+}: DisconnectValuesThresholdInputProps): JSX.Element {
+  const [inputValue, setInputValue] = useState<string>(
+    rangeUtil.secondsToHms(value),
+  );
+  const [error, setError] = useState<string | null>(null);
+
+  useEffect(() => {
+    setInputValue(rangeUtil.secondsToHms(value));
+    setError(null);
+  }, [value]);
+
+  const commitValue = (txt: string): void => {
+    if (!txt) {
+      return;
+    }
+    try {
+      let seconds: number;
+      if (rangeUtil.isValidTimeSpan(txt)) {
+        seconds = rangeUtil.intervalToSeconds(txt);
+      } else {
+        const parsed = Number(txt);
+        if (Number.isNaN(parsed) || parsed <= 0) {
+          setError('Enter a valid duration (e.g. 1h, 10m, 1d)');
+          return;
+        }
+        seconds = parsed;
+      }
+      if (minValue !== undefined && seconds <= minValue) {
+        setError(`Threshold should be > ${rangeUtil.secondsToHms(minValue)}`);
+        return;
+      }
+      setError(null);
+      setInputValue(txt);
+      onChange(seconds);
+    } catch {
+      setError('Invalid threshold value');
+    }
+  };
+
+  const handleKeyDown = (e: React.KeyboardEvent<HTMLInputElement>): void => {
+    if (e.key === 'Enter') {
+      commitValue(e.currentTarget.value);
+    }
+  };
+
+  const handleBlur = (e: React.FocusEvent<HTMLInputElement>): void => {
+    commitValue(e.currentTarget.value);
+  };
+
+  return (
+    <div className="disconnect-values-threshold-wrapper">
+      <Input
+        name="disconnect-values-threshold"
+        type="text"
+        className="disconnect-values-threshold-input"
+        prefix={<span className="disconnect-values-threshold-prefix">></span>}
+        value={inputValue}
+        onChange={(e): void => {
+          setInputValue(e.currentTarget.value);
+          if (error) {
+            setError(null);
+          }
+        }}
+        onKeyDown={handleKeyDown}
+        onBlur={handleBlur}
+        autoFocus={false}
+        aria-invalid={!!error}
+        aria-describedby={error ? 'threshold-error' : undefined}
+      />
+      {error && <Callout type="error" size="small" showIcon description={error} />}
+    </div>
+  );
+}
```
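commitValue accepts either a Grafana-style interval string or a bare number of seconds. Assuming @grafana/data's rangeUtil behaves as it is used above, inputs map roughly as follows (illustrative values):

```ts
import { rangeUtil } from '@grafana/data';

// Illustrative input -> seconds mapping, mirroring commitValue above.
rangeUtil.intervalToSeconds('10m'); // 600
rangeUtil.intervalToSeconds('1h'); // 3600
Number('90'); // 90  -> accepted as raw seconds via the fallback branch
Number('abc'); // NaN -> rejected with the validation error
```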
```diff
@@ -9,7 +9,7 @@ interface FillModeSelectorProps {
   onChange: (value: FillMode) => void;
 }
 
-export function FillModeSelector({
+export default function FillModeSelector({
   value,
   onChange,
 }: FillModeSelectorProps): JSX.Element {
@@ -9,7 +9,7 @@ interface LineInterpolationSelectorProps {
   onChange: (value: LineInterpolation) => void;
 }
 
-export function LineInterpolationSelector({
+export default function LineInterpolationSelector({
   value,
   onChange,
 }: LineInterpolationSelectorProps): JSX.Element {
@@ -9,7 +9,7 @@ interface LineStyleSelectorProps {
   onChange: (value: LineStyle) => void;
 }
 
-export function LineStyleSelector({
+export default function LineStyleSelector({
   value,
   onChange,
 }: LineStyleSelectorProps): JSX.Element {
```
```diff
@@ -262,3 +262,17 @@ export const panelTypeVsShowPoints: {
   [PANEL_TYPES.TRACE]: false,
   [PANEL_TYPES.EMPTY_WIDGET]: false,
 } as const;
+
+export const panelTypeVsSpanGaps: {
+  [key in PANEL_TYPES]: boolean;
+} = {
+  [PANEL_TYPES.TIME_SERIES]: true,
+  [PANEL_TYPES.VALUE]: false,
+  [PANEL_TYPES.TABLE]: false,
+  [PANEL_TYPES.LIST]: false,
+  [PANEL_TYPES.PIE]: false,
+  [PANEL_TYPES.BAR]: false,
+  [PANEL_TYPES.HISTOGRAM]: false,
+  [PANEL_TYPES.TRACE]: false,
+  [PANEL_TYPES.EMPTY_WIDGET]: false,
+} as const;
```
```diff
@@ -1,6 +1,7 @@
 import { Dispatch, SetStateAction, useMemo } from 'react';
 import { UseQueryResult } from 'react-query';
 import { Typography } from 'antd';
+import { ExecStats } from 'api/v5/v5';
 import { PrecisionOption, PrecisionOptionsEnum } from 'components/Graph/types';
 import { PANEL_TYPES, PanelDisplay } from 'constants/queryBuilder';
 import { PanelTypesWithData } from 'container/DashboardContainer/PanelTypeSelectionModal/menuItems';
@@ -11,6 +12,7 @@ import {
   LineInterpolation,
   LineStyle,
 } from 'lib/uPlotV2/config/types';
+import get from 'lodash-es/get';
 import { SuccessResponse } from 'types/api';
 import {
   ColumnUnit,
@@ -36,6 +38,7 @@ import {
   panelTypeVsPanelTimePreferences,
   panelTypeVsShowPoints,
   panelTypeVsSoftMinMax,
+  panelTypeVsSpanGaps,
   panelTypeVsStackingChartPreferences,
   panelTypeVsThreshold,
   panelTypeVsYAxisUnit,
@@ -68,6 +71,8 @@ function RightContainer({
   setLineStyle,
   showPoints,
   setShowPoints,
+  spanGaps,
+  setSpanGaps,
   bucketCount,
   bucketWidth,
   stackedBarChart,
@@ -138,6 +143,7 @@
   const allowLineStyle = panelTypeVsLineStyle[selectedGraph];
   const allowFillMode = panelTypeVsFillMode[selectedGraph];
   const allowShowPoints = panelTypeVsShowPoints[selectedGraph];
+  const allowSpanGaps = panelTypeVsSpanGaps[selectedGraph];
 
   const decimapPrecisionOptions = useMemo(
     () => [
@@ -176,10 +182,26 @@
       (allowFillMode ||
         allowLineStyle ||
         allowLineInterpolation ||
-        allowShowPoints),
-    [allowFillMode, allowLineStyle, allowLineInterpolation, allowShowPoints],
+        allowShowPoints ||
+        allowSpanGaps),
+    [
+      allowFillMode,
+      allowLineStyle,
+      allowLineInterpolation,
+      allowShowPoints,
+      allowSpanGaps,
+    ],
   );
 
+  const stepInterval = useMemo(() => {
+    const stepIntervals: ExecStats['stepIntervals'] = get(
+      queryResponse,
+      'data.payload.data.newResult.meta.stepIntervals',
+      {},
+    );
+    return Math.min(...Object.values(stepIntervals));
+  }, [queryResponse]);
+
   return (
     <div className="right-container">
       <section className="header">
@@ -237,10 +259,14 @@
           setLineInterpolation={setLineInterpolation}
           showPoints={showPoints}
           setShowPoints={setShowPoints}
+          spanGaps={spanGaps}
+          setSpanGaps={setSpanGaps}
           allowFillMode={allowFillMode}
           allowLineStyle={allowLineStyle}
           allowLineInterpolation={allowLineInterpolation}
           allowShowPoints={allowShowPoints}
+          allowSpanGaps={allowSpanGaps}
+          stepInterval={stepInterval}
         />
       )}
@@ -364,6 +390,8 @@ export interface RightContainerProps {
   setLineStyle: Dispatch<SetStateAction<LineStyle>>;
   showPoints: boolean;
   setShowPoints: Dispatch<SetStateAction<boolean>>;
+  spanGaps: boolean | number;
+  setSpanGaps: Dispatch<SetStateAction<boolean | number>>;
 }
 
 RightContainer.defaultProps = {
```
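The stepInterval passed down to the selector is the smallest step across all queries in the response, and it becomes the minimum allowed disconnect threshold. A standalone sketch of that reduction with hypothetical inputs (note that the `{}` default makes `Math.min` return `Infinity` when no step intervals are present):

```ts
// Sketch of the stepInterval reduction above, with hypothetical inputs.
const stepIntervals: Record<string, number> = { A: 60, B: 120 };
const stepInterval = Math.min(...Object.values(stepIntervals)); // 60

// Edge case inherited from the `{}` default:
Math.min(...Object.values({} as Record<string, number>)); // Infinity
```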
```diff
@@ -220,6 +220,9 @@ function NewWidget({
   const [showPoints, setShowPoints] = useState<boolean>(
     selectedWidget?.showPoints ?? false,
   );
+  const [spanGaps, setSpanGaps] = useState<boolean | number>(
+    selectedWidget?.spanGaps ?? false,
+  );
   const [customLegendColors, setCustomLegendColors] = useState<
     Record<string, string>
   >(selectedWidget?.customLegendColors || {});
@@ -289,6 +292,7 @@ function NewWidget({
   fillMode,
   lineStyle,
   showPoints,
+  spanGaps,
   columnUnits,
   bucketCount,
   stackedBarChart,
@@ -328,6 +332,7 @@ function NewWidget({
   fillMode,
   lineStyle,
   showPoints,
+  spanGaps,
   customLegendColors,
   contextLinks,
   selectedWidget.columnWidths,
@@ -541,6 +546,7 @@ function NewWidget({
   softMin: selectedWidget?.softMin || 0,
   softMax: selectedWidget?.softMax || 0,
   fillSpans: selectedWidget?.fillSpans,
+  spanGaps: selectedWidget?.spanGaps ?? true,
   isLogScale: selectedWidget?.isLogScale || false,
   bucketWidth: selectedWidget?.bucketWidth || 0,
   bucketCount: selectedWidget?.bucketCount || 0,
@@ -572,6 +578,7 @@ function NewWidget({
   softMin: selectedWidget?.softMin || 0,
   softMax: selectedWidget?.softMax || 0,
   fillSpans: selectedWidget?.fillSpans,
+  spanGaps: selectedWidget?.spanGaps ?? true,
   isLogScale: selectedWidget?.isLogScale || false,
   bucketWidth: selectedWidget?.bucketWidth || 0,
   bucketCount: selectedWidget?.bucketCount || 0,
@@ -889,6 +896,8 @@ function NewWidget({
   setLineStyle={setLineStyle}
   showPoints={showPoints}
   setShowPoints={setShowPoints}
+  spanGaps={spanGaps}
+  setSpanGaps={setSpanGaps}
   opacity={opacity}
   yAxisUnit={yAxisUnit}
   columnUnits={columnUnits}
```
```diff
@@ -7,6 +7,7 @@ import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFall
 import uPlot, { AlignedData, Options } from 'uplot';
 
 import { usePlotContext } from '../context/PlotContext';
+import { applySpanGapsToAlignedData } from '../utils/dataUtils';
 import { UPlotChartProps } from './types';
 
 /**
@@ -84,7 +85,13 @@ export default function UPlotChart({
   } as Options;
 
   // Create new plot instance
-  const plot = new uPlot(plotConfig, data as AlignedData, containerRef.current);
+  const seriesSpanGaps = config.getSeriesSpanGapsOptions();
+  const preparedData =
+    seriesSpanGaps.length > 0
+      ? applySpanGapsToAlignedData(data, seriesSpanGaps)
+      : (data as AlignedData);
+
+  const plot = new uPlot(plotConfig, preparedData, containerRef.current);
 
   if (plotRef) {
     plotRef(plot);
@@ -162,7 +169,13 @@ export default function UPlotChart({
   }
   // Update data if only data changed
   else if (!sameData(prevProps, currentProps) && plotInstanceRef.current) {
-    plotInstanceRef.current.setData(data as AlignedData);
+    const seriesSpanGaps = config.getSeriesSpanGapsOptions?.() ?? [];
+    const preparedData =
+      seriesSpanGaps.length > 0
+        ? applySpanGapsToAlignedData(data as AlignedData, seriesSpanGaps)
+        : (data as AlignedData);
+
+    plotInstanceRef.current.setData(preparedData as AlignedData);
   }
 
   prevPropsRef.current = currentProps;
```
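Both the create and the update path now run the same guard: transform the data only when at least one series carries a spanGaps option. A condensed sketch of that shared shape (`prepareData` is a hypothetical helper name, not in the diff; the dataUtils import path mirrors the one above):

```ts
import type { AlignedData } from 'uplot';

import {
	applySpanGapsToAlignedData,
	SeriesSpanGapsOption,
} from '../utils/dataUtils';

// Hypothetical helper condensing the duplicated create/update logic above.
function prepareData(
	data: AlignedData,
	seriesSpanGaps: SeriesSpanGapsOption[],
): AlignedData {
	// Skip the extra pass entirely when no series opted into spanGaps handling.
	return seriesSpanGaps.length > 0
		? applySpanGapsToAlignedData(data, seriesSpanGaps)
		: data;
}
```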
```diff
@@ -86,6 +86,7 @@ const createMockConfig = (): UPlotConfigBuilder => {
   }),
   getId: jest.fn().mockReturnValue(undefined),
   getShouldSaveSelectionPreference: jest.fn().mockReturnValue(false),
+  getSeriesSpanGapsOptions: jest.fn().mockReturnValue([]),
 } as unknown) as UPlotConfigBuilder;
 };
@@ -328,6 +329,78 @@
   });
 });
 
+describe('spanGaps data transformation', () => {
+  it('inserts null break points before passing data to uPlot when a gap exceeds the numeric threshold', () => {
+    const config = createMockConfig();
+    // gap 0→100 = 100 > threshold 50 → null inserted at midpoint x=50
+    (config.getSeriesSpanGapsOptions as jest.Mock).mockReturnValue([
+      { spanGaps: 50 },
+    ]);
+    const data: AlignedData = [
+      [0, 100],
+      [1, 2],
+    ];
+
+    render(<UPlotChart config={config} data={data} width={600} height={400} />, {
+      wrapper: Wrapper,
+    });
+
+    const [, receivedData] = mockUPlotConstructor.mock.calls[0];
+    expect(receivedData[0]).toEqual([0, 50, 100]);
+    expect(receivedData[1]).toEqual([1, null, 2]);
+  });
+
+  it('passes data through unchanged when no gap exceeds the numeric threshold', () => {
+    const config = createMockConfig();
+    // all gaps = 10, threshold = 50 → no insertions, same reference returned
+    (config.getSeriesSpanGapsOptions as jest.Mock).mockReturnValue([
+      { spanGaps: 50 },
+    ]);
+    const data: AlignedData = [
+      [0, 10, 20],
+      [1, 2, 3],
+    ];
+
+    render(<UPlotChart config={config} data={data} width={600} height={400} />, {
+      wrapper: Wrapper,
+    });
+
+    const [, receivedData] = mockUPlotConstructor.mock.calls[0];
+    expect(receivedData).toBe(data);
+  });
+
+  it('transforms data passed to setData when data updates and a new gap exceeds the threshold', () => {
+    const config = createMockConfig();
+    (config.getSeriesSpanGapsOptions as jest.Mock).mockReturnValue([
+      { spanGaps: 50 },
+    ]);
+
+    // initial render: gap 10 < 50, no transformation
+    const initialData: AlignedData = [
+      [0, 10],
+      [1, 2],
+    ];
+    // updated data: gap 100 > 50 → null inserted at midpoint x=50
+    const newData: AlignedData = [
+      [0, 100],
+      [3, 4],
+    ];
+
+    const { rerender } = render(
+      <UPlotChart config={config} data={initialData} width={600} height={400} />,
+      { wrapper: Wrapper },
+    );
+
+    rerender(
+      <UPlotChart config={config} data={newData} width={600} height={400} />,
+    );
+
+    const receivedData = instances[0].setData.mock.calls[0][0];
+    expect(receivedData[0]).toEqual([0, 50, 100]);
+    expect(receivedData[1]).toEqual([3, null, 4]);
+  });
+});
+
 describe('prop updates', () => {
   it('calls setData without recreating the plot when only data changes', () => {
     const config = createMockConfig();
```
```diff
@@ -14,6 +14,7 @@ import {
   STEP_INTERVAL_MULTIPLIER,
 } from '../constants';
 import { calculateWidthBasedOnStepInterval } from '../utils';
+import { SeriesSpanGapsOption } from '../utils/dataUtils';
 import {
   ConfigBuilder,
   ConfigBuilderProps,
@@ -161,6 +162,13 @@ export class UPlotConfigBuilder extends ConfigBuilder<
   this.series.push(new UPlotSeriesBuilder(props));
 }
 
+getSeriesSpanGapsOptions(): SeriesSpanGapsOption[] {
+  return this.series.map((s) => {
+    const { spanGaps } = s.props;
+    return { spanGaps };
+  });
+}
+
 /**
  * Add a hook for extensibility
  */
```
```diff
@@ -4,6 +4,7 @@ import { calculateWidthBasedOnStepInterval } from 'lib/uPlotV2/utils';
 import uPlot, { Series } from 'uplot';
 
 import { generateGradientFill } from '../utils/generateGradientFill';
+import { isolatedPointFilter } from '../utils/seriesPointsFilter';
 import {
   BarAlignment,
   ConfigBuilder,
@@ -146,20 +147,8 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
 }: {
   resolvedLineColor: string;
 }): Partial<Series.Points> {
-  const {
-    lineWidth,
-    pointSize,
-    pointsBuilder,
-    pointsFilter,
-    drawStyle,
-    showPoints,
-  } = this.props;
+  const { lineWidth, pointSize, pointsFilter } = this.props;
 
   /**
    * If pointSize is not provided, use the lineWidth * POINT_SIZE_FACTOR
    * to determine the point size.
    * POINT_SIZE_FACTOR is 2, so the point size will be 2x the line width.
    */
   const resolvedPointSize =
     pointSize ?? (lineWidth ?? DEFAULT_LINE_WIDTH) * POINT_SIZE_FACTOR;
@@ -168,19 +157,44 @@
     fill: resolvedLineColor,
     size: resolvedPointSize,
     filter: pointsFilter || undefined,
+    show: this.resolvePointsShow(),
   };
 
-  if (pointsBuilder) {
-    pointsConfig.show = pointsBuilder;
-  } else if (drawStyle === DrawStyle.Points) {
-    pointsConfig.show = true;
-  } else {
-    pointsConfig.show = !!showPoints;
+  // When spanGaps is in threshold (numeric) mode, points hidden by default
+  // become invisible when isolated by injected gap-nulls (no line connects
+  // to them). Use a gap-based filter to show only those isolated points as
+  // dots. Do NOT set show=true here — the filter is called with show=false
+  // and returns specific indices to render; setting show=true would cause
+  // uPlot to call filter with show=true which short-circuits the logic and
+  // renders all points.
+  if (this.shouldApplyIsolatedPointFilter(pointsConfig.show)) {
+    pointsConfig.filter = isolatedPointFilter;
   }
 
   return pointsConfig;
 }
 
+private resolvePointsShow(): Series.Points['show'] {
+  const { pointsBuilder, drawStyle, showPoints } = this.props;
+  if (pointsBuilder) {
+    return pointsBuilder;
+  }
+  if (drawStyle === DrawStyle.Points) {
+    return true;
+  }
+  return !!showPoints;
+}
+
+private shouldApplyIsolatedPointFilter(show: Series.Points['show']): boolean {
+  const { drawStyle, spanGaps, pointsFilter } = this.props;
+  return (
+    drawStyle === DrawStyle.Line &&
+    typeof spanGaps === 'number' &&
+    !pointsFilter &&
+    !show
+  );
+}
+
 private getLineColor(): string {
   const { colorMapping, label, lineColor, isDarkMode } = this.props;
   if (!label) {
@@ -212,7 +226,12 @@
   return {
     scale: scaleKey,
     label,
-    spanGaps: typeof spanGaps === 'boolean' ? spanGaps : false,
+    // When spanGaps is numeric, we always disable uPlot's internal
+    // spanGaps behavior and rely on data-prep to implement the
+    // threshold-based null handling. When spanGaps is boolean we
+    // map it directly. When spanGaps is undefined we fall back to
+    // the default of false.
+    spanGaps: typeof spanGaps === 'number' ? false : !!spanGaps,
     value: (): string => '',
     pxAlign: true,
     show,
```
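Stripped of the builder plumbing, the two decisions documented in the comments above reduce to a pair of pure functions. A minimal sketch (string literals stand in for the DrawStyle enum):

```ts
import type { Series } from 'uplot';

// Minimal sketch of the two mappings implemented by UPlotSeriesBuilder above.
function toUPlotSpanGaps(spanGaps?: boolean | number): boolean {
	// numeric -> false: the threshold is enforced by inserting nulls in data prep
	return typeof spanGaps === 'number' ? false : !!spanGaps;
}

function needsIsolatedPointFilter(opts: {
	drawStyle: 'line' | 'points'; // stand-in for the DrawStyle enum
	spanGaps?: boolean | number;
	pointsFilter?: Series.Points.Filter;
	show: Series.Points['show'];
}): boolean {
	// Only plain line series with hidden points and no custom filter
	// get the gap-based isolated-point filter.
	return (
		opts.drawStyle === 'line' &&
		typeof opts.spanGaps === 'number' &&
		!opts.pointsFilter &&
		!opts.show
	);
}

toUPlotSpanGaps(true); // true
toUPlotSpanGaps(60_000); // false: data prep owns the threshold behavior
```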
```diff
@@ -1,6 +1,7 @@
 import { themeColors } from 'constants/theme';
 import uPlot from 'uplot';
 
+import { isolatedPointFilter } from '../../utils/seriesPointsFilter';
 import type { SeriesProps } from '../types';
 import { DrawStyle, LineInterpolation, LineStyle } from '../types';
 import { POINT_SIZE_FACTOR, UPlotSeriesBuilder } from '../UPlotSeriesBuilder';
@@ -40,6 +41,37 @@ describe('UPlotSeriesBuilder', () => {
   expect(typeof config.value).toBe('function');
 });
 
+it('maps boolean spanGaps directly to uPlot spanGaps', () => {
+  const trueBuilder = new UPlotSeriesBuilder(
+    createBaseProps({
+      spanGaps: true,
+    }),
+  );
+  const falseBuilder = new UPlotSeriesBuilder(
+    createBaseProps({
+      spanGaps: false,
+    }),
+  );
+
+  const trueConfig = trueBuilder.getConfig();
+  const falseConfig = falseBuilder.getConfig();
+
+  expect(trueConfig.spanGaps).toBe(true);
+  expect(falseConfig.spanGaps).toBe(false);
+});
+
+it('disables uPlot spanGaps when spanGaps is a number', () => {
+  const builder = new UPlotSeriesBuilder(
+    createBaseProps({
+      spanGaps: 10000,
+    }),
+  );
+
+  const config = builder.getConfig();
+
+  expect(config.spanGaps).toBe(false);
+});
+
 it('uses explicit lineColor when provided, regardless of mapping', () => {
   const builder = new UPlotSeriesBuilder(
     createBaseProps({
@@ -284,4 +316,50 @@ describe('UPlotSeriesBuilder', () => {
 
   expect(config.points?.filter).toBe(pointsFilter);
 });
+
+it('assigns isolatedPointFilter and does not force show=true when spanGaps is numeric and no custom filter', () => {
+  const builder = new UPlotSeriesBuilder(
+    createBaseProps({
+      drawStyle: DrawStyle.Line,
+      spanGaps: 10_000,
+      showPoints: false,
+    }),
+  );
+
+  const config = builder.getConfig();
+
+  expect(config.points?.filter).toBe(isolatedPointFilter);
+  expect(config.points?.show).toBe(false);
+});
+
+it('does not assign isolatedPointFilter when a custom pointsFilter is provided alongside numeric spanGaps', () => {
+  const customFilter: uPlot.Series.Points.Filter = jest.fn(() => null);
+
+  const builder = new UPlotSeriesBuilder(
+    createBaseProps({
+      drawStyle: DrawStyle.Line,
+      spanGaps: 10_000,
+      pointsFilter: customFilter,
+    }),
+  );
+
+  const config = builder.getConfig();
+
+  expect(config.points?.filter).toBe(customFilter);
+});
+
+it('does not assign isolatedPointFilter when showPoints is true even with numeric spanGaps', () => {
+  const builder = new UPlotSeriesBuilder(
+    createBaseProps({
+      drawStyle: DrawStyle.Line,
+      spanGaps: 10_000,
+      showPoints: true,
+    }),
+  );
+
+  const config = builder.getConfig();
+
+  expect(config.points?.filter).toBeUndefined();
+  expect(config.points?.show).toBe(true);
+});
 });
```
```diff
@@ -99,6 +99,11 @@ export interface ScaleProps {
   distribution?: DistributionType;
 }
 
+export enum DisconnectedValuesMode {
+  Never = 'never',
+  Threshold = 'threshold',
+}
+
 /**
  * Props for configuring a series
  */
@@ -175,7 +180,16 @@
   pointsFilter?: Series.Points.Filter;
   pointsBuilder?: Series.Points.Show;
   show?: boolean;
-  spanGaps?: boolean;
+  /**
+   * Controls how nulls are treated for this series.
+   *
+   * - boolean: mapped directly to uPlot's spanGaps behavior
+   * - number: interpreted as an X-axis threshold (same unit as ref values),
+   *   where gaps smaller than this threshold are spanned by
+   *   converting short null runs to undefined during data prep
+   *   while uPlot's internal spanGaps is kept disabled.
+   */
+  spanGaps?: boolean | number;
   fillColor?: string;
   fillMode?: FillMode;
   isDarkMode?: boolean;
```
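Concretely, the three shapes of `spanGaps` configure a series like this (illustrative values; the numeric threshold is in the x-axis unit, typically milliseconds for timestamps):

```ts
// Illustrative spanGaps values (other SeriesProps fields omitted):
const alwaysSpan = { spanGaps: true }; // nulls are bridged everywhere
const breakAtNulls = { spanGaps: false }; // every null breaks the line
const thresholded = { spanGaps: 60_000 }; // break only where the x-gap exceeds 60 000
```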
```diff
@@ -1,4 +1,12 @@
-import { isInvalidPlotValue, normalizePlotValue } from '../dataUtils';
+import uPlot from 'uplot';
+
+import {
+  applySpanGapsToAlignedData,
+  insertLargeGapNullsIntoAlignedData,
+  isInvalidPlotValue,
+  normalizePlotValue,
+  SeriesSpanGapsOption,
+} from '../dataUtils';
 
 describe('dataUtils', () => {
   describe('isInvalidPlotValue', () => {
@@ -59,4 +67,217 @@
     expect(normalizePlotValue(42.5)).toBe(42.5);
   });
 });
 
+describe('insertLargeGapNullsIntoAlignedData', () => {
+  it('returns original data unchanged when no gap exceeds the threshold', () => {
+    // all gaps = 10, threshold = 25 → no insertions
+    const data: uPlot.AlignedData = [
+      [0, 10, 20, 30],
+      [1, 2, 3, 4],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 25 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result).toBe(data);
+  });
+
+  it('does not insert when the gap equals the threshold exactly', () => {
+    // gap = 50, threshold = 50 → condition is gap > threshold, not >=
+    const data: uPlot.AlignedData = [
+      [0, 50],
+      [1, 2],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 50 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result).toBe(data);
+  });
+
+  it('inserts a null at the midpoint when a single gap exceeds the threshold', () => {
+    // gap 0→100 = 100 > 50 → insert null at x=50
+    const data: uPlot.AlignedData = [
+      [0, 100],
+      [1, 2],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 50 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result[0]).toEqual([0, 50, 100]);
+    expect(result[1]).toEqual([1, null, 2]);
+  });
+
+  it('inserts nulls at every gap that exceeds the threshold', () => {
+    // gaps: 0→100=100, 100→110=10, 110→210=100; threshold=50
+    // → insert at 0→100 and 110→210
+    const data: uPlot.AlignedData = [
+      [0, 100, 110, 210],
+      [1, 2, 3, 4],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 50 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result[0]).toEqual([0, 50, 100, 110, 160, 210]);
+    expect(result[1]).toEqual([1, null, 2, 3, null, 4]);
+  });
+
+  it('inserts null for all series at a gap triggered by any one series', () => {
+    // series 0: threshold=50, gap=100 → triggers insertion
+    // series 1: threshold=200, gap=100 → would not trigger alone
+    // result: both series get null at the inserted x because the x-axis is shared
+    const data: uPlot.AlignedData = [
+      [0, 100],
+      [1, 2],
+      [3, 4],
+    ];
+    const options: SeriesSpanGapsOption[] = [
+      { spanGaps: 50 },
+      { spanGaps: 200 },
+    ];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result[0]).toEqual([0, 50, 100]);
+    expect(result[1]).toEqual([1, null, 2]);
+    expect(result[2]).toEqual([3, null, 4]);
+  });
+
+  it('ignores boolean spanGaps options (only numeric values trigger insertion)', () => {
+    const data: uPlot.AlignedData = [
+      [0, 100],
+      [1, 2],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: true }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result).toBe(data);
+  });
+
+  it('returns original data when series options array is empty', () => {
+    const data: uPlot.AlignedData = [
+      [0, 100],
+      [1, 2],
+    ];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, []);
+
+    expect(result).toBe(data);
+  });
+
+  it('returns original data when there is only one x point', () => {
+    const data: uPlot.AlignedData = [[0], [1]];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 10 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result).toBe(data);
+  });
+
+  it('preserves existing null values in the series alongside inserted ones', () => {
+    // original series already has a null; gap 0→100 also triggers insertion
+    const data: uPlot.AlignedData = [
+      [0, 100, 110],
+      [1, null, 2],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 50 }];
+
+    const result = insertLargeGapNullsIntoAlignedData(data, options);
+
+    expect(result[0]).toEqual([0, 50, 100, 110]);
+    expect(result[1]).toEqual([1, null, null, 2]);
+  });
+});
+
+describe('applySpanGapsToAlignedData', () => {
+  const xs: uPlot.AlignedData[0] = [0, 10, 20, 30];
+
+  it('returns original data when there are no series', () => {
+    const data: uPlot.AlignedData = [xs];
+    const result = applySpanGapsToAlignedData(data, []);
+
+    expect(result).toBe(data);
+  });
+
+  it('leaves data unchanged when spanGaps is undefined', () => {
+    const ys = [1, null, 2, null];
+    const data: uPlot.AlignedData = [xs, ys];
+    const options: SeriesSpanGapsOption[] = [{}];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    expect(result[1]).toEqual(ys);
+  });
+
+  it('converts nulls to undefined when spanGaps is true', () => {
+    const ys = [1, null, 2, null];
+    const data: uPlot.AlignedData = [xs, ys];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: true }];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    expect(result[1]).toEqual([1, undefined, 2, undefined]);
+  });
+
+  it('leaves data unchanged when spanGaps is false', () => {
+    const ys = [1, null, 2, null];
+    const data: uPlot.AlignedData = [xs, ys];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: false }];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    expect(result[1]).toEqual(ys);
+  });
+
+  it('inserts a null break point when a gap exceeds the numeric threshold', () => {
+    // gap 0→100 = 100 > 50 → null inserted at midpoint x=50
+    const data: uPlot.AlignedData = [
+      [0, 100, 110],
+      [1, 2, 3],
+    ];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 50 }];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    expect(result[0]).toEqual([0, 50, 100, 110]);
+    expect(result[1]).toEqual([1, null, 2, 3]);
+  });
+
+  it('returns original data when no gap exceeds the numeric threshold', () => {
+    // all gaps = 10, threshold = 25 → no insertions
+    const data: uPlot.AlignedData = [xs, [1, 2, 3, 4]];
+    const options: SeriesSpanGapsOption[] = [{ spanGaps: 25 }];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    expect(result).toBe(data);
+  });
+
+  it('applies both numeric gap insertion and boolean null-to-undefined in one pass', () => {
+    // series 0: spanGaps: 50 → gap 0→100 triggers a null break at midpoint x=50
+    // series 1: spanGaps: true → the inserted null at x=50 becomes undefined,
+    // so the line spans over it rather than breaking
+    const data: uPlot.AlignedData = [
+      [0, 100],
+      [1, 2],
+      [3, 4],
+    ];
+    const options: SeriesSpanGapsOption[] = [
+      { spanGaps: 50 },
+      { spanGaps: true },
+    ];
+
+    const result = applySpanGapsToAlignedData(data, options);
+
+    // x-axis extended with the inserted midpoint
+    expect(result[0]).toEqual([0, 50, 100]);
+    // series 0: null at midpoint breaks the line
+    expect(result[1]).toEqual([1, null, 2]);
+    // series 1: null at midpoint converted to undefined → line spans over it
+    expect(result[2]).toEqual([3, undefined, 4]);
+  });
+});
 });
```
```diff
@@ -0,0 +1,251 @@
+import type uPlot from 'uplot';
+
+import {
+  findNearestNonNull,
+  findSandwichedIndices,
+  isolatedPointFilter,
+} from '../seriesPointsFilter';
+
+// ---------------------------------------------------------------------------
+// Minimal uPlot stub — only the surface used by seriesPointsFilter
+// ---------------------------------------------------------------------------
+
+function makeUPlot({
+  xData,
+  yData,
+  idxs,
+  valToPosFn,
+  posToIdxFn,
+}: {
+  xData: number[];
+  yData: (number | null | undefined)[];
+  idxs?: [number, number];
+  valToPosFn?: (val: number) => number;
+  posToIdxFn?: (pos: number) => number;
+}): uPlot {
+  return ({
+    data: [xData, yData],
+    series: [{}, { idxs: idxs ?? [0, yData.length - 1] }],
+    valToPos: jest.fn((val: number) => (valToPosFn ? valToPosFn(val) : val)),
+    posToIdx: jest.fn((pos: number) =>
+      posToIdxFn ? posToIdxFn(pos) : Math.round(pos),
+    ),
+  } as unknown) as uPlot;
+}
+
+// ---------------------------------------------------------------------------
+// findNearestNonNull
+// ---------------------------------------------------------------------------
+
+describe('findNearestNonNull', () => {
+  it('returns the right neighbor when left side is null', () => {
+    const yData = [null, null, 42, null];
+    expect(findNearestNonNull(yData, 1)).toBe(2);
+  });
+
+  it('returns the left neighbor when right side is null', () => {
+    const yData = [null, 42, null, null];
+    expect(findNearestNonNull(yData, 2)).toBe(1);
+  });
+
+  it('prefers the right neighbor over the left when both exist at the same distance', () => {
+    const yData = [10, null, 20];
+    // j=1: right (idx 3) is out of bounds (undefined == null), left (idx 1) is null
+    // Actually right (idx 2) exists at j=1
+    expect(findNearestNonNull(yData, 1)).toBe(2);
+  });
+
+  it('returns approxIdx unchanged when no non-null value is found within 100 steps', () => {
+    const yData: (number | null)[] = Array(5).fill(null);
+    expect(findNearestNonNull(yData, 2)).toBe(2);
+  });
+
+  it('handles undefined values the same as null', () => {
+    const yData: (number | null | undefined)[] = [undefined, undefined, 99];
+    expect(findNearestNonNull(yData, 0)).toBe(2);
+  });
+});
+
+// ---------------------------------------------------------------------------
+// findSandwichedIndices
+// ---------------------------------------------------------------------------
+
+describe('findSandwichedIndices', () => {
+  it('returns empty array when no consecutive gaps share a pixel boundary', () => {
+    const gaps = [
+      [0, 10],
+      [20, 30],
+    ];
+    const yData = [1, null, null, 2];
+    const u = makeUPlot({ xData: [0, 1, 2, 3], yData });
+    expect(findSandwichedIndices(gaps, yData, u)).toEqual([]);
+  });
+
+  it('returns the index between two gaps that share a pixel boundary', () => {
+    // gaps[0] ends at 10, gaps[1] starts at 10 → sandwiched point at pixel 10
+    const gaps = [
+      [0, 10],
+      [10, 20],
+    ];
+    // posToIdx(10) → 2
+    const yData = [null, null, 5, null, null];
+    const u = makeUPlot({ xData: [0, 1, 2, 3, 4], yData, posToIdxFn: () => 2 });
+    expect(findSandwichedIndices(gaps, yData, u)).toEqual([2]);
+  });
+
+  it('scans to nearest non-null when posToIdx lands on a null', () => {
+    // posToIdx returns 2 which is null; nearest non-null is index 3
+    const gaps = [
+      [0, 10],
+      [10, 20],
+    ];
+    const yData = [null, null, null, 7, null];
+    const u = makeUPlot({ xData: [0, 1, 2, 3, 4], yData, posToIdxFn: () => 2 });
+    expect(findSandwichedIndices(gaps, yData, u)).toEqual([3]);
+  });
+
+  it('returns multiple indices when several gap pairs share boundaries', () => {
+    // Three consecutive gaps: [0,10], [10,20], [20,30]
+    // → two sandwiched points: between gaps 0-1 at px 10, between gaps 1-2 at px 20
+    const gaps = [
+      [0, 10],
+      [10, 20],
+      [20, 30],
+    ];
+    const yData = [null, 1, null, 2, null];
+    const u = makeUPlot({
+      xData: [0, 1, 2, 3, 4],
+      yData,
+      posToIdxFn: (pos) => (pos === 10 ? 1 : 3),
+    });
+    expect(findSandwichedIndices(gaps, yData, u)).toEqual([1, 3]);
+  });
+});
+
+// ---------------------------------------------------------------------------
+// isolatedPointFilter
+// ---------------------------------------------------------------------------
+
+describe('isolatedPointFilter', () => {
+  it('returns null when show is true (normal point rendering active)', () => {
+    const u = makeUPlot({ xData: [0, 1], yData: [1, null] });
+    expect(isolatedPointFilter(u, 1, true, [[0, 10]])).toBeNull();
+  });
+
+  it('returns null when gaps is null', () => {
+    const u = makeUPlot({ xData: [0, 1], yData: [1, null] });
+    expect(isolatedPointFilter(u, 1, false, null)).toBeNull();
+  });
+
+  it('returns null when gaps is empty', () => {
+    const u = makeUPlot({ xData: [0, 1], yData: [1, null] });
+    expect(isolatedPointFilter(u, 1, false, [])).toBeNull();
+  });
+
+  it('returns null when series idxs is undefined', () => {
+    const u = ({
+      data: [
+        [0, 1],
+        [1, null],
+      ],
+      series: [{}, { idxs: undefined }],
+      valToPos: jest.fn(() => 0),
+      posToIdx: jest.fn(() => 0),
+    } as unknown) as uPlot;
+    expect(isolatedPointFilter(u, 1, false, [[0, 10]])).toBeNull();
+  });
+
+  it('includes firstIdx when the first gap starts at the first data point pixel', () => {
+    // xData[firstIdx=0] → valToPos → 5; gaps[0][0] === 5 → isolated leading point
+    const xData = [0, 1, 2, 3, 4];
+    const yData = [10, null, null, null, 20];
+    const u = makeUPlot({
+      xData,
+      yData,
+      idxs: [0, 4],
+      valToPosFn: (val) => (val === 0 ? 5 : 40), // firstPos=5, lastPos=40
+    });
+    // gaps[0][0] === 5 (firstPos), gaps last end !== 40
+    const result = isolatedPointFilter(u, 1, false, [
+      [5, 15],
+      [20, 30],
+    ]);
+    expect(result).toContain(0); // firstIdx
+  });
+
+  it('includes lastIdx when the last gap ends at the last data point pixel', () => {
+    const xData = [0, 1, 2, 3, 4];
+    const yData = [10, null, null, null, 20];
+    const u = makeUPlot({
+      xData,
+      yData,
+      idxs: [0, 4],
+      valToPosFn: (val) => (val === 0 ? 5 : 40), // firstPos=5, lastPos=40
+    });
+    // gaps last end === 40 (lastPos), gaps[0][0] !== 5
+    const result = isolatedPointFilter(u, 1, false, [
+      [10, 20],
+      [30, 40],
+    ]);
+    expect(result).toContain(4); // lastIdx
+  });
+
+  it('includes sandwiched index between two gaps sharing a pixel boundary', () => {
+    const xData = [0, 1, 2, 3, 4];
+    const yData = [null, null, 5, null, null];
+    const u = makeUPlot({
+      xData,
+      yData,
+      idxs: [0, 4],
+      valToPosFn: () => 99, // firstPos/lastPos won't match gap boundaries
+      posToIdxFn: () => 2,
+    });
+    const result = isolatedPointFilter(u, 1, false, [
+      [0, 50],
+      [50, 100],
+    ]);
+    expect(result).toContain(2);
+  });
+
+  it('returns null when no isolated points are found', () => {
+    const xData = [0, 1, 2];
+    const yData = [1, 2, 3];
+    const u = makeUPlot({
+      xData,
+      yData,
+      idxs: [0, 2],
+      // firstPos = 10, lastPos = 30 — neither matches any gap boundary
+      valToPosFn: (val) => (val === 0 ? 10 : 30),
+    });
+    // gaps don't share boundaries and don't touch firstPos/lastPos
+    const result = isolatedPointFilter(u, 1, false, [
+      [0, 5],
+      [15, 20],
+    ]);
+    expect(result).toBeNull();
+  });
+
+  it('returns all three kinds of isolated points in one pass', () => {
+    // Leading (firstPos=0 === gaps[0][0]), sandwiched (gaps[1] and gaps[2] share 50),
+    // trailing (lastPos=100 === gaps last end)
+    const xData = [0, 1, 2, 3, 4];
+    const yData = [1, null, 2, null, 3];
+    const u = makeUPlot({
+      xData,
+      yData,
+      idxs: [0, 4],
+      valToPosFn: (val) => (val === 0 ? 0 : 100),
+      posToIdxFn: () => 2, // sandwiched point at idx 2
+    });
+    const gaps = [
+      [0, 20],
+      [40, 50],
+      [50, 80],
+      [90, 100],
+    ];
+    const result = isolatedPointFilter(u, 1, false, gaps);
+    expect(result).toContain(0); // leading
+    expect(result).toContain(2); // sandwiched
+    expect(result).toContain(4); // trailing
+  });
+});
```
@@ -51,3 +51,163 @@ export function normalizePlotValue(
|
||||
// Already a valid number
|
||||
return value as number;
|
||||
}
|
||||
|
||||
export interface SeriesSpanGapsOption {
|
||||
spanGaps?: boolean | number;
|
||||
}
|
||||
|
||||
// Internal type alias: a series value array that may contain nulls/undefineds.
|
||||
// uPlot uses null to draw a visible gap and undefined to represent "no sample"
|
||||
// (the line continues across undefined points but breaks at null ones).
|
||||
type SeriesArray = Array<number | null | undefined>;
|
||||
|
||||
/**
|
||||
* Returns true if the given gap size exceeds the numeric spanGaps threshold
|
||||
* of at least one series. Used to decide whether to insert a null break point.
|
||||
*/
|
||||
function gapExceedsThreshold(
|
||||
gapSize: number,
|
||||
seriesOptions: SeriesSpanGapsOption[],
|
||||
): boolean {
|
||||
return seriesOptions.some(
|
||||
({ spanGaps }) =>
|
||||
typeof spanGaps === 'number' && spanGaps > 0 && gapSize > spanGaps,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* For each series with a numeric spanGaps threshold, insert a null data point
|
||||
* between consecutive x timestamps whose gap exceeds the threshold.
|
||||
*
|
||||
* Why: uPlot draws a continuous line between all non-null points. When the
|
||||
* time gap between two consecutive samples is larger than the configured
|
||||
* spanGaps value, we inject a synthetic null at the midpoint so uPlot renders
|
||||
* a visible break instead of a misleading straight line across the gap.
|
||||
*
|
||||
* Because uPlot's AlignedData shares a single x-axis across all series, a null
|
||||
* is inserted for every series at each position where any series needs a break.
|
||||
*
|
||||
* Two-pass approach for performance:
|
||||
* Pass 1 — count how many nulls will be inserted (no allocations).
|
||||
* Pass 2 — fill pre-allocated output arrays by index (no push/reallocation).
|
||||
*/
|
||||
export function insertLargeGapNullsIntoAlignedData(
|
||||
data: uPlot.AlignedData,
|
||||
seriesOptions: SeriesSpanGapsOption[],
|
||||
): uPlot.AlignedData {
|
||||
const [xValues, ...seriesValues] = data;
|
||||
|
||||
if (
|
||||
!Array.isArray(xValues) ||
|
||||
xValues.length < 2 ||
|
||||
seriesValues.length === 0
|
||||
) {
|
||||
return data;
|
||||
}
|
||||
|
||||
const xs = xValues as number[];
|
||||
const n = xs.length;
|
||||
|
||||
// Pass 1: count insertions needed so we know the exact output length.
|
||||
// This lets us pre-allocate arrays rather than growing them dynamically.
|
||||
let insertionCount = 0;
|
||||
for (let i = 0; i < n - 1; i += 1) {
|
||||
if (gapExceedsThreshold(xs[i + 1] - xs[i], seriesOptions)) {
|
||||
insertionCount += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// No gaps exceed any threshold — return the original data unchanged.
|
||||
if (insertionCount === 0) {
|
||||
return data;
|
||||
}
|
||||
|
||||
// Pass 2: build output arrays of exact size and fill them.
|
||||
// `out` is the write cursor into the output arrays.
|
||||
const outputLen = n + insertionCount;
|
||||
const newX = new Array<number>(outputLen);
|
||||
const newSeries: SeriesArray[] = seriesValues.map(
|
||||
() => new Array<number | null | undefined>(outputLen),
|
||||
);
|
||||
|
||||
let out = 0;
|
||||
for (let i = 0; i < n; i += 1) {
|
||||
// Copy the real data point at position i
|
||||
newX[out] = xs[i];
|
||||
for (let s = 0; s < seriesValues.length; s += 1) {
|
||||
newSeries[s][out] = (seriesValues[s] as SeriesArray)[i];
|
||||
}
|
||||
out += 1;
|
||||
|
||||
// If the gap to the next x timestamp exceeds the threshold, insert a
|
||||
// synthetic null at the midpoint. The midpoint x is placed halfway
|
||||
// between xs[i] and xs[i+1] (minimum 1 unit past xs[i] to stay unique).
|
||||
if (i < n - 1 && gapExceedsThreshold(xs[i + 1] - xs[i], seriesOptions)) {
|
||||
newX[out] = xs[i] + Math.max(1, Math.floor((xs[i + 1] - xs[i]) / 2));
|
||||
for (let s = 0; s < seriesValues.length; s += 1) {
|
||||
newSeries[s][out] = null; // null tells uPlot to break the line here
|
||||
}
|
||||
out += 1;
|
||||
}
|
||||
}
|
||||
|
||||
return [newX, ...newSeries] as uPlot.AlignedData;
|
||||
}
|
||||
|
||||
/**
 * Apply per-series spanGaps (boolean | number) handling to an aligned dataset.
 *
 * spanGaps controls how uPlot handles gaps in a series:
 * - boolean true → convert null → undefined so uPlot spans over every gap
 *   (draws a continuous line, skipping missing samples)
 * - boolean false → no change; nulls render as visible breaks (default)
 * - number → insert a null break point between any two consecutive
 *   timestamps whose difference exceeds the threshold;
 *   gaps smaller than the threshold are left as-is
 *
 * The input data is expected to be of the form:
 * [xValues, series1Values, series2Values, ...]
 */
export function applySpanGapsToAlignedData(
  data: uPlot.AlignedData,
  seriesOptions: SeriesSpanGapsOption[],
): uPlot.AlignedData {
  const [xValues, ...seriesValues] = data;

  if (!Array.isArray(xValues) || seriesValues.length === 0) {
    return data;
  }

  // Numeric spanGaps: operates on the whole dataset at once because inserting
  // null break points requires modifying the shared x-axis.
  const hasNumericSpanGaps = seriesOptions.some(
    ({ spanGaps }) => typeof spanGaps === 'number',
  );
  const gapProcessed = hasNumericSpanGaps
    ? insertLargeGapNullsIntoAlignedData(data, seriesOptions)
    : data;

  // Boolean spanGaps === true: convert null → undefined per series so uPlot
  // draws a continuous line across missing samples instead of breaking it.
  // Skip this pass entirely if no series uses spanGaps: true.
  const hasBooleanTrue = seriesOptions.some(({ spanGaps }) => spanGaps === true);
  if (!hasBooleanTrue) {
    return gapProcessed;
  }

  const [newX, ...newSeries] = gapProcessed;
  const transformedSeries = newSeries.map((ys, idx) => {
    const { spanGaps } = seriesOptions[idx] ?? {};
    if (spanGaps !== true) {
      // This series doesn't use spanGaps: true — leave it unchanged.
      return ys;
    }
    // Replace null with undefined: uPlot skips undefined points without
    // breaking the line, effectively spanning over the gap.
    return (ys as SeriesArray).map((v) =>
      v === null ? undefined : v,
    ) as uPlot.AlignedData[0];
  });

  return [newX, ...transformedSeries] as uPlot.AlignedData;
}

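A hedged usage sketch of the whole function; the inputs below are illustrative values, one option object per y-series:

// Illustrative only: x in seconds, three y-series with gaps as nulls.
const xs = [0, 60, 600, 660];
const seriesA = [1, null, 2, 3];
const seriesB = [4, null, 5, 6];
const seriesC = [7, 8, 9, 10];

const processed = applySpanGapsToAlignedData(
  [xs, seriesA, seriesB, seriesC],
  [
    { spanGaps: true },  // nulls become undefined; the line spans every gap
    { spanGaps: false }, // nulls untouched; gaps render as visible breaks
    { spanGaps: 300 },   // gaps wider than 300 x-units get a null midpoint
  ],
);

Note that the numeric pass works on the shared x-axis: as the insertion pass above shows, the synthetic midpoint null is written into every series, and only afterwards does the boolean pass run per series on the result.
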
90 frontend/src/lib/uPlotV2/utils/seriesPointsFilter.ts Normal file
@@ -0,0 +1,90 @@
import uPlot from 'uplot';

/**
 * Scans outward from approxIdx to find the nearest non-null data index.
 * posToIdx can land on a null when pixel density exceeds 1 point-per-pixel.
 */
export function findNearestNonNull(
  yData: (number | null | undefined)[],
  approxIdx: number,
): number {
  for (let j = 1; j < 100; j++) {
    if (yData[approxIdx + j] != null) {
      return approxIdx + j;
    }
    if (yData[approxIdx - j] != null) {
      return approxIdx - j;
    }
  }
  return approxIdx;
}

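A quick illustration of the outward scan (values made up): the search checks idx+1 first, then idx-1, widening one step per iteration.

// Index 3 lands on a gap-null; j = 1 checks yData[4] = 12, which is
// non-null, so the scan returns 4 immediately.
const y = [10, 11, null, null, 12, 13];
findNearestNonNull(y, 3); // → 4
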
/**
 * Returns data indices of points sandwiched between two consecutive gaps that
 * share a pixel boundary — meaning a point (or cluster) is isolated between them.
 */
export function findSandwichedIndices(
  gaps: number[][],
  yData: (number | null | undefined)[],
  u: uPlot,
): number[] {
  const indices: number[] = [];
  for (let i = 0; i < gaps.length; i++) {
    const nextGap = gaps[i + 1];
    if (nextGap && gaps[i][1] === nextGap[0]) {
      const approxIdx = u.posToIdx(gaps[i][1], true);
      indices.push(
        yData[approxIdx] == null ? findNearestNonNull(yData, approxIdx) : approxIdx,
      );
    }
  }
  return indices;
}

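For intuition, the gaps argument is uPlot's per-series list of [startPx, endPx] canvas spans; two consecutive entries sharing a boundary pixel mean one rendered point sits exactly between them. A made-up shape (pixel values are illustrative, and a real call needs a live uPlot instance for posToIdx):

// gaps[0] ends where gaps[1] starts (pixel 220), so the data point
// rendered at pixel 220 is sandwiched and must be drawn explicitly.
const gaps: number[][] = [
  [140, 220],
  [220, 310],
];
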
/**
 * Points filter that shows data points isolated by gap-nulls (no connecting line).
 * Used when spanGaps threshold mode injects nulls around gaps — without this,
 * lone points become invisible because no line connects to them.
 *
 * Uses uPlot's gap pixel array rather than checking raw null neighbors in the
 * data array. Returns an array of data indices (not a bitmask); null = no points.
 */
// eslint-disable-next-line max-params
export function isolatedPointFilter(
  u: uPlot,
  seriesIdx: number,
  show: boolean,
  gaps?: null | number[][],
): number[] | null {
  if (show || !gaps || gaps.length === 0) {
    return null;
  }

  const idxs = u.series[seriesIdx].idxs;
  if (!idxs) {
    return null;
  }

  const [firstIdx, lastIdx] = idxs;
  const xData = u.data[0] as number[];
  const yData = u.data[seriesIdx] as (number | null | undefined)[];

  // valToPos with canvas=true matches the pixel space used by the gaps array.
  const firstPos = Math.round(u.valToPos(xData[firstIdx], 'x', true));
  const lastPos = Math.round(u.valToPos(xData[lastIdx], 'x', true));

  const filtered: number[] = [];

  if (gaps[0][0] === firstPos) {
    filtered.push(firstIdx);
  }

  filtered.push(...findSandwichedIndices(gaps, yData, u));

  if (gaps[gaps.length - 1][1] === lastPos) {
    filtered.push(lastIdx);
  }

  return filtered.length ? filtered : null;
}
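A sketch of how the filter would plug into a series definition. It matches the (u, seriesIdx, show, gaps) shape of uPlot's points.filter hook; the rest of the series config here is illustrative:

// Points stay hidden during normal line rendering (show: false), and the
// filter returns only the indices of gap-isolated points so they still draw.
const series: uPlot.Series = {
  stroke: 'red',
  points: {
    show: false,
    filter: isolatedPointFilter,
  },
};
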
@@ -141,6 +141,7 @@ export interface IBaseWidget {
  showPoints?: boolean;
  lineStyle?: LineStyle;
  fillMode?: FillMode;
  spanGaps?: boolean | number;
}
export interface Widgets extends IBaseWidget {
  query: Query;

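An illustrative widget fragment showing the shapes the new field accepts (the 300 is a made-up threshold in x-axis units, not a repository default):

// spanGaps: true spans every gap, false keeps visible breaks, and a
// number breaks the line only on gaps wider than the threshold.
const widget: Partial<IBaseWidget> = {
  showPoints: false,
  spanGaps: 300,
};
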
@@ -4506,6 +4506,19 @@
    "@radix-ui/react-use-callback-ref" "1.1.1"
    "@radix-ui/react-use-escape-keydown" "1.1.1"

"@radix-ui/react-dropdown-menu@^2.1.16":
  version "2.1.16"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.16.tgz#5ee045c62bad8122347981c479d92b1ff24c7254"
  integrity sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==
  dependencies:
    "@radix-ui/primitive" "1.1.3"
    "@radix-ui/react-compose-refs" "1.1.2"
    "@radix-ui/react-context" "1.1.2"
    "@radix-ui/react-id" "1.1.1"
    "@radix-ui/react-menu" "2.1.16"
    "@radix-ui/react-primitive" "2.1.3"
    "@radix-ui/react-use-controllable-state" "1.2.2"

"@radix-ui/react-focus-guards@1.0.0":
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.0.tgz#339c1c69c41628c1a5e655f15f7020bf11aa01fa"
@@ -4565,6 +4578,30 @@
  dependencies:
    "@radix-ui/react-use-layout-effect" "1.1.1"

"@radix-ui/react-menu@2.1.16":
  version "2.1.16"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-menu/-/react-menu-2.1.16.tgz#528a5a973c3a7413d3d49eb9ccd229aa52402911"
  integrity sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==
  dependencies:
    "@radix-ui/primitive" "1.1.3"
    "@radix-ui/react-collection" "1.1.7"
    "@radix-ui/react-compose-refs" "1.1.2"
    "@radix-ui/react-context" "1.1.2"
    "@radix-ui/react-direction" "1.1.1"
    "@radix-ui/react-dismissable-layer" "1.1.11"
    "@radix-ui/react-focus-guards" "1.1.3"
    "@radix-ui/react-focus-scope" "1.1.7"
    "@radix-ui/react-id" "1.1.1"
    "@radix-ui/react-popper" "1.2.8"
    "@radix-ui/react-portal" "1.1.9"
    "@radix-ui/react-presence" "1.1.5"
    "@radix-ui/react-primitive" "2.1.3"
    "@radix-ui/react-roving-focus" "1.1.11"
    "@radix-ui/react-slot" "1.2.3"
    "@radix-ui/react-use-callback-ref" "1.1.1"
    aria-hidden "^1.2.4"
    react-remove-scroll "^2.6.3"

"@radix-ui/react-popover@^1.1.15", "@radix-ui/react-popover@^1.1.2":
  version "1.1.15"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-popover/-/react-popover-1.1.15.tgz#9c852f93990a687ebdc949b2c3de1f37cdc4c5d5"
@@ -4804,6 +4841,20 @@
    "@radix-ui/react-roving-focus" "1.0.4"
    "@radix-ui/react-use-controllable-state" "1.0.1"

"@radix-ui/react-tabs@^1.1.3":
  version "1.1.13"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz#3537ce379d7e7ff4eeb6b67a0973e139c2ac1f15"
  integrity sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==
  dependencies:
    "@radix-ui/primitive" "1.1.3"
    "@radix-ui/react-context" "1.1.2"
    "@radix-ui/react-direction" "1.1.1"
    "@radix-ui/react-id" "1.1.1"
    "@radix-ui/react-presence" "1.1.5"
    "@radix-ui/react-primitive" "2.1.3"
    "@radix-ui/react-roving-focus" "1.1.11"
    "@radix-ui/react-use-controllable-state" "1.2.2"

"@radix-ui/react-toggle-group@^1.1.7":
  version "1.1.11"
  resolved "https://registry.yarnpkg.com/@radix-ui/react-toggle-group/-/react-toggle-group-1.1.11.tgz#e513d6ffdb07509b400ab5b26f2523747c0d51c1"
@@ -5675,6 +5726,42 @@
    tailwind-merge "^2.5.2"
    tailwindcss-animate "^1.0.7"

"@signozhq/ui@0.0.4":
  version "0.0.4"
  resolved "https://registry.yarnpkg.com/@signozhq/ui/-/ui-0.0.4.tgz#2a9c403900311298d881ca9feb6245d94ca0aa0e"
  integrity sha512-ViiLsAciCzUgHCu3uDCOEMmjE6OkLpA2g8xvjjbbzi4XFosYBhEASx1Pf3a4f5wlh/JID7z12DOc5YnjzEcn4Q==
  dependencies:
    "@radix-ui/react-checkbox" "^1.2.3"
    "@radix-ui/react-dialog" "^1.1.11"
    "@radix-ui/react-dropdown-menu" "^2.1.16"
    "@radix-ui/react-icons" "^1.3.0"
    "@radix-ui/react-popover" "^1.1.15"
    "@radix-ui/react-radio-group" "^1.3.4"
    "@radix-ui/react-slot" "^1.2.3"
    "@radix-ui/react-switch" "^1.1.4"
    "@radix-ui/react-tabs" "^1.1.3"
    "@radix-ui/react-toggle" "^1.1.6"
    "@radix-ui/react-toggle-group" "^1.1.7"
    "@radix-ui/react-tooltip" "^1.2.6"
    "@tanstack/react-table" "^8.21.3"
    "@tanstack/react-virtual" "^3.13.9"
    "@types/lodash-es" "^4.17.12"
    class-variance-authority "^0.7.0"
    clsx "^2.1.1"
    cmdk "^1.1.1"
    date-fns "^4.1.0"
    dayjs "^1.11.10"
    lodash-es "^4.17.21"
    lucide-react "^0.445.0"
    lucide-solid "^0.510.0"
    motion "^11.11.17"
    next-themes "^0.4.6"
    nuqs "^2.8.9"
    react-day-picker "^9.8.1"
    react-resizable-panels "^4.7.1"
    sonner "^2.0.7"
    tailwind-merge "^3.5.0"

"@sinclair/typebox@^0.25.16":
  version "0.25.24"
  resolved "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz"
@@ -9573,6 +9660,11 @@ dayjs@^1.10.7, dayjs@^1.11.1:
  resolved "https://registry.npmjs.org/dayjs/-/dayjs-1.11.7.tgz"
  integrity sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ==

dayjs@^1.11.10:
  version "1.11.20"
  resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.20.tgz#88d919fd639dc991415da5f4cb6f1b6650811938"
  integrity sha512-YbwwqR/uYpeoP4pu043q+LTDLFBLApUP6VxRihdfNTqu4ubqMlGDLd6ErXhEgsyvY0K6nCs7nggYumAN+9uEuQ==

debounce@^1.2.1:
  version "1.2.1"
  resolved "https://registry.yarnpkg.com/debounce/-/debounce-1.2.1.tgz#38881d8f4166a5c5848020c11827b834bcb3e0a5"
@@ -11092,6 +11184,15 @@ fraction.js@^4.3.7:
  resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.3.7.tgz#06ca0085157e42fda7f9e726e79fefc4068840f7"
  integrity sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==

framer-motion@^11.18.2:
  version "11.18.2"
  resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-11.18.2.tgz#0c6bd05677f4cfd3b3bdead4eb5ecdd5ed245718"
  integrity sha512-5F5Och7wrvtLVElIpclDT0CBzMVg3dL22B64aZwHtsIY8RB4mXICLrkajK4G9R+ieSAGcgrLeae2SeUTg2pr6w==
  dependencies:
    motion-dom "^11.18.1"
    motion-utils "^11.18.1"
    tslib "^2.4.0"

framer-motion@^12.4.13:
  version "12.4.13"
  resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-12.4.13.tgz#1efd954f95e6a54685b660929c00f5a61e35256a"
@@ -15002,6 +15103,13 @@ moment@^2.29.4:
  resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.4.tgz#3dbe052889fe7c1b2ed966fcb3a77328964ef108"
  integrity sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==

motion-dom@^11.18.1:
  version "11.18.1"
  resolved "https://registry.yarnpkg.com/motion-dom/-/motion-dom-11.18.1.tgz#e7fed7b7dc6ae1223ef1cce29ee54bec826dc3f2"
  integrity sha512-g76KvA001z+atjfxczdRtw/RXOM3OMSdd1f4DL77qCTF/+avrRJiawSG4yDibEQ215sr9kpinSlX2pCTJ9zbhw==
  dependencies:
    motion-utils "^11.18.1"

motion-dom@^12.4.11:
  version "12.4.11"
  resolved "https://registry.yarnpkg.com/motion-dom/-/motion-dom-12.4.11.tgz#0419c8686cda4d523f08249deeb8fa6683a9b9d3"
@@ -15009,6 +15117,11 @@ motion-dom@^12.4.11:
  dependencies:
    motion-utils "^12.4.10"

motion-utils@^11.18.1:
  version "11.18.1"
  resolved "https://registry.yarnpkg.com/motion-utils/-/motion-utils-11.18.1.tgz#671227669833e991c55813cf337899f41327db5b"
  integrity sha512-49Kt+HKjtbJKLtgO/LKj9Ld+6vw9BjH5d9sc40R/kVyH8GLAXgT42M2NnuPcJNuA3s9ZfZBUcwIgpmZWGEE+hA==

motion-utils@^12.4.10:
  version "12.4.10"
  resolved "https://registry.yarnpkg.com/motion-utils/-/motion-utils-12.4.10.tgz#3d93acea5454419eaaad8d5e5425cb71cbfa1e7f"
@@ -15022,6 +15135,14 @@ motion@12.4.13:
    framer-motion "^12.4.13"
    tslib "^2.4.0"

motion@^11.11.17:
  version "11.18.2"
  resolved "https://registry.yarnpkg.com/motion/-/motion-11.18.2.tgz#17fb372f3ed94fc9ee1384a25a9068e9da1951e7"
  integrity sha512-JLjvFDuFr42NFtcVoMAyC2sEjnpA8xpy6qWPyzQvCloznAyQ8FIXioxWfHiLtgYhoVpfUqSWpn1h9++skj9+Wg==
  dependencies:
    framer-motion "^11.18.2"
    tslib "^2.4.0"

mri@^1.1.0:
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/mri/-/mri-1.2.0.tgz#6721480fec2a11a4889861115a48b6cbe7cc8f0b"
@@ -15292,6 +15413,13 @@ nuqs@2.8.8:
  dependencies:
    "@standard-schema/spec" "1.0.0"

nuqs@^2.8.9:
  version "2.8.9"
  resolved "https://registry.yarnpkg.com/nuqs/-/nuqs-2.8.9.tgz#e2c27d87c0dd0e3b4412fe867bcd0947cc4c998f"
  integrity sha512-8ou6AEwsxMWSYo2qkfZtYFVzngwbKmg4c00HVxC1fF6CEJv3Fwm6eoZmfVPALB+vw8Udo7KL5uy96PFcYe1BIQ==
  dependencies:
    "@standard-schema/spec" "1.0.0"

nwsapi@^2.2.2:
  version "2.2.23"
  resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.23.tgz#59712c3a88e6de2bb0b6ccc1070397267019cf6c"
@@ -16957,6 +17085,11 @@ react-resizable-panels@^3.0.5:
  resolved "https://registry.yarnpkg.com/react-resizable-panels/-/react-resizable-panels-3.0.5.tgz#50a20645263eed02344de4a70d1319bbc0014bbd"
  integrity sha512-3z1yN25DMTXLg2wfyFrW32r5k4WEcUa3F7cJ2EgtNK07lnOs4mpM8yWLGunCpkhcQRwJX4fqoLcIh/pHPxzlmQ==

react-resizable-panels@^4.7.1:
  version "4.7.3"
  resolved "https://registry.yarnpkg.com/react-resizable-panels/-/react-resizable-panels-4.7.3.tgz#4040aa0f5c5c4cc4bb685cb69973601ccda3b014"
  integrity sha512-PYcYMLtvJD+Pr0TQNeMvddcnLOwUa/Yb4iNwU7ThNLlHaQYEEC9MIBWHaBGODzYuXIkPRZ/OWe5sbzG1Rzq5ew==

react-resizable@3.0.4:
  version "3.0.4"
  resolved "https://registry.npmjs.org/react-resizable/-/react-resizable-3.0.4.tgz"
@@ -18797,6 +18930,11 @@ tailwind-merge@^2.5.2:
  resolved "https://registry.yarnpkg.com/tailwind-merge/-/tailwind-merge-2.6.0.tgz#ac5fb7e227910c038d458f396b7400d93a3142d5"
  integrity sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA==

tailwind-merge@^3.5.0:
  version "3.5.0"
  resolved "https://registry.yarnpkg.com/tailwind-merge/-/tailwind-merge-3.5.0.tgz#06502f4496ba15151445d97d916a26564d50d1ca"
  integrity sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==

tailwindcss-animate@^1.0.7:
  version "1.0.7"
  resolved "https://registry.yarnpkg.com/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz#318b692c4c42676cc9e67b19b78775742388bef4"

@@ -27,12 +27,7 @@ type OrgConfig struct {
}

type PasswordConfig struct {
  Invite InviteConfig `mapstructure:"invite"`
  Reset ResetConfig `mapstructure:"reset"`
}

type InviteConfig struct {
  MaxTokenLifetime time.Duration `mapstructure:"max_token_lifetime"`
  Reset ResetConfig `mapstructure:"reset"`
}

type ResetConfig struct {
@@ -51,9 +46,6 @@ func newConfig() factory.Config {
        AllowSelf: false,
        MaxTokenLifetime: 6 * time.Hour,
      },
      Invite: InviteConfig{
        MaxTokenLifetime: 48 * time.Hour,
      },
    },
    Root: RootConfig{
      Enabled: false,
@@ -69,10 +61,6 @@ func (c Config) Validate() error {
    return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::reset::max_token_lifetime must be positive")
  }

  if c.Password.Invite.MaxTokenLifetime <= 0 {
    return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::invite::max_token_lifetime must be positive")
  }

  if c.Root.Enabled {
    if c.Root.Email.IsZero() {
      return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::email is required when root user is enabled")

@@ -203,7 +203,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID

  resetLink := userWithToken.ResetPasswordToken.FactorPasswordResetLink(frontendBaseUrl)

  tokenLifetime := m.config.Password.Invite.MaxTokenLifetime
  tokenLifetime := m.config.Password.Reset.MaxTokenLifetime
  humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))

  if err := m.emailing.SendHTML(ctx, userWithToken.User.Email.String(), "You're Invited to Join SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{
@@ -460,11 +460,7 @@ func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID
  }

  // create a new token
  tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
  if user.Status == types.UserStatusPendingInvite {
    tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
  }
  resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(tokenLifetime))
  resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(module.config.Password.Reset.MaxTokenLifetime))
  if err != nil {
    return nil, err
  }
@@ -504,9 +500,6 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
  resetLink := token.FactorPasswordResetLink(frontendBaseURL)

  tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
  if user.Status == types.UserStatusPendingInvite {
    tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
  }
  humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))

  if err := module.emailing.SendHTML(

@@ -69,7 +69,6 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
    key string // deterministic join of label values
  }
  seriesMap := map[sKey]*qbtypes.TimeSeries{}
  var keyOrder []sKey // preserves ClickHouse row-arrival order

  stepMs := uint64(step.Duration.Milliseconds())

@@ -220,7 +219,6 @@
    if !ok {
      series = &qbtypes.TimeSeries{Labels: lblObjs}
      seriesMap[key] = series
      keyOrder = append(keyOrder, key)
    }
    series.Values = append(series.Values, &qbtypes.TimeSeriesValue{
      Timestamp: ts,
@@ -252,8 +250,8 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
      Alias: "__result_" + strconv.Itoa(i),
    }
  }
  for _, k := range keyOrder {
    buckets[k.agg].Series = append(buckets[k.agg].Series, seriesMap[k])
  for k, s := range seriesMap {
    buckets[k.agg].Series = append(buckets[k.agg].Series, s)
  }

  var nonEmpty []*qbtypes.AggregationBucket

@@ -185,6 +185,22 @@ func postProcessMetricQuery(
  query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
  req *qbtypes.QueryRangeRequest,
) *qbtypes.Result {

  config := query.Aggregations[0]
  spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
  timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
  timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)

  for idx := range query.Order {
    if query.Order[idx].Key.Name == spaceAggOrderBy ||
      query.Order[idx].Key.Name == timeAggOrderBy ||
      query.Order[idx].Key.Name == timeSpaceAggOrderBy {
      query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey
    }
  }

  result = q.applySeriesLimit(result, query.Limit, query.Order)

  if len(query.Functions) > 0 {
    step := query.StepInterval.Duration.Milliseconds()
    functions := q.prepareFillZeroArgsWithStep(query.Functions, req, step)

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
      },
    },
    expected: qbtypes.Statement{
      Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
      Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
      Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
    },
    expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
      },
    },
    expected: qbtypes.Statement{
      Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
      Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
      Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
    },
    expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
      },
    },
    expected: qbtypes.Statement{
      Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
      Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
      Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
    },
    expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
      },
    },
    expected: qbtypes.Statement{
      Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
      Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
      Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
    },
    expectedErr: nil,

@@ -4,7 +4,6 @@ import (
  "context"
  "fmt"
  "log/slog"
  "strings"

  "github.com/SigNoz/signoz/pkg/factory"
  "github.com/SigNoz/signoz/pkg/flagger"
@@ -547,16 +546,6 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
) (*qbtypes.Statement, error) {
  metricType := query.Aggregations[0].Type
  spaceAgg := query.Aggregations[0].SpaceAggregation
  finalCTE := "__spatial_aggregation_cte"
  if metricType == metrictypes.HistogramType {
    histogramCTE, histogramCTEArgs, err := b.buildHistogramCTE(query)
    if err != nil {
      return nil, err
    }
    cteFragments = append(cteFragments, histogramCTE)
    cteArgs = append(cteArgs, histogramCTEArgs)
    finalCTE = "__histogram_cte"
  }

  combined := querybuilder.CombineCTEs(cteFragments)

@@ -566,104 +555,60 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
  }

  sb := sqlbuilder.NewSelectBuilder()
  sb.Select("*")
  sb.From(finalCTE)
  if query.Having != nil && query.Having.Expression != "" {
    rewriter := querybuilder.NewHavingExpressionRewriter()
    rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
    sb.Where(rewrittenExpr)
  }

  groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
  hasOrder := len(query.Order) > 0
  hasLimit := query.Limit > 0
  hasGroupBy := len(groupByKeys) > 0

  isMetricAggOrderByKey := func(key string, config qbtypes.MetricAggregation) bool {
    spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
    timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
    timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
    return key == spaceAggOrderBy || key == timeAggOrderBy || key == timeSpaceAggOrderBy
  }

  if !hasGroupBy {
    // do nothing, limits and orders don't mean anything
  } else if hasOrder && hasLimit {
    labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
    labelSelectorSubQueryBuilder.Select(groupByKeys...)
    labelSelectorSubQueryBuilder.From(finalCTE)
    labelSelectorOrderClauses := []string{}
    orderedKeys := map[string]struct{}{} // this will be used to add the remaining keys as tie breakers in the end
    for _, o := range query.Order {
      key := o.Key.Name
      var clause string
      if isMetricAggOrderByKey(key, query.Aggregations[0]) {
        clause = fmt.Sprintf("avg(value) %s", o.Direction.StringValue())
      } else {
        clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
        orderedKeys[fmt.Sprintf("`%s`", key)] = struct{}{}
      }
      labelSelectorOrderClauses = append(labelSelectorOrderClauses, clause)
  if metricType == metrictypes.HistogramType && spaceAgg.IsPercentile() {
    quantile := query.Aggregations[0].SpaceAggregation.Percentile()
    sb.Select("ts")
    for _, g := range query.GroupBy {
      sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
    }
    for _, gk := range groupByKeys { // keys that haven't been added via order by keys will be added at the end as tie breakers
      if _, ok := orderedKeys[gk]; !ok {
        labelSelectorOrderClauses = append(labelSelectorOrderClauses, fmt.Sprintf("%s ASC", gk))
      }
    sb.SelectMore(fmt.Sprintf(
      "histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
      quantile,
    ))
    sb.From("__spatial_aggregation_cte")
    sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
    sb.GroupBy("ts")
    if query.Having != nil && query.Having.Expression != "" {
      rewriter := querybuilder.NewHavingExpressionRewriter()
      rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
      sb.Having(rewrittenExpr)
    }
    labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
    labelSelectorSubQueryBuilder.OrderBy(labelSelectorOrderClauses...)
    labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
    labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)
  } else if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
    sb.Select("ts")

    sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
    for _, o := range query.Order {
      key := o.Key.Name
      var clause string
      if isMetricAggOrderByKey(key, query.Aggregations[0]) {
        clause = fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue())
      } else {
        clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
      }
      sb.OrderBy(clause)
    for _, g := range query.GroupBy {
      sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
    }
  } else if hasOrder {
    // order by without limit: apply order by clauses directly
    for _, o := range query.Order {
      key := o.Key.Name
      if isMetricAggOrderByKey(key, query.Aggregations[0]) {
        sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue()))
        continue
      }
      sb.OrderBy(fmt.Sprintf("`%s` %s", o.Key.Name, o.Direction.StringValue()))
    }
  } else if hasLimit {
    labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
    labelSelectorSubQueryBuilder.Select(groupByKeys...)
    labelSelectorSubQueryBuilder.From(finalCTE)
    labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
    labelSelectorSubQueryBuilder.OrderBy("avg(value) DESC")
    labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
    labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)

    sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
    sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
    aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
    if err != nil {
      return nil, err
    }
    sb.SelectMore(aggQuery)

    sb.From("__spatial_aggregation_cte")

    sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
    sb.GroupBy("ts")

    if query.Having != nil && query.Having.Expression != "" {
      rewriter := querybuilder.NewHavingExpressionRewriter()
      rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
      sb.Having(rewrittenExpr)
    }
  } else {
    // grouping without order by or limit: sort by avg(value) DESC with labels as tiebreakers
    sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
  }

  // add any group-by keys not already in the order-by as tiebreakers
  orderKeySet := make(map[string]struct{})
  for _, o := range query.Order {
    orderKeySet[fmt.Sprintf("`%s`", o.Key.Name)] = struct{}{}
  }
  for _, g := range groupByKeys {
    if _, exists := orderKeySet[g]; !exists {
      sb.OrderBy(g)
    // for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
    sb.Select("*")
    sb.From("__spatial_aggregation_cte")
    if query.Having != nil && query.Having.Expression != "" {
      rewriter := querybuilder.NewHavingExpressionRewriter()
      rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
      sb.Where(rewrittenExpr)
    }
  }

  sb.OrderBy("ts ASC")
  sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
  sb.OrderBy("ts")
  if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
    sb.OrderBy("toFloat64(le)")
  }
@@ -671,45 +616,3 @@
  q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
  return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
}

func (b *MetricQueryStatementBuilder) buildHistogramCTE(
  query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
) (string, []any, error) {
  spaceAgg := query.Aggregations[0].SpaceAggregation
  histogramCTEQueryBuilder := sqlbuilder.NewSelectBuilder()
  if spaceAgg.IsPercentile() {
    histogramCTEQueryBuilder.Select("ts")
    for _, g := range query.GroupBy {
      histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
    }
    quantile := spaceAgg.Percentile()
    histogramCTEQueryBuilder.SelectMore(fmt.Sprintf(
      "histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
      quantile,
    ))
    histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
    histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
    histogramCTEQueryBuilder.GroupBy("ts")
  } else if spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
    histogramCTEQueryBuilder.Select("ts")
    for _, g := range query.GroupBy {
      histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
    }
    aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
    if err != nil {
      return "", nil, err
    }
    histogramCTEQueryBuilder.SelectMore(aggQuery)
    histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
    histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
    histogramCTEQueryBuilder.GroupBy("ts")
  } else {
    // for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
    histogramCTEQueryBuilder.Select("*")
    histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
  }
  histogramQueryCTE, histogramQueryCTEArgs := histogramCTEQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
  histogramCTE := fmt.Sprintf("__histogram_cte AS (%s)", histogramQueryCTE)

  return histogramCTE, histogramQueryCTEArgs, nil
}

@@ -15,17 +15,16 @@ import (
)

func TestStatementBuilder(t *testing.T) {
  type baseQuery struct {
    name string
    query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
    orderKey string
    args []any
    cte string
  }

  bases := []baseQuery{
  cases := []struct {
    name string
    requestType qbtypes.RequestType
    query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
    expected qbtypes.Statement
    expectedErr error
  }{
    {
      name: "cumulative_rate_sum",
      name: "test_cumulative_rate_sum",
      requestType: qbtypes.RequestTypeTimeSeries,
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -41,16 +40,24 @@ func TestStatementBuilder(t *testing.T) {
        Filter: &qbtypes.Filter{
          Expression: "service.name = 'cartservice'",
        },
        Limit: 10,
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
          {
            TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
              Name: "service.name",
            },
          },
        },
      },
      orderKey: "service.name",
      args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
      cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
      expected: qbtypes.Statement{
        Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
        Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
      },
      expectedErr: nil,
    },
    {
      name: "cumulative_rate_sum_with_mat_column",
      name: "test_cumulative_rate_sum_with_mat_column",
      requestType: qbtypes.RequestTypeTimeSeries,
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -66,16 +73,24 @@ func TestStatementBuilder(t *testing.T) {
        Filter: &qbtypes.Filter{
          Expression: "materialized.key.name REGEXP 'cartservice' OR service.name = 'cartservice'",
        },
        Limit: 10,
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
          {
            TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
              Name: "service.name",
            },
          },
        },
      },
      orderKey: "service.name",
      args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
      cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
      expected: qbtypes.Statement{
        Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
        Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
      },
      expectedErr: nil,
    },
    {
      name: "delta_rate_sum",
      name: "test_delta_rate_sum",
      requestType: qbtypes.RequestTypeTimeSeries,
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -91,16 +106,24 @@ func TestStatementBuilder(t *testing.T) {
        Filter: &qbtypes.Filter{
          Expression: "service.name = 'cartservice'",
        },
        Limit: 10,
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
          {
            TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
              Name: "service.name",
            },
          },
        },
      },
      orderKey: "service.name",
      args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
      cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`)",
      expected: qbtypes.Statement{
        Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
        Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
      },
      expectedErr: nil,
    },
    {
      name: "histogram_percentile1",
      name: "test_histogram_percentile1",
      requestType: qbtypes.RequestTypeTimeSeries,
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -116,38 +139,24 @@ func TestStatementBuilder(t *testing.T) {
        Filter: &qbtypes.Filter{
          Expression: "service.name = 'cartservice'",
        },
        Limit: 10,
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
        },
      },
      orderKey: "service.name",
      args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
      cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
    },
    {
      name: "histogram_percentile2",
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName: "http_server_duration_bucket",
            Type: metrictypes.HistogramType,
            Temporality: metrictypes.Cumulative,
            TimeAggregation: metrictypes.TimeAggregationRate,
            SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
            TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
              Name: "service.name",
            },
          },
        },
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
        },
      },
      orderKey: "service.name",
      args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
      cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
      expected: qbtypes.Statement{
        Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
        Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
      },
      expectedErr: nil,
    },
    {
      name: "gauge_avg_sum",
      name: "test_gauge_avg_sum",
      requestType: qbtypes.RequestTypeTimeSeries,
      query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Signal: telemetrytypes.SignalMetrics,
        StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -163,81 +172,51 @@ func TestStatementBuilder(t *testing.T) {
        Filter: &qbtypes.Filter{
          Expression: "host.name = 'big-data-node-1'",
        },
        Limit: 10,
        GroupBy: []qbtypes.GroupByKey{
          {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "host.name"}},
          {
            TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
              Name: "host.name",
            },
          },
        },
      },
      orderKey: "host.name",
      args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
      cte: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`)",
      expected: qbtypes.Statement{
        Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
        Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
      },
      expectedErr: nil,
    },
    {
      name: "test_histogram_percentile2",
      requestType: qbtypes.RequestTypeTimeSeries,
|
||||
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "http_server_duration_bucket",
|
||||
Type: metrictypes.HistogramType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
|
||||
},
|
||||
},
|
||||
Limit: 10,
|
||||
GroupBy: []qbtypes.GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
|
||||
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
type variant struct {
|
||||
name string
|
||||
limit int
|
||||
hasOrder bool
|
||||
}
|
||||
|
||||
variants := []variant{
|
||||
{"with_limits", 10, false},
|
||||
{"without_limits", 0, false},
|
||||
{"with_order_by", 0, true},
|
||||
{"with_order_by_and_limits", 10, true},
|
||||
}
|
||||
|
||||
sumMetricsFinalSelects := map[string]string{
|
||||
"with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
|
||||
"with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
|
||||
}
|
||||
|
||||
histogramMetricsFinalSelects := map[string]string{
|
||||
"with_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"without_limits": " SELECT * FROM __histogram_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"with_order_by": " SELECT * FROM __histogram_cte ORDER BY `service.name` asc, ts ASC",
|
||||
"with_order_by_and_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
|
||||
}
|
||||
|
||||
	// expectedFinalSelects maps "base/variant" to the final SELECT portion after the CTE.
	// The full expected query is: base.cte + expectedFinalSelects[name]
	expectedFinalSelects := map[string]string{
		// cumulative_rate_sum
		"cumulative_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
		"cumulative_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
		"cumulative_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
		"cumulative_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],

		// cumulative_rate_sum_with_mat_column
		"cumulative_rate_sum_with_mat_column/with_limits": sumMetricsFinalSelects["with_limits"],
		"cumulative_rate_sum_with_mat_column/without_limits": sumMetricsFinalSelects["without_limits"],
		"cumulative_rate_sum_with_mat_column/with_order_by": sumMetricsFinalSelects["with_order_by"],
		"cumulative_rate_sum_with_mat_column/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],

		// delta_rate_sum
		"delta_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
		"delta_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
		"delta_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
		"delta_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],

		// histogram_percentile1
		"histogram_percentile1/with_limits": histogramMetricsFinalSelects["with_limits"],
		"histogram_percentile1/without_limits": histogramMetricsFinalSelects["without_limits"],
		"histogram_percentile1/with_order_by": histogramMetricsFinalSelects["with_order_by"],
		"histogram_percentile1/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],

		// histogram_percentile2
		"histogram_percentile2/with_limits": histogramMetricsFinalSelects["with_limits"],
		"histogram_percentile2/without_limits": histogramMetricsFinalSelects["without_limits"],
		"histogram_percentile2/with_order_by": histogramMetricsFinalSelects["with_order_by"],
		"histogram_percentile2/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],

		// gauge_avg_sum
		"gauge_avg_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
		"gauge_avg_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
		"gauge_avg_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC",
		"gauge_avg_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY `host.name` asc LIMIT 10) ORDER BY `host.name` asc, ts ASC",
	}

	fm := NewFieldMapper()
@@ -248,13 +227,15 @@ func TestStatementBuilder(t *testing.T) {
		t.Fatalf("failed to load field keys: %v", err)
	}
	mockMetadataStore.KeysMap = keys
	// NOTE: LoadFieldKeysFromJSON doesn't set Materialized field
	// for keys, so we have to set it manually here for testing
	if _, ok := mockMetadataStore.KeysMap["materialized.key.name"]; ok {
		if len(mockMetadataStore.KeysMap["materialized.key.name"]) > 0 {
			mockMetadataStore.KeysMap["materialized.key.name"][0].Materialized = true
		}
	}

	fl, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
	flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
	if err != nil {
		t.Fatalf("failed to create flagger: %v", err)
	}
@@ -264,30 +245,23 @@ func TestStatementBuilder(t *testing.T) {
		mockMetadataStore,
		fm,
		cb,
		fl,
		flagger,
	)

	for _, b := range bases {
		for _, v := range variants {
			name := b.name + "/" + v.name
			t.Run(name, func(t *testing.T) {
				q := b.query
				q.Limit = v.limit
				if v.hasOrder {
					q.Order = []qbtypes.OrderBy{
						{
							Key: qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: b.orderKey}},
							Direction: qbtypes.OrderDirectionAsc,
						},
					}
				}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {

				result, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, qbtypes.RequestTypeTimeSeries, q, nil)
			q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)

			if c.expectedErr != nil {
				require.Error(t, err)
				require.Contains(t, err.Error(), c.expectedErr.Error())
			} else {
				require.NoError(t, err)
					require.Equal(t, b.cte+expectedFinalSelects[name], result.Query)
					require.Equal(t, b.args, result.Args)
				})
			}
				require.Equal(t, c.expected.Query, q.Query)
				require.Equal(t, c.expected.Args, q.Args)
				require.Equal(t, c.expected.Warnings, q.Warnings)
			}
		})
	}
}

@@ -58,8 +58,6 @@ def build_builder_query(
    step_interval: int = DEFAULT_STEP_INTERVAL,
    group_by: Optional[List[str]] = None,
    filter_expression: Optional[str] = None,
    order_by: Optional[List[Dict]] = None,
    limit: Optional[int] = None,
    functions: Optional[List[Dict]] = None,
    disabled: bool = False,
) -> Dict:
@@ -95,12 +93,6 @@ def build_builder_query(
    if filter_expression:
        spec["filter"] = {"expression": filter_expression}

    if order_by:
        spec["order"] = order_by

    if limit is not None:
        spec["limit"] = limit

    if functions:
        spec["functions"] = functions

@@ -2,20 +2,16 @@
Look at the cumulative_counters_1h.jsonl file for the relevant data
"""

import logging
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List, Optional, Union

import pytest
from typing import Any, Callable, List

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
@@ -75,200 +71,16 @@ def test_rate_with_steady_values_and_reset(
            assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"


def _assert_endpoint_rate_values(endpoint_values: dict) -> None:
    # /health: 60 data points (t01-t60), steady +10/min
    # rate = 10/60 = 0.167
    if "/health" in endpoint_values:
        health_values = endpoint_values["/health"]
        assert (
            len(health_values) >= 58
        ), f"Expected >= 58 values for /health, got {len(health_values)}"
        count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
        assert (
            count_steady_health >= 57
        ), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
        # all /health rates should be 0.167 except possibly first/last due to boundaries
        for v in health_values[1:-1]:
            assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    # rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
    if "/products" in endpoint_values:
        products_values = endpoint_values["/products"]
        assert (
            len(products_values) >= 49
        ), f"Expected >= 49 values for /products, got {len(products_values)}"
        count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
        # most values should be 0.333, some boundary values differ due to 10-min gap
        assert (
            count_steady_products >= 46
        ), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
        # check that non-0.333 values are due to gap averaging (should be lower)
        gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
        for val in gap_boundary_values:
            assert (
                0 < val < 0.333
            ), f"Gap boundary values should be between 0 and 0.333, got {val}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    # normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
    if "/checkout" in endpoint_values:
        checkout_values = endpoint_values["/checkout"]
        assert (
            len(checkout_values) >= 59
        ), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
        count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
        assert (
            count_steady_checkout >= 53
        ), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
        # check that spike values exist (traffic spike +50/min at t40-t44)
        count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
        assert (
            count_spike_checkout >= 4
        ), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
        # spike values should be consecutive
        spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
        ]
        assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
        for i in range(1, len(spike_indices)):
            assert (
                spike_indices[i] == spike_indices[i - 1] + 1
            ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # rate = 5/60 = 0.0833
    # reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
    if "/orders" in endpoint_values:
        orders_values = endpoint_values["/orders"]
        assert (
            len(orders_values) >= 58
        ), f"Expected >= 58 values for /orders, got {len(orders_values)}"
        count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
        assert (
            count_steady_orders >= 55
        ), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
        # check for counter reset effects - there should be some non-standard values
        non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
        assert (
            len(non_standard_orders) >= 2
        ), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
        # post-reset value should be higher (new counter value / interval)
        high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
        assert (
            len(high_rate_orders) >= 1
        ), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes
    # Rate = 1/60 = 0.0167 during increment, 0 during flat periods
    if "/users" in endpoint_values:
        users_values = endpoint_values["/users"]
        assert (
            len(users_values) >= 54
        ), f"Expected >= 54 values for /users, got {len(users_values)}"
        count_zero_users = sum(1 for v in users_values if v["value"] == 0)
        # most values should be 0 (flat periods between increments)
        assert (
            count_zero_users >= 40
        ), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
        # non-zero values should be 0.0167 (1/60 increment rate)
        non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
        count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
        assert (
            count_increment_rate >= 8
        ), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"


@pytest.mark.parametrize(
    "metric_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,  # this is equivalent to sum(metric_name)
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "only_limit",
            None,  # this is equivalent to sum(metric_name)
            3,
            3,
            ["/products", "/health", "/checkout"],
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/health", "/orders", "/products", "/users"],
        ),
        (
            "asc_lim3",
            [build_order_by("endpoint", "asc")],
            3,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            5,
            ["/users", "/products", "/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim3",
            [build_order_by("endpoint", "desc")],
            3,
            3,
            ["/users", "/products", "/orders"],
        ),
        (
            "asc_metric_name",
            [build_order_by("sum(test_rate_groupby_asc_metric_name)", "asc")],
            None,
            5,
            ["/users", "/orders", "/checkout", "/health", "/products"],
        ),
        (
            "asc_metric_name_lim3",
            [build_order_by("sum(test_rate_groupby_asc_metric_name_lim3)", "asc")],
            3,
            3,
            ["/users", "/orders", "/checkout"],
        ),
        (
            "desc_metric_name",
            [build_order_by("sum(test_rate_groupby_desc_metric_name)", "desc")],
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "desc_metric_name_lim3",
            [build_order_by("sum(test_rate_groupby_desc_metric_name_lim3)", "desc")],
            3,
            3,
            ["/products", "/health", "/checkout"],
        ),
    ],
)
def test_rate_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    metric_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_rate_groupby_{metric_suffix}"
    metric_name = "test_rate_groupby"

    metrics = Metrics.load_from_file(
        CUMULATIVE_COUNTERS_FILE,
@@ -285,8 +97,6 @@ def test_rate_group_by_endpoint(
        "sum",
        temporality="cumulative",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -295,23 +105,10 @@ def test_rate_group_by_endpoint(
    data = response.json()
    all_series = get_all_series(data, "A")

    # Should have 5 different endpoints
    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    endpoint_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_endpoints, set):
        assert (
            set(endpoint_labels) == expected_endpoints
        ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
    else:
        assert endpoint_labels == expected_endpoints, (
            f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
        )
        len(all_series) == 5
    ), f"Expected 5 series for 5 endpoints, got {len(all_series)}"

    # endpoint -> values
    endpoint_values = {}
@@ -320,6 +117,11 @@ def test_rate_group_by_endpoint(
        values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
        endpoint_values[endpoint] = values

    expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
    assert (
        set(endpoint_values.keys()) == expected_endpoints
    ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

    # at no point should the rate be negative
    for endpoint, values in endpoint_values.items():
        for v in values:
@@ -327,4 +129,103 @@ def test_rate_group_by_endpoint(
                v["value"] >= 0
            ), f"Rate for {endpoint} should not be negative: {v['value']}"

    _assert_endpoint_rate_values(endpoint_values)
    # /health: 60 data points (t01-t60), steady +10/min
    # rate = 10/60 = 0.167
    health_values = endpoint_values["/health"]
    assert (
        len(health_values) >= 58
    ), f"Expected >= 58 values for /health, got {len(health_values)}"
    count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
    assert (
        count_steady_health >= 57
    ), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
    # all /health rates should be 0.167 except possibly first/last due to boundaries
    for v in health_values[1:-1]:
        assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    # rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
    products_values = endpoint_values["/products"]
    assert (
        len(products_values) >= 49
    ), f"Expected >= 49 values for /products, got {len(products_values)}"
    count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)

    # most values should be 0.333, some boundary values differ due to 10-min gap
    assert (
        count_steady_products >= 46
    ), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"

    # check that non-0.333 values are due to gap averaging (should be lower)
    gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
    for val in gap_boundary_values:
        assert (
            0 < val < 0.333
        ), f"Gap boundary values should be between 0 and 0.333, got {val}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    # normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
    checkout_values = endpoint_values["/checkout"]
    assert (
        len(checkout_values) >= 59
    ), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
    count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
    assert (
        count_steady_checkout >= 53
    ), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
    # check that spike values exist (traffic spike +50/min at t40-t44)
    count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
    assert (
        count_spike_checkout >= 4
    ), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"

    # spike values should be consecutive
    spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
    ]
    assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
    # consecutiveness
    for i in range(1, len(spike_indices)):
        assert (
            spike_indices[i] == spike_indices[i - 1] + 1
        ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # rate = 5/60 = 0.0833
    # reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
    orders_values = endpoint_values["/orders"]
    assert (
        len(orders_values) >= 58
    ), f"Expected >= 58 values for /orders, got {len(orders_values)}"
    count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
    assert (
        count_steady_orders >= 55
    ), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
    # check for counter reset effects - there should be some non-standard values
    non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
    assert (
        len(non_standard_orders) >= 2
    ), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
    # post-reset value should be higher (new counter value / interval)
    high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
    assert (
        len(high_rate_orders) >= 1
    ), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes
    # Rate = 1/60 = 0.0167 during increment, 0 during flat periods
    users_values = endpoint_values["/users"]
    assert (
        len(users_values) >= 54
    ), f"Expected >= 54 values for /users, got {len(users_values)}"
    count_zero_users = sum(1 for v in users_values if v["value"] == 0)
    # most values should be 0 (flat periods between increments)
    assert (
        count_zero_users >= 40
    ), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
    # non-zero values should be 0.0167 (1/60 increment rate)
    non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
    count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
    assert (
        count_increment_rate >= 8
    ), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"

@@ -5,7 +5,7 @@ Look at the multi_temporality_counters_1h.jsonl file for the relevant data
import random
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
from typing import Callable, List

import pytest

@@ -14,7 +14,6 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
@@ -92,198 +91,6 @@ def test_with_steady_values_and_reset(
            ), f"{time_aggregation} should not be negative: {v['value']}"


def _assert_endpoint_group_values(  # pylint: disable=too-many-arguments
    endpoint_values: dict,
    stable_health_value: float,
    stable_products_value: float,
    stable_checkout_value: float,
    spike_checkout_value: float,
    stable_orders_value: float,
    spike_users_value: float,
    time_aggregation: str,
) -> None:
    # /health: 60 data points (t01-t60), steady +10/min
    if "/health" in endpoint_values:
        health_values = endpoint_values["/health"]
        assert (
            len(health_values) >= 58
        ), f"Expected >= 58 values for /health, got {len(health_values)}"
        count_steady_health = sum(
            1 for v in health_values if v["value"] == stable_health_value
        )
        assert (
            count_steady_health >= 57
        ), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
        # all /health rates should be stable except possibly first/last due to boundaries
        for v in health_values[1:-1]:
            assert (
                v["value"] == stable_health_value
            ), f"Expected /health rate {stable_health_value}, got {v['value']}"

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    if "/products" in endpoint_values:
        products_values = endpoint_values["/products"]
        assert (
            len(products_values) >= 49
        ), f"Expected >= 49 values for /products, got {len(products_values)}"
        count_steady_products = sum(
            1 for v in products_values if v["value"] == stable_products_value
        )
        # most values should be stable, some boundary values differ due to 10-min gap
        assert (
            count_steady_products >= 46
        ), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
        # check that non-stable values are due to gap averaging (should be lower)
        gap_boundary_values = [
            v["value"] for v in products_values if v["value"] != stable_products_value
        ]
        for val in gap_boundary_values:
            assert (
                0 < val < stable_products_value
            ), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    if "/checkout" in endpoint_values:
        checkout_values = endpoint_values["/checkout"]
        assert (
            len(checkout_values) >= 59
        ), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
        count_steady_checkout = sum(
            1 for v in checkout_values if v["value"] == stable_checkout_value
        )
        assert (
            count_steady_checkout >= 53
        ), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
        # check that spike values exist (traffic spike +50/min at t40-t44)
        count_spike_checkout = sum(
            1 for v in checkout_values if v["value"] == spike_checkout_value
        )
        assert (
            count_spike_checkout >= 4
        ), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
        # spike values should be consecutive
        spike_indices = [
            i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
        ]
        assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
        for i in range(1, len(spike_indices)):
            assert (
                spike_indices[i] == spike_indices[i - 1] + 1
            ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
    if "/orders" in endpoint_values:
        orders_values = endpoint_values["/orders"]
        assert (
            len(orders_values) >= 58
        ), f"Expected >= 58 values for /orders, got {len(orders_values)}"
        count_steady_orders = sum(
            1 for v in orders_values if v["value"] == stable_orders_value
        )
        assert (
            count_steady_orders >= 55
        ), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
        # check for counter reset effects - there should be some non-standard values
        non_standard_orders = [
            v["value"] for v in orders_values if v["value"] != stable_orders_value
        ]
        assert (
            len(non_standard_orders) >= 2
        ), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
        # post-reset value should be higher (new counter value / interval)
        high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
        assert (
            len(high_rate_orders) >= 1
        ), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes
    if "/users" in endpoint_values:
        users_values = endpoint_values["/users"]
        assert (
            len(users_values) >= 54
        ), f"Expected >= 54 values for /users, got {len(users_values)}"
        count_zero_users = sum(1 for v in users_values if v["value"] == 0)
        # most values should be 0 (flat periods between increments)
        assert (
            count_zero_users >= 40
        ), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
        # non-zero values should be stable increment rate
        non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
        count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
        assert (
            count_increment_rate >= 8
        ), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"


@pytest.mark.parametrize(
    "order_suffix,order_by_spec,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "asc",
            ("endpoint", "asc"),
            None,
            5,
            ["/checkout", "/health", "/orders", "/products", "/users"],
        ),
        (
            "asc_lim3",
            ("endpoint", "asc"),
            3,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "desc",
            ("endpoint", "desc"),
            None,
            5,
            ["/users", "/products", "/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim3",
            ("endpoint", "desc"),
            3,
            3,
            ["/users", "/products", "/orders"],
        ),
        (
            "asc_metric_name",
            ("sum_metric", "asc"),
            None,
            5,
            ["/users", "/orders", "/checkout", "/health", "/products"],
        ),
        (
            "asc_metric_name_lim3",
            ("sum_metric", "asc"),
            3,
            3,
            ["/users", "/orders", "/checkout"],
        ),
        (
            "desc_metric_name",
            ("sum_metric", "desc"),
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "desc_metric_name_lim3",
            ("sum_metric", "desc"),
            3,
            3,
            ["/products", "/health", "/checkout"],
        ),
    ],
)
@pytest.mark.parametrize(
    "time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
    [
@@ -303,24 +110,11 @@ def test_group_by_endpoint(
    spike_checkout_value: float,
    stable_orders_value: float,
    spike_users_value: float,
    order_suffix: str,
    order_by_spec: Optional[tuple],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_{time_aggregation}_groupby_{order_suffix}"

    # Build order_by at runtime so metric name reflects actual time_aggregation
    order_by = None
    if order_by_spec is not None:
        key, direction = order_by_spec
        if key == "sum_metric":
            key = f"sum({metric_name})"
        order_by = [build_order_by(key, direction)]
    metric_name = f"test_{time_aggregation}_groupby"

    metrics = Metrics.load_from_file(
        MULTI_TEMPORALITY_FILE,
@@ -336,8 +130,6 @@ def test_group_by_endpoint(
        time_aggregation,
        "sum",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -345,23 +137,10 @@ def test_group_by_endpoint(

    data = response.json()
    all_series = get_all_series(data, "A")
    # Should have 5 different endpoints
    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    endpoint_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_endpoints, set):
        assert (
            set(endpoint_labels) == expected_endpoints
        ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
    else:
        assert endpoint_labels == expected_endpoints, (
            f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
        )
        len(all_series) == 5
    ), f"Expected 5 series for 5 endpoints, got {len(all_series)}"

    # endpoint -> values
    endpoint_values = {}
@@ -370,6 +149,11 @@ def test_group_by_endpoint(
        values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
        endpoint_values[endpoint] = values

    expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
    assert (
        set(endpoint_values.keys()) == expected_endpoints
    ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

    # at no point should the rate be negative
    for endpoint, values in endpoint_values.items():
        for v in values:
@@ -377,16 +161,117 @@ def test_group_by_endpoint(
                v["value"] >= 0
            ), f"Rate for {endpoint} should not be negative: {v['value']}"

    _assert_endpoint_group_values(
        endpoint_values,
        stable_health_value,
        stable_products_value,
        stable_checkout_value,
        spike_checkout_value,
        stable_orders_value,
        spike_users_value,
        time_aggregation,
    # /health: 60 data points (t01-t60), steady +10/min
    health_values = endpoint_values["/health"]
    assert (
        len(health_values) >= 58
    ), f"Expected >= 58 values for /health, got {len(health_values)}"
    count_steady_health = sum(
        1 for v in health_values if v["value"] == stable_health_value
    )
    assert (
        count_steady_health >= 57
    ), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
    # all /health rates should be stable except possibly first/last due to boundaries
    for v in health_values[1:-1]:
        assert (
            v["value"] == stable_health_value
        ), f"Expected /health rate {stable_health_value}, got {v['value']}"

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    products_values = endpoint_values["/products"]
    assert (
        len(products_values) >= 49
    ), f"Expected >= 49 values for /products, got {len(products_values)}"
    count_steady_products = sum(
        1 for v in products_values if v["value"] == stable_products_value
    )

    # most values should be stable, some boundary values differ due to 10-min gap
    assert (
        count_steady_products >= 46
    ), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"

    # check that non-stable values are due to gap averaging (should be lower)
    gap_boundary_values = [
        v["value"] for v in products_values if v["value"] != stable_products_value
    ]
    for val in gap_boundary_values:
        assert (
            0 < val < stable_products_value
        ), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    checkout_values = endpoint_values["/checkout"]
    assert (
        len(checkout_values) >= 59
    ), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
    count_steady_checkout = sum(
        1 for v in checkout_values if v["value"] == stable_checkout_value
    )
    assert (
        count_steady_checkout >= 53
    ), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
    # check that spike values exist (traffic spike +50/min at t40-t44)
    count_spike_checkout = sum(
        1 for v in checkout_values if v["value"] == spike_checkout_value
    )
    assert (
        count_spike_checkout >= 4
    ), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"

    # spike values should be consecutive
    spike_indices = [
        i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
    ]
    assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
    # consecutiveness
    for i in range(1, len(spike_indices)):
        assert (
            spike_indices[i] == spike_indices[i - 1] + 1
        ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
    orders_values = endpoint_values["/orders"]
    assert (
        len(orders_values) >= 58
    ), f"Expected >= 58 values for /orders, got {len(orders_values)}"
    count_steady_orders = sum(
        1 for v in orders_values if v["value"] == stable_orders_value
    )
    assert (
        count_steady_orders >= 55
    ), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
    # check for counter reset effects - there should be some non-standard values
    non_standard_orders = [
        v["value"] for v in orders_values if v["value"] != stable_orders_value
    ]
    assert (
        len(non_standard_orders) >= 2
    ), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
    # post-reset value should be higher (new counter value / interval)
    high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
    assert (
        len(high_rate_orders) >= 1
    ), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes
    users_values = endpoint_values["/users"]
    assert (
        len(users_values) >= 54
    ), f"Expected >= 54 values for /users, got {len(users_values)}"
    count_zero_users = sum(1 for v in users_values if v["value"] == 0)
    # most values should be 0 (flat periods between increments)
    assert (
        count_zero_users >= 40
    ), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
    # non-zero values should be 0.0167 (1/60 increment rate)
    non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
    count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
    assert (
        count_increment_rate >= 8
    ), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"


@pytest.mark.parametrize(

@@ -4,7 +4,7 @@ Look at the histogram_data_1h.jsonl file for the relevant data

from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
from typing import Callable, List

import pytest

@@ -13,16 +13,13 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
)
from fixtures.utils import get_testdata_file_path


FILE = get_testdata_file_path("histogram_data_1h.jsonl")
FILE_WITH_MANY_GROUPS = get_testdata_file_path("histogram_data_1h_many_groups.jsonl")


@pytest.mark.parametrize(
@@ -524,564 +521,4 @@ def test_histogram_percentile_for_delta_service(
    assert len(result_values) == 60
    assert result_values[0]["value"] == zeroth_value
    assert result_values[1]["value"] == first_value
    assert result_values[-1]["value"] == last_value


def _assert_series_endpoint_labels(
    series: list,
    expected_endpoints: Union[set, List[str]],
    prefix: str,
) -> None:
    labels = [s.get("labels", [{}])[0].get("value", "unknown") for s in series]
    if isinstance(expected_endpoints, set):
        assert (
            set(labels) == expected_endpoints
        ), f"Expected {prefix} endpoints {expected_endpoints}, got {set(labels)}"
    else:
        assert labels == expected_endpoints, (
            f"Expected {prefix} endpoints in order {expected_endpoints}, got {labels}"
        )
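

# Editor's note: a brief usage sketch of the helper above -- passing a set
# asserts membership only, while a list also asserts the exact server-side
# ordering. The series variable and expectations below are hypothetical:
#
#     _assert_series_endpoint_labels(series, {"/health", "/orders"}, "count")   # any order
#     _assert_series_endpoint_labels(series, ["/orders", "/health"], "count")   # exact order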


@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "asc_lim2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/health"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            3,
            ["/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim2",
            [build_order_by("endpoint", "desc")],
            2,
            2,
            ["/orders", "/health"],
        ),
        (
            "asc_metric_name",
            [build_order_by("count(test_histogram_count_groupby_asc_metric_name)", "asc")],
            None,
            3,
["/health", "/orders", "/checkout"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("count(test_histogram_count_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["/health", "/orders"],
        ),
        (
            "desc_metric_name",
            [build_order_by("count(test_histogram_count_groupby_desc_metric_name)", "desc")],
            None,
            3,
["/checkout", "/health", "/orders"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
        ),
        (
            "desc_metric_name_lim2",
            [build_order_by("count(test_histogram_count_groupby_desc_metric_name_lim2)", "desc")],
            2,
            2,
            ["/checkout", "/health"],
        ),
    ],
)
def test_histogram_count_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_count_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_count = build_builder_query(
        "A",
        metric_name,
        "increase",
        "count",
        comparisonSpaceAggregationParam={"threshold": 1000, "operator": "<="},
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_count])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    count_all_series = get_all_series(data, "A")

    assert (
        len(count_all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(count_all_series)}"

    _assert_series_endpoint_labels(count_all_series, expected_endpoints, "count")

    count_values = {}
    for series in count_all_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        count_values[endpoint] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )

    for endpoint, values in count_values.items():
        for v in values:
            assert v["value"] >= 0, f"Count for {endpoint} should not be negative: {v['value']}"

    # /health (cumulative, service=api): 59 points, increase starts at 11/min → 69/min
    if "/health" in count_values:
        vals = count_values["/health"]
        assert vals[0]["value"] == 11, f"Expected /health count first=11, got {vals[0]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /health count last=69, got {vals[-1]['value']}"

    # /orders (cumulative, service=api): same distribution as /health
    if "/orders" in count_values:
        vals = count_values["/orders"]
        assert vals[0]["value"] == 11, f"Expected /orders count first=11, got {vals[0]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /orders count last=69, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points, zeroth=12345 (raw delta), then 11/min → 69/min
    if "/checkout" in count_values:
        vals = count_values["/checkout"]
        assert vals[0]["value"] == 12345, f"Expected /checkout count zeroth=12345, got {vals[0]['value']}"
        assert vals[1]["value"] == 11, f"Expected /checkout count first=11, got {vals[1]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /checkout count last=69, got {vals[-1]['value']}"


@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            4,
            ["/checkout", "/health", "/orders", "/coupon"],
        ),
        (
            "only_limit",
            None,
            1,
            1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            4,
            ["/checkout", "/coupon", "/health", "/orders"],
        ),
        (
            "asc_lim2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            4,
            ["/orders", "/health", "/coupon", "/checkout"],
        ),
        (
            "desc_lim2",
            [build_order_by("endpoint", "desc")],
            2,
            2,
            ["/orders", "/health"],
        ),
        (
            "asc_metric_name",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name)", "asc")],
            None,
            4,
["/coupon", "/orders", "/checkout", "/health"], ## health and checkout have the same size so they are then sorted endpoint as a tiebreaker
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["/coupon", "/orders"],
        ),
        (
            "asc_metric_name_lim3",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim3)", "asc")],
            3,
            3,
["/coupon", "/orders", "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
|
||||
),
|
||||
(
|
||||
"desc_metric_name",
|
||||
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name)", "desc")],
|
||||
None,
|
||||
4,
|
||||
[ "/checkout", "/health", "/orders", "/coupon"],
|
||||
),
|
||||
(
|
||||
"desc_metric_name_lim2",
|
||||
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
|
||||
2,
|
||||
2,
|
||||
[ "/checkout", "/health"],
|
||||
),
|
||||
(
|
||||
"desc_metric_name_lim2",
|
||||
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
|
||||
1,
|
||||
1,
|
||||
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_histogram_percentile_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_p75_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE_WITH_MANY_GROUPS,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_p75 = build_builder_query(
        "A",
        metric_name,
        "doesnotreallymatter",
        "p75",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    p75_series = get_all_series(data, "A")

    assert (
        len(p75_series) == expected_count
    ), f"Expected {expected_count} p75 series, got {len(p75_series)}"

    _assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")

    p75_values = {}
    for series in p75_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        p75_values[endpoint] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )

    for endpoint, values in p75_values.items():
        for v in values:
            assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"

    # /health (cumulative, service=api)
    if "/health" in p75_values:
        vals = p75_values["/health"]
        assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"

    # /orders (cumulative, service=api)
    if "/orders" in p75_values:
        vals = p75_values["/orders"]
        assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
        assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points
    if "/checkout" in p75_values:
        vals = p75_values["/checkout"]
        assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
        assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"

    # /coupon (delta, service=web): 60 points
    if "/coupon" in p75_values:
        vals = p75_values["/coupon"]
        assert vals[0]["value"] == 1125, f"Expected /coupon p75 zeroth=1125, got {vals[0]['value']}"
        assert vals[1]["value"] == 1125, f"Expected /coupon p75 first=1125, got {vals[1]['value']}"
        assert vals[-1]["value"] == 1125, f"Expected /coupon p75 last=1125, got {vals[-1]['value']}"

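# Worked check, assuming the fixture splits /coupon traffic across status codes
# 200 and 500: grouped by endpoint alone, the two /coupon histograms are merged
# before the quantile is computed, which is why p75 is 1125 here but becomes
# 1250 for (/coupon, 200) and 750 for (/coupon, 500) once status_code joins the
# group-by in the next test.
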
@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints,expected_status_codes",
    [
        (
            "no_order",
            None,
            None,
            5,
            ["/checkout", "/health", "/orders", "/coupon", "/coupon"],  # /coupon has both 200 and 500 status codes, so it appears twice
            ["200", "200", "200", "200", "500"],
        ),
        (
            "only_limit",
            None,
            1,
            1,
            ["/checkout"],  # /health and /checkout have the same size, so they are then sorted by endpoint as a tiebreaker, and only /checkout makes the limit
            ["200"],
        ),
        (
            "asc_endpoint",
            [build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/coupon", "/health", "/orders"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "asc_endpoint_status_code",
            [build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/coupon", "/health", "/orders"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "asc_status_code_endpoint",
            [build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/health", "/orders", "/coupon"],
            ["200", "200", "200", "200", "500"],
        ),
        (
            "asc_endpoint_limit_2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
            ["200", "200"],
        ),
        (
            "asc_endpoint_status_code_limit_2",
            [build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
            ["200", "200"],
        ),
        (
            "asc_status_code_endpoint_limit_4",
            [build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
            4,
            4,
            ["/checkout", "/coupon", "/health", "/orders"],
            ["200", "200", "200", "200"],
        ),
        (
            "desc_endpoint",
            [build_order_by("endpoint", "desc")],
            None,
            5,
            ["/orders", "/health", "/coupon", "/coupon", "/checkout"],
            ["200", "200", "200", "500", "200"],
        ),
        (
            "desc_endpoint_status_code",
            [build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
            None,
            5,
            ["/orders", "/health", "/coupon", "/coupon", "/checkout"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "desc_status_code_endpoint",
            [build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
            None,
            5,
            ["/coupon", "/orders", "/health", "/coupon", "/checkout"],
            ["500", "200", "200", "200", "200"],
        ),
        (
            "desc_endpoint_limit3",
            [build_order_by("endpoint", "desc")],
            3,
            3,
            ["/orders", "/health", "/coupon"],
            ["200", "200", "200"],
        ),
        (
            "desc_endpoint_status_code_limit3",
            [build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
            3,
            3,
            ["/orders", "/health", "/coupon"],
            ["200", "200", "500"],
        ),
        (
            "desc_status_code_endpoint_limit2",
            [build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
            2,
            2,
            ["/coupon", "/orders"],
            ["500", "200"],
        ),
    ],
)
def test_histogram_percentile_group_by_endpoint_and_status_code(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
    expected_status_codes: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_p75_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE_WITH_MANY_GROUPS,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_p75 = build_builder_query(
        "A",
        metric_name,
        "doesnotreallymatter",
        "p75",
        group_by=["endpoint", "status_code"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    p75_series = get_all_series(data, "A")

    assert (
        len(p75_series) == expected_count
    ), f"Expected {expected_count} p75 series, got {len(p75_series)}"

    _assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")

    endpoints = [s.get("labels", [{}])[0].get("value", "unknown") for s in p75_series]
    assert endpoints == expected_endpoints, (
        f"Expected p75 endpoints in order {expected_endpoints}, got {endpoints}"
    )
    status_codes = [s.get("labels", [{}])[1].get("value", "unknown") for s in p75_series]
    assert status_codes == expected_status_codes, (
        f"Expected p75 status codes in order {expected_status_codes}, got {status_codes}"
    )

    p75_values = {}
    for series in p75_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        status_code = series.get("labels", [{}])[1].get("value", "unknown")
        p75_values[endpoint + status_code] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )
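    # Design note: keying by plain string concatenation is adequate for these
    # fixed fixtures, but a tuple key such as (endpoint, status_code) would avoid
    # ambiguity if concatenated label values could ever collide.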

    for endpoint, values in p75_values.items():
        for v in values:
            assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"

    # /health (cumulative, service=api)
    if "/health200" in p75_values:
        vals = p75_values["/health200"]
        assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"

    # /orders (cumulative, service=api)
    if "/orders200" in p75_values:
        vals = p75_values["/orders200"]
        assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
        assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points
    if "/checkout200" in p75_values:
        vals = p75_values["/checkout200"]
        assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
        assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"

    # /coupon (delta, service=web): 60 points
    if "/coupon200" in p75_values:
        vals = p75_values["/coupon200"]
        assert vals[0]["value"] == 1250, f"Expected /coupon200 p75 zeroth=1250, got {vals[0]['value']}"
        assert vals[1]["value"] == 1250, f"Expected /coupon200 p75 first=1250, got {vals[1]['value']}"
        assert vals[-1]["value"] == 1250, f"Expected /coupon200 p75 last=1250, got {vals[-1]['value']}"

    if "/coupon500" in p75_values:
        vals = p75_values["/coupon500"]
        assert vals[0]["value"] == 750, f"Expected /coupon500 p75 zeroth=750, got {vals[0]['value']}"
        assert vals[1]["value"] == 750, f"Expected /coupon500 p75 first=750, got {vals[1]['value']}"
        assert vals[-1]["value"] == 750, f"Expected /coupon500 p75 last=750, got {vals[-1]['value']}"
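# As the two parametrized suites above exercise: build_order_by accepts either a
# group-by label (e.g. "endpoint", "status_code") or an aggregation expression
# over the queried metric (e.g. "p75(<metric_name>)"), and limit truncates the
# already-ordered series list (asc_lim2 keeps the first two of the asc order).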
    assert result_values[-1]["value"] == last_value

@@ -1,6 +1,6 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
from typing import Callable, List

import pytest

@@ -9,8 +9,6 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
)
@@ -141,137 +139,3 @@ def test_for_multiple_aggregations(
    assert result_values[19]["value"] == twentieth_min_val
    assert result_values[20]["value"] == twenty_first_min_val
    assert result_values[30]["value"] == thirty_first_min_val


@pytest.mark.parametrize(
    "metric_suffix,order_by,limit,expected_count,expected_services",
    [
        (
            "no_order",
            None,  # default ordering: desc by avg of all metric values for a group
            None,
            3,
            ["lab", "web", "api"],  # sums: lab=42000, api=36000, web=34000; avgs: lab=700, web=680, api=600
        ),
        (
            "only_limit",
            None,
            2,
            2,
            ["lab", "web"],  # top 2 by the default desc-by-avg ordering: lab=700, web=680
        ),
        (
            "asc",
            [build_order_by("service", "asc")],
            None,
            3,
            ["api", "lab", "web"],
        ),
        (
            "asc_lim2",
            [build_order_by("service", "asc")],
            2,
            2,
            ["api", "lab"],
        ),
        (
            "desc",
            [build_order_by("service", "desc")],
            None,
            3,
            ["web", "lab", "api"],
        ),
        (
            "desc_lim2",
            [build_order_by("service", "desc")],
            2,
            2,
            ["web", "lab"],
        ),
        (
            "asc_metric_name",
            [build_order_by("sum(test_gauge_groupby_asc_metric_name)", "asc")],
            None,
            3,
            ["api", "web", "lab"],
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("sum(test_gauge_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["api", "web"],
        ),
        (
            "desc_metric_name",
            [build_order_by("sum(test_gauge_groupby_desc_metric_name)", "desc")],
            None,
            3,
            ["lab", "web", "api"],
        ),
        (
            "desc_metric_name_lim2",
            [build_order_by("sum(test_gauge_groupby_desc_metric_name_lim2)", "desc")],
            2,
            2,
            ["lab", "web"],
        ),
    ],
)
def test_gauge_group_by_service(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    metric_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_services: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_gauge_groupby_{metric_suffix}"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "max",
        "sum",
        group_by=["service"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    all_series = get_all_series(data, "A")

    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    service_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_services, set):
        assert (
            set(service_labels) == expected_services
        ), f"Expected services {expected_services}, got {set(service_labels)}"
    else:
        assert service_labels == expected_services, (
            f"Expected services {expected_services}, got {service_labels}"
        )
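# Sanity arithmetic for the default desc-by-avg ordering in the cases above,
# assuming avg = sum(values) / number_of_points per series: lab = 42000 / 60 = 700,
# web = 34000 / 50 = 680, api = 36000 / 60 = 600, hence lab > web > api.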
@@ -5,16 +5,13 @@ Look at the delta_counters_1h.jsonl file for the relevant data
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List, Optional, Union

import pytest
from typing import Any, Callable, List

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
@@ -72,61 +69,16 @@ def test_rate_with_steady_values_and_reset(
            assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"


@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            5,
            {"/products", "/health", "/checkout", "/orders", "/users"},
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/health", "/orders", "/products", "/users"],
        ),
        (
            "asc_lim3",
            [build_order_by("endpoint", "asc")],
            3,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            5,
            ["/users", "/products", "/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim3",
            [build_order_by("endpoint", "desc")],
            3,
            3,
            ["/users", "/products", "/orders"],
        ),
    ],
)
def test_rate_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_rate_groupby_{order_suffix}"
    metric_name = "test_rate_groupby"

    metrics = Metrics.load_from_file(
        DELTA_COUNTERS_FILE,
@@ -142,8 +94,6 @@ def test_rate_group_by_endpoint(
        "rate",
        "sum",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -152,23 +102,10 @@ def test_rate_group_by_endpoint(
    data = response.json()
    all_series = get_all_series(data, "A")

    # Should have 5 different endpoints
    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    endpoint_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_endpoints, set):
        assert (
            set(endpoint_labels) == expected_endpoints
        ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
    else:
        assert endpoint_labels == expected_endpoints, (
            f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
        )
        len(all_series) == 5
    ), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
    # endpoint -> values
    endpoint_values = {}
@@ -177,6 +114,11 @@ def test_rate_group_by_endpoint(
        values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
        endpoint_values[endpoint] = values

    expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
    assert (
        set(endpoint_values.keys()) == expected_endpoints
    ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

    # at no point should the rate be negative
    for endpoint, values in endpoint_values.items():
        for v in values:
@@ -186,95 +128,93 @@

    # /health: 60 data points (t01-t60), steady +10/min
    # rate = 10/60 = 0.167
    if "/health" in endpoint_values:
        health_values = endpoint_values["/health"]
        assert (
            len(health_values) == 60
        ), f"Expected 60 values for /health, got {len(health_values)}"
        count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
        assert (
            count_steady_health == 60
        ), f"Expected 60 steady rate values (0.167) for /health, got {count_steady_health}"
        # all /health rates should be 0.167 except possibly first/last due to boundaries
        for v in health_values[1:-1]:
            assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    # rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
    if "/products" in endpoint_values:
        products_values = endpoint_values["/products"]
        assert (
            len(products_values) == 51
        ), f"Expected 51 values for /products, got {len(products_values)}"
        count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
        assert (
            count_steady_products == 51
        ), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    # normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
    if "/checkout" in endpoint_values:
        checkout_values = endpoint_values["/checkout"]
        assert (
            len(checkout_values) == 61
        ), f"Expected 61 values for /checkout, got {len(checkout_values)}"
        count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
        assert (
            count_steady_checkout == 56
        ), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
        # check that spike values exist (traffic spike +50/min at t40-t44)
        count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
        assert (
            count_spike_checkout == 5
        ), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"

        # spike values should be consecutive
        spike_indices = [
            i for i, v in enumerate(checkout_values) if v["value"] == 0.833
        ]
        assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
        for i in range(1, len(spike_indices)):
            assert (
                spike_indices[i] == spike_indices[i - 1] + 1
            ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # rate = 5/60 = 0.0833
    # reset at t31 causes: rate at t30 includes gap (lower), t31 has a high rate after the reset
    if "/orders" in endpoint_values:
        orders_values = endpoint_values["/orders"]
        assert (
            len(orders_values) == 60
        ), f"Expected 60 values for /orders, got {len(orders_values)}"
        count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
        assert (
            count_steady_orders == 58
        ), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
        # check for counter reset effects - there should be some non-standard values
        non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
        assert (
            len(non_standard_orders) == 2
        ), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
        # post-reset value should be higher (new counter value / interval)
        high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
        assert (
            len(high_rate_orders) == 1
        ), f"Expected one high rate value after counter reset, got {non_standard_orders}"
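    # A minimal sketch of the reset-aware delta that these /orders expectations
    # assume (illustrative only; the querier's actual reset handling lives
    # server-side and may differ in detail):
    #
    # def monotonic_increase(prev, curr):
    #     # on a counter reset (curr < prev), the post-reset sample itself is
    #     # taken as the increase, which is why t31 shows one unusually high rate
    #     return curr if curr < prev else curr - prev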

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes (12 of them)
    # Rate = 1/60 = 0.0167 during increment, 0 during flat periods
    if "/users" in endpoint_values:
        users_values = endpoint_values["/users"]
        assert (
            len(users_values) == 56
        ), f"Expected 56 values for /users, got {len(users_values)}"
        count_zero_users = sum(1 for v in users_values if v["value"] == 0)
        # most values should be 0 (flat periods between increments)
        assert (
            count_zero_users == 44
        ), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
        # non-zero values should be 0.0167 (1/60 increment rate)
        non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
        count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
        assert (
            count_increment_rate == 12
        ), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
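    # Derivation of the steady-rate constants asserted above (assuming per-second
    # rates reported to three significant figures):
    #   10 / 60 -> 0.167   (/health)
    #   20 / 60 -> 0.333   (/products)
    #    1 / 60 -> 0.0167  (/checkout steady, /users increments)
    #   50 / 60 -> 0.833   (/checkout spike)
    #    5 / 60 -> 0.0833  (/orders)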