Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-17 14:42:12 +00:00)
Compare commits: main...feat/tests, 1 commit (73945e1d12)
@@ -176,6 +176,25 @@ We have published benchmarks comparing Loki with SigNoz. See …

We ❤️ contributions to the project, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first, before you start contributing to SigNoz.

Not sure how to get started? Just message us on the #contributing channel in our [slack community](https://signoz.io/slack)

### Our project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
28 README.md
@@ -221,6 +221,34 @@ We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)

#### Frontend

- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)
- [Vibhu Pandey](https://github.com/therealpandey)

<br /><br />
@@ -187,6 +187,25 @@ Jaeger is only a distributed tracing system, but SigNoz can provide metric…

Not sure how to get started? Just reach out to us via the `#contributing` channel in our [slack community](https://signoz.io/slack).

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
@@ -294,6 +294,7 @@ flagger:
  config:
    boolean:
      use_span_metrics: true
      interpolation_enabled: false
      kafka_span_eval: false
    string:
    float:
@@ -9,6 +9,74 @@
  padding: 0px;
}

.dashboard-header {
  border-bottom: 1px solid var(--bg-slate-400);
  display: flex;
  justify-content: space-between;
  gap: 16px;
  align-items: center;
  padding: 0 8px;
  box-sizing: border-box;

  .dashboard-breadcrumbs {
    width: 100%;
    height: 48px;
    display: flex;
    gap: 6px;
    align-items: center;
    max-width: 80%;

    .dashboard-btn {
      display: flex;
      align-items: center;
      color: var(--bg-vanilla-400);
      font-family: Inter;
      font-size: 14px;
      font-style: normal;
      font-weight: 400;
      line-height: 20px; /* 142.857% */
      letter-spacing: -0.07px;
      padding: 0px;
      height: 20px;
    }

    .dashboard-btn:hover {
      background-color: unset;
    }

    .id-btn {
      display: flex;
      align-items: center;
      padding: 0px 2px;
      border-radius: 2px;
      background: rgba(113, 144, 249, 0.1);
      color: var(--bg-robin-400);
      font-family: Inter;
      font-size: 14px;
      font-style: normal;
      font-weight: 400;
      line-height: 20px; /* 142.857% */
      height: 20px;

      max-width: calc(100% - 120px);

      span {
        white-space: nowrap;
        overflow: hidden;
        text-overflow: ellipsis;
      }

      .ant-btn-icon {
        margin-inline-end: 4px;
      }
    }
    .id-btn:hover {
      background: rgba(113, 144, 249, 0.1);
      color: var(--bg-robin-300);
    }
  }
}

.dashboard-details {
  display: flex;
  justify-content: space-between;

@@ -467,6 +535,15 @@
.dashboard-description-container {
  color: var(--bg-ink-400);

  .dashboard-header {
    border-bottom: 1px solid var(--bg-vanilla-300);
    .dashboard-breadcrumbs {
      .dashboard-btn {
        color: var(--bg-ink-400);
      }
    }
  }

  .dashboard-details {
    .left-section {
      .dashboard-title {
@@ -16,7 +16,9 @@ import {
} from 'antd';
import logEvent from 'api/common/logEvent';
import ConfigureIcon from 'assets/Integrations/ConfigureIcon';
import HeaderRightSection from 'components/HeaderRightSection/HeaderRightSection';
import { PANEL_GROUP_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { DeleteButton } from 'container/ListOfDashboard/TableComponents/DeleteButton';
import DateTimeSelectionV2 from 'container/TopNav/DateTimeSelectionV2';
import { useDashboardVariables } from 'hooks/dashboard/useDashboardVariables';
@@ -25,6 +27,7 @@ import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
import useComponentPermission from 'hooks/useComponentPermission';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { useNotifications } from 'hooks/useNotifications';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { isEmpty } from 'lodash-es';
import {
	Check,
@@ -34,6 +37,7 @@ import {
	FolderKanban,
	Fullscreen,
	Globe,
	LayoutGrid,
	LockKeyhole,
	PenLine,
	X,
@@ -47,7 +51,6 @@ import { ROLES, USER_ROLES } from 'types/roles';
import { ComponentTypes } from 'utils/permission';
import { v4 as uuid } from 'uuid';

import DashboardHeader from '../components/DashboardHeader/DashboardHeader';
import DashboardGraphSlider from '../ComponentsSlider';
import DashboardSettings from '../DashboardSettings';
import { Base64Icons } from '../DashboardSettings/General/utils';
@@ -68,6 +71,7 @@ interface DashboardDescriptionProps {

// eslint-disable-next-line sonarjs/cognitive-complexity
function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
	const { safeNavigate } = useSafeNavigate();
	const { handle } = props;
	const {
		selectedDashboard,
@@ -76,6 +80,7 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
		layouts,
		setLayouts,
		isDashboardLocked,
		listSortOrder,
		setSelectedDashboard,
		handleToggleDashboardSlider,
		setSelectedRowWidgetId,
@@ -287,6 +292,17 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
		});
	}

	function goToListPage(): void {
		const urlParams = new URLSearchParams();
		urlParams.set('columnKey', listSortOrder.columnKey as string);
		urlParams.set('order', listSortOrder.order as string);
		urlParams.set('page', listSortOrder.pagination as string);
		urlParams.set('search', listSortOrder.search as string);

		const generatedUrl = `${ROUTES.ALL_DASHBOARD}?${urlParams.toString()}`;
		safeNavigate(generatedUrl);
	}

	const {
		data: publicDashboardResponse,
		isLoading: isLoadingPublicDashboardData,
@@ -335,7 +351,32 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {

	return (
		<Card className="dashboard-description-container">
			<DashboardHeader />
			<div className="dashboard-header">
				<section className="dashboard-breadcrumbs">
					<Button
						type="text"
						icon={<LayoutGrid size={14} />}
						className="dashboard-btn"
						onClick={(): void => goToListPage()}
					>
						Dashboard /
					</Button>
					<Button type="text" className="id-btn dashboard-name-btn">
						<img
							src={image}
							alt="dashboard-icon"
							style={{ height: '14px', width: '14px' }}
						/>
						{title}
					</Button>
				</section>

				<HeaderRightSection
					enableAnnouncements={false}
					enableShare
					enableFeedback
				/>
			</div>
			<section className="dashboard-details">
				<div className="left-section">
					<img src={image} alt="dashboard-img" className="dashboard-img" />
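The new `goToListPage` above serializes the saved list state into query parameters. A sketch of the URL it produces, with example values (`/dashboard` standing in for `ROUTES.ALL_DASHBOARD`):

```ts
// Example values only; the real ones come from listSortOrder.
const urlParams = new URLSearchParams();
urlParams.set('columnKey', 'updatedAt');
urlParams.set('order', 'descend');
urlParams.set('page', '1');
urlParams.set('search', '');

console.log(`/dashboard?${urlParams.toString()}`);
// -> /dashboard?columnKey=updatedAt&order=descend&page=1&search=
```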
@@ -1,71 +0,0 @@
.dashboard-breadcrumbs {
  width: 100%;
  height: 48px;
  display: flex;
  gap: 6px;
  align-items: center;
  max-width: 80%;

  .dashboard-btn {
    display: flex;
    align-items: center;
    color: var(--bg-vanilla-400);
    font-family: Inter;
    font-size: 14px;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    letter-spacing: -0.07px;
    padding: 0px;
    height: 20px;
  }

  .dashboard-btn:hover {
    background-color: unset;
  }

  .id-btn {
    display: flex;
    align-items: center;
    gap: 4px;
    padding: 0px 2px;
    border-radius: 2px;
    background: rgba(113, 144, 249, 0.1);
    color: var(--bg-robin-400);
    font-family: Inter;
    font-size: 14px;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    height: 20px;

    max-width: calc(100% - 120px);

    span {
      white-space: nowrap;
      overflow: hidden;
      text-overflow: ellipsis;
    }

    .ant-btn-icon {
      margin-inline-end: 4px;
    }
  }
  .id-btn:hover {
    background: rgba(113, 144, 249, 0.1);
    color: var(--bg-robin-300);
  }

  .dashboard-icon-image {
    height: 14px;
    width: 14px;
  }
}

.lightMode {
  .dashboard-breadcrumbs {
    .dashboard-btn {
      color: var(--bg-ink-400);
    }
  }
}
@@ -1,55 +0,0 @@
import { useCallback } from 'react';
import { Button } from 'antd';
import ROUTES from 'constants/routes';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { LayoutGrid } from 'lucide-react';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { DashboardData } from 'types/api/dashboard/getAll';

import { Base64Icons } from '../../DashboardSettings/General/utils';

import './DashboardBreadcrumbs.styles.scss';

function DashboardBreadcrumbs(): JSX.Element {
	const { safeNavigate } = useSafeNavigate();
	const { selectedDashboard, listSortOrder } = useDashboard();

	const selectedData = selectedDashboard
		? {
				...selectedDashboard.data,
				uuid: selectedDashboard.id,
		  }
		: ({} as DashboardData);

	const { title = '', image = Base64Icons[0] } = selectedData || {};

	const goToListPage = useCallback(() => {
		const urlParams = new URLSearchParams();
		urlParams.set('columnKey', listSortOrder.columnKey as string);
		urlParams.set('order', listSortOrder.order as string);
		urlParams.set('page', listSortOrder.pagination as string);
		urlParams.set('search', listSortOrder.search as string);

		const generatedUrl = `${ROUTES.ALL_DASHBOARD}?${urlParams.toString()}`;
		safeNavigate(generatedUrl);
	}, [listSortOrder, safeNavigate]);

	return (
		<div className="dashboard-breadcrumbs">
			<Button
				type="text"
				icon={<LayoutGrid size={14} />}
				className="dashboard-btn"
				onClick={goToListPage}
			>
				Dashboard /
			</Button>
			<Button type="text" className="id-btn dashboard-name-btn">
				<img src={image} alt="dashboard-icon" className="dashboard-icon-image" />
				{title}
			</Button>
		</div>
	);
}

export default DashboardBreadcrumbs;
@@ -1,15 +0,0 @@
.dashboard-header {
  border-bottom: 1px solid var(--bg-slate-400);
  display: flex;
  justify-content: space-between;
  gap: 16px;
  align-items: center;
  padding: 0 8px;
  box-sizing: border-box;
}

.lightMode {
  .dashboard-header {
    border-bottom: 1px solid var(--bg-vanilla-300);
  }
}
@@ -1,17 +0,0 @@
import { memo } from 'react';
import HeaderRightSection from 'components/HeaderRightSection/HeaderRightSection';

import DashboardBreadcrumbs from './DashboardBreadcrumbs';

import './DashboardHeader.styles.scss';

function DashboardHeader(): JSX.Element {
	return (
		<div className="dashboard-header">
			<DashboardBreadcrumbs />
			<HeaderRightSection enableAnnouncements={false} enableShare enableFeedback />
		</div>
	);
}

export default memo(DashboardHeader);
@@ -23,7 +23,6 @@ export default function ChartWrapper({
	width: containerWidth,
	height: containerHeight,
	showTooltip = true,
	showLegend = true,
	canPinTooltip = false,
	syncMode,
	syncKey,
@@ -37,9 +36,6 @@ export default function ChartWrapper({

	const legendComponent = useCallback(
		(averageLegendWidth: number): React.ReactNode => {
			if (!showLegend) {
				return null;
			}
			return (
				<Legend
					config={config}
@@ -48,7 +44,7 @@ export default function ChartWrapper({
				/>
			);
		},
		[config, legendConfig.position, showLegend],
		[config, legendConfig.position],
	);

	const renderTooltipCallback = useCallback(
@@ -64,7 +60,6 @@ export default function ChartWrapper({
	return (
		<PlotContextProvider>
			<ChartLayout
				showLegend={showLegend}
				config={config}
				containerWidth={containerWidth}
				containerHeight={containerHeight}
@@ -1,55 +0,0 @@
import { useCallback } from 'react';
import ChartWrapper from 'container/DashboardContainer/visualization/charts/ChartWrapper/ChartWrapper';
import HistogramTooltip from 'lib/uPlotV2/components/Tooltip/HistogramTooltip';
import { buildTooltipContent } from 'lib/uPlotV2/components/Tooltip/utils';
import {
	HistogramTooltipProps,
	TooltipRenderArgs,
} from 'lib/uPlotV2/components/types';

import { HistogramChartProps } from '../types';

export default function Histogram(props: HistogramChartProps): JSX.Element {
	const {
		children,
		renderTooltip: customRenderTooltip,
		isQueriesMerged,
		...rest
	} = props;

	const renderTooltip = useCallback(
		(props: TooltipRenderArgs): React.ReactNode => {
			if (customRenderTooltip) {
				return customRenderTooltip(props);
			}
			const content = buildTooltipContent({
				data: props.uPlotInstance.data,
				series: props.uPlotInstance.series,
				dataIndexes: props.dataIndexes,
				activeSeriesIndex: props.seriesIndex,
				uPlotInstance: props.uPlotInstance,
				yAxisUnit: rest.yAxisUnit ?? '',
				decimalPrecision: rest.decimalPrecision,
			});
			const tooltipProps: HistogramTooltipProps = {
				...props,
				timezone: rest.timezone,
				yAxisUnit: rest.yAxisUnit,
				decimalPrecision: rest.decimalPrecision,
				content,
			};
			return <HistogramTooltip {...tooltipProps} />;
		},
		[customRenderTooltip, rest.timezone, rest.yAxisUnit, rest.decimalPrecision],
	);

	return (
		<ChartWrapper
			showLegend={!isQueriesMerged}
			{...rest}
			renderTooltip={renderTooltip}
		>
			{children}
		</ChartWrapper>
	);
}
@@ -7,7 +7,6 @@ interface BaseChartProps {
	width: number;
	height: number;
	showTooltip?: boolean;
	showLegend?: boolean;
	timezone: string;
	canPinTooltip?: boolean;
	yAxisUnit?: string;
@@ -18,7 +17,6 @@ interface BaseChartProps {
interface UPlotBasedChartProps {
	config: UPlotConfigBuilder;
	data: uPlot.AlignedData;
	legendConfig: LegendConfig;
	syncMode?: DashboardCursorSync;
	syncKey?: string;
	plotRef?: (plot: uPlot | null) => void;
@@ -28,20 +26,14 @@ interface UPlotBasedChartProps {
}

export interface TimeSeriesChartProps
	extends BaseChartProps,
		UPlotBasedChartProps {}

export interface HistogramChartProps
	extends BaseChartProps,
		UPlotBasedChartProps {
	isQueriesMerged?: boolean;
	legendConfig: LegendConfig;
}

export interface BarChartProps extends BaseChartProps, UPlotBasedChartProps {
	legendConfig: LegendConfig;
	isStackedBarChart?: boolean;
}

export type ChartProps =
	| TimeSeriesChartProps
	| BarChartProps
	| HistogramChartProps;
export type ChartProps = TimeSeriesChartProps | BarChartProps;
@@ -1,69 +0,0 @@
import { renderHook } from '@testing-library/react';
import { useDashboard } from 'providers/Dashboard/Dashboard';

import { useScrollWidgetIntoView } from '../useScrollWidgetIntoView';

jest.mock('providers/Dashboard/Dashboard');

type MockHTMLElement = {
	scrollIntoView: jest.Mock;
	focus: jest.Mock;
};

function createMockElement(): MockHTMLElement {
	return {
		scrollIntoView: jest.fn(),
		focus: jest.fn(),
	};
}

describe('useScrollWidgetIntoView', () => {
	const mockedUseDashboard = useDashboard as jest.MockedFunction<
		typeof useDashboard
	>;

	beforeEach(() => {
		jest.clearAllMocks();
	});

	it('scrolls into view and focuses when toScrollWidgetId matches widget id', () => {
		const setToScrollWidgetId = jest.fn();
		const mockElement = createMockElement();
		const ref = ({
			current: mockElement,
		} as unknown) as React.RefObject<HTMLDivElement>;

		mockedUseDashboard.mockReturnValue(({
			toScrollWidgetId: 'widget-id',
			setToScrollWidgetId,
		} as unknown) as ReturnType<typeof useDashboard>);

		renderHook(() => useScrollWidgetIntoView('widget-id', ref));

		expect(mockElement.scrollIntoView).toHaveBeenCalledWith({
			behavior: 'smooth',
			block: 'center',
		});
		expect(mockElement.focus).toHaveBeenCalled();
		expect(setToScrollWidgetId).toHaveBeenCalledWith('');
	});

	it('does nothing when toScrollWidgetId does not match widget id', () => {
		const setToScrollWidgetId = jest.fn();
		const mockElement = createMockElement();
		const ref = ({
			current: mockElement,
		} as unknown) as React.RefObject<HTMLDivElement>;

		mockedUseDashboard.mockReturnValue(({
			toScrollWidgetId: 'other-widget',
			setToScrollWidgetId,
		} as unknown) as ReturnType<typeof useDashboard>);

		renderHook(() => useScrollWidgetIntoView('widget-id', ref));

		expect(mockElement.scrollIntoView).not.toHaveBeenCalled();
		expect(mockElement.focus).not.toHaveBeenCalled();
		expect(setToScrollWidgetId).not.toHaveBeenCalled();
	});
});
@@ -1,26 +0,0 @@
import { RefObject, useEffect } from 'react';
import { useDashboard } from 'providers/Dashboard/Dashboard';

/**
 * Scrolls the given widget container into view when the dashboard
 * requests it via `toScrollWidgetId`.
 *
 * Intended for use in panel components that render a single widget.
 */
export function useScrollWidgetIntoView<T extends HTMLElement>(
	widgetId: string,
	widgetContainerRef: RefObject<T>,
): void {
	const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();

	useEffect(() => {
		if (toScrollWidgetId === widgetId) {
			widgetContainerRef.current?.scrollIntoView({
				behavior: 'smooth',
				block: 'center',
			});
			widgetContainerRef.current?.focus();
			setToScrollWidgetId('');
		}
	}, [toScrollWidgetId, setToScrollWidgetId, widgetId, widgetContainerRef]);
}
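Typical consumption of the hook, per the JSDoc above; `MyPanel` and its prop are illustrative, not part of this change:

```tsx
import { useRef } from 'react';

import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';

function MyPanel({ widgetId }: { widgetId: string }): JSX.Element {
	const containerRef = useRef<HTMLDivElement>(null);

	// Scrolls and focuses this container when the dashboard sets
	// toScrollWidgetId to widgetId.
	useScrollWidgetIntoView(widgetId, containerRef);

	// tabIndex={-1} makes the div programmatically focusable, so focus() works.
	return <div ref={containerRef} tabIndex={-1} />;
}
```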
@@ -1,14 +1,12 @@
import { useMemo } from 'react';
import cx from 'classnames';
import { calculateChartDimensions } from 'container/DashboardContainer/visualization/charts/utils';
import { MAX_LEGEND_WIDTH } from 'lib/uPlotV2/components/Legend/Legend';
import { LegendConfig, LegendPosition } from 'lib/uPlotV2/components/types';
import { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';

import './ChartLayout.styles.scss';

export interface ChartLayoutProps {
	showLegend?: boolean;
	legendComponent: (legendPerSet: number) => React.ReactNode;
	children: (props: {
		chartWidth: number;
@@ -22,7 +20,6 @@ export interface ChartLayoutProps {
	config: UPlotConfigBuilder;
}
export default function ChartLayout({
	showLegend = true,
	legendComponent,
	children,
	layoutChildren,
@@ -33,15 +30,6 @@
}: ChartLayoutProps): JSX.Element {
	const chartDimensions = useMemo(
		() => {
			if (!showLegend) {
				return {
					width: containerWidth,
					height: containerHeight,
					legendWidth: 0,
					legendHeight: 0,
					averageLegendWidth: MAX_LEGEND_WIDTH,
				};
			}
			const legendItemsMap = config.getLegendItems();
			const seriesLabels = Object.values(legendItemsMap)
				.map((item) => item.label)
@@ -54,7 +42,7 @@
			});
		},
		// eslint-disable-next-line react-hooks/exhaustive-deps
		[containerWidth, containerHeight, legendConfig, showLegend],
		[containerWidth, containerHeight, legendConfig],
	);

	return (
@@ -72,17 +60,15 @@
					averageLegendWidth: chartDimensions.averageLegendWidth,
				})}
			</div>
			{showLegend && (
				<div
					className="chart-layout__legend-wrapper"
					style={{
						height: chartDimensions.legendHeight,
						width: chartDimensions.legendWidth,
					}}
				>
					{legendComponent(chartDimensions.averageLegendWidth)}
				</div>
			)}
			<div
				className="chart-layout__legend-wrapper"
				style={{
					height: chartDimensions.legendHeight,
					width: chartDimensions.legendWidth,
				}}
			>
				{legendComponent(chartDimensions.averageLegendWidth)}
			</div>
		</div>
		{layoutChildren}
	</div>
@@ -1,10 +1,10 @@
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import ContextMenu from 'periscope/components/ContextMenu';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';
@@ -27,6 +27,7 @@ function BarPanel(props: PanelWrapperProps): JSX.Element {
		onToggleModelHandler,
	} = props;
	const uPlotRef = useRef<uPlot | null>(null);
	const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();
	const graphRef = useRef<HTMLDivElement>(null);
	const [minTimeScale, setMinTimeScale] = useState<number>();
	const [maxTimeScale, setMaxTimeScale] = useState<number>();
@@ -35,7 +36,16 @@ function BarPanel(props: PanelWrapperProps): JSX.Element {
	const isDarkMode = useIsDarkMode();
	const { timezone } = useTimezone();

	useScrollWidgetIntoView(widget.id, graphRef);
	useEffect(() => {
		if (toScrollWidgetId === widget.id) {
			graphRef.current?.scrollIntoView({
				behavior: 'smooth',
				block: 'center',
			});
			graphRef.current?.focus();
			setToScrollWidgetId('');
		}
	}, [toScrollWidgetId, setToScrollWidgetId, widget.id]);

	useEffect((): void => {
		const { startTime, endTime } = getTimeRange(queryResponse);
@@ -1,114 +0,0 @@
import { useMemo, useRef } from 'react';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { DashboardCursorSync } from 'lib/uPlotV2/plugins/TooltipPlugin/types';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';

import Histogram from '../../charts/Histogram/Histogram';
import ChartManager from '../../components/ChartManager/ChartManager';
import {
	prepareHistogramPanelConfig,
	prepareHistogramPanelData,
} from './utils';

import '../Panel.styles.scss';

function HistogramPanel(props: PanelWrapperProps): JSX.Element {
	const {
		panelMode,
		queryResponse,
		widget,
		isFullViewMode,
		onToggleModelHandler,
	} = props;
	const uPlotRef = useRef<uPlot | null>(null);
	const graphRef = useRef<HTMLDivElement>(null);
	const containerDimensions = useResizeObserver(graphRef);

	const isDarkMode = useIsDarkMode();
	const { timezone } = useTimezone();

	useScrollWidgetIntoView(widget.id, graphRef);

	const config = useMemo(() => {
		return prepareHistogramPanelConfig({
			widget,
			isDarkMode,
			apiResponse: queryResponse?.data?.payload as MetricRangePayloadProps,
			panelMode,
		});
	}, [widget, isDarkMode, queryResponse?.data?.payload, panelMode]);

	const chartData = useMemo(() => {
		if (!queryResponse?.data?.payload) {
			return [];
		}
		return prepareHistogramPanelData({
			apiResponse: queryResponse?.data?.payload as MetricRangePayloadProps,
			bucketWidth: widget?.bucketWidth,
			bucketCount: widget?.bucketCount,
			mergeAllActiveQueries: widget?.mergeAllActiveQueries,
		});
	}, [
		queryResponse?.data?.payload,
		widget?.bucketWidth,
		widget?.bucketCount,
		widget?.mergeAllActiveQueries,
	]);

	const layoutChildren = useMemo(() => {
		if (!isFullViewMode || widget.mergeAllActiveQueries) {
			return null;
		}
		return (
			<ChartManager
				config={config}
				alignedData={chartData}
				yAxisUnit={widget.yAxisUnit}
				onCancel={onToggleModelHandler}
			/>
		);
	}, [
		isFullViewMode,
		config,
		chartData,
		widget.yAxisUnit,
		onToggleModelHandler,
		widget.mergeAllActiveQueries,
	]);

	return (
		<div className="panel-container" ref={graphRef}>
			{containerDimensions.width > 0 && containerDimensions.height > 0 && (
				<Histogram
					config={config}
					legendConfig={{
						position: widget?.legendPosition ?? LegendPosition.BOTTOM,
					}}
					plotRef={(plot: uPlot | null): void => {
						uPlotRef.current = plot;
					}}
					onDestroy={(): void => {
						uPlotRef.current = null;
					}}
					isQueriesMerged={widget.mergeAllActiveQueries}
					yAxisUnit={widget.yAxisUnit}
					decimalPrecision={widget.decimalPrecision}
					syncMode={DashboardCursorSync.Crosshair}
					timezone={timezone.value}
					data={chartData as uPlot.AlignedData}
					width={containerDimensions.width}
					height={containerDimensions.height}
					layoutChildren={layoutChildren}
				/>
			)}
		</div>
	);
}

export default HistogramPanel;
@@ -1,223 +0,0 @@
/* eslint-disable simple-import-sort/imports */
import type { UseQueryResult } from 'react-query';
import { render, screen } from 'tests/test-utils';

import { PanelMode } from 'container/DashboardContainer/visualization/panels/types';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { Widgets } from 'types/api/dashboard/getAll';
import {
	MetricQueryRangeSuccessResponse,
	MetricRangePayloadProps,
} from 'types/api/metrics/getQueryRange';

import HistogramPanel from '../HistogramPanel';
import { HistogramChartProps } from 'container/DashboardContainer/visualization/charts/types';

jest.mock('hooks/useDimensions', () => ({
	useResizeObserver: jest.fn().mockReturnValue({ width: 800, height: 400 }),
}));

jest.mock('hooks/useDarkMode', () => ({
	useIsDarkMode: jest.fn().mockReturnValue(false),
}));

jest.mock('providers/Timezone', () => ({
	__esModule: true,
	// Provide a no-op provider component so AllTheProviders can render
	default: ({ children }: { children: React.ReactNode }): JSX.Element => (
		<>{children}</>
	),
	// And mock the hook used by HistogramPanel
	useTimezone: jest.fn().mockReturnValue({
		timezone: { value: 'UTC' },
	}),
}));

jest.mock(
	'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView',
	() => ({
		useScrollWidgetIntoView: jest.fn(),
	}),
);

jest.mock(
	'container/DashboardContainer/visualization/charts/Histogram/Histogram',
	() => ({
		__esModule: true,
		default: (props: HistogramChartProps): JSX.Element => (
			<div data-testid="histogram-chart">
				<div data-testid="histogram-props">
					{JSON.stringify({
						legendPosition: props.legendConfig?.position,
						isQueriesMerged: props.isQueriesMerged,
						yAxisUnit: props.yAxisUnit,
						decimalPrecision: props.decimalPrecision,
					})}
				</div>
				{props.layoutChildren}
			</div>
		),
	}),
);

jest.mock(
	'container/DashboardContainer/visualization/components/ChartManager/ChartManager',
	() => ({
		__esModule: true,
		default: (): JSX.Element => (
			<div data-testid="chart-manager">ChartManager</div>
		),
	}),
);

function createQueryResponse(
	payloadOverrides: Partial<MetricRangePayloadProps> = {},
): { data: { payload: MetricRangePayloadProps } } {
	const basePayload: MetricRangePayloadProps = {
		data: {
			result: [
				{
					metric: {},
					queryName: 'A',
					legend: 'Series A',
					values: [
						[1, '10'],
						[2, '20'],
					],
				},
			],
			resultType: 'matrix',
			newResult: {
				data: {
					result: [],
					resultType: 'matrix',
				},
			},
		},
	};

	return {
		data: {
			payload: {
				...basePayload,
				...payloadOverrides,
			},
		},
	};
}

type WidgetLike = {
	id: string;
	yAxisUnit: string;
	decimalPrecision: number;
	legendPosition: LegendPosition;
	mergeAllActiveQueries: boolean;
};

function createWidget(overrides: Partial<WidgetLike> = {}): WidgetLike {
	return {
		id: 'widget-id',
		yAxisUnit: 'ms',
		decimalPrecision: 2,
		legendPosition: LegendPosition.BOTTOM,
		mergeAllActiveQueries: false,
		...overrides,
	};
}

describe('HistogramPanel', () => {
	it('renders Histogram when container has dimensions', () => {
		const widget = (createWidget() as unknown) as Widgets;
		const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
			MetricQueryRangeSuccessResponse,
			Error
		>;

		render(
			<HistogramPanel
				panelMode={PanelMode.DASHBOARD_VIEW}
				widget={widget}
				queryResponse={queryResponse}
				isFullViewMode={false}
				onToggleModelHandler={jest.fn()}
				onDragSelect={jest.fn()}
			/>,
		);

		expect(screen.getByTestId('histogram-chart')).toBeInTheDocument();
	});

	it('passes legend position and other props to Histogram', () => {
		const widget = (createWidget({
			legendPosition: LegendPosition.RIGHT,
		}) as unknown) as Widgets;
		const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
			MetricQueryRangeSuccessResponse,
			Error
		>;

		render(
			<HistogramPanel
				panelMode={PanelMode.DASHBOARD_VIEW}
				widget={widget}
				queryResponse={queryResponse}
				isFullViewMode={false}
				onToggleModelHandler={jest.fn()}
				onDragSelect={jest.fn()}
			/>,
		);

		const propsJson = screen.getByTestId('histogram-props').textContent || '{}';
		const parsed = JSON.parse(propsJson);

		expect(parsed.legendPosition).toBe(LegendPosition.RIGHT);
		expect(parsed.yAxisUnit).toBe('ms');
		expect(parsed.decimalPrecision).toBe(2);
	});

	it('renders ChartManager in full view when queries are not merged', () => {
		const widget = (createWidget({
			mergeAllActiveQueries: false,
		}) as unknown) as Widgets;
		const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
			MetricQueryRangeSuccessResponse,
			Error
		>;

		render(
			<HistogramPanel
				panelMode={PanelMode.DASHBOARD_VIEW}
				widget={widget}
				queryResponse={queryResponse}
				isFullViewMode
				onToggleModelHandler={jest.fn()}
				onDragSelect={jest.fn()}
			/>,
		);

		expect(screen.getByTestId('chart-manager')).toBeInTheDocument();
	});

	it('does not render ChartManager when queries are merged', () => {
		const widget = (createWidget({
			mergeAllActiveQueries: true,
		}) as unknown) as Widgets;
		const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
			MetricQueryRangeSuccessResponse,
			Error
		>;

		render(
			<HistogramPanel
				panelMode={PanelMode.DASHBOARD_VIEW}
				widget={widget}
				queryResponse={queryResponse}
				isFullViewMode
				onToggleModelHandler={jest.fn()}
				onDragSelect={jest.fn()}
			/>,
		);

		expect(screen.queryByTestId('chart-manager')).not.toBeInTheDocument();
	});
});
@@ -1,231 +0,0 @@
import { histogramBucketSizes } from '@grafana/data';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { DEFAULT_BUCKET_COUNT } from 'container/PanelWrapper/constants';
import { getLegend } from 'lib/dashboard/getQueryResults';
import getLabelName from 'lib/getLabelName';
import { DrawStyle } from 'lib/uPlotV2/config/types';
import { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';
import { Widgets } from 'types/api/dashboard/getAll';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { AlignedData } from 'uplot';
import { incrRoundDn, roundDecimals } from 'utils/round';

import { PanelMode } from '../types';
import { buildBaseConfig } from '../utils/baseConfigBuilder';
import {
	buildHistogramBuckets,
	mergeAlignedDataTables,
	prependNullBinToFirstHistogramSeries,
	replaceUndefinedWithNullInAlignedData,
} from '../utils/histogram';

export interface PrepareHistogramPanelDataParams {
	apiResponse: MetricRangePayloadProps;
	bucketWidth?: number;
	bucketCount?: number;
	mergeAllActiveQueries?: boolean;
}

const BUCKET_OFFSET = 0;
const HIST_SORT = (a: number, b: number): number => a - b;

function extractNumericValues(
	result: MetricRangePayloadProps['data']['result'],
): number[] {
	const values: number[] = [];
	for (const item of result) {
		for (const [, valueStr] of item.values) {
			values.push(Number.parseFloat(valueStr) || 0);
		}
	}
	return values;
}

function computeSmallestDelta(sortedValues: number[]): number {
	if (sortedValues.length <= 1) {
		return 0;
	}
	let smallest = Infinity;
	for (let i = 1; i < sortedValues.length; i++) {
		const delta = sortedValues[i] - sortedValues[i - 1];
		if (delta > 0) {
			smallest = Math.min(smallest, delta);
		}
	}
	return smallest === Infinity ? 0 : smallest;
}

function selectBucketSize({
	range,
	bucketCount,
	smallestDelta,
	bucketWidthOverride,
}: {
	range: number;
	bucketCount: number;
	smallestDelta: number;
	bucketWidthOverride?: number;
}): number {
	if (bucketWidthOverride != null && bucketWidthOverride > 0) {
		return bucketWidthOverride;
	}
	const targetSize = range / bucketCount;
	for (const candidate of histogramBucketSizes) {
		if (targetSize < candidate && candidate >= smallestDelta) {
			return candidate;
		}
	}
	return 0;
}

function buildFrames(
	result: MetricRangePayloadProps['data']['result'],
	mergeAllActiveQueries: boolean,
): number[][] {
	const frames: number[][] = result.map((item) =>
		item.values.map(([, valueStr]) => Number.parseFloat(valueStr) || 0),
	);
	if (mergeAllActiveQueries && frames.length > 1) {
		const first = frames[0];
		for (let i = 1; i < frames.length; i++) {
			first.push(...frames[i]);
			frames[i] = [];
		}
	}
	return frames;
}

export function prepareHistogramPanelData({
	apiResponse,
	bucketWidth,
	bucketCount: bucketCountProp = DEFAULT_BUCKET_COUNT,
	mergeAllActiveQueries = false,
}: PrepareHistogramPanelDataParams): AlignedData {
	const bucketCount = bucketCountProp ?? DEFAULT_BUCKET_COUNT;
	const result = apiResponse.data.result;

	const seriesValues = extractNumericValues(result);
	if (seriesValues.length === 0) {
		return [[]];
	}

	const sorted = [...seriesValues].sort((a, b) => a - b);
	const min = sorted[0];
	const max = sorted[sorted.length - 1];
	const range = max - min;
	const smallestDelta = computeSmallestDelta(sorted);
	let bucketSize = selectBucketSize({
		range,
		bucketCount,
		smallestDelta,
		bucketWidthOverride: bucketWidth,
	});
	if (bucketSize <= 0) {
		bucketSize = range > 0 ? range / bucketCount : 1;
	}

	const getBucket = (v: number): number =>
		roundDecimals(incrRoundDn(v - BUCKET_OFFSET, bucketSize) + BUCKET_OFFSET, 9);

	const frames = buildFrames(result, mergeAllActiveQueries);
	const histogramsPerSeries: AlignedData[] = frames
		.filter((frame) => frame.length > 0)
		.map((frame) => buildHistogramBuckets(frame, getBucket, HIST_SORT));

	if (histogramsPerSeries.length === 0) {
		return [[]];
	}

	const mergedHistogramData = mergeAlignedDataTables(histogramsPerSeries);
	replaceUndefinedWithNullInAlignedData(mergedHistogramData);
	prependNullBinToFirstHistogramSeries(mergedHistogramData, bucketSize);
	return mergedHistogramData;
}

export function prepareHistogramPanelConfig({
	widget,
	apiResponse,
	panelMode,
	isDarkMode,
}: {
	widget: Widgets;
	apiResponse: MetricRangePayloadProps;
	panelMode: PanelMode;
	isDarkMode: boolean;
}): UPlotConfigBuilder {
	const builder = buildBaseConfig({
		widget,
		isDarkMode,
		apiResponse,
		panelMode,
		panelType: PANEL_TYPES.HISTOGRAM,
	});
	builder.setCursor({
		drag: {
			x: false,
			y: false,
			setScale: true,
		},
		focus: {
			prox: 1e3,
		},
	});

	builder.addScale({
		scaleKey: 'x',
		time: false,
		auto: true,
	});
	builder.addScale({
		scaleKey: 'y',
		time: false,
		auto: true,
		min: 0,
	});

	const currentQuery = widget.query;
	const mergeAllActiveQueries = widget?.mergeAllActiveQueries ?? false;

	// When merged, data has only one y column; add one series to match. Otherwise add one per result.
	if (mergeAllActiveQueries) {
		builder.addSeries({
			label: '',
			scaleKey: 'y',
			drawStyle: DrawStyle.Bar,
			panelType: PANEL_TYPES.HISTOGRAM,
			colorMapping: widget.customLegendColors ?? {},
			spanGaps: false,
			barWidthFactor: 1,
			pointSize: 5,
			lineColor: '#3f5ecc',
			fillColor: '#4E74F8',
			isDarkMode,
		});
	} else {
		apiResponse.data.result.forEach((series) => {
			const baseLabelName = getLabelName(
				series.metric,
				series.queryName || '', // query
				series.legend || '',
			);

			const label = currentQuery
				? getLegend(series, currentQuery, baseLabelName)
				: baseLabelName;

			builder.addSeries({
				label,
				scaleKey: 'y',
				drawStyle: DrawStyle.Bar,
				panelType: PANEL_TYPES.HISTOGRAM,
				colorMapping: widget.customLegendColors ?? {},
				spanGaps: false,
				barWidthFactor: 1,
				pointSize: 5,
				isDarkMode,
			});
		});
	}

	return builder;
}
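To make the bucket-size selection in `prepareHistogramPanelData` concrete, here is the same logic traced with made-up numbers (the candidate list stands in for `histogramBucketSizes` from `@grafana/data`, whose exact contents are not shown here):

```ts
const candidateSizes = [0.01, 0.1, 1, 2, 5, 10, 20, 50, 100]; // assumed

function pickBucketSize(
	range: number,
	bucketCount: number,
	smallestDelta: number,
): number {
	const targetSize = range / bucketCount;
	for (const candidate of candidateSizes) {
		// First "nice" size larger than the target that is not finer than
		// the smallest gap between adjacent values.
		if (targetSize < candidate && candidate >= smallestDelta) {
			return candidate;
		}
	}
	// Mirrors the bucketSize <= 0 fallback in prepareHistogramPanelData.
	return range > 0 ? range / bucketCount : 1;
}

// Values spanning 10..100 (range 90), 30 buckets requested, min gap 1:
// targetSize = 3, so the first candidate above 3 that is >= 1 is 5.
console.log(pickBucketSize(90, 30, 1)); // 5
```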
@@ -2,12 +2,12 @@ import { useEffect, useMemo, useRef, useState } from 'react';
import TimeSeries from 'container/DashboardContainer/visualization/charts/TimeSeries/TimeSeries';
import ChartManager from 'container/DashboardContainer/visualization/components/ChartManager/ChartManager';
import { usePanelContextMenu } from 'container/DashboardContainer/visualization/hooks/usePanelContextMenu';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { ContextMenu } from 'periscope/components/ContextMenu';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';
@@ -26,6 +26,7 @@ function TimeSeriesPanel(props: PanelWrapperProps): JSX.Element {
		isFullViewMode,
		onToggleModelHandler,
	} = props;
	const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();
	const graphRef = useRef<HTMLDivElement>(null);
	const [minTimeScale, setMinTimeScale] = useState<number>();
	const [maxTimeScale, setMaxTimeScale] = useState<number>();
@@ -34,7 +35,16 @@ function TimeSeriesPanel(props: PanelWrapperProps): JSX.Element {
	const isDarkMode = useIsDarkMode();
	const { timezone } = useTimezone();

	useScrollWidgetIntoView(widget.id, graphRef);
	useEffect(() => {
		if (toScrollWidgetId === widget.id) {
			graphRef.current?.scrollIntoView({
				behavior: 'smooth',
				block: 'center',
			});
			graphRef.current?.focus();
			setToScrollWidgetId('');
		}
	}, [toScrollWidgetId, setToScrollWidgetId, widget.id]);

	useEffect((): void => {
		const { startTime, endTime } = getTimeRange(queryResponse);
@@ -19,9 +19,9 @@ export interface BaseConfigBuilderProps {
	widget: Widgets;
	apiResponse: MetricRangePayloadProps;
	isDarkMode: boolean;
	onClick?: OnClickPluginOpts['onClick'];
	onDragSelect?: (startTime: number, endTime: number) => void;
	timezone?: Timezone;
	onClick: OnClickPluginOpts['onClick'];
	onDragSelect: (startTime: number, endTime: number) => void;
	timezone: Timezone;
	panelMode: PanelMode;
	panelType: PANEL_TYPES;
	minTimeScale?: number;
@@ -40,10 +40,8 @@ export function buildBaseConfig({
	minTimeScale,
	maxTimeScale,
}: BaseConfigBuilderProps): UPlotConfigBuilder {
	const tzDate = timezone
		? (timestamp: number): Date =>
				uPlot.tzDate(new Date(timestamp * 1e3), timezone.value)
		: undefined;
	const tzDate = (timestamp: number): Date =>
		uPlot.tzDate(new Date(timestamp * 1e3), timezone.value);

	const builder = new UPlotConfigBuilder({
		onDragSelect,
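The now-unconditional `tzDate` converts uPlot's epoch-second x-values into zoned `Date`s. A standalone illustration ('America/New_York' is just an example zone):

```ts
import uPlot from 'uplot';

// uPlot x-values are epoch seconds; Date wants milliseconds, hence * 1e3.
const tzDate = (timestamp: number): Date =>
	uPlot.tzDate(new Date(timestamp * 1e3), 'America/New_York');

// 1700000000s = 2023-11-14T22:13:20Z, rendered as US Eastern wall-clock time.
console.log(tzDate(1700000000));
```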
@@ -1,225 +0,0 @@
import {
	NULL_EXPAND,
	NULL_REMOVE,
	NULL_RETAIN,
} from 'container/PanelWrapper/constants';
import { AlignedData } from 'uplot';

/**
 * Expands contiguous runs of `null` values to the left and right of their
 * original positions so that visual gaps in the series are continuous.
 *
 * This is used when `NULL_EXPAND` mode is selected while joining series.
 */
function propagateNullsAcrossNeighbors(
	seriesValues: Array<number | null>,
	nullIndices: number[],
	alignedLength: number,
): void {
	for (
		let i = 0, currentIndex, lastExpandedNullIndex = -1;
		i < nullIndices.length;
		i++
	) {
		const nullIndex = nullIndices[i];

		if (nullIndex > lastExpandedNullIndex) {
			// expand left until we hit a non-null value
			currentIndex = nullIndex - 1;
			while (currentIndex >= 0 && seriesValues[currentIndex] == null) {
				seriesValues[currentIndex--] = null;
			}

			// expand right until we hit a non-null value
			currentIndex = nullIndex + 1;
			while (currentIndex < alignedLength && seriesValues[currentIndex] == null) {
				seriesValues[(lastExpandedNullIndex = currentIndex++)] = null;
			}
		}
	}
}
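`propagateNullsAcrossNeighbors` is module-private, but its effect is easy to show; the call below is illustrative only:

```ts
// Aligned series with `undefined` holes (no sample at that x) around an
// explicit null recorded at index 2.
const series: Array<number | null | undefined> = [1, undefined, null, undefined, 5];

// The `== null` checks match undefined too, so the gap becomes contiguous nulls.
propagateNullsAcrossNeighbors(series as Array<number | null>, [2], series.length);
// series -> [1, null, null, null, 5]
```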
/**
 * Merges multiple uPlot `AlignedData` tables into a single aligned table.
 *
 * - Merges and sorts all distinct x-values from each table.
 * - Re-aligns every series onto the merged x-axis.
 * - Applies per-series null handling (`NULL_REMOVE`, `NULL_RETAIN`, `NULL_EXPAND`).
 */
/* eslint-disable sonarjs/cognitive-complexity */
export function mergeAlignedDataTables(
	alignedTables: AlignedData[],
	nullModes?: number[][],
): AlignedData {
	let mergedXValues: Set<number>;

	// eslint-disable-next-line prefer-const
	mergedXValues = new Set();

	// Collect all unique x-values from every table.
	for (let tableIndex = 0; tableIndex < alignedTables.length; tableIndex++) {
		const table = alignedTables[tableIndex];
		const xValues = table[0];
		const xLength = xValues.length;

		for (let i = 0; i < xLength; i++) {
			mergedXValues.add(xValues[i]);
		}
	}

	// Sorted, merged x-axis used by the final result.
	const alignedData: (number | null | undefined)[][] = [
		Array.from(mergedXValues).sort((a, b) => a - b),
	];

	const alignedLength = alignedData[0].length;

	// Map from x-value to its index in the merged x-axis.
	const xValueToIndexMap = new Map<number, number>();

	for (let i = 0; i < alignedLength; i++) {
		xValueToIndexMap.set(alignedData[0][i] as number, i);
	}

	// Re-align all series from all tables onto the merged x-axis.
	for (let tableIndex = 0; tableIndex < alignedTables.length; tableIndex++) {
		const table = alignedTables[tableIndex];
		const xValues = table[0];

		for (let seriesIndex = 1; seriesIndex < table.length; seriesIndex++) {
			const seriesValues = table[seriesIndex];

			const alignedSeriesValues = Array(alignedLength).fill(undefined);

			const nullHandlingMode = nullModes
				? nullModes[tableIndex][seriesIndex]
				: NULL_RETAIN;

			const nullIndices: number[] = [];

			for (let i = 0; i < seriesValues.length; i++) {
				const valueAtPoint = seriesValues[i];
				const alignedIndex = xValueToIndexMap.get(xValues[i]);

				if (alignedIndex == null) {
					continue;
				}

				if (valueAtPoint === null) {
					if (nullHandlingMode !== NULL_REMOVE) {
						alignedSeriesValues[alignedIndex] = valueAtPoint;

						if (nullHandlingMode === NULL_EXPAND) {
							nullIndices.push(alignedIndex);
						}
					}
				} else {
					alignedSeriesValues[alignedIndex] = valueAtPoint;
				}
			}

			// Optionally expand nulls to visually preserve gaps.
			propagateNullsAcrossNeighbors(
				alignedSeriesValues,
				nullIndices,
				alignedLength,
			);

			alignedData.push(alignedSeriesValues);
		}
	}

	return alignedData as AlignedData;
}
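An input/output sketch of `mergeAlignedDataTables` with made-up values:

```ts
import uPlot from 'uplot';

const tableA = [[1, 2, 3], [10, 20, 30]] as uPlot.AlignedData;
const tableB = [[2, 4], [5, 7]] as uPlot.AlignedData;

const merged = mergeAlignedDataTables([tableA, tableB]);
// merged[0] (x-axis):  [1, 2, 3, 4]
// series from tableA:  [10, 20, 30, undefined]    (no sample at x = 4)
// series from tableB:  [undefined, 5, undefined, 7]
// replaceUndefinedWithNullInAlignedData (below) later converts the
// undefined holes to nulls so uPlot draws them as gaps.
```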
/**
 * Builds histogram buckets from raw values.
 *
 * - Each value is mapped into a bucket via `getBucketForValue`.
 * - Counts how many values fall into each bucket.
 * - Optionally sorts buckets using the provided comparator.
 */
export function buildHistogramBuckets(
	values: number[],
	getBucketForValue: (value: number) => number,
	sortBuckets?: ((a: number, b: number) => number) | null,
): AlignedData {
	const bucketMap = new Map<number, { value: number; count: number }>();

	for (let i = 0; i < values.length; i++) {
		let value = values[i];

		if (value != null) {
			value = getBucketForValue(value);
		}

		const bucket = bucketMap.get(value);

		if (bucket) {
			bucket.count++;
		} else {
			bucketMap.set(value, { value, count: 1 });
		}
	}

	const buckets = [...bucketMap.values()];

	// eslint-disable-next-line @typescript-eslint/no-unused-expressions
	sortBuckets && buckets.sort((a, b) => sortBuckets(a.value, b.value));

	const bucketValues = Array(buckets.length);
	const bucketCounts = Array(buckets.length);

	for (let i = 0; i < buckets.length; i++) {
		bucketValues[i] = buckets[i].value;
		bucketCounts[i] = buckets[i].count;
	}

	return [bucketValues, bucketCounts];
}
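A small worked example of `buildHistogramBuckets` with a bucket size of 10:

```ts
const values = [3, 7, 12, 14, 27];
const floorToTen = (v: number): number => Math.floor(v / 10) * 10;

const [bucketStarts, counts] = buildHistogramBuckets(
	values,
	floorToTen,
	(a, b) => a - b,
);
// bucketStarts: [0, 10, 20]
// counts:       [2, 2, 1]    (3, 7 -> 0; 12, 14 -> 10; 27 -> 20)
```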
/**
 * Mutates an `AlignedData` instance, replacing all `undefined` entries
 * with explicit `null` values so uPlot treats them as gaps.
 */
export function replaceUndefinedWithNullInAlignedData(
	data: AlignedData,
): AlignedData {
	const seriesList = data as (number | null | undefined)[][];
	for (let seriesIndex = 0; seriesIndex < seriesList.length; seriesIndex++) {
		for (
			let pointIndex = 0;
			pointIndex < seriesList[seriesIndex].length;
			pointIndex++
		) {
			if (seriesList[seriesIndex][pointIndex] === undefined) {
				seriesList[seriesIndex][pointIndex] = null;
			}
		}
	}
	return data;
}
/**
 * Ensures the first histogram series has a leading "empty" bin so that
 * all series line up visually when rendered as bars.
 *
 * - Prepends a new x-value (first x - `bucketSize`) to the first series.
 * - Prepends `null` to all subsequent series at the same index.
 */
export function prependNullBinToFirstHistogramSeries(
	alignedData: AlignedData,
	bucketSize: number,
): void {
	const seriesList = alignedData as (number | null)[][];
	if (
		seriesList.length > 0 &&
		seriesList[0].length > 0 &&
		seriesList[0][0] !== null
	) {
		seriesList[0].unshift(seriesList[0][0] - bucketSize);
		for (let seriesIndex = 1; seriesIndex < seriesList.length; seriesIndex++) {
			seriesList[seriesIndex].unshift(null);
		}
	}
}
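And the effect of `prependNullBinToFirstHistogramSeries`, again with made-up buckets:

```ts
import uPlot from 'uplot';

const data = [
	[10, 20, 30], // bucket starts
	[2, 5, 1], // counts
] as unknown as (number | null)[][];

prependNullBinToFirstHistogramSeries((data as unknown) as uPlot.AlignedData, 10);
// x:      [0, 10, 20, 30]   (synthetic empty bin prepended: 10 - bucketSize)
// counts: [null, 2, 5, 1]   (null keeps uPlot from drawing a bar there)
```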
@@ -1,8 +1,8 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import BarPanel from 'container/DashboardContainer/visualization/panels/BarPanel/BarPanel';
import HistogramPanel from 'container/DashboardContainer/visualization/panels/HistogramPanel/HistogramPanel';

import TimeSeriesPanel from '../DashboardContainer/visualization/panels/TimeSeriesPanel/TimeSeriesPanel';
import HistogramPanelWrapper from './HistogramPanelWrapper';
import ListPanelWrapper from './ListPanelWrapper';
import PiePanelWrapper from './PiePanelWrapper';
import TablePanelWrapper from './TablePanelWrapper';
@@ -17,7 +17,7 @@ export const PanelTypeVsPanelWrapper = {
	[PANEL_TYPES.EMPTY_WIDGET]: null,
	[PANEL_TYPES.PIE]: PiePanelWrapper,
	[PANEL_TYPES.BAR]: BarPanel,
	[PANEL_TYPES.HISTOGRAM]: HistogramPanel,
	[PANEL_TYPES.HISTOGRAM]: HistogramPanelWrapper,
};

export const DEFAULT_BUCKET_COUNT = 30;
@@ -1,8 +0,0 @@
import { HistogramTooltipProps } from '../types';
import Tooltip from './Tooltip';

export default function HistogramTooltip(
	props: HistogramTooltipProps,
): JSX.Element {
	return <Tooltip {...props} showTooltipHeader={false} />;
}
@@ -16,16 +16,12 @@ export default function Tooltip({
	uPlotInstance,
	timezone,
	content,
	showTooltipHeader = true,
}: TooltipProps): JSX.Element {
	const isDarkMode = useIsDarkMode();

	const tooltipContent = content ?? [];

	const headerTitle = useMemo(() => {
		if (!showTooltipHeader) {
			return null;
		}
		const data = uPlotInstance.data;
		const cursorIdx = uPlotInstance.cursor.idx;
		if (cursorIdx == null) {
@@ -34,12 +30,7 @@ export default function Tooltip({
		return dayjs(data[0][cursorIdx] * 1000)
			.tz(timezone)
			.format(DATE_TIME_FORMATS.MONTH_DATETIME_SECONDS);
	}, [
		timezone,
		uPlotInstance.data,
		uPlotInstance.cursor.idx,
		showTooltipHeader,
	]);
	}, [timezone, uPlotInstance.data, uPlotInstance.cursor.idx]);

	return (
		<div
@@ -48,11 +39,9 @@ export default function Tooltip({
				isDarkMode ? 'darkMode' : 'lightMode',
			)}
		>
			{showTooltipHeader && (
				<div className="uplot-tooltip-header">
					<span>{headerTitle}</span>
				</div>
			)}
			<div className="uplot-tooltip-header">
				<span>{headerTitle}</span>
			</div>
			<div
				style={{
					height: Math.min(
@@ -60,7 +60,6 @@ export interface TooltipRenderArgs {
}

export interface BaseTooltipProps {
	showTooltipHeader?: boolean;
	timezone: string;
	yAxisUnit?: string;
	decimalPrecision?: PrecisionOption;
@@ -75,14 +74,7 @@ export interface BarTooltipProps extends BaseTooltipProps, TooltipRenderArgs {
	isStackedBarChart?: boolean;
}

export interface HistogramTooltipProps
	extends BaseTooltipProps,
		TooltipRenderArgs {}

export type TooltipProps =
	| TimeSeriesTooltipProps
	| BarTooltipProps
	| HistogramTooltipProps;
export type TooltipProps = TimeSeriesTooltipProps | BarTooltipProps;

export enum LegendPosition {
	BOTTOM = 'bottom',
@@ -387,7 +387,7 @@ export class UPlotConfigBuilder extends ConfigBuilder<
	} = this.getVisibilityResolutionState();

	config.series = [
		{ value: (): string => '', label: 'Timestamp' }, // Base series for timestamp
		{ value: (): string => '' }, // Base series for timestamp
		...this.series.map((s) => {
			const series = s.getConfig();
			// Stored visibility[0] is x-axis/time; data series start at visibility[1]
@@ -49,7 +49,7 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
	}: {
		resolvedLineColor: string;
	}): Partial<Series> {
		const { lineWidth, lineStyle, lineCap, fillColor } = this.props;
		const { lineWidth, lineStyle, lineCap } = this.props;
		const lineConfig: Partial<Series> = {
			stroke: resolvedLineColor,
			width: lineWidth ?? 2,
@@ -63,12 +63,8 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
			lineConfig.cap = lineCap;
		}

		if (fillColor) {
			lineConfig.fill = fillColor;
		} else if (this.props.panelType === PANEL_TYPES.BAR) {
		if (this.props.panelType === PANEL_TYPES.BAR) {
			lineConfig.fill = resolvedLineColor;
		} else if (this.props.panelType === PANEL_TYPES.HISTOGRAM) {
			lineConfig.fill = `${resolvedLineColor}40`;
		}

		return lineConfig;
@@ -151,8 +147,6 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
			pointsConfig.show = false;
		} else if (showPoints === VisibilityMode.Always) {
			pointsConfig.show = true;
		} else {
			pointsConfig.show = false; // default to hidden
		}

		return pointsConfig;

@@ -175,7 +175,6 @@ export interface SeriesProps extends LineConfig, PointsConfig, BarConfig {
	pointsBuilder?: Series.Points.Show;
	show?: boolean;
	spanGaps?: boolean;
	fillColor?: string;
	isDarkMode?: boolean;
	stepInterval?: number;
}
@@ -11,6 +11,22 @@ import { Threshold } from '../hooks/types';
import { findMinMaxThresholdValues } from './threshold';
import { LogScaleLimits, RangeFunctionParams } from './types';

/**
 * Rounds a number down to the nearest multiple of incr.
 * Used for linear scale min so the axis starts on a clean tick.
 */
export function incrRoundDn(num: number, incr: number): number {
	return Math.floor(num / incr) * incr;
}

/**
 * Rounds a number up to the nearest multiple of incr.
 * Used for linear scale max so the axis ends on a clean tick.
 */
export function incrRoundUp(num: number, incr: number): number {
	return Math.ceil(num / incr) * incr;
}

/**
 * Snaps min/max/softMin/softMax to valid log-scale values (powers of logBase).
 * Only applies when distribution is logarithmic; otherwise returns limits unchanged.
@@ -197,6 +213,25 @@ function getLogScaleRange(
	);
}

/**
 * Snaps linear scale min down and max up to whole numbers so axis bounds are clean.
 */
function roundLinearRange(minMax: Range.MinMax): Range.MinMax {
	const [currentMin, currentMax] = minMax;
	let roundedMin = currentMin;
	let roundedMax = currentMax;

	if (roundedMin != null) {
		roundedMin = incrRoundDn(roundedMin, 1);
	}

	if (roundedMax != null) {
		roundedMax = incrRoundUp(roundedMax, 1);
	}

	return [roundedMin, roundedMax];
}

/**
 * Snaps log-scale [min, max] to exact powers of logBase (nearest magnitude below/above).
 * If min and max would be equal after snapping, max is increased by one magnitude so the range is valid.
@@ -295,6 +330,7 @@ export function createRangeFunction(

	if (scale.distr === 1) {
		minMax = getLinearScaleRange(minMax, params, dataMin, dataMax);
		minMax = roundLinearRange(minMax);
	} else if (scale.distr === 3) {
		minMax = getLogScaleRange(minMax, params, dataMin, dataMax, logBase);
		const logFn = scale.log === 2 ? Math.log2 : Math.log10;
@@ -3,33 +3,3 @@
 * Example: 1.5 → 2, 1.49 → 1
 */
export const roundHalfUp = (value: number): number => Math.floor(value + 0.5);

/**
 * Rounds a number down to the nearest multiple of incr.
 * Used for linear scale min so the axis starts on a clean tick.
 */
export function incrRoundDn(num: number, incr: number): number {
	return Math.floor(num / incr) * incr;
}

/**
 * Rounds a number up to the nearest multiple of incr.
 * Used for linear scale max so the axis ends on a clean tick.
 */
export function incrRoundUp(num: number, incr: number): number {
	return Math.ceil(num / incr) * incr;
}

/**
 * Rounds a number to the nearest multiple of 10^dec.
 * Used for decimal precision.
 */
export function roundDecimals(val: number, dec = 0): number {
	if (Number.isInteger(val)) {
		return val;
	}

	const p = 10 ** dec;
	const n = val * p * (1 + Number.EPSILON);
	return Math.round(n) / p;
}
@@ -3,8 +3,9 @@ package flagger
import "github.com/SigNoz/signoz/pkg/types/featuretypes"

var (
	FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
	FeatureKafkaSpanEval  = featuretypes.MustNewName("kafka_span_eval")
	FeatureUseSpanMetrics       = featuretypes.MustNewName("use_span_metrics")
	FeatureInterpolationEnabled = featuretypes.MustNewName("interpolation_enabled")
	FeatureKafkaSpanEval        = featuretypes.MustNewName("kafka_span_eval")
)

func MustNewRegistry() featuretypes.Registry {
@@ -17,6 +18,14 @@ func MustNewRegistry() featuretypes.Registry {
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
		&featuretypes.Feature{
			Name:           FeatureInterpolationEnabled,
			Kind:           featuretypes.KindBoolean,
			Stage:          featuretypes.StageExperimental,
			Description:    "Controls whether to enable interpolation",
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
		&featuretypes.Feature{
			Name: FeatureKafkaSpanEval,
			Kind: featuretypes.KindBoolean,
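For orientation, here is a minimal sketch of how the new boolean flag is read at query time; it mirrors the `BooleanOrEmpty` call that appears later in `buildTemporalAggCumulativeOrUnspecified`. The `flagger.Flagger` interface name and the `orgID` parameter are assumptions made for illustration, not code from this change.

// Sketch only: reading the experimental interpolation flag.
// The registry above defaults the variant to "disabled", so this
// returns false unless the flag is explicitly enabled.
func interpolationEnabled(ctx context.Context, f flagger.Flagger, orgID valuer.UUID) bool {
	return f.BooleanOrEmpty(
		ctx,
		flagger.FeatureInterpolationEnabled,
		featuretypes.NewFlaggerEvaluationContext(orgID),
	)
}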
@@ -205,7 +205,7 @@ func AdjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemet
			key.Indexes = matchingKey.Indexes
			key.Materialized = matchingKey.Materialized
			key.JSONPlan = matchingKey.JSONPlan

			return actions
		} else {
			// multiple matching keys, set materialized only if all the keys are materialized
@@ -483,22 +483,6 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
	value1 := v.Visit(values[0])
	value2 := v.Visit(values[1])

	switch value1.(type) {
	case float64:
		if _, ok := value2.(float64); !ok {
			v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected number for both operands", keys[0].Name))
			return ""
		}
	case string:
		if _, ok := value2.(string); !ok {
			v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected string for both operands", keys[0].Name))
			return ""
		}
	default:
		v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: operands must be number or string", keys[0].Name))
		return ""
	}

	var conds []string
	for _, key := range keys {
		condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs)
@@ -871,7 +855,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
	// 1. either the user meant `key` (this is already handled above in fieldKeysForName)
	// 2. or the user meant `attribute.key`; we look up all possible field keys with the name 'attribute.key' in the map

	// Note:
	// If the user only wants to search `attribute.key`, they have to use `attribute.attribute.key`
	// If the user only wants to search `key`, they have to use `key`
	// If the user wants to search both, they can use `attribute.key` and we will resolve the ambiguity
@@ -375,6 +375,13 @@ func mergeAndEnsureBackwardCompatibility(ctx context.Context, logger *slog.Logge
		config.Flagger.Config.Boolean[flagger.FeatureKafkaSpanEval.String()] = os.Getenv("KAFKA_SPAN_EVAL") == "true"
	}

	if os.Getenv("INTERPOLATION_ENABLED") != "" {
		logger.WarnContext(ctx, "[Deprecated] env INTERPOLATION_ENABLED is deprecated and scheduled for removal. Please use SIGNOZ_FLAGGER_CONFIG_BOOLEAN_INTERPOLATION__ENABLED instead.")
		if config.Flagger.Config.Boolean == nil {
			config.Flagger.Config.Boolean = make(map[string]bool)
		}
		config.Flagger.Config.Boolean[flagger.FeatureInterpolationEnabled.String()] = os.Getenv("INTERPOLATION_ENABLED") == "true"
	}
}

func (config Config) Collect(_ context.Context, _ valuer.UUID) (map[string]any, error) {
@@ -5,7 +5,6 @@ import (
	"fmt"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
@@ -74,7 +73,7 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
		cteArgs      [][]any
	)

	if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
	if b.metricsStatementBuilder.CanShortCircuitDelta(query) {
		// spatial_aggregation_cte directly for certain delta queries
		if frag, args, err := b.buildTemporalAggDeltaFastPath(ctx, start, end, query, keys, variables); err != nil {
			return nil, err
@@ -92,9 +91,8 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
	}

	// spatial_aggregation_cte
	if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
		return nil, err
	} else if frag != "" {
	frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
	if frag != "" {
		cteFragments = append(cteFragments, frag)
		cteArgs = append(cteArgs, args)
	}
@@ -124,16 +122,13 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	for _, g := range query.GroupBy {
		col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
		if err != nil {
			return "", nil, err
			return "", []any{}, err
		}
		sb.SelectMore(col)
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}
@@ -155,7 +150,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
			Variables: variables,
		}, start, end)
		if err != nil {
			return "", nil, err
			return "", []any{}, err
		}
	}
	if filterWhere != nil {
@@ -213,11 +208,8 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
		query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}
@@ -286,10 +278,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))

	baseSb.From(fmt.Sprintf("%s.%s AS points", DBName, tbl))
@@ -326,23 +315,25 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(telemetrymetrics.RateWithoutNegative, start, start)
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.RateTmpl))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil

	case metrictypes.TimeAggregationIncrease:
		incExpr := fmt.Sprintf(telemetrymetrics.IncreaseWithoutNegative, start, start)
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.IncreaseTmpl))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -357,15 +348,7 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
	_ uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any, error) {

	if query.Aggregations[0].SpaceAggregation.IsZero() {
		return "", nil, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`]",
		)
	}
) (string, []any) {
	sb := sqlbuilder.NewSelectBuilder()

	sb.Select("ts")
@@ -382,5 +365,5 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
}
@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,
@@ -3,7 +3,6 @@ package telemetrymeter
import (
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

@@ -64,7 +63,7 @@ func AggregationColumnForSamplesTable(
	temporality metrictypes.Temporality,
	timeAggregation metrictypes.TimeAggregation,
	tableHints *metrictypes.MetricTableHints,
) (string, error) {
) string {
	tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
	var aggregationColumn string
	switch temporality {
@@ -191,13 +190,5 @@ func AggregationColumnForSamplesTable(
		}

	}

	if aggregationColumn == "" {
		return "", errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
		)
	}
	return aggregationColumn, nil
	return aggregationColumn
}
@@ -29,7 +29,13 @@ func (c *conditionBuilder) conditionFor(
	sb *sqlbuilder.SelectBuilder,
) (string, error) {

	if operator.IsStringSearchOperator() {
	switch operator {
	case qbtypes.FilterOperatorContains,
		qbtypes.FilterOperatorNotContains,
		qbtypes.FilterOperatorILike,
		qbtypes.FilterOperatorNotILike,
		qbtypes.FilterOperatorLike,
		qbtypes.FilterOperatorNotLike:
		value = querybuilder.FormatValueForContains(value)
	}

@@ -38,18 +44,6 @@ func (c *conditionBuilder) conditionFor(
		return "", err
	}

	// TODO(srikanthccv): use the same data type collision handling when metrics schemas are updated
	switch v := value.(type) {
	case float64:
		tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
	case []any:
		if len(v) > 0 && (operator == qbtypes.FilterOperatorBetween || operator == qbtypes.FilterOperatorNotBetween) {
			if _, ok := v[0].(float64); ok {
				tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
			}
		}
	}

	switch operator {
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
@@ -5,27 +5,67 @@ import (
	"fmt"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/huandu/go-sqlbuilder"
	"golang.org/x/exp/slices"
)

const (
	RateTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))`
	RateWithoutNegative     = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
	IncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, ((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`

	IncreaseTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value, per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)`
	RateWithoutNegativeMultiTemporality     = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
	IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s, ((%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
	OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`

	RateMultiTemporalityTmpl = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1) OVER rate_window), (%s - lagInFrame(%s, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))) AS per_series_value`
	RateWithInterpolation = `
	CASE
		WHEN row_number() OVER rate_window = 1 THEN
			-- First row: try to interpolate using next value
			CASE
				WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
					-- Assume linear growth to next point
					(leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
					(leadInFrame(ts, 1) OVER rate_window - ts)
				ELSE
					0 -- No next value either, can't interpolate
			END
		WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
			-- Counter reset detected
			per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window)
		ELSE
			-- Normal case: calculate rate
			(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) /
			(ts - lagInFrame(ts, 1) OVER rate_window)
	END`

	IncreaseMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s, (%s - lagInFrame(%s, 1) OVER rate_window))) AS per_series_value`

	OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
	IncreaseWithInterpolation = `
	CASE
		WHEN row_number() OVER rate_window = 1 THEN
			-- First row: try to interpolate using next value
			CASE
				WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
					-- Calculate the interpolated increase for this interval
					((leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
					(leadInFrame(ts, 1) OVER rate_window - ts)) *
					(leadInFrame(ts, 1) OVER rate_window - ts)
				ELSE
					0 -- No next value either, can't interpolate
			END
		WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
			-- Counter reset detected: the increase is the current value
			per_series_value
		ELSE
			-- Normal case: calculate increase
			(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)
	END`
)
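To make the semantics of `RateWithoutNegative` above concrete, here is a small self-contained Go sketch of the same per-window arithmetic: the lagged value defaults to 0, the lagged timestamp defaults to the query start, and a counter reset (the cumulative value dropping) is rated from the restarted value alone. It only illustrates what the SQL computes; it is not code from this change.

package main

import "fmt"

// point mirrors one row inside rate_window: a timestamp (seconds) and the
// cumulative per-series value at that timestamp.
type point struct {
	ts    int64
	value float64
}

// rateWithoutNegative mirrors the SQL template: lagInFrame(..., 1, 0) makes
// the first row's previous value 0, the previous ts defaults to the query
// start, and a negative delta (counter reset) is treated as a restart from 0.
func rateWithoutNegative(points []point, startTs int64) []float64 {
	rates := make([]float64, 0, len(points))
	prevTs, prevVal := startTs, 0.0
	for _, p := range points {
		dt := float64(p.ts - prevTs)
		if p.value-prevVal < 0 {
			// Counter reset: rate is computed from the restarted value alone.
			rates = append(rates, p.value/dt)
		} else {
			rates = append(rates, (p.value-prevVal)/dt)
		}
		prevTs, prevVal = p.ts, p.value
	}
	return rates
}

func main() {
	// Counter goes 10 -> 40 -> 5 (reset) in 30s steps; window starts at t=0.
	pts := []point{{30, 10}, {60, 40}, {90, 5}}
	fmt.Println(rateWithoutNegative(pts, 0)) // [0.333... 1 0.1666...]
}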
type MetricQueryStatementBuilder struct {
@@ -107,6 +147,54 @@ func (b *MetricQueryStatementBuilder) Build(
	return b.buildPipelineStatement(ctx, start, end, query, keys, variables)
}

// Fast path (no fingerprint grouping).
// CanShortCircuitDelta reports whether the optimized query can be used
// for the given query.
// It avoids the GROUP BY fingerprint step, improving performance for
// certain queries.
// Cases where we can short-circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
//    - rate = sum(value)/step, increase = sum(value); the sum of sums is the same as the sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
//    - the sum of sums is the same as the sum of all values
//
// 3. time aggregation = min and space aggregation = min
//    - the min of mins is the same as the min of all values
//
// 4. time aggregation = max and space aggregation = max
//    - the max of maxes is the same as the max of all values
//
// 5. special case: for exponential histograms there is no need for per-series/fingerprint aggregation;
//    we can use the quantilesDDMerge function directly
//
// All of this holds only for delta metrics.
func (b *MetricQueryStatementBuilder) CanShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
	if q.Aggregations[0].Temporality != metrictypes.Delta {
		return false
	}

	ta := q.Aggregations[0].TimeAggregation
	sa := q.Aggregations[0].SpaceAggregation

	if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) && sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
		return true
	}
	if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
		return true
	}
	if q.Aggregations[0].Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
		return true
	}
	return false
}
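As a hedged illustration of the rules above: a delta query whose time and space aggregations are both sum takes the fast path, while any non-delta temporality never does. The query literal below is a minimal sketch, not a test from this change.

// Sketch: this literal satisfies case 2 above, so the per-fingerprint
// temporal CTE can be skipped.
q := qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
	Aggregations: []qbtypes.MetricAggregation{{
		Temporality:      metrictypes.Delta,
		TimeAggregation:  metrictypes.TimeAggregationSum,
		SpaceAggregation: metrictypes.SpaceAggregationSum,
	}},
}
_ = b.CanShortCircuitDelta(q) // true; flipping Temporality to Cumulative yields false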
func (b *MetricQueryStatementBuilder) buildPipelineStatement(
	ctx context.Context,
	start, end uint64,
@@ -168,11 +256,10 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
		return nil, err
	}

	if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
	if b.CanShortCircuitDelta(query) {
		// spatial_aggregation_cte directly for certain delta queries
		if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs); err != nil {
			return nil, err
		} else if frag != "" {
		frag, args := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs)
		if frag != "" {
			cteFragments = append(cteFragments, frag)
			cteArgs = append(cteArgs, args)
		}
@@ -186,9 +273,8 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
	}

	// spatial_aggregation_cte
	if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
		return nil, err
	} else if frag != "" {
	frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
	if frag != "" {
		cteFragments = append(cteFragments, frag)
		cteArgs = append(cteArgs, args)
	}
@@ -208,7 +294,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	timeSeriesCTE string,
	timeSeriesCTEArgs []any,
) (string, []any, error) {
) (string, []any) {
	stepSec := int64(query.StepInterval.Seconds())

	sb := sqlbuilder.NewSelectBuilder()
@@ -221,15 +307,11 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol, err := AggregationColumnForSamplesTable(
	aggCol := AggregationColumnForSamplesTable(
		start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
		query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
	)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}

@@ -252,7 +334,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
}

func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
@@ -355,12 +437,8 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}

@@ -383,7 +461,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
}

func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	_ context.Context,
	ctx context.Context,
	start, end uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	timeSeriesCTE string,
@@ -401,10 +479,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
		baseSb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
@@ -421,25 +496,36 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(

	innerQuery, innerArgs := baseSb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)

	// ! TODO (balanikaran) Get OrgID via function parameter instead of valuer.GenerateUUID()
	interpolationEnabled := b.flagger.BooleanOrEmpty(ctx, flagger.FeatureInterpolationEnabled, featuretypes.NewFlaggerEvaluationContext(valuer.GenerateUUID()))

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(RateWithoutNegative, start, start)
		if interpolationEnabled {
			rateExpr = RateWithInterpolation
		}
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", RateTmpl))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil

	case metrictypes.TimeAggregationIncrease:
		incExpr := fmt.Sprintf(IncreaseWithoutNegative, start, start)
		if interpolationEnabled {
			incExpr = IncreaseWithInterpolation
		}
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", IncreaseTmpl))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -448,6 +534,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	}
}

// Because rate interpolation is not enabled anywhere yet (there are gaps in the logic w.r.t. cache handling), it has not been considered for the multi-temporality path.
func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
	_ context.Context,
	start, end uint64,
@@ -466,32 +553,18 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggForDeltaTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggForCumulativeTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggForDeltaTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggForCumulativeTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggForDeltaTemporality = fmt.Sprintf("%s/%d", aggForDeltaTemporality, stepSec)
	}

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(RateMultiTemporalityTmpl,
			aggForDeltaTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality,
		)
		rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, aggForCumulativeTemporality, aggForCumulativeTemporality, start)
		sb.SelectMore(rateExpr)
	case metrictypes.TimeAggregationIncrease:
		increaseExpr := fmt.Sprintf(IncreaseMultiTemporality,
			aggForDeltaTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality,
		)
		increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, start)
		sb.SelectMore(increaseExpr)
	default:
		expr := fmt.Sprintf(OthersMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality)
@@ -519,14 +592,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
	_ uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any, error) {
	if query.Aggregations[0].SpaceAggregation.IsZero() {
		return "", nil, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
		)
	}
) (string, []any) {
	sb := sqlbuilder.NewSelectBuilder()

	sb.Select("ts")
@@ -543,7 +609,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
}

func (b *MetricQueryStatementBuilder) BuildFinalSelect(
@@ -575,7 +641,9 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
		quantile,
	))
	sb.From("__spatial_aggregation_cte")
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
	for _, g := range query.GroupBy {
		sb.GroupBy(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}
	sb.GroupBy("ts")
	if query.Having != nil && query.Having.Expression != "" {
		rewriter := querybuilder.NewHavingExpressionRewriter()
@@ -591,8 +659,6 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
			sb.Where(rewrittenExpr)
		}
	}
	sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
	sb.OrderBy("ts")

	q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
@@ -50,7 +50,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -83,7 +83,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -116,7 +116,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -148,7 +148,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -181,7 +181,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -210,7 +210,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,

@@ -3,7 +3,6 @@ package telemetrymetrics
import (
"time"

"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

@@ -169,7 +168,7 @@ func AggregationColumnForSamplesTable(
temporality metrictypes.Temporality,
timeAggregation metrictypes.TimeAggregation,
tableHints *metrictypes.MetricTableHints,
) (string, error) {
) string {
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
var aggregationColumn string
switch temporality {
@@ -299,12 +298,5 @@ func AggregationColumnForSamplesTable(
}
}
}
if aggregationColumn == "" {
return "", errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
)
}
return aggregationColumn, nil
return aggregationColumn
}

@@ -35,7 +35,13 @@ func (c *conditionBuilder) conditionFor(
sb *sqlbuilder.SelectBuilder,
) (string, error) {

if operator.IsStringSearchOperator() {
switch operator {
case qbtypes.FilterOperatorContains,
qbtypes.FilterOperatorNotContains,
qbtypes.FilterOperatorILike,
qbtypes.FilterOperatorNotILike,
qbtypes.FilterOperatorLike,
qbtypes.FilterOperatorNotLike:
value = querybuilder.FormatValueForContains(value)
}

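The contains/like family is routed through querybuilder.FormatValueForContains before the condition is built. Its implementation is not part of this diff; what follows is a plausible minimal sketch only, assuming the helper's job is to escape LIKE wildcards in user-supplied string values (the name of the sketch function is hypothetical, and the real helper may behave differently; it needs the standard "strings" import).

// Hypothetical sketch of wildcard escaping for CONTAINS/LIKE/ILIKE values;
// the real querybuilder.FormatValueForContains may differ.
func formatValueForContainsSketch(value any) any {
	s, ok := value.(string)
	if !ok {
		return value // non-strings pass through unchanged
	}
	s = strings.ReplaceAll(s, `\`, `\\`) // escape the escape character first
	s = strings.ReplaceAll(s, `%`, `\%`) // treat percent as a literal
	s = strings.ReplaceAll(s, `_`, `\_`) // treat underscore as a literal
	return s
}
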
@@ -152,9 +152,7 @@ func (f FilterOperator) IsStringSearchOperator() bool {
FilterOperatorILike,
FilterOperatorNotILike,
FilterOperatorLike,
FilterOperatorNotLike,
FilterOperatorRegexp,
FilterOperatorNotRegexp:
FilterOperatorNotLike:
return true
default:
return false

@@ -3,7 +3,6 @@ package querybuildertypesv5
import (
"fmt"

"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

@@ -175,54 +174,3 @@ func (q *QueryBuilderQuery[T]) Normalize() {
}

}

// Fast-path (no fingerprint grouping)
// CanShortCircuitDelta returns true if we can use the optimized query
// for the given query.
// This is used to avoid the group by fingerprint, thus improving the performance
// for certain queries.
// Cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
// - rate = sum(value)/step, increase = sum(value) - the sum of sums is the same as the sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
// - the sum of sums is the same as the sum of all values
//
// 3. time aggregation = min and space aggregation = min
// - the min of mins is the same as the min of all values
//
// 4. time aggregation = max and space aggregation = max
// - the max of maxes is the same as the max of all values
//
// 5. special case: exponential histograms - there is no need for per-series/fingerprint aggregation,
// we can directly use the quantilesDDMerge function
//
// All of this holds only for delta metrics.
func CanShortCircuitDelta(metricAgg MetricAggregation) bool {

if metricAgg.Temporality != metrictypes.Delta {
return false
}

ta := metricAgg.TimeAggregation
sa := metricAgg.SpaceAggregation

if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) &&
sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
return true
}
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
return true
}
if metricAgg.Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
return true
}

return false
}
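
To make the composability rules above concrete, here is a minimal table-driven sketch of the expected behavior. It is an in-repo test sketch, not part of the diff; `metrictypes.Cumulative` is assumed to exist alongside the `Delta` and `Unspecified` values used elsewhere in this change.

package querybuildertypesv5

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

// Sketch: the short circuit fires only for delta temporality, and only for
// aggregation pairs whose composition is order-independent.
func TestCanShortCircuitDeltaSketch(t *testing.T) {
	cases := []struct {
		name string
		agg  MetricAggregation
		want bool
	}{
		// rate+sum over delta: the sum of per-series sums equals the sum of all samples.
		{"delta rate/sum", MetricAggregation{Temporality: metrictypes.Delta, TimeAggregation: metrictypes.TimeAggregationRate, SpaceAggregation: metrictypes.SpaceAggregationSum}, true},
		// cumulative metrics always need the per-fingerprint pass (assumes metrictypes.Cumulative).
		{"cumulative rate/sum", MetricAggregation{Temporality: metrictypes.Cumulative, TimeAggregation: metrictypes.TimeAggregationRate, SpaceAggregation: metrictypes.SpaceAggregationSum}, false},
		// avg is not composable: the avg of avgs is not the avg of all values.
		{"delta avg/avg", MetricAggregation{Temporality: metrictypes.Delta, TimeAggregation: metrictypes.TimeAggregationAvg, SpaceAggregation: metrictypes.SpaceAggregationAvg}, false},
	}
	for _, c := range cases {
		if got := CanShortCircuitDelta(c.agg); got != c.want {
			t.Errorf("%s: CanShortCircuitDelta() = %v, want %v", c.name, got, c.want)
		}
	}
}
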
@@ -12,7 +12,6 @@ import (
var (
ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "field not found")
ErrBetweenValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
ErrBetweenValuesType = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values of the number type")
ErrInValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
ErrUnsupportedOperator = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported operator")
)

@@ -75,7 +75,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

case QueryTypeFormula:
var spec QueryBuilderFormula
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
return wrapUnmarshalError(err, "invalid formula spec: %v", err)
}
@@ -83,7 +83,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

case QueryTypeJoin:
var spec QueryBuilderJoin
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
return wrapUnmarshalError(err, "invalid join spec: %v", err)
}
@@ -98,7 +98,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

case QueryTypePromQL:
var spec PromQuery
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for PromQuery
// TODO: use json.Unmarshal here after implementing custom unmarshaler for PromQuery
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
return wrapUnmarshalError(err, "invalid PromQL spec: %v", err)
}
@@ -106,7 +106,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

case QueryTypeClickHouseSQL:
var spec ClickHouseQuery
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
// TODO: use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
return wrapUnmarshalError(err, "invalid ClickHouse SQL spec: %v", err)
}
@@ -439,7 +439,7 @@ func (r *QueryRangeRequest) GetQueriesSupportingZeroDefault() map[string]bool {
expr = strings.ToLower(expr)
// only pure additive/counting operations should default to zero,
// while statistical/analytical operations should show gaps when there's no data to analyze.
// TODO(srikanthccv): use newExprVisitor for getting the function used in the expression
// TODO: use newExprVisitor for getting the function used in the expression
if strings.HasPrefix(expr, "count(") ||
strings.HasPrefix(expr, "count_distinct(") ||
strings.HasPrefix(expr, "sum(") ||

@@ -21,12 +21,3 @@ var (
// []Bucket (struct{Lower,Upper,Count float64}), example: histogram
RequestTypeDistribution = RequestType{valuer.NewString("distribution")}
)

// IsAggregation returns true for request types that produce aggregated results
// (time_series, scalar, distribution). For these types, fields like groupBy,
// having, aggregations, and orderBy (with aggregation key validation) are meaningful.
// For non-aggregation types (raw, raw_stream, trace), those fields are ignored
// and don't need to be validated.
func (r RequestType) IsAggregation() bool {
return r == RequestTypeTimeSeries || r == RequestTypeScalar || r == RequestTypeDistribution
}
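
As a sanity check, the predicate should agree with the scattered negative checks it replaces in Validate further down in this diff. A small in-package test sketch (same file context, standard `testing` import assumed):

// Sketch: IsAggregation collapses checks of the form
// rt != RequestTypeRaw && rt != RequestTypeRawStream && rt != RequestTypeTrace
// into a single positive predicate.
func TestIsAggregationMatchesLegacyCheck(t *testing.T) {
	all := []RequestType{
		RequestTypeTimeSeries, RequestTypeScalar, RequestTypeDistribution,
		RequestTypeRaw, RequestTypeRawStream, RequestTypeTrace,
	}
	for _, rt := range all {
		legacy := rt != RequestTypeRaw && rt != RequestTypeRawStream && rt != RequestTypeTrace
		if legacy != rt.IsAggregation() {
			t.Errorf("%v: legacy check = %v, IsAggregation() = %v", rt, legacy, rt.IsAggregation())
		}
	}
}
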
@@ -10,78 +10,54 @@ import (
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// queryName returns the name from any query envelope spec type.
func (e QueryEnvelope) queryName() string {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Name
case QueryBuilderQuery[LogAggregation]:
return spec.Name
case QueryBuilderQuery[MetricAggregation]:
return spec.Name
case QueryBuilderFormula:
return spec.Name
case QueryBuilderTraceOperator:
return spec.Name
case QueryBuilderJoin:
return spec.Name
case PromQuery:
return spec.Name
case ClickHouseQuery:
return spec.Name
}
return ""
}

// isDisabled returns the disabled status from any query envelope spec type.
func (e QueryEnvelope) isDisabled() bool {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Disabled
case QueryBuilderQuery[LogAggregation]:
return spec.Disabled
case QueryBuilderQuery[MetricAggregation]:
return spec.Disabled
case QueryBuilderFormula:
return spec.Disabled
case QueryBuilderTraceOperator:
return spec.Disabled
case QueryBuilderJoin:
return spec.Disabled
case PromQuery:
return spec.Disabled
case ClickHouseQuery:
return spec.Disabled
}
return false
}
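
These two helpers let call sites drop per-spec-type switches. A minimal sketch of the kind of loop they enable (the helper name here is hypothetical, same package):

// Sketch: find the first enabled query's name without switching on spec types.
func firstEnabledQueryName(queries []QueryEnvelope) (string, bool) {
	for _, env := range queries {
		if !env.isDisabled() {
			return env.queryName(), true
		}
	}
	return "", false
}
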
// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
name := envelope.queryName()

var typeLabel string
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
typeLabel = "query"
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("trace query at position %d", index+1)
case QueryBuilderQuery[LogAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("log query at position %d", index+1)
case QueryBuilderQuery[MetricAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("metric query at position %d", index+1)
}
case QueryTypeFormula:
typeLabel = "formula"
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && spec.Name != "" {
return fmt.Sprintf("formula '%s'", spec.Name)
}
return fmt.Sprintf("formula at position %d", index+1)
case QueryTypeTraceOperator:
typeLabel = "trace operator"
if spec, ok := envelope.Spec.(QueryBuilderTraceOperator); ok && spec.Name != "" {
return fmt.Sprintf("trace operator '%s'", spec.Name)
}
return fmt.Sprintf("trace operator at position %d", index+1)
case QueryTypeJoin:
typeLabel = "join"
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && spec.Name != "" {
return fmt.Sprintf("join '%s'", spec.Name)
}
return fmt.Sprintf("join at position %d", index+1)
case QueryTypePromQL:
typeLabel = "PromQL query"
if spec, ok := envelope.Spec.(PromQuery); ok && spec.Name != "" {
return fmt.Sprintf("PromQL query '%s'", spec.Name)
}
return fmt.Sprintf("PromQL query at position %d", index+1)
case QueryTypeClickHouseSQL:
typeLabel = "ClickHouse query"
default:
typeLabel = "query"
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && spec.Name != "" {
return fmt.Sprintf("ClickHouse query '%s'", spec.Name)
}
return fmt.Sprintf("ClickHouse query at position %d", index+1)
}

if name != "" {
return fmt.Sprintf("%s '%s'", typeLabel, name)
}
return fmt.Sprintf("%s at position %d", typeLabel, index+1)
return fmt.Sprintf("query at position %d", index+1)
}

const (
@@ -96,12 +72,11 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
return err
}

if err := q.validateAggregations(requestType); err != nil {
return err
}

if err := q.validateGroupBy(requestType); err != nil {
return err
// Validate aggregations only for non-raw request types
if requestType != RequestTypeRaw && requestType != RequestTypeRawStream && requestType != RequestTypeTrace {
if err := q.validateAggregations(); err != nil {
return err
}
}

// Validate limit and pagination
@@ -119,23 +94,32 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
return err
}

if err := q.validateOrderBy(requestType); err != nil {
return err
if requestType != RequestTypeRaw && requestType != RequestTypeTrace && len(q.Aggregations) > 0 {
if err := q.validateOrderByForAggregation(); err != nil {
return err
}
} else {
if err := q.validateOrderBy(); err != nil {
return err
}
}

if err := q.validateSelectFields(requestType); err != nil {
return err
if requestType != RequestTypeRaw && requestType != RequestTypeTrace {
if err := q.validateHaving(); err != nil {
return err
}
}

if requestType == RequestTypeRaw {
if err := q.validateSelectFields(); err != nil {
return err
}
}

return nil
}

func (q *QueryBuilderQuery[T]) validateSelectFields(requestType RequestType) error {
// selectFields don't apply to aggregation queries, skip validation
if requestType.IsAggregation() {
return nil
}

func (q *QueryBuilderQuery[T]) validateSelectFields() error {
// isRoot and isEntryPoint are returned by the Metadata API, so if someone sends them, we have to reject the request.
for _, v := range q.SelectFields {
if v.Name == "isRoot" || v.Name == "isEntryPoint" {
@@ -148,21 +132,6 @@ func (q *QueryBuilderQuery[T]) validateSelectFields(requestType RequestType) err
return nil
}

func (q *QueryBuilderQuery[T]) validateGroupBy(requestType RequestType) error {
// groupBy doesn't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}
for idx, item := range q.GroupBy {
if item.TelemetryFieldKey.Name == "" {
return errors.NewInvalidInputf(
errors.CodeInvalidInput, "invalid empty key name for group by at index %d", idx,
)
}
}
return nil
}

func (q *QueryBuilderQuery[T]) validateSignal() error {
// Signal validation is handled during unmarshaling in req.go
// Valid signals are: metrics, traces, logs
@@ -183,12 +152,7 @@ func (q *QueryBuilderQuery[T]) validateSignal() error {
}
}

func (q *QueryBuilderQuery[T]) validateAggregations(requestType RequestType) error {
// aggregations don't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}

func (q *QueryBuilderQuery[T]) validateAggregations() error {
// At least one aggregation required for non-disabled queries
if len(q.Aggregations) == 0 && !q.Disabled {
return errors.NewInvalidInputf(
@@ -215,6 +179,14 @@ func (q *QueryBuilderQuery[T]) validateAggregations(requestType RequestType) err
aggId,
)
}
// Validate metric-specific aggregations
if err := validateMetricAggregation(v); err != nil {
aggId := fmt.Sprintf("aggregation #%d", i+1)
if q.Name != "" {
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
}
return wrapValidationError(err, aggId, "invalid metric %s: %s")
}
case TraceAggregation:
if v.Expression == "" {
aggId := fmt.Sprintf("aggregation #%d", i+1)
@@ -329,7 +301,7 @@ func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
return nil
}

func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
func (q *QueryBuilderQuery[T]) validateOrderBy() error {
for i, order := range q.Order {
// Direction validation is handled by the OrderDirection type
if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
@@ -347,12 +319,6 @@ func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
)
}
}

// aggregation-specific order key validation only applies to aggregation queries
if requestType.IsAggregation() {
return q.validateOrderByForAggregation()
}

return nil
}

@@ -362,6 +328,10 @@ func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
// 2. Aggregation expressions or aliases
// 3. Aggregation index (0, 1, 2, etc.)
func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
// First validate basic order by constraints
if err := q.validateOrderBy(); err != nil {
return err
}

validOrderKeys := make(map[string]bool)

@@ -431,6 +401,22 @@ func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
return nil
}

func (q *QueryBuilderQuery[T]) validateHaving() error {
if q.Having == nil || q.Having.Expression == "" {
return nil
}

// ensure that having is only used with aggregations
if len(q.Aggregations) == 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"having clause can only be used with aggregation queries. Use `filter.expression` instead",
)
}

return nil
}
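
A short in-package test sketch of the rule above, using the same types (`Having`, `LogAggregation`, `telemetrytypes.SignalLogs`) that appear in the test file later in this diff:

// Sketch: having without aggregations is rejected, pointing users to
// filter.expression for row-level conditions.
func TestValidateHavingRequiresAggregation(t *testing.T) {
	q := QueryBuilderQuery[LogAggregation]{
		Name:   "A",
		Signal: telemetrytypes.SignalLogs,
		Having: &Having{Expression: "count() > 10"},
	}
	if err := q.validateHaving(); err == nil {
		t.Error("expected error: having requires at least one aggregation")
	}
	q.Aggregations = []LogAggregation{{Expression: "count()"}}
	if err := q.validateHaving(); err != nil {
		t.Errorf("unexpected error with aggregations present: %v", err)
	}
}
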
// ValidateQueryRangeRequest validates the entire query range request
func (r *QueryRangeRequest) Validate() error {
// Validate time range
@@ -470,20 +456,236 @@ func (r *QueryRangeRequest) Validate() error {

// validateAllQueriesNotDisabled validates that at least one query in the composite query is enabled
func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
allDisabled := true
for _, envelope := range r.CompositeQuery.Queries {
if !envelope.isDisabled() {
return nil
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if !spec.Disabled {
allDisabled = false
}
case QueryBuilderQuery[LogAggregation]:
if !spec.Disabled {
allDisabled = false
}
case QueryBuilderQuery[MetricAggregation]:
if !spec.Disabled {
allDisabled = false
}
}
case QueryTypeFormula:
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeTraceOperator:
if spec, ok := envelope.Spec.(QueryBuilderTraceOperator); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeJoin:
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypePromQL:
if spec, ok := envelope.Spec.(PromQuery); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeClickHouseSQL:
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && !spec.Disabled {
allDisabled = false
}
}

// Early exit if we find at least one enabled query
if !allDisabled {
break
}
}

return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"all queries are disabled - at least one query must be enabled",
)
if allDisabled {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"all queries are disabled - at least one query must be enabled",
)
}

return nil
}

func (r *QueryRangeRequest) validateCompositeQuery() error {
return r.CompositeQuery.Validate(r.RequestType)
// Validate queries in composite query
if len(r.CompositeQuery.Queries) == 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"at least one query is required",
)
}

// Track query names for uniqueness (only for non-formula queries)
queryNames := make(map[string]bool)

// Validate each query based on its type
for i, envelope := range r.CompositeQuery.Queries {
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
// Validate based on the concrete type
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
case QueryBuilderQuery[LogAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
case QueryBuilderQuery[MetricAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
default:
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"unknown spec type for %s",
queryId,
)
}
case QueryTypeFormula:
// Formula validation is handled separately
spec, ok := envelope.Spec.(QueryBuilderFormula)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Expression == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"expression is required for %s",
queryId,
)
}
case QueryTypeJoin:
// Join validation is handled separately
_, ok := envelope.Spec.(QueryBuilderJoin)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
case QueryTypeTraceOperator:
spec, ok := envelope.Spec.(QueryBuilderTraceOperator)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Expression == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"expression is required for %s",
queryId,
)
}
case QueryTypePromQL:
// PromQL validation is handled separately
spec, ok := envelope.Spec.(PromQuery)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Query == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"query expression is required for %s",
queryId,
)
}
case QueryTypeClickHouseSQL:
// ClickHouse SQL validation is handled separately
spec, ok := envelope.Spec.(ClickHouseQuery)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Query == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"query expression is required for %s",
queryId,
)
}
default:
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"unknown query type '%s' for %s",
envelope.Type,
queryId,
).WithAdditional(
"Valid query types are: builder_query, builder_formula, builder_join, promql, clickhouse_sql, trace_operator",
)
}
}

return nil
}

// Validate performs validation on CompositeQuery
@@ -495,29 +697,12 @@ func (c *CompositeQuery) Validate(requestType RequestType) error {
)
}

// Track query names for uniqueness (only for builder queries)
queryNames := make(map[string]bool)

// Validate each query
for i, envelope := range c.Queries {
if err := validateQueryEnvelope(envelope, requestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}

// Check name uniqueness for builder queries
if envelope.Type == QueryTypeBuilder || envelope.Type == QueryTypeSubQuery {
name := envelope.queryName()
if name != "" {
if queryNames[name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
name,
)
}
queryNames[name] = true
}
}
}

return nil
@@ -618,3 +803,85 @@ func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) erro
)
}
}

// validateMetricAggregation validates metric-specific aggregation parameters
func validateMetricAggregation(agg MetricAggregation) error {
// we can't decide anything here without known temporality
if agg.Temporality == metrictypes.Unknown {
return nil
}

// Validate that rate/increase are only used with appropriate temporalities
if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
if agg.Temporality == metrictypes.Unspecified {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
)
}
}

// Validate percentile aggregations are only used with histogram types
if agg.SpaceAggregation.IsPercentile() {
if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"percentile aggregation can only be used with histogram or summary metric types",
)
}
}

// Validate time aggregation values
validTimeAggregations := []metrictypes.TimeAggregation{
metrictypes.TimeAggregationUnspecified,
metrictypes.TimeAggregationLatest,
metrictypes.TimeAggregationSum,
metrictypes.TimeAggregationAvg,
metrictypes.TimeAggregationMin,
metrictypes.TimeAggregationMax,
metrictypes.TimeAggregationCount,
metrictypes.TimeAggregationCountDistinct,
metrictypes.TimeAggregationRate,
metrictypes.TimeAggregationIncrease,
}

validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
if !validTimeAgg {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time aggregation: %s",
agg.TimeAggregation.StringValue(),
).WithAdditional(
"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
)
}

// Validate space aggregation values
validSpaceAggregations := []metrictypes.SpaceAggregation{
metrictypes.SpaceAggregationUnspecified,
metrictypes.SpaceAggregationSum,
metrictypes.SpaceAggregationAvg,
metrictypes.SpaceAggregationMin,
metrictypes.SpaceAggregationMax,
metrictypes.SpaceAggregationCount,
metrictypes.SpaceAggregationPercentile50,
metrictypes.SpaceAggregationPercentile75,
metrictypes.SpaceAggregationPercentile90,
metrictypes.SpaceAggregationPercentile95,
metrictypes.SpaceAggregationPercentile99,
}

validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
if !validSpaceAgg {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid space aggregation: %s",
agg.SpaceAggregation.StringValue(),
).WithAdditional(
"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
)
}

return nil
}
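
A minimal in-package test sketch exercising the temporality rule above, using only the constants that appear in this diff:

// Sketch: rate over a gauge (unspecified temporality) is rejected, while the
// same aggregation over a delta counter passes.
func TestValidateMetricAggregationTemporality(t *testing.T) {
	bad := MetricAggregation{
		Temporality:      metrictypes.Unspecified,
		TimeAggregation:  metrictypes.TimeAggregationRate,
		SpaceAggregation: metrictypes.SpaceAggregationSum,
	}
	if err := validateMetricAggregation(bad); err == nil {
		t.Error("expected error: rate cannot be used with gauge metrics")
	}
	good := bad
	good.Temporality = metrictypes.Delta
	if err := validateMetricAggregation(good); err != nil {
		t.Errorf("unexpected error for rate over delta: %v", err)
	}
}
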
@@ -333,617 +333,6 @@ func TestQueryRangeRequest_ValidateAllQueriesNotDisabled(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryRangeRequest_ValidateCompositeQuery(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
request QueryRangeRequest
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "empty composite query should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "at least one query is required",
|
||||
},
|
||||
{
|
||||
name: "duplicate builder query names should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[LogAggregation]{
|
||||
Name: "A",
|
||||
Disabled: true,
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[TraceAggregation]{
|
||||
Name: "A",
|
||||
Disabled: true,
|
||||
Signal: telemetrytypes.SignalTraces,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "duplicate query name 'A'",
|
||||
},
|
||||
{
|
||||
name: "duplicate names across log and metric builder queries should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[LogAggregation]{
|
||||
Name: "X",
|
||||
Disabled: true,
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[MetricAggregation]{
|
||||
Name: "X",
|
||||
Disabled: true,
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "duplicate query name 'X'",
|
||||
},
|
||||
{
|
||||
name: "same name on formula and builder should not conflict",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[LogAggregation]{
|
||||
Name: "A",
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
Aggregations: []LogAggregation{
|
||||
{Expression: "count()"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "A",
|
||||
Expression: "A + 1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "formula with empty expression should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "F1",
|
||||
Expression: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "expression is required",
|
||||
},
|
||||
{
|
||||
name: "promql with empty query should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypePromQL,
|
||||
Spec: PromQuery{
|
||||
Name: "P1",
|
||||
Query: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "PromQL query is required",
|
||||
},
|
||||
{
|
||||
name: "clickhouse with empty query should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeClickHouseSQL,
|
||||
Spec: ClickHouseQuery{
|
||||
Name: "CH1",
|
||||
Query: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "ClickHouse SQL query is required",
|
||||
},
|
||||
{
|
||||
name: "trace operator with empty expression should return error",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeTraceOperator,
|
||||
Spec: QueryBuilderTraceOperator{
|
||||
Name: "TO1",
|
||||
Expression: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "expression is required",
|
||||
},
|
||||
{
|
||||
name: "valid promql query should pass",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypePromQL,
|
||||
Spec: PromQuery{
|
||||
Name: "P1",
|
||||
Query: "up",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid clickhouse query should pass",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeClickHouseSQL,
|
||||
Spec: ClickHouseQuery{
|
||||
Name: "CH1",
|
||||
Query: "SELECT count() FROM logs",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid mixed queries with unique builder names should pass",
|
||||
request: QueryRangeRequest{
|
||||
Start: 1640995200000,
|
||||
End: 1640998800000,
|
||||
RequestType: RequestTypeTimeSeries,
|
||||
CompositeQuery: CompositeQuery{
|
||||
Queries: []QueryEnvelope{
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[LogAggregation]{
|
||||
Name: "A",
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
Aggregations: []LogAggregation{
|
||||
{Expression: "count()"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[TraceAggregation]{
|
||||
Name: "B",
|
||||
Signal: telemetrytypes.SignalTraces,
|
||||
Aggregations: []TraceAggregation{
|
||||
{Expression: "count()"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: QueryTypePromQL,
|
||||
Spec: PromQuery{
|
||||
Name: "C",
|
||||
Query: "up",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.request.Validate()
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("Validate() expected error but got none")
|
||||
return
|
||||
}
|
||||
if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("Validate() error = %v, want to contain %v", err.Error(), tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Validate() unexpected error = %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateQueryEnvelope(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envelope QueryEnvelope
|
||||
requestType RequestType
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid builder query with trace aggregation",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeBuilder,
|
||||
Spec: QueryBuilderQuery[TraceAggregation]{
|
||||
Name: "A",
|
||||
Signal: telemetrytypes.SignalTraces,
|
||||
Aggregations: []TraceAggregation{
|
||||
{Expression: "count()"},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid formula with expression",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "F1",
|
||||
Expression: "A + B",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "formula with empty expression should fail",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "F1",
|
||||
Expression: "",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "expression is required",
|
||||
},
|
||||
{
|
||||
name: "valid join spec",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeJoin,
|
||||
Spec: QueryBuilderJoin{
|
||||
Name: "J1",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid trace operator",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeTraceOperator,
|
||||
Spec: QueryBuilderTraceOperator{
|
||||
Name: "TO1",
|
||||
Expression: "count()",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "trace operator with empty expression should fail",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeTraceOperator,
|
||||
Spec: QueryBuilderTraceOperator{
|
||||
Name: "TO1",
|
||||
Expression: "",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "expression is required",
|
||||
},
|
||||
{
|
||||
name: "promql with empty query should fail",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypePromQL,
|
||||
Spec: PromQuery{
|
||||
Name: "P1",
|
||||
Query: "",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "PromQL query is required",
|
||||
},
|
||||
{
|
||||
name: "clickhouse with empty query should fail",
|
||||
envelope: QueryEnvelope{
|
||||
Type: QueryTypeClickHouseSQL,
|
||||
Spec: ClickHouseQuery{
|
||||
Name: "CH1",
|
||||
Query: "",
|
||||
},
|
||||
},
|
||||
requestType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "ClickHouse SQL query is required",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateQueryEnvelope(tt.envelope, tt.requestType)
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("validateQueryEnvelope() expected error but got none")
|
||||
return
|
||||
}
|
||||
if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("validateQueryEnvelope() error = %v, want to contain %v", err.Error(), tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("validateQueryEnvelope() unexpected error = %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryEnvelope_Helpers(t *testing.T) {
|
||||
t.Run("queryName", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envelope QueryEnvelope
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "trace builder query",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[TraceAggregation]{Name: "A"}},
|
||||
want: "A",
|
||||
},
|
||||
{
|
||||
name: "log builder query",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Name: "B"}},
|
||||
want: "B",
|
||||
},
|
||||
{
|
||||
name: "metric builder query",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[MetricAggregation]{Name: "C"}},
|
||||
want: "C",
|
||||
},
|
||||
{
|
||||
name: "formula",
|
||||
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Name: "F1"}},
|
||||
want: "F1",
|
||||
},
|
||||
{
|
||||
name: "promql",
|
||||
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Name: "P1"}},
|
||||
want: "P1",
|
||||
},
|
||||
{
|
||||
name: "clickhouse",
|
||||
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Name: "CH1"}},
|
||||
want: "CH1",
|
||||
},
|
||||
{
|
||||
name: "trace operator",
|
||||
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Name: "TO1"}},
|
||||
want: "TO1",
|
||||
},
|
||||
{
|
||||
name: "join",
|
||||
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{Name: "J1"}},
|
||||
want: "J1",
|
||||
},
|
||||
{
|
||||
name: "empty name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{}},
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.envelope.queryName()
|
||||
if got != tt.want {
|
||||
t.Errorf("queryName() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("isDisabled", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envelope QueryEnvelope
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "enabled builder query",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Disabled: false}},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "disabled builder query",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Disabled: true}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "disabled formula",
|
||||
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Disabled: true}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "enabled promql",
|
||||
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Disabled: false}},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "disabled clickhouse",
|
||||
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Disabled: true}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "disabled trace operator",
|
||||
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Disabled: true}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "disabled join",
|
||||
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{Disabled: true}},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.envelope.isDisabled()
|
||||
if got != tt.want {
|
||||
t.Errorf("isDisabled() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetQueryIdentifier(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envelope QueryEnvelope
|
||||
index int
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "builder query with name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Name: "A"}},
|
||||
index: 0,
|
||||
want: "query 'A'",
|
||||
},
|
||||
{
|
||||
name: "builder query without name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{}},
|
||||
index: 2,
|
||||
want: "query at position 3",
|
||||
},
|
||||
{
|
||||
name: "formula with name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Name: "F1"}},
|
||||
index: 0,
|
||||
want: "formula 'F1'",
|
||||
},
|
||||
{
|
||||
name: "formula without name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{}},
|
||||
index: 1,
|
||||
want: "formula at position 2",
|
||||
},
|
||||
{
|
||||
name: "promql with name",
|
||||
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Name: "P1"}},
|
||||
index: 0,
|
||||
want: "PromQL query 'P1'",
|
||||
},
|
||||
{
|
||||
name: "clickhouse with name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Name: "CH1"}},
|
||||
index: 0,
|
||||
want: "ClickHouse query 'CH1'",
|
||||
},
|
||||
{
|
||||
name: "trace operator with name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Name: "TO1"}},
|
||||
index: 0,
|
||||
want: "trace operator 'TO1'",
|
||||
},
|
||||
{
|
||||
name: "join without name",
|
||||
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{}},
|
||||
index: 0,
|
||||
want: "join at position 1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := getQueryIdentifier(tt.envelope, tt.index)
|
||||
if got != tt.want {
|
||||
t.Errorf("getQueryIdentifier() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {
	tests := []struct {
		name             string
@@ -1123,139 +512,4 @@ func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {
			}
		})
	}
}
func TestRequestType_IsAggregation(t *testing.T) {
	tests := []struct {
		name        string
		requestType RequestType
		want        bool
	}{
		{"time_series is aggregation", RequestTypeTimeSeries, true},
		{"scalar is aggregation", RequestTypeScalar, true},
		{"distribution is aggregation", RequestTypeDistribution, true},
		{"raw is not aggregation", RequestTypeRaw, false},
		{"raw_stream is not aggregation", RequestTypeRawStream, false},
		{"trace is not aggregation", RequestTypeTrace, false},
		{"unknown is not aggregation", RequestTypeUnknown, false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.requestType.IsAggregation()
			if got != tt.want {
				t.Errorf("IsAggregation() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestNonAggregationFieldsSkipped(t *testing.T) {
	// Fields that only apply to aggregation queries (groupBy, having, aggregations)
	// should be silently skipped for non-aggregation request types.
	t.Run("groupBy ignored for raw request type", func(t *testing.T) {
		query := QueryBuilderQuery[LogAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalLogs,
			GroupBy: []GroupByKey{
				{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
			},
		}
		err := query.Validate(RequestTypeRaw)
		if err != nil {
			t.Errorf("expected no error for groupBy with raw request type, got: %v", err)
		}
	})

	t.Run("groupBy validated for timeseries request type", func(t *testing.T) {
		query := QueryBuilderQuery[LogAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalLogs,
			Aggregations: []LogAggregation{
				{Expression: "count()"},
			},
			GroupBy: []GroupByKey{
				{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: ""}},
			},
		}
		err := query.Validate(RequestTypeTimeSeries)
		if err == nil {
			t.Errorf("expected error for empty groupBy key with timeseries request type")
		}
	})

	t.Run("having ignored for raw request type", func(t *testing.T) {
		query := QueryBuilderQuery[LogAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalLogs,
			Having: &Having{Expression: "count() > 10"},
		}
		err := query.Validate(RequestTypeRaw)
		if err != nil {
			t.Errorf("expected no error for having with raw request type, got: %v", err)
		}
	})

	t.Run("having ignored for trace request type", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
			Having: &Having{Expression: "count() > 10"},
		}
		err := query.Validate(RequestTypeTrace)
		if err != nil {
			t.Errorf("expected no error for having with trace request type, got: %v", err)
		}
	})

	t.Run("aggregations ignored for raw request type", func(t *testing.T) {
		query := QueryBuilderQuery[LogAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalLogs,
			Aggregations: []LogAggregation{
				{Expression: "count()"},
			},
		}
		err := query.Validate(RequestTypeRaw)
		if err != nil {
			t.Errorf("expected no error for aggregations with raw request type, got: %v", err)
		}
	})

	t.Run("aggregations ignored for raw_stream request type", func(t *testing.T) {
		query := QueryBuilderQuery[LogAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalLogs,
			Aggregations: []LogAggregation{
				{Expression: "count()"},
			},
		}
		err := query.Validate(RequestTypeRawStream)
		if err != nil {
			t.Errorf("expected no error for aggregations with raw_stream request type, got: %v", err)
		}
	})

	t.Run("selectFields validated for raw but not timeseries", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
			Aggregations: []TraceAggregation{
				{Expression: "count()"},
			},
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{Name: "isRoot"},
			},
		}
		// Should error for raw (selectFields are validated)
		err := query.Validate(RequestTypeRaw)
		if err == nil {
			t.Errorf("expected error for isRoot in selectFields with raw request type")
		}
		// Should pass for timeseries (selectFields skipped)
		err = query.Validate(RequestTypeTimeSeries)
		if err != nil {
			t.Errorf("expected no error for isRoot in selectFields with timeseries request type, got: %v", err)
		}
	})
}
}
@@ -78,18 +78,18 @@ func (f *TelemetryFieldKey) ArrayParentSelectors() []*FieldKeySelector {

func (f TelemetryFieldKey) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "name=%s", f.Name)
	sb.WriteString(fmt.Sprintf("name=%s", f.Name))
	if f.FieldContext != FieldContextUnspecified {
		fmt.Fprintf(&sb, ",context=%s", f.FieldContext.String)
		sb.WriteString(fmt.Sprintf(",context=%s", f.FieldContext.String))
	}
	if f.FieldDataType != FieldDataTypeUnspecified {
		fmt.Fprintf(&sb, ",datatype=%s", f.FieldDataType.StringValue())
		sb.WriteString(fmt.Sprintf(",datatype=%s", f.FieldDataType.StringValue()))
	}
	if f.Materialized {
		sb.WriteString(",materialized=true")
	}
	if f.JSONDataType != nil {
		fmt.Fprintf(&sb, ",jsondatatype=%s", f.JSONDataType.StringValue())
		sb.WriteString(fmt.Sprintf(",jsondatatype=%s", f.JSONDataType.StringValue()))
	}
	if len(f.Indexes) > 0 {
		sb.WriteString(",indexes=[")
@@ -97,7 +97,7 @@ func (f TelemetryFieldKey) String() string {
			if i > 0 {
				sb.WriteString("; ")
			}
			fmt.Fprintf(&sb, "{type=%s, columnExpr=%s, indexExpr=%s}", index.Type.StringValue(), index.ColumnExpression, index.IndexExpression)
			sb.WriteString(fmt.Sprintf("{type=%s, columnExpr=%s, indexExpr=%s}", index.Type.StringValue(), index.ColumnExpression, index.IndexExpression))
		}
		sb.WriteString("]")
	}
@@ -108,17 +108,6 @@ func (f TelemetryFieldKey) Text() string {
	return TelemetryFieldKeyToText(&f)
}

// OverrideMetadataFrom copies the resolved metadata fields from src into f.
// This is used when adjusting user-provided keys to match known field definitions.
func (f *TelemetryFieldKey) OverrideMetadataFrom(src *TelemetryFieldKey) {
	f.FieldContext = src.FieldContext
	f.FieldDataType = src.FieldDataType
	f.JSONDataType = src.JSONDataType
	f.Indexes = src.Indexes
	f.Materialized = src.Materialized
	f.JSONPlan = src.JSONPlan
}

func (f *TelemetryFieldKey) Equal(key *TelemetryFieldKey) bool {
	return f.Name == key.Name &&
		f.FieldContext == key.FieldContext &&
@@ -236,19 +225,11 @@ func TelemetryFieldKeyToText(key *TelemetryFieldKey) string {
}

func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
	return fmt.Sprintf("`%s_%s_%s`",
		key.FieldContext.String,
		fieldDataTypes[key.FieldDataType.StringValue()].StringValue(),
		strings.ReplaceAll(key.Name, ".", "$$"),
	)
	return fmt.Sprintf("`%s_%s_%s`", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
}

func FieldKeyToMaterializedColumnNameForExists(key *TelemetryFieldKey) string {
	return fmt.Sprintf("`%s_%s_%s_exists`",
		key.FieldContext.String,
		fieldDataTypes[key.FieldDataType.StringValue()].StringValue(),
		strings.ReplaceAll(key.Name, ".", "$$"),
	)
	return fmt.Sprintf("`%s_%s_%s_exists`", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
}

type TelemetryFieldValues struct {
@@ -14,10 +14,10 @@ from fixtures.alertutils import (
from fixtures.logger import setup_logger
from fixtures.utils import get_testdata_file_path

# Alert test cases use a 30-second wait time to verify expected alert firing.
# Alert data is set up to trigger on the first rule manager evaluation.
# With a 15-second eval frequency for most rules, plus alertmanager's
# group_wait and group_interval delays, alerts should fire well within 30 seconds.
# Test cases for match type and compare operators have a wait time of 30 seconds to verify the alert expectation.
# We've positioned the alert data to fire the alert on the first eval of the rule manager; the eval frequency
# for most alert rules is set to 15s, so considering this delay plus some delay from alertmanager's
# group_wait and group_interval, even in the worst case most alerts should be triggered in about 30 seconds.
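# A rough, illustrative sanity check of that 30-second budget (the 15 s eval
# frequency comes from the rules in this suite; the alertmanager delay below is
# an assumption for illustration, not a measured value):
#
#     RULE_EVAL_FREQUENCY_S = 15         # eval frequency configured for most rules here
#     ASSUMED_ALERTMANAGER_DELAY_S = 10  # assumed group_wait + group_interval slack
#     WORST_CASE_FIRING_S = RULE_EVAL_FREQUENCY_S + ASSUMED_ALERTMANAGER_DELAY_S
#     assert WORST_CASE_FIRING_S <= 30   # fits the wait_time_seconds=30 used below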
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
    types.AlertTestCase(
        name="test_threshold_above_at_least_once",
@@ -25,7 +25,6 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
        alert_data=[
            types.AlertData(
                type="metrics",
                # active requests dummy data
                data_path="alerts/test_scenarios/threshold_above_at_least_once/alert_data.jsonl",
            ),
        ],
@@ -116,28 +115,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_above_last",
        rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_above_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last, pylint: disable=W0511
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed
    # types.AlertTestCase(
    #     name="test_threshold_above_last",
    #     rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_above_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_below_at_least_once",
        rule_path="alerts/test_scenarios/threshold_below_at_least_once/rule.json",
@@ -188,7 +189,6 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
        alert_data=[
            types.AlertData(
                type="metrics",
                # one rate ~5 + rest 0.01 so it remains in total below 10
                data_path="alerts/test_scenarios/threshold_below_in_total/alert_data.jsonl",
            ),
        ],
@@ -227,28 +227,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_below_last",
        rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_below_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_below_last",
    #     rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_below_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_equal_to_at_least_once",
        rule_path="alerts/test_scenarios/threshold_equal_to_at_least_once/rule.json",
@@ -337,28 +339,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_equal_to_last",
        rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_equal_to_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_equal_to_last",
    #     rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_equal_to_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_not_equal_to_at_least_once",
        rule_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/rule.json",
@@ -447,28 +451,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_not_equal_to_last",
        rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_not_equal_to_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_not_equal_to_last",
    #     rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_not_equal_to_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
]

# test cases for unit conversion
File diff suppressed because it is too large
@@ -54,17 +54,17 @@ def test_rate_with_steady_values_and_reset(

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) >= 58
    assert len(result_values) >= 59
    # the counter reset happened at the 31st minute
    assert (
        result_values[29]["value"] == 0.0167
        result_values[30]["value"] == 0.0167
    )  # i.e. 2/120, the change from the 29th to the 31st minute
    assert (
        result_values[30]["value"] == 0.133
        result_values[31]["value"] == 0.133
    )  # i.e. 10/60, the change from the 31st to the 32nd minute
    count_of_steady_rate = sum(1 for v in result_values if v["value"] == 0.0833)
    assert (
        count_of_steady_rate >= 55
        count_of_steady_rate >= 56
    )  # 59 - (1 reset + 1 high rate + 1 at the beginning)
    # All rates should be non-negative (stale periods = 0 rate)
    for v in result_values:
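The asserted constants above follow from simple increase-over-elapsed-time arithmetic on the per-minute counter samples. A minimal, self-contained check of that arithmetic (illustrative only; this is not the query engine's rate implementation):

# Steady state: the counter increases by 5 over each 60 s interval.
assert round(5 / 60, 4) == 0.0833
# Around the reset at the 31st minute, an increase of 2 is spread over the
# 120 s window from the 29th to the 31st minute, hence the briefly lower rate.
assert round(2 / 120, 4) == 0.0167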
@@ -72,17 +72,16 @@ def test_with_steady_values_and_reset(

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) >= 58
    assert len(result_values) >= 59
    # the counter reset happened at the 31st minute
    # we skip the rate value for the first data point without a previous value
    assert result_values[29]["value"] == expected_value_at_31st_minute
    assert result_values[30]["value"] == expected_value_at_32nd_minute
    assert result_values[30]["value"] == expected_value_at_31st_minute
    assert result_values[31]["value"] == expected_value_at_32nd_minute
    assert (
        result_values[38]["value"] == steady_value
    )  # 38th minute is when cumulative shifts to delta
        result_values[39]["value"] == steady_value
    )  # 39th minute is when cumulative shifts to delta
    count_of_steady_rate = sum(1 for v in result_values if v["value"] == steady_value)
    assert (
        count_of_steady_rate >= 55
        count_of_steady_rate >= 56
    )  # 59 - (1 reset + 1 high rate + 1 at the beginning)
    # All rates should be non-negative (stale periods = 0 rate)
    for v in result_values:
@@ -317,12 +316,12 @@ def test_for_service_with_switch(

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) >= 59
    assert result_values[29]["value"] == expected_value_at_30th_minute  # 0.183
    assert result_values[30]["value"] == expected_value_at_31st_minute  # 0.183
    assert result_values[37]["value"] == value_at_switch  # 0.25
    assert len(result_values) >= 60
    assert result_values[30]["value"] == expected_value_at_30th_minute  # 0.183
    assert result_values[31]["value"] == expected_value_at_31st_minute  # 0.183
    assert result_values[38]["value"] == value_at_switch  # 0.25
    assert (
        result_values[38]["value"] == value_at_switch  # 0.25
        result_values[39]["value"] == value_at_switch  # 0.25
    )  # 39th minute is when cumulative shifts to delta
    # All rates should be non-negative (stale periods = 0 rate)
    for v in result_values:
tests/integration/src/user/01_unique_index.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from http import HTTPStatus
from typing import Callable

import requests

from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.types import Operation, SigNoz

DUPLICATE_USER_EMAIL = "duplicate@integration.test"


def test_duplicate_user_invite_rejected(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    """
    Verify that the unique index on (email, org_id) in the users table prevents
    creating duplicate users. This invites a new user, accepts the invite, then
    tries to invite and accept the same email again expecting a failure.
    """
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Step 1: Invite a new user.
    invite_response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/invite"),
        json={"email": DUPLICATE_USER_EMAIL, "role": "EDITOR"},
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=2,
    )
    assert invite_response.status_code == HTTPStatus.CREATED
    first_invite_token = invite_response.json()["data"]["token"]

    # Step 2: Accept the invite to create the user.
    accept_response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/invite/accept"),
        json={"token": first_invite_token, "password": "password123Z$"},
        timeout=2,
    )
    assert accept_response.status_code == HTTPStatus.CREATED

    # Step 3: Invite the same email again.
    second_invite_response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/invite"),
        json={"email": DUPLICATE_USER_EMAIL, "role": "VIEWER"},
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=2,
    )

    # The invite creation itself may be rejected if the app checks for existing users.
    if second_invite_response.status_code != HTTPStatus.CREATED:
        assert second_invite_response.status_code in (
            HTTPStatus.CONFLICT,
            HTTPStatus.BAD_REQUEST,
        )
        return

    second_invite_token = second_invite_response.json()["data"]["token"]

    # Step 4: Accept the second invite — should fail due to unique constraint.
    second_accept_response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/invite/accept"),
        json={"token": second_invite_token, "password": "password123Z$"},
        timeout=2,
    )
    assert second_accept_response.status_code in (
        HTTPStatus.CONFLICT,
        HTTPStatus.BAD_REQUEST,
        HTTPStatus.INTERNAL_SERVER_ERROR,
    )
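For context on the constraint this test exercises, here is a minimal, self-contained sketch of a composite unique index on (email, org_id) rejecting a duplicate pair. It uses an in-memory SQLite table; the table layout and index name are illustrative assumptions, not SigNoz's actual schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT NOT NULL, org_id TEXT NOT NULL)"
)
# Composite unique index: the same email may exist in different orgs, but not twice in one org.
conn.execute("CREATE UNIQUE INDEX idx_users_email_org_id ON users (email, org_id)")

conn.execute("INSERT INTO users (email, org_id) VALUES (?, ?)", ("duplicate@integration.test", "org-1"))
conn.execute("INSERT INTO users (email, org_id) VALUES (?, ?)", ("duplicate@integration.test", "org-2"))  # ok: different org
try:
    conn.execute("INSERT INTO users (email, org_id) VALUES (?, ?)", ("duplicate@integration.test", "org-1"))
except sqlite3.IntegrityError as exc:
    print("duplicate rejected:", exc)  # UNIQUE constraint failed: users.email, users.org_id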
@@ -1,12 +1,12 @@
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":19,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":38,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -25,7 +25,7 @@
"type": "clickhouse_sql",
"spec": {
"name": "A",
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= $start_timestamp_ms \n AND unix_milli < $end_timestamp_ms \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n sum(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= {{.start_timestamp_ms}} \n AND unix_milli < {{.end_timestamp_ms}} \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n avg(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
}
}
]
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":12,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":23,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":46,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":76,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":34,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":30,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":50,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":55,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":70,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}