Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-08 10:49:56 +00:00)

Compare commits: update-que...feat/7274 (22 commits)
| SHA1 |
|---|
| 1224222d5b |
| c8e7abaa85 |
| 0b7cd4c1a7 |
| 62c033ccf8 |
| e637487984 |
| 8fc43a00f8 |
| 031d62ca44 |
| 8c4c357351 |
| d8d8191a32 |
| a876c0a744 |
| c36f913a90 |
| ed597f00c0 |
| 4957d3ae93 |
| 8835e3493d |
| 027a1631ef |
| d7a6607a25 |
| 7a58bc58c9 |
| 88be23c3e3 |
| 8f095dfbc9 |
| 72207691a3 |
| 8998ca652e |
| f4ae5f19ff |
.gitignore (vendored, 1 change)
@@ -54,6 +54,7 @@ ee/query-service/tests/test-deploy/data/
 bin/
+.local/
 */query-service/queries.active
 ee/query-service/db

 # e2e
@@ -77,4 +77,4 @@ Need assistance? Join our Slack community:
 ## Where do I go from here?

 - Set up your [development environment](docs/contributing/development.md)
-- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo/otel-demo-docs.md)
+- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
@@ -313,6 +313,9 @@ func (p *BaseSeasonalProvider) getScore(
 	series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
 ) float64 {
 	expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
+	if expectedValue < 0 {
+		expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
+	}
	return (value - expectedValue) / p.getStdDev(weekSeries)
 }
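The three added lines handle a negative seasonal expectation by falling back to a moving average before the z-score is taken. A minimal TypeScript sketch of the same scoring logic, with plain number[] series standing in for v3.Series (an assumption for brevity):

```ts
// Simplified series shape: the real provider works on v3.Series values.
type Series = number[];

const movingAvg = (series: Series, window: number, idx: number): number => {
	const start = Math.max(0, idx - window);
	const slice = series.slice(start, idx + 1);
	return slice.reduce((a, b) => a + b, 0) / Math.max(slice.length, 1);
};

const stdDev = (series: Series): number => {
	const mean = series.reduce((a, b) => a + b, 0) / series.length;
	const variance =
		series.reduce((acc, v) => acc + (v - mean) ** 2, 0) / series.length;
	return Math.sqrt(variance);
};

// Z-score of the observed value against the seasonal expectation; a negative
// expectation falls back to a moving average of the previous period, mirroring
// the Go change above. The window size of 7 is an illustrative stand-in.
const getScore = (
	expected: number,
	prevSeries: Series,
	weekSeries: Series,
	value: number,
	idx: number,
): number => {
	const expectedValue = expected < 0 ? movingAvg(prevSeries, 7, idx) : expected;
	return (value - expectedValue) / stdDev(weekSeries);
};
```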
@@ -7,12 +7,14 @@ import (
 	"time"

 	"github.com/SigNoz/signoz/ee/query-service/app"
+	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
 	"github.com/SigNoz/signoz/pkg/config"
 	"github.com/SigNoz/signoz/pkg/config/envprovider"
 	"github.com/SigNoz/signoz/pkg/config/fileprovider"
 	"github.com/SigNoz/signoz/pkg/query-service/auth"
 	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
 	"github.com/SigNoz/signoz/pkg/signoz"
+	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/version"
@@ -94,12 +96,17 @@ func main() {

 	version.Info.PrettyPrint(config.Version)

+	sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
+	if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
+		zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
+	}
+
 	signoz, err := signoz.New(
 		context.Background(),
 		config,
 		signoz.NewCacheProviderFactories(),
 		signoz.NewWebProviderFactories(),
-		signoz.NewSQLStoreProviderFactories(),
+		sqlStoreFactories,
 		signoz.NewTelemetryStoreProviderFactories(),
 	)
 	if err != nil {
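The EE binary now builds the SQL-store factory registry up front so it can register the Postgres provider before handing the registry to signoz.New. A sketch of that pre-registration pattern in TypeScript; the class and method names are illustrative, not the actual signoz package API:

```ts
// Illustrative registry with duplicate detection, mirroring the error
// handling of factories.Add(...) in the Go change above.
class ProviderFactories<T> {
	private factories = new Map<string, () => T>();

	add(name: string, factory: () => T): void {
		if (this.factories.has(name)) {
			throw new Error(`factory ${name} already registered`);
		}
		this.factories.set(name, factory);
	}

	create(name: string): T {
		const factory = this.factories.get(name);
		if (!factory) throw new Error(`no factory named ${name}`);
		return factory();
	}
}

// Usage: register an extra provider before handing the registry to the app.
const sqlStores = new ProviderFactories<{ dialect: string }>();
sqlStores.add('sqlite', () => ({ dialect: 'sqlite' }));
sqlStores.add('postgres', () => ({ dialect: 'postgres' })); // EE-only addition
```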
@@ -157,6 +157,13 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.TraceFunnels,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var ProPlan = basemodel.FeatureSet{

@@ -279,6 +286,13 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.TraceFunnels,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var EnterprisePlan = basemodel.FeatureSet{

@@ -415,4 +429,11 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.TraceFunnels,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }
@@ -60,10 +60,14 @@
 	"INTEGRATIONS": "SigNoz | Integrations",
 	"ALERT_HISTORY": "SigNoz | Alert Rule History",
 	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
 	"MESSAGING_QUEUES": "SigNoz | Messaging Queues",
 	"MESSAGING_QUEUES_OVERVIEW": "SigNoz | Messaging Queues",
 	"MESSAGING_QUEUES_KAFKA": "SigNoz | Messaging Queues | Kafka",
 	"MESSAGING_QUEUES_KAFKA_DETAIL": "SigNoz | Messaging Queues | Kafka",
 	"MESSAGING_QUEUES_CELERY_TASK": "SigNoz | Messaging Queues | Celery",
 	"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring",
 	"INFRASTRUCTURE_MONITORING_KUBERNETES": "SigNoz | Infra Monitoring",
 	"METRICS_EXPLORER": "SigNoz | Metrics Explorer",
 	"METRICS_EXPLORER_EXPLORER": "SigNoz | Metrics Explorer",
-	"METRICS_EXPLORER_VIEWS": "SigNoz | Metrics Explorer"
+	"METRICS_EXPLORER_VIEWS": "SigNoz | Metrics Explorer",
+	"API_MONITORING": "SigNoz | API Monitoring"
 }
@@ -521,7 +521,7 @@ export default function CeleryOverviewTable({
 			locale={{
 				emptyText: isLoading ? null : <Typography.Text>No data</Typography.Text>,
 			}}
-			scroll={{ x: true }}
+			scroll={{ x: 'max-content' }}
 			showSorterTooltip
 			onDragColumn={handleDragColumn}
 			onRow={(record): { onClick: () => void; className: string } => ({
@@ -18,6 +18,7 @@ function CopyClipboardHOC({
 			notifications.success({
 				message: notificationMessage,
+				key: notificationMessage,
 			});
 		}
 	}, [value, notifications, entityKey]);
@@ -1,3 +1,5 @@
+import './ResizeTable.styles.scss';
+
 import { SyntheticEvent, useMemo } from 'react';
 import { Resizable, ResizeCallbackData } from 'react-resizable';

@@ -10,8 +12,8 @@ function ResizableHeader(props: ResizableHeaderProps): JSX.Element {
 	const handle = useMemo(
 		() => (
 			<SpanStyle
-				className="react-resizable-handle"
 				onClick={(e): void => e.stopPropagation()}
+				className="resize-handle"
 			/>
 		),
 		[],

@@ -19,7 +21,7 @@ function ResizableHeader(props: ResizableHeaderProps): JSX.Element {

 	if (!width) {
 		// eslint-disable-next-line react/jsx-props-no-spreading
-		return <th {...restProps} />;
+		return <th {...restProps} className="resizable-header" />;
 	}

 	return (

@@ -29,9 +31,10 @@ function ResizableHeader(props: ResizableHeaderProps): JSX.Element {
 			handle={handle}
 			onResize={onResize}
 			draggableOpts={enableUserSelectHack}
+			minConstraints={[150, 0]}
 		>
 			{/* eslint-disable-next-line react/jsx-props-no-spreading */}
-			<th {...restProps} />
+			<th {...restProps} className="resizable-header" />
 		</Resizable>
 	);
 }
frontend/src/components/ResizeTable/ResizeTable.styles.scss (new file, 53 lines)
@@ -0,0 +1,53 @@
.resizable-header {
	user-select: none;
	-webkit-user-select: none;
	-moz-user-select: none;
	-ms-user-select: none;
	position: relative;

	.ant-table-column-title {
		white-space: normal;
		overflow: hidden;
		text-overflow: ellipsis;
	}
}

.resize-main-table {
	.ant-table-body {
		.ant-table-tbody {
			.ant-table-row {
				.ant-table-cell {
					.ant-typography {
						white-space: unset;
					}
				}
			}
		}
	}
}

.logs-table,
.traces-table {
	.resize-table {
		.resize-handle {
			position: absolute;
			top: 0;
			bottom: 0;
			inset-inline-end: -5px;
			width: 10px;
			cursor: col-resize;

			&::after {
				content: '';
				position: absolute;
				top: 50%;
				left: 50%;
				transform: translate(-50%, -50%);
				width: 1px;
				height: 1.6em;
				background-color: var(--bg-slate-200);
				transition: background-color 0.2s;
			}
		}
	}
}
@@ -2,35 +2,63 @@

 import { Table } from 'antd';
 import { ColumnsType } from 'antd/lib/table';
 import cx from 'classnames';
 import { dragColumnParams } from 'hooks/useDragColumns/configs';
-import { set } from 'lodash-es';
+import { RowData } from 'lib/query/createTableColumnsFromQuery';
+import { debounce, set } from 'lodash-es';
+import { useDashboard } from 'providers/Dashboard/Dashboard';
 import {
 	SyntheticEvent,
 	useCallback,
 	useEffect,
 	useMemo,
 	useRef,
 	useState,
 } from 'react';
 import ReactDragListView from 'react-drag-listview';
 import { ResizeCallbackData } from 'react-resizable';
+import { Widgets } from 'types/api/dashboard/getAll';

 import ResizableHeader from './ResizableHeader';
 import { DragSpanStyle } from './styles';
 import { ResizeTableProps } from './types';

 // eslint-disable-next-line sonarjs/cognitive-complexity
 function ResizeTable({
 	columns,
 	onDragColumn,
 	pagination,
+	widgetId,
+	shouldPersistColumnWidths = false,
 	...restProps
 }: ResizeTableProps): JSX.Element {
 	const [columnsData, setColumns] = useState<ColumnsType>([]);
+	const { setColumnWidths, selectedDashboard } = useDashboard();
+
+	const columnWidths = shouldPersistColumnWidths
+		? (selectedDashboard?.data?.widgets?.find(
+				(widget) => widget.id === widgetId,
+		  ) as Widgets)?.columnWidths
+		: undefined;
+
+	const updateAllColumnWidths = useRef(
+		debounce((widthsConfig: Record<string, number>) => {
+			if (!widgetId || !shouldPersistColumnWidths) return;
+			setColumnWidths?.((prev) => ({
+				...prev,
+				[widgetId]: widthsConfig,
+			}));
+		}, 1000),
+	).current;

 	const handleResize = useCallback(
 		(index: number) => (
-			_e: SyntheticEvent<Element>,
+			e: SyntheticEvent<Element>,
 			{ size }: ResizeCallbackData,
 		): void => {
+			e.preventDefault();
+			e.stopPropagation();
+
 			const newColumns = [...columnsData];
 			newColumns[index] = {
 				...newColumns[index],
@@ -65,6 +93,7 @@ function ResizeTable({
 			...restProps,
 			components: { header: { cell: ResizableHeader } },
 			columns: mergedColumns,
+			className: cx('resize-main-table', restProps.className),
 		};

 		set(
@@ -78,9 +107,39 @@ function ResizeTable({

 	useEffect(() => {
 		if (columns) {
-			setColumns(columns);
+			// Apply stored column widths from widget configuration
+			const columnsWithStoredWidths = columns.map((col) => {
+				const dataIndex = (col as RowData).dataIndex as string;
+				if (dataIndex && columnWidths && columnWidths[dataIndex]) {
+					return {
+						...col,
+						width: columnWidths[dataIndex], // Apply stored width
+					};
+				}
+				return col;
+			});
+
+			setColumns(columnsWithStoredWidths);
 		}
-	}, [columns]);
+	}, [columns, columnWidths]);
+
+	useEffect(() => {
+		if (!shouldPersistColumnWidths) return;
+		// Collect all column widths in a single object
+		const newColumnWidths: Record<string, number> = {};
+
+		mergedColumns.forEach((col) => {
+			if (col.width && (col as RowData).dataIndex) {
+				const dataIndex = (col as RowData).dataIndex as string;
+				newColumnWidths[dataIndex] = col.width as number;
+			}
+		});
+
+		// Only update if there are actual widths to set
+		if (Object.keys(newColumnWidths).length > 0) {
+			updateAllColumnWidths(newColumnWidths);
+		}
+	}, [mergedColumns, updateAllColumnWidths, shouldPersistColumnWidths]);

 	return onDragColumn ? (
 		<ReactDragListView.DragColumn {...dragColumnParams} onDragEnd={onDragColumn}>
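Resize callbacks fire continuously while a column edge is dragged, so the new persistence path debounces the write-back and saves at most once per second. A standalone TypeScript sketch of the pattern; the save callback is a stand-in for the dashboard setState used above:

```ts
import { debounce } from 'lodash-es';

// Resize events arrive on every pixel of drag; debounce collapses them into
// one save per quiet window.
type Widths = Record<string, number>;

const makeWidthPersister = (
	save: (widths: Widths) => void, // e.g. a setState or API call (assumption)
	waitMs = 1000,
): ((widths: Widths) => void) => debounce(save, waitMs);

// Usage: call on every resize; only the last value within the window is saved.
const persist = makeWidthPersister((w) => console.log('saving', w));
persist({ name: 150 });
persist({ name: 180 }); // only this one reaches save() after 1s of quiet
```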
@@ -8,6 +8,8 @@ export const SpanStyle = styled.span`
 	width: 0.625rem;
 	height: 100%;
 	cursor: col-resize;
+	margin-left: 4px;
+	margin-right: 4px;
 `;

 export const DragSpanStyle = styled.span`
@@ -9,6 +9,8 @@ import { TableDataSource } from './contants';

 export interface ResizeTableProps extends TableProps<any> {
 	onDragColumn?: (fromIndex: number, toIndex: number) => void;
+	widgetId?: string;
+	shouldPersistColumnWidths?: boolean;
 }
 export interface DynamicColumnTableProps extends TableProps<any> {
 	tablesource: typeof TableDataSource[keyof typeof TableDataSource];
@@ -25,4 +25,6 @@ export enum FeatureKeys {
 	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
 	AWS_INTEGRATION = 'AWS_INTEGRATION',
 	ONBOARDING_V3 = 'ONBOARDING_V3',
+	THIRD_PARTY_API = 'THIRD_PARTY_API',
+	TRACE_FUNNELS = 'TRACE_FUNNELS',
 }
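These enum values line up with the TraceFunnels entries added to the license plans above (shipped with Active: false). An illustrative gate in TypeScript, assuming a simple lookup over the plan's feature list; the real hook or selector SigNoz uses is not part of this diff:

```ts
// Hypothetical feature gate keyed by the enum values above.
enum FeatureKeys {
	THIRD_PARTY_API = 'THIRD_PARTY_API',
	TRACE_FUNNELS = 'TRACE_FUNNELS',
}

interface Feature {
	name: string;
	active: boolean;
}

const isFeatureActive = (features: Feature[], key: FeatureKeys): boolean =>
	features.some((f) => f.name === key && f.active);

// With the plan entries above shipping Active: false, the gate stays closed
// until the feature is switched on server-side.
const features: Feature[] = [{ name: 'TRACE_FUNNELS', active: false }];
console.log(isFeatureActive(features, FeatureKeys.TRACE_FUNNELS)); // false
```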
@@ -1,3 +1,4 @@
+import ROUTES from 'constants/routes';
 import AlertChannels from 'container/AllAlertChannels';
 import { allAlertChannels } from 'mocks-server/__mockdata__/alerts';
 import { act, fireEvent, render, screen, waitFor } from 'tests/test-utils';

@@ -20,6 +21,13 @@ jest.mock('hooks/useNotifications', () => ({
 	})),
 }));

+jest.mock('react-router-dom', () => ({
+	...jest.requireActual('react-router-dom'),
+	useLocation: (): { pathname: string } => ({
+		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALL_CHANNELS}`,
+	}),
+}));
+
 describe('Alert Channels Settings List page', () => {
 	beforeEach(() => {
 		render(<AlertChannels />);
@@ -1,3 +1,4 @@
+import ROUTES from 'constants/routes';
 import AlertChannels from 'container/AllAlertChannels';
 import { allAlertChannels } from 'mocks-server/__mockdata__/alerts';
 import { fireEvent, render, screen, waitFor } from 'tests/test-utils';

@@ -25,6 +26,13 @@ jest.mock('hooks/useComponentPermission', () => ({
 	default: jest.fn().mockImplementation(() => [false]),
 }));

+jest.mock('react-router-dom', () => ({
+	...jest.requireActual('react-router-dom'),
+	useLocation: (): { pathname: string } => ({
+		pathname: `${process.env.FRONTEND_API_ENDPOINT}${ROUTES.ALL_CHANNELS}`,
+	}),
+}));
+
 describe('Alert Channels Settings List page (Normal User)', () => {
 	beforeEach(() => {
 		render(<AlertChannels />);
@@ -1,5 +1,6 @@
 import { LoadingOutlined } from '@ant-design/icons';
 import { Select, Spin, Table, Typography } from 'antd';
+import logEvent from 'api/common/logEvent';
 import { ENTITY_VERSION_V4 } from 'constants/app';
 import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
 import {

@@ -151,6 +152,7 @@ function AllEndPoints({
 		if (groupBy.length === 0) {
 			setSelectedEndPointName(record.endpointName); // this will open up the endpoint details tab
 			setSelectedView(VIEW_TYPES.ENDPOINT_DETAILS);
+			logEvent('API Monitoring: Endpoint name row clicked', {});
 		} else {
 			handleGroupByRowClick(record); // this will prepare the nested query payload
 		}
@@ -392,6 +392,39 @@
 	gap: 20px;
 	padding-top: 20px;

+	.endpoint-meta-data {
+		display: flex;
+		gap: 8px;
+
+		.endpoint-meta-data-pill {
+			display: flex;
+			align-items: flex-start;
+			border-radius: 4px;
+			border: 1px solid var(--bg-slate-300);
+			width: fit-content;
+
+			.endpoint-meta-data-label {
+				display: flex;
+				padding: 6px 8px;
+				align-items: center;
+				gap: 4px;
+				border-right: 1px solid var(--bg-slate-300);
+				color: var(--text-vanilla-100);
+				background: var(--bg-slate-500);
+				height: calc(100% - 12px);
+			}
+
+			.endpoint-meta-data-value {
+				display: flex;
+				padding: 6px 8px;
+				justify-content: center;
+				align-items: center;
+				gap: 10px;
+				color: var(--text-vanilla-400);
+				background: var(--bg-slate-400);
+				height: calc(100% - 12px);
+			}
+		}
+	}
+
 	.endpoint-details-filters-container {
 		display: flex;
 		flex-direction: row;

@@ -405,6 +438,13 @@
 		}
 	}

+	.ant-select-item,
+	.ant-select-item-option-content {
+		flex: auto;
+		white-space: normal;
+		overflow-wrap: break-word;
+	}
+
 	.status-code-table-container {
 		border-radius: 3px;
 		border: 1px solid var(--bg-slate-500);

@@ -809,6 +849,13 @@
 		width: 100%;
 	}
 }

+	.ant-select-item,
+	.ant-select-item-option-content {
+		flex: auto;
+		white-space: normal;
+		overflow-wrap: break-word;
+	}
 }

 .lightMode {

@@ -917,6 +964,20 @@
 	}
 }

+	.endpoint-meta-data {
+		.endpoint-meta-data-pill {
+			.endpoint-meta-data-label {
+				color: var(--text-ink-300);
+				background: var(--bg-vanilla-100);
+			}
+
+			.endpoint-meta-data-value {
+				color: var(--text-ink-300);
+				background: var(--bg-vanilla-100);
+			}
+		}
+	}
+
 	.status-code-table-container {
 		.ant-table {
 			.ant-table-thead > tr > th {
@@ -19,12 +19,14 @@ function DomainDetails({
 	selectedDomainIndex,
 	setSelectedDomainIndex,
 	domainListLength,
+	domainListFilters,
 }: {
 	domainData: any;
 	handleClose: () => void;
 	selectedDomainIndex: number;
 	setSelectedDomainIndex: (index: number) => void;
 	domainListLength: number;
+	domainListFilters: IBuilderQuery['filters'];
 }): JSX.Element {
 	const [selectedView, setSelectedView] = useState<VIEWS>(VIEWS.ALL_ENDPOINTS);
 	const [selectedEndPointName, setSelectedEndPointName] = useState<string>('');

@@ -132,6 +134,7 @@ function DomainDetails({
 					domainName={domainData.domainName}
 					endPointName={selectedEndPointName}
 					setSelectedEndPointName={setSelectedEndPointName}
+					domainListFilters={domainListFilters}
 				/>
 			)}
 		</>
@@ -2,7 +2,10 @@ import { ENTITY_VERSION_V4 } from 'constants/app';
 import { initialQueriesMap } from 'constants/queryBuilder';
 import {
 	END_POINT_DETAILS_QUERY_KEYS_ARRAY,
+	extractPortAndEndpoint,
 	getEndPointDetailsQueryPayload,
+	getLatencyOverTimeWidgetData,
+	getRateOverTimeWidgetData,
 } from 'container/ApiMonitoring/utils';
 import QueryBuilderSearchV2 from 'container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2';
 import { GetMetricQueryRange } from 'lib/dashboard/getQueryResults';

@@ -27,10 +30,12 @@ function EndPointDetails({
 	domainName,
 	endPointName,
 	setSelectedEndPointName,
+	domainListFilters,
 }: {
 	domainName: string;
 	endPointName: string;
 	setSelectedEndPointName: (value: string) => void;
+	domainListFilters: IBuilderQuery['filters'];
 }): JSX.Element {
 	const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
 		(state) => state.globalTime,

@@ -101,8 +106,6 @@ function EndPointDetails({
 	const [
 		endPointMetricsDataQuery,
 		endPointStatusCodeDataQuery,
-		endPointRateOverTimeDataQuery,
-		endPointLatencyOverTimeDataQuery,
 		endPointDropDownDataQuery,
 		endPointDependentServicesDataQuery,
 		endPointStatusCodeBarChartsDataQuery,

@@ -115,12 +118,29 @@ function EndPointDetails({
 			endPointDetailsDataQueries[3],
 			endPointDetailsDataQueries[4],
 			endPointDetailsDataQueries[5],
 			endPointDetailsDataQueries[6],
 			endPointDetailsDataQueries[7],
 		],
 		[endPointDetailsDataQueries],
 	);

+	const { endpoint, port } = useMemo(
+		() => extractPortAndEndpoint(endPointName),
+		[endPointName],
+	);
+
+	const [rateOverTimeWidget, latencyOverTimeWidget] = useMemo(
+		() => [
+			getRateOverTimeWidgetData(domainName, endPointName, {
+				items: [...domainListFilters.items, ...filters.items],
+				op: filters.op,
+			}),
+			getLatencyOverTimeWidgetData(domainName, endPointName, {
+				items: [...domainListFilters.items, ...filters.items],
+				op: filters.op,
+			}),
+		],
+		[domainName, endPointName, filters, domainListFilters],
+	);
+
 	return (
 		<div className="endpoint-details-container">
 			<div className="endpoint-details-filters-container">

@@ -129,6 +149,8 @@ function EndPointDetails({
 					selectedEndPointName={endPointName}
 					setSelectedEndPointName={setSelectedEndPointName}
 					endPointDropDownDataQuery={endPointDropDownDataQuery}
+					parentContainerDiv=".endpoint-details-filters-container"
+					dropdownStyle={{ width: 'calc(100% - 36px)' }}
 				/>
 			</div>
 			<div className="endpoint-details-filters-container-search">

@@ -141,6 +163,16 @@ function EndPointDetails({
 					/>
 				</div>
 			</div>
+			<div className="endpoint-meta-data">
+				<div className="endpoint-meta-data-pill">
+					<div className="endpoint-meta-data-label">Endpoint</div>
+					<div className="endpoint-meta-data-value">{endpoint || '-'}</div>
+				</div>
+				<div className="endpoint-meta-data-pill">
+					<div className="endpoint-meta-data-label">Port</div>
+					<div className="endpoint-meta-data-value">{port || '-'}</div>
+				</div>
+			</div>
 			<EndPointMetrics endPointMetricsDataQuery={endPointMetricsDataQuery} />
 			{!isServicesFilterApplied && (
 				<DependentServices

@@ -152,18 +184,14 @@ function EndPointDetails({
 				endPointStatusCodeLatencyBarChartsDataQuery={
 					endPointStatusCodeLatencyBarChartsDataQuery
 				}
+				domainName={domainName}
+				endPointName={endPointName}
+				domainListFilters={domainListFilters}
+				filters={filters}
 			/>
 			<StatusCodeTable endPointStatusCodeDataQuery={endPointStatusCodeDataQuery} />
-			<MetricOverTimeGraph
-				metricOverTimeDataQuery={endPointRateOverTimeDataQuery}
-				widgetInfoIndex={0}
-				endPointName={endPointName}
-			/>
-			<MetricOverTimeGraph
-				metricOverTimeDataQuery={endPointLatencyOverTimeDataQuery}
-				widgetInfoIndex={1}
-				endPointName={endPointName}
-			/>
+			<MetricOverTimeGraph widget={rateOverTimeWidget} />
+			<MetricOverTimeGraph widget={latencyOverTimeWidget} />
 		</div>
 	);
 }
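Both widget builders receive the domain-level and endpoint-level filters merged into a single set, as shown in the memo above. A small TypeScript sketch of that merge, with simplified stand-ins for IBuilderQuery['filters']:

```ts
// Simplified stand-ins for the query-builder filter types.
interface TagFilterItem {
	id: string;
	op: string;
	value: string;
}
interface TagFilters {
	items: TagFilterItem[];
	op: string;
}

// Domain-level and endpoint-level filter items are concatenated under the
// endpoint filters' operator, as in the memo above.
const mergeFilters = (domain: TagFilters, local: TagFilters): TagFilters => ({
	items: [...domain.items, ...local.items],
	op: local.op,
});
```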
@@ -8,6 +8,7 @@ import { useSelector } from 'react-redux';
 import { AppState } from 'store/reducers';
 import { SuccessResponse } from 'types/api';
 import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
+import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
 import { GlobalReducer } from 'types/reducer/globalTime';

 import EndPointDetailsZeroState from './components/EndPointDetailsZeroState';

@@ -17,10 +18,12 @@ function EndPointDetailsWrapper({
 	domainName,
 	endPointName,
 	setSelectedEndPointName,
+	domainListFilters,
 }: {
 	domainName: string;
 	endPointName: string;
 	setSelectedEndPointName: (value: string) => void;
+	domainListFilters: IBuilderQuery['filters'];
 }): JSX.Element {
 	const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
 		(state) => state.globalTime,

@@ -69,6 +72,7 @@ function EndPointDetailsWrapper({
 			domainName={domainName}
 			endPointName={endPointName}
 			setSelectedEndPointName={setSelectedEndPointName}
+			domainListFilters={domainListFilters}
 		/>
 	);
 }
@@ -28,6 +28,8 @@ function EndPointDetailsZeroState({
 				<EndPointsDropDown
 					setSelectedEndPointName={setSelectedEndPointName}
 					endPointDropDownDataQuery={endPointDropDownDataQuery}
+					parentContainerDiv=".end-point-details-zero-state-wrapper"
+					dropdownStyle={{ width: '60%' }}
 				/>
 			</div>
 		</div>
@@ -70,7 +70,7 @@ function EndPointMetrics({
 				<Skeleton.Button active size="small" />
 			) : (
 				<Tooltip title={metricsData?.rate}>
-					<span className="round-metric-tag">{metricsData?.rate}/sec</span>
+					<span className="round-metric-tag">{metricsData?.rate} ops/sec</span>
 				</Tooltip>
 			)}
 		</Typography.Text>
@@ -8,16 +8,22 @@ interface EndPointsDropDownProps {
 	selectedEndPointName?: string;
 	setSelectedEndPointName: (value: string) => void;
 	endPointDropDownDataQuery: UseQueryResult<SuccessResponse<any>, unknown>;
+	parentContainerDiv?: string;
+	dropdownStyle?: React.CSSProperties;
 }

 const defaultProps = {
 	selectedEndPointName: '',
+	parentContainerDiv: '',
+	dropdownStyle: {},
 };

 function EndPointsDropDown({
 	selectedEndPointName,
 	setSelectedEndPointName,
 	endPointDropDownDataQuery,
+	parentContainerDiv,
+	dropdownStyle,
 }: EndPointsDropDownProps): JSX.Element {
 	const { data, isLoading, isFetching } = endPointDropDownDataQuery;

@@ -39,6 +45,13 @@ function EndPointsDropDown({
 			style={{ width: '100%' }}
 			onChange={handleChange}
 			options={formattedData}
+			getPopupContainer={
+				parentContainerDiv
+					? (): HTMLElement =>
+							document.querySelector(parentContainerDiv) as HTMLElement
+					: (triggerNode): HTMLElement => triggerNode.parentNode as HTMLElement
+			}
+			dropdownStyle={dropdownStyle}
 		/>
 	);
 }
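antd mounts Select dropdowns on document.body by default; the new getPopupContainer prop re-parents the popup so it stays inside a chosen scroll container. A sketch of the resolver on its own; the selector argument is illustrative:

```ts
// Returns the element the dropdown should render into: a named ancestor when a
// selector is given, otherwise the trigger's own parent (the antd default is
// document.body, which detaches the popup from scrolling containers).
const makePopupContainerResolver = (selector?: string) => (
	triggerNode: HTMLElement,
): HTMLElement =>
	selector
		? (document.querySelector(selector) as HTMLElement)
		: (triggerNode.parentNode as HTMLElement);
```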
@@ -1,6 +1,7 @@
 import { LoadingOutlined } from '@ant-design/icons';
 import { Spin, Table } from 'antd';
 import { ColumnType } from 'antd/lib/table';
+import logEvent from 'api/common/logEvent';
 import { ENTITY_VERSION_V4 } from 'constants/app';
 import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
 import {

@@ -114,6 +115,7 @@ function ExpandedRow({
 			onClick: (): void => {
 				setSelectedEndPointName(record.endpointName);
 				setSelectedView(VIEW_TYPES.ENDPOINT_DETAILS);
+				logEvent('API Monitoring: Endpoint name row clicked', {});
 			},
 			className: 'expanded-clickable-row',
 		})}
@@ -1,110 +1,18 @@
-import { Card, Skeleton, Typography } from 'antd';
-import cx from 'classnames';
-import Uplot from 'components/Uplot';
-import { PANEL_TYPES } from 'constants/queryBuilder';
-import {
-	apiWidgetInfo,
-	extractPortAndEndpoint,
-	getFormattedChartData,
-} from 'container/ApiMonitoring/utils';
-import { useIsDarkMode } from 'hooks/useDarkMode';
-import { useResizeObserver } from 'hooks/useDimensions';
-import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions';
-import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData';
-import { useCallback, useMemo, useRef } from 'react';
-import { UseQueryResult } from 'react-query';
-import { useSelector } from 'react-redux';
-import { AppState } from 'store/reducers';
-import { SuccessResponse } from 'types/api';
-import { GlobalReducer } from 'types/reducer/globalTime';
-import { Options } from 'uplot';
-
-import ErrorState from './ErrorState';
-
-function MetricOverTimeGraph({
-	metricOverTimeDataQuery,
-	widgetInfoIndex,
-	endPointName,
-}: {
-	metricOverTimeDataQuery: UseQueryResult<SuccessResponse<any>, unknown>;
-	widgetInfoIndex: number;
-	endPointName: string;
-}): JSX.Element {
-	const { data } = metricOverTimeDataQuery;
-
-	const { minTime, maxTime } = useSelector<AppState, GlobalReducer>(
-		(state) => state.globalTime,
-	);
-
-	const graphRef = useRef<HTMLDivElement>(null);
-	const dimensions = useResizeObserver(graphRef);
-
-	const { endpoint } = extractPortAndEndpoint(endPointName);
-
-	const formattedChartData = useMemo(
-		() => getFormattedChartData(data?.payload, [endpoint]),
-		[data?.payload, endpoint],
-	);
-
-	const chartData = useMemo(() => getUPlotChartData(formattedChartData), [
-		formattedChartData,
-	]);
-
-	const isDarkMode = useIsDarkMode();
-
-	const options = useMemo(
-		() =>
-			getUPlotChartOptions({
-				apiResponse: formattedChartData,
-				isDarkMode,
-				dimensions,
-				yAxisUnit: apiWidgetInfo[widgetInfoIndex].yAxisUnit,
-				softMax: null,
-				softMin: null,
-				minTimeScale: Math.floor(minTime / 1e9),
-				maxTimeScale: Math.floor(maxTime / 1e9),
-				panelType: PANEL_TYPES.TIME_SERIES,
-			}),
-		[
-			formattedChartData,
-			minTime,
-			maxTime,
-			widgetInfoIndex,
-			dimensions,
-			isDarkMode,
-		],
-	);
-
-	const renderCardContent = useCallback(
-		(query: UseQueryResult<SuccessResponse<any>, unknown>): JSX.Element => {
-			if (query.isLoading) {
-				return <Skeleton />;
-			}
-
-			if (query.error) {
-				return <ErrorState refetch={query.refetch} />;
-			}
-
-			return (
-				<div
-					className={cx('chart-container', {
-						'no-data-container':
-							!query.isLoading && !query?.data?.payload?.data?.result?.length,
-					})}
-				>
-					<Uplot options={options as Options} data={chartData} />
-				</div>
-			);
-		},
-		[options, chartData],
-	);
+import { Card } from 'antd';
+import GridCard from 'container/GridCardLayout/GridCard';
+import { Widgets } from 'types/api/dashboard/getAll';

+function MetricOverTimeGraph({ widget }: { widget: Widgets }): JSX.Element {
 	return (
 		<div>
 			<Card bordered className="endpoint-details-card">
-				<Typography.Text>{apiWidgetInfo[widgetInfoIndex].title}</Typography.Text>
-				<div className="graph-container" ref={graphRef}>
-					{renderCardContent(metricOverTimeDataQuery)}
+				<div className="graph-container">
+					<GridCard
+						widget={widget}
+						isQueryEnabled
+						onDragSelect={(): void => {}}
+						customOnDragSelect={(): void => {}}
+					/>
 				</div>
 			</Card>
 		</div>
@@ -1,13 +1,22 @@
+import { Color } from '@signozhq/design-tokens';
 import { Button, Card, Skeleton, Typography } from 'antd';
 import cx from 'classnames';
+import { useGetGraphCustomSeries } from 'components/CeleryTask/useGetGraphCustomSeries';
+import { useNavigateToExplorer } from 'components/CeleryTask/useNavigateToExplorer';
 import Uplot from 'components/Uplot';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import {
+	getCustomFiltersForBarChart,
 	getFormattedEndPointStatusCodeChartData,
+	getStatusCodeBarChartWidgetData,
 	statusCodeWidgetInfo,
 } from 'container/ApiMonitoring/utils';
+import { handleGraphClick } from 'container/GridCardLayout/GridCard/utils';
+import { useGraphClickToShowButton } from 'container/GridCardLayout/useGraphClickToShowButton';
+import useNavigateToExplorerPages from 'container/GridCardLayout/useNavigateToExplorerPages';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import { useResizeObserver } from 'hooks/useDimensions';
+import { useNotifications } from 'hooks/useNotifications';
 import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions';
 import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData';
 import { useCallback, useMemo, useRef, useState } from 'react';

@@ -15,6 +24,8 @@ import { UseQueryResult } from 'react-query';
 import { useSelector } from 'react-redux';
 import { AppState } from 'store/reducers';
 import { SuccessResponse } from 'types/api';
+import { Widgets } from 'types/api/dashboard/getAll';
+import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
 import { GlobalReducer } from 'types/reducer/globalTime';
 import { Options } from 'uplot';

@@ -23,6 +34,10 @@ import ErrorState from './ErrorState';
 function StatusCodeBarCharts({
 	endPointStatusCodeBarChartsDataQuery,
 	endPointStatusCodeLatencyBarChartsDataQuery,
+	domainName,
+	endPointName,
+	domainListFilters,
+	filters,
 }: {
 	endPointStatusCodeBarChartsDataQuery: UseQueryResult<
 		SuccessResponse<any>,

@@ -32,6 +47,10 @@ function StatusCodeBarCharts({
 		SuccessResponse<any>,
 		unknown
 	>;
+	domainName: string;
+	endPointName: string;
+	domainListFilters: IBuilderQuery['filters'];
+	filters: IBuilderQuery['filters'];
 }): JSX.Element {
 	// 0 : Status Code Count
 	// 1 : Status Code Latency

@@ -85,6 +104,72 @@ function StatusCodeBarCharts({

 	const isDarkMode = useIsDarkMode();

+	const graphClick = useGraphClickToShowButton({
+		graphRef,
+		isButtonEnabled: true,
+		buttonClassName: 'view-onclick-show-button',
+	});
+
+	const navigateToExplorer = useNavigateToExplorer();
+
+	const navigateToExplorerPages = useNavigateToExplorerPages();
+	const { notifications } = useNotifications();
+
+	const { getCustomSeries } = useGetGraphCustomSeries({
+		isDarkMode,
+		drawStyle: 'bars',
+		colorMapping: {
+			'200-299': Color.BG_FOREST_500,
+			'300-399': Color.BG_AMBER_400,
+			'400-499': Color.BG_CHERRY_500,
+			'500-599': Color.BG_ROBIN_500,
+			Other: Color.BG_SIENNA_500,
+		},
+	});
+
+	const widget = useMemo<Widgets>(
+		() =>
+			getStatusCodeBarChartWidgetData(domainName, endPointName, {
+				items: [...domainListFilters.items, ...filters.items],
+				op: filters.op,
+			}),
+		[domainName, endPointName, domainListFilters, filters],
+	);
+
+	const graphClickHandler = useCallback(
+		(
+			xValue: number,
+			yValue: number,
+			mouseX: number,
+			mouseY: number,
+			metric?: { [key: string]: string },
+			queryData?: { queryName: string; inFocusOrNot: boolean },
+		): void => {
+			const customFilters = getCustomFiltersForBarChart(metric);
+			handleGraphClick({
+				xValue,
+				yValue,
+				mouseX,
+				mouseY,
+				metric,
+				queryData,
+				widget,
+				navigateToExplorerPages,
+				navigateToExplorer,
+				notifications,
+				graphClick,
+				customFilters,
+			});
+		},
+		[
+			widget,
+			navigateToExplorerPages,
+			navigateToExplorer,
+			notifications,
+			graphClick,
+		],
+	);
+
 	const options = useMemo(
 		() =>
 			getUPlotChartOptions({

@@ -100,6 +185,8 @@ function StatusCodeBarCharts({
 			minTimeScale: Math.floor(minTime / 1e9),
 			maxTimeScale: Math.floor(maxTime / 1e9),
 			panelType: PANEL_TYPES.BAR,
+			onClickHandler: graphClickHandler,
+			customSeries: getCustomSeries,
 		}),
 		[
 			minTime,

@@ -109,6 +196,8 @@ function StatusCodeBarCharts({
 			formattedEndPointStatusCodeBarChartsDataPayload,
 			formattedEndPointStatusCodeLatencyBarChartsDataPayload,
 			isDarkMode,
+			graphClickHandler,
+			getCustomSeries,
 		],
 	);
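Each grouped series is named by its status-code bucket, and the custom-series hook maps bucket names to bar colors. A sketch of that mapping in TypeScript; the hex values are illustrative stand-ins for the @signozhq/design-tokens colors used above:

```ts
// Bucket-to-color lookup with a fallback, mirroring the colorMapping above.
// Hex values are placeholders, not the real design tokens.
const STATUS_CODE_COLORS: Record<string, string> = {
	'200-299': '#25E192', // success: forest
	'300-399': '#FFC356', // redirects: amber
	'400-499': '#F56C87', // client errors: cherry
	'500-599': '#4E74F8', // server errors: robin
	Other: '#D8A085', // anything unparsable: sienna
};

const colorForBucket = (bucket: string): string =>
	STATUS_CODE_COLORS[bucket] ?? STATUS_CODE_COLORS.Other;
```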
@@ -3,6 +3,7 @@ import '../Explorer.styles.scss';
 import { LoadingOutlined } from '@ant-design/icons';
 import { Spin, Table, Typography } from 'antd';
 import axios from 'api';
+import logEvent from 'api/common/logEvent';
 import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
 import { AxiosError } from 'axios';
 import cx from 'classnames';

@@ -130,6 +131,7 @@ function DomainList({
 				(item) => item.key === record.key,
 			);
 			setSelectedDomainIndex(dataIndex);
+			logEvent('API Monitoring: Domain name row clicked', {});
 		}
 	},
 	className: 'expanded-clickable-row',

@@ -147,6 +149,7 @@ function DomainList({
 			handleClose={(): void => {
 				setSelectedDomainIndex(-1);
 			}}
+			domainListFilters={query?.filters}
 		/>
 	)}
 </section>
@@ -3,13 +3,14 @@ import './Explorer.styles.scss';
 import { FilterOutlined } from '@ant-design/icons';
 import * as Sentry from '@sentry/react';
 import { Switch, Typography } from 'antd';
+import logEvent from 'api/common/logEvent';
 import cx from 'classnames';
 import QuickFilters from 'components/QuickFilters/QuickFilters';
 import { QuickFiltersSource } from 'components/QuickFilters/types';
 import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
 import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
 import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
-import { useMemo, useState } from 'react';
+import { useEffect, useMemo, useState } from 'react';
 import { Query } from 'types/api/queryBuilder/queryBuilderData';
 import { DataSource } from 'types/common/queryBuilder';

@@ -21,6 +22,10 @@ function Explorer(): JSX.Element {

 	const { currentQuery } = useQueryBuilder();

+	useEffect(() => {
+		logEvent('API Monitoring: Landing page visited', {});
+	}, []);
+
 	const { handleChangeQueryData } = useQueryOperations({
 		index: 0,
 		query: currentQuery.builder.queryData[0],

@@ -64,7 +69,12 @@ function Explorer(): JSX.Element {
 			style={{ marginLeft: 'auto' }}
 			checked={showIP}
 			onClick={(): void => {
-				setShowIP((showIP) => !showIP);
+				setShowIP((showIP): boolean => {
+					logEvent('API Monitoring: Show IP addresses clicked', {
+						showIP: !showIP,
+					});
+					return !showIP;
+				});
 			}}
 		/>
 	</div>
@@ -8,16 +8,23 @@ import {
 } from 'components/QuickFilters/types';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
+import { GraphClickMetaData } from 'container/GridCardLayout/useNavigateToExplorerPages';
+import { getWidgetQueryBuilder } from 'container/MetricsApplication/MetricsApplication.factory';
 import dayjs from 'dayjs';
 import { GetQueryResultsProps } from 'lib/dashboard/getQueryResults';
 import { cloneDeep } from 'lodash-es';
 import { ArrowUpDown, ChevronDown, ChevronRight } from 'lucide-react';
+import { getWidgetQuery } from 'pages/MessagingQueues/MQDetails/MetricPage/MetricPageUtil';
+import { Widgets } from 'types/api/dashboard/getAll';
 import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
 import {
 	BaseAutocompleteData,
 	DataTypes,
 } from 'types/api/queryBuilder/queryAutocompleteResponse';
-import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
+import {
+	IBuilderQuery,
+	TagFilterItem,
+} from 'types/api/queryBuilder/queryBuilderData';
 import { QueryData } from 'types/api/widgets/getQuery';
 import { EQueryType } from 'types/common/dashboard';
 import { DataSource } from 'types/common/queryBuilder';
@@ -128,12 +135,15 @@ export const columnsConfig: ColumnType<APIDomainsRowData>[] = [
 		sorter: false,
 		align: 'right',
 		className: `column`,
-		render: (lastUsed: number): string => getLastUsedRelativeTime(lastUsed),
+		render: (lastUsed: number | string): string =>
+			lastUsed === 'n/a' || lastUsed === '-'
+				? '-'
+				: getLastUsedRelativeTime(lastUsed as number),
 	},
 	{
 		title: (
 			<div>
-				Rate <span className="round-metric-tag">/s</span>
+				Rate <span className="round-metric-tag">ops/s</span>
 			</div>
 		),
 		dataIndex: 'rate',
@@ -155,21 +165,26 @@ export const columnsConfig: ColumnType<APIDomainsRowData>[] = [
 		sorter: false,
 		align: 'right',
 		className: `column`,
-		render: (errorRate: number): React.ReactNode => (
-			<Progress
-				status="active"
-				percent={Number((errorRate * 100).toFixed(1))}
-				strokeLinecap="butt"
-				size="small"
-				strokeColor={((): string => {
-					const errorRatePercent = Number((errorRate * 100).toFixed(1));
-					if (errorRatePercent >= 90) return Color.BG_SAKURA_500;
-					if (errorRatePercent >= 60) return Color.BG_AMBER_500;
-					return Color.BG_FOREST_500;
-				})()}
-				className="progress-bar error-rate"
-			/>
-		),
+		render: (errorRate: number | string): React.ReactNode => {
+			if (errorRate === 'n/a' || errorRate === '-') {
+				return '-';
+			}
+			return (
+				<Progress
+					status="active"
+					percent={Number(((errorRate as number) * 100).toFixed(1))}
+					strokeLinecap="butt"
+					size="small"
+					strokeColor={((): string => {
+						const errorRatePercent = Number(((errorRate as number) * 100).toFixed(1));
+						if (errorRatePercent >= 90) return Color.BG_SAKURA_500;
+						if (errorRatePercent >= 60) return Color.BG_AMBER_500;
+						return Color.BG_FOREST_500;
+					})()}
+					className="progress-bar error-rate"
+				/>
+			);
+		},
 	},
 	{
 		title: (
@@ -217,9 +232,9 @@ interface APIMonitoringResponseRow {
 	data: {
 		endpoints: number;
 		error_rate: number;
-		lastseen: number;
+		lastseen: number | string;
 		[domainNameKey]: string;
-		p99: number;
+		p99: number | string;
 		rps: number;
 	};
 }

@@ -232,12 +247,12 @@ interface EndPointsResponseRow {

 export interface APIDomainsRowData {
 	key: string;
-	domainName: React.ReactNode;
-	endpointCount: React.ReactNode;
-	rate: React.ReactNode;
-	errorRate: React.ReactNode;
-	latency: React.ReactNode;
-	lastUsed: React.ReactNode;
+	domainName: string;
+	endpointCount: number | string;
+	rate: number | string;
+	errorRate: number | string;
+	latency: number | string;
+	lastUsed: string;
 }

 // Rename this to a proper name
@@ -246,12 +261,20 @@ export const formatDataForTable = (
 ): APIDomainsRowData[] =>
 	data?.map((domain) => ({
 		key: v4(),
-		domainName: domain.data[domainNameKey] || '',
-		endpointCount: domain.data.endpoints,
-		rate: domain.data.rps,
-		errorRate: domain.data.error_rate,
-		latency: Math.round(domain.data.p99 / 1000000), // Convert from nanoseconds to milliseconds
-		lastUsed: new Date(Math.floor(domain.data.lastseen / 1000000)).toISOString(), // Convert from nanoseconds to milliseconds
+		domainName: domain?.data[domainNameKey] || '-',
+		endpointCount: domain?.data?.endpoints || '-',
+		rate: domain.data.rps || '-',
+		errorRate: domain.data.error_rate || '-',
+		latency:
+			domain.data.p99 === 'n/a'
+				? '-'
+				: Math.round(Number(domain.data.p99) / 1000000), // Convert from nanoseconds to milliseconds
+		lastUsed:
+			domain.data.lastseen === 'n/a'
+				? '-'
+				: new Date(
+						Math.floor(Number(domain.data.lastseen) / 1000000),
+				  ).toISOString(), // Convert from nanoseconds to milliseconds
 	}));

 // Rename this to a proper name
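The API can now return the literal 'n/a' where a number used to be, so every formatter guards before converting. A standalone sketch of the nanosecond-to-millisecond guard used above:

```ts
// Backend fields may be a number or the literal 'n/a'; conversion only runs
// on real numbers, everything else becomes the '-' placeholder.
const NS_PER_MS = 1_000_000;

const nsToMs = (value: number | string): number | string =>
	value === 'n/a' || value === '-' ? '-' : Math.round(Number(value) / NS_PER_MS);

console.log(nsToMs(42_000_000)); // 42 (ms)
console.log(nsToMs('n/a')); // '-'
```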
@@ -468,7 +491,6 @@ export const extractPortAndEndpoint = (
 	}
 };

-// Add icons in the below column headers
 export const getEndPointsColumnsConfig = (
 	isGroupedByAttribute: boolean,
 	expandedRowKeys: React.Key[],
@@ -576,7 +598,7 @@ export const formatEndPointsDataForTable = (
 	);
 	return {
 		key: v4(),
-		endpointName: (endpoint.data['http.url'] as string) || '',
+		endpointName: (endpoint.data['http.url'] as string) || '-',
 		port,
 		callCount: endpoint.data.A || '-',
 		latency:
@@ -593,7 +615,6 @@ export const formatEndPointsDataForTable = (

 	const groupedByAttributeData = groupBy.map((attribute) => attribute.key);

-	// TODO: Use tags to show the concatenated attribute values
 	return data?.map((endpoint) => {
 		const newEndpointName = groupedByAttributeData
 			.map((attribute) => endpoint.data[attribute])
@@ -639,7 +660,7 @@ export const createFiltersForSelectedRowData = (
 				type: null,
 			},
 			op: '=',
-			value: groupedByMeta[key],
+			value: groupedByMeta[key] || '',
 			id: key,
 		})),
 	);
@@ -649,12 +670,10 @@ export const createFiltersForSelectedRowData = (

 // First query payload for endpoint metrics
 // Second query payload for endpoint status code
-// Third query payload for endpoint rate over time graph
-// Fourth query payload for endpoint latency over time graph
-// Fifth query payload for endpoint dropdown selection
-// Sixth query payload for endpoint dependant services
-// Seventh query payload for endpoint response status count bar chart
-// Eighth query payload for endpoint response status code latency bar chart
+// Third query payload for endpoint dropdown selection
+// Fourth query payload for endpoint dependant services
+// Fifth query payload for endpoint response status count bar chart
+// Sixth query payload for endpoint response status code latency bar chart
 export const getEndPointDetailsQueryPayload = (
 	domainName: string,
 	endPointName: string,
@@ -1101,205 +1120,6 @@ export const getEndPointDetailsQueryPayload = (
 		end,
 		step: 60,
 	},
-	{
-		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
-		query: {
-			builder: {
-				queryData: [
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.String,
-							id: '------false',
-							isColumn: false,
-							key: '',
-							type: '',
-						},
-						aggregateOperator: 'rate',
-						dataSource: DataSource.TRACES,
-						disabled: false,
-						expression: 'B',
-						filters: {
-							items: [
-								{
-									id: '3c76fe0b',
-									key: {
-										dataType: DataTypes.String,
-										id: 'net.peer.name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: 'net.peer.name',
-										type: 'tag',
-									},
-									op: '=',
-									value: domainName,
-								},
-								{
-									id: '30710f04',
-									key: {
-										dataType: DataTypes.String,
-										id: 'http.url--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: 'http.url',
-										type: 'tag',
-									},
-									op: '=',
-									value: endPointName,
-								},
-								...filters.items,
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'http.url--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: 'http.url',
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: '',
-						limit: null,
-						orderBy: [],
-						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'sum',
-						stepInterval: 60,
-						timeAggregation: 'rate',
-					},
-				],
-				queryFormulas: [],
-			},
-			clickhouse_sql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-			],
-			id: '315b15fa-ff0c-442f-89f8-2bf4fb1af2f2',
-			promql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-			],
-			queryType: EQueryType.QUERY_BUILDER,
-		},
-		variables: {},
-		formatForWeb: false,
-		start,
-		end,
-		step: 60,
-	},
-	{
-		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
-		query: {
-			builder: {
-				queryData: [
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.Float64,
-							id: 'duration_nano--float64----true',
-							isColumn: true,
-							isJSON: false,
-							key: 'duration_nano',
-							type: '',
-						},
-						aggregateOperator: 'p99',
-						dataSource: DataSource.TRACES,
-						disabled: false,
-						expression: 'B',
-						filters: {
-							items: [
-								{
-									id: '63adb3ff',
-									key: {
-										dataType: DataTypes.String,
-										id: 'net.peer.name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: 'net.peer.name',
-										type: 'tag',
-									},
-									op: '=',
-									value: domainName,
-								},
-								{
-									id: '50142500',
-									key: {
-										dataType: DataTypes.String,
-										id: 'http.url--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: 'http.url',
-										type: 'tag',
-									},
-									op: '=',
-									value: endPointName,
-								},
-								...filters.items,
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'http.url--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: 'http.url',
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: '',
-						limit: null,
-						orderBy: [],
-						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'sum',
-						stepInterval: 60,
-						timeAggregation: 'p99',
-					},
-				],
-				queryFormulas: [],
-			},
-			clickhouse_sql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-			],
-			id: '315b15fa-ff0c-442f-89f8-2bf4fb1af2f2',
-			promql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-			],
-			queryType: EQueryType.QUERY_BUILDER,
-		},
-		variables: {},
-		formatForWeb: false,
-		start,
-		end,
-		step: 60,
-	},
 	{
 		selectedTime: 'GLOBAL_TIME',
 		graphType: PANEL_TYPES.TABLE,
@@ -1801,7 +1621,7 @@ interface EndPointMetricsData {
 interface EndPointStatusCodeData {
 	key: string;
 	statusCode: string;
-	count: number;
+	count: number | string;
 	p99Latency: number | string;
 }
@@ -1824,8 +1644,8 @@ export const getFormattedEndPointStatusCodeData = (
 ): EndPointStatusCodeData[] =>
 	data?.map((row) => ({
 		key: v4(),
-		statusCode: row.data.response_status_code,
-		count: row.data.A,
+		statusCode: row.data.response_status_code || '-',
+		count: row.data.A || '-',
 		p99Latency:
 			row.data.B === 'n/a' ? '-' : Math.round(Number(row.data.B) / 1000000), // Convert from nanoseconds to milliseconds,
 	}));
@@ -1857,11 +1677,6 @@ export const endPointStatusCodeColumns: ColumnType<EndPointStatusCodeData>[] = [
 	},
 ];

-export const apiWidgetInfo = [
-	{ title: 'Rate over time', yAxisUnit: 'ops/s' },
-	{ title: 'Latency over time', yAxisUnit: 'ns' },
-];
-
 export const statusCodeWidgetInfo = [
 	{ yAxisUnit: 'calls' },
 	{ yAxisUnit: 'ns' },
@@ -1885,8 +1700,8 @@ export const getFormattedEndPointDropDownData = (
 ): EndPointDropDownData[] =>
 	data?.map((row) => ({
 		key: v4(),
-		label: row.data['http.url'],
-		value: row.data['http.url'],
+		label: row.data['http.url'] || '-',
+		value: row.data['http.url'] || '-',
 	}));

 interface DependentServicesResponseRow {
@@ -1903,6 +1718,7 @@ interface DependentServicesData {
 	percentage: number;
 }

+// Discuss once about type safety of this function
 export const getFormattedDependentServicesData = (
 	data: DependentServicesResponseRow[],
 ): DependentServicesData[] => {
@@ -1983,7 +1799,7 @@ export const groupStatusCodes = (

 	// Track all timestamps
 	series.values.forEach((value) => {
-		allTimestamps.add(value[0]);
+		allTimestamps.add(Number(value[0]));
 	});

 	// Initialize or update the grouped series
@@ -2049,8 +1865,114 @@ export const groupStatusCodes = (
 		});
 	});

-	return Object.values(groupedSeries);
+	// Define the order of status code ranges
+	const statusCodeOrder = ['200-299', '300-399', '400-499', '500-599', 'Other'];
+
+	// Return the grouped series in the specified order
+	return statusCodeOrder
+		.filter((code) => groupedSeries[code]) // Only include codes that exist in the data
+		.map((code) => groupedSeries[code]);
 };

+export const getStatusCodeBarChartWidgetData = (
+	domainName: string,
+	endPointName: string,
+	filters: IBuilderQuery['filters'],
+): Widgets => ({
+	query: {
+		builder: {
+			queryData: [
+				{
+					aggregateAttribute: {
+						dataType: DataTypes.String,
+						id: '------false',
+						isColumn: false,
+						key: '',
+						type: '',
+					},
+					aggregateOperator: 'count',
+					dataSource: DataSource.TRACES,
+					disabled: false,
+					expression: 'A',
+					filters: {
+						items: [
+							{
+								id: 'c6724407',
+								key: {
+									dataType: DataTypes.String,
+									id: 'net.peer.name--string--tag--false',
+									isColumn: false,
+									isJSON: false,
+									key: 'net.peer.name',
+									type: 'tag',
+								},
+								op: '=',
+								value: domainName,
+							},
+							{
+								id: '8b1be6f0',
+								key: {
+									dataType: DataTypes.String,
+									id: 'http.url--string--tag--false',
+									isColumn: false,
+									isJSON: false,
+									key: 'http.url',
+									type: 'tag',
+								},
+								op: '=',
+								value: endPointName,
+							},
+							...filters.items,
+						],
+						op: 'AND',
+					},
+					functions: [],
+					groupBy: [],
+					having: [],
+					legend: '',
+					limit: null,
+					orderBy: [],
+					queryName: 'A',
+					reduceTo: 'avg',
+					spaceAggregation: 'sum',
+					stepInterval: 60,
+					timeAggregation: 'rate',
+				},
+			],
+			queryFormulas: [],
+		},
+		clickhouse_sql: [
+			{
+				disabled: false,
+				legend: '',
+				name: 'A',
+				query: '',
+			},
+		],
+		id: '315b15fa-ff0c-442f-89f8-2bf4fb1af2f2',
+		promql: [
+			{
+				disabled: false,
+				legend: '',
+				name: 'A',
+				query: '',
+			},
+		],
+		queryType: EQueryType.QUERY_BUILDER,
+	},
+	description: '',
+	id: '315b15fa-ff0c-442f-89f8-2bf4fb1af2f2',
+	isStacked: false,
+	panelTypes: PANEL_TYPES.BAR,
+	title: '',
+	opacity: '',
+	nullZeroValues: '',
+	timePreferance: 'GLOBAL_TIME',
+	softMin: null,
+	softMax: null,
+	selectedLogFields: null,
+	selectedTracesFields: null,
+});
 interface EndPointStatusCodePayloadData {
 	data: {
 		result: QueryData[];
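Returning Object.values(groupedSeries) made the legend order depend on object key insertion; the fix drives output from a fixed order array. The same idea in isolation:

```ts
// A fixed order array pins the output order regardless of how the grouped map
// was built; buckets absent from the data are skipped.
const ORDER = ['200-299', '300-399', '400-499', '500-599', 'Other'];

const toOrderedSeries = <T>(grouped: Record<string, T>): T[] =>
	ORDER.filter((code) => code in grouped).map((code) => grouped[code]);

// Usage: series arrive keyed by bucket, leave as a stable, legend-friendly list.
console.log(toOrderedSeries({ Other: 1, '200-299': 2 })); // [2, 1]
```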
@@ -2085,3 +2007,277 @@ export const END_POINT_DETAILS_QUERY_KEYS_ARRAY = [
|
||||
REACT_QUERY_KEY.GET_ENDPOINT_STATUS_CODE_BAR_CHARTS_DATA,
|
||||
REACT_QUERY_KEY.GET_ENDPOINT_STATUS_CODE_LATENCY_BAR_CHARTS_DATA,
|
||||
];
|
||||
|
||||
export const getRateOverTimeWidgetData = (
|
||||
domainName: string,
|
||||
endPointName: string,
|
||||
filters: IBuilderQuery['filters'],
|
||||
): Widgets => {
|
||||
const { endpoint, port } = extractPortAndEndpoint(endPointName);
|
||||
const legend = `${
|
||||
port !== '-' && port !== 'n/a' ? `${port}:` : ''
|
||||
}${endpoint}`;
|
||||
return getWidgetQueryBuilder(
|
||||
getWidgetQuery({
|
||||
title: 'Rate Over Time',
|
||||
description: 'Rate over time.',
|
||||
queryData: [
|
||||
{
|
||||
aggregateAttribute: {
|
||||
dataType: DataTypes.String,
|
||||
id: '------false',
|
||||
isColumn: false,
|
||||
key: '',
|
||||
type: '',
|
||||
},
|
||||
aggregateOperator: 'rate',
|
||||
dataSource: DataSource.TRACES,
|
||||
disabled: false,
|
||||
expression: 'A',
|
||||
filters: {
|
||||
items: [
|
||||
{
|
||||
id: '3c76fe0b',
|
||||
key: {
|
||||
dataType: DataTypes.String,
|
||||
id: 'net.peer.name--string--tag--false',
|
||||
isColumn: false,
|
||||
isJSON: false,
|
||||
key: 'net.peer.name',
|
||||
type: 'tag',
|
||||
},
|
||||
op: '=',
|
||||
value: domainName,
|
||||
},
|
||||
{
|
||||
id: '30710f04',
|
||||
key: {
|
||||
dataType: DataTypes.String,
|
||||
id: 'http.url--string--tag--false',
|
||||
isColumn: false,
|
||||
isJSON: false,
|
||||
key: 'http.url',
|
||||
type: 'tag',
|
||||
},
|
||||
op: '=',
|
||||
value: endPointName,
|
||||
},
|
||||
...filters.items,
|
||||
],
|
||||
op: 'AND',
|
||||
},
|
||||
functions: [],
|
||||
groupBy: [
|
||||
{
|
||||
dataType: DataTypes.String,
|
||||
id: 'http.url--string--tag--false',
|
||||
isColumn: false,
|
||||
isJSON: false,
|
||||
key: 'http.url',
|
||||
type: 'tag',
|
||||
},
|
||||
],
|
||||
having: [],
|
||||
legend,
|
||||
limit: null,
|
||||
orderBy: [],
|
||||
queryName: 'A',
|
||||
reduceTo: 'avg',
|
||||
spaceAggregation: 'sum',
|
||||
stepInterval: 60,
|
||||
timeAggregation: 'rate',
|
||||
},
|
||||
],
|
||||
yAxisUnit: 'ops/s',
|
||||
}),
|
||||
);
|
||||
};
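The legend template above prefixes the endpoint with its port only when the port carries a real value. A standalone sketch of that rule (formatLegend is a hypothetical name, not part of this diff):

// Illustrative restatement of the legend rule used above:
const formatLegend = (port: string, endpoint: string): string =>
	`${port !== '-' && port !== 'n/a' ? `${port}:` : ''}${endpoint}`;
// formatLegend('8443', '/v1/users') -> '8443:/v1/users'
// formatLegend('-', '/v1/users')    -> '/v1/users'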

export const getLatencyOverTimeWidgetData = (
	domainName: string,
	endPointName: string,
	filters: IBuilderQuery['filters'],
): Widgets => {
	const { endpoint, port } = extractPortAndEndpoint(endPointName);
	const legend = `${port}:${endpoint}`;
	return getWidgetQueryBuilder(
		getWidgetQuery({
			title: 'Latency Over Time',
			description: 'Latency over time.',
			queryData: [
				{
					aggregateAttribute: {
						dataType: DataTypes.Float64,
						id: 'duration_nano--float64----true',
						isColumn: true,
						isJSON: false,
						key: 'duration_nano',
						type: '',
					},
					aggregateOperator: 'p99',
					dataSource: DataSource.TRACES,
					disabled: false,
					expression: 'A',
					filters: {
						items: [
							{
								id: '63adb3ff',
								key: {
									dataType: DataTypes.String,
									id: 'net.peer.name--string--tag--false',
									isColumn: false,
									isJSON: false,
									key: 'net.peer.name',
									type: 'tag',
								},
								op: '=',
								value: domainName,
							},
							{
								id: '50142500',
								key: {
									dataType: DataTypes.String,
									id: 'http.url--string--tag--false',
									isColumn: false,
									isJSON: false,
									key: 'http.url',
									type: 'tag',
								},
								op: '=',
								value: endPointName,
							},
							...filters.items,
						],
						op: 'AND',
					},
					functions: [],
					groupBy: [
						{
							dataType: DataTypes.String,
							id: 'http.url--string--tag--false',
							isColumn: false,
							isJSON: false,
							key: 'http.url',
							type: 'tag',
						},
					],
					having: [],
					legend,
					limit: null,
					orderBy: [],
					queryName: 'A',
					reduceTo: 'avg',
					spaceAggregation: 'sum',
					stepInterval: 60,
					timeAggregation: 'p99',
				},
			],
			yAxisUnit: 'ns',
		}),
	);
};

/**
 * Helper function to get the start and end status codes from a status code range string
 * @param value Status code range string (e.g. '200-299') or boolean
 * @returns Tuple of [startStatusCode, endStatusCode] as strings
 */
const getStartAndEndStatusCode = (
	value: string | boolean,
): [string, string] => {
	if (!value) {
		return ['', ''];
	}

	switch (value) {
		case '100-199':
			return ['100', '199'];
		case '200-299':
			return ['200', '299'];
		case '300-399':
			return ['300', '399'];
		case '400-499':
			return ['400', '499'];
		case '500-599':
			return ['500', '599'];
		default:
			return ['', ''];
	}
};
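A quick illustration of the mapping, assuming the helper receives the same range strings the status-code bar chart emits (falsy and unrecognised inputs both collapse to empty strings):

// Illustrative calls, not part of the source:
getStartAndEndStatusCode('200-299'); // ['200', '299']
getStartAndEndStatusCode('500-599'); // ['500', '599']
getStartAndEndStatusCode(true); // ['', ''] — booleans fall through to the default branch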

/**
 * Creates filter items for bar chart based on group by fields and request data
 * Used specifically for filtering status code ranges in bar charts
 * @param groupBy Array of group by fields to create filters for
 * @param requestData Data from graph click containing values to filter on
 * @returns Array of TagFilterItems with >= and <= operators for status code ranges
 */
export const createGroupByFiltersForBarChart = (
	groupBy: BaseAutocompleteData[],
	requestData: GraphClickMetaData,
): TagFilterItem[] =>
	groupBy
		.map((gb) => {
			const value = requestData[gb.key];
			const [startStatusCode, endStatusCode] = getStartAndEndStatusCode(value);
			return value
				? [
						{
							id: v4(),
							key: gb,
							op: '>=',
							value: startStatusCode,
						},
						{
							id: v4(),
							key: gb,
							op: '<=',
							value: endStatusCode,
						},
				  ]
				: [];
		})
		.flat();
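So a graph click whose response_status_code group-by value is '400-499' expands into a >= '400' / <= '499' filter pair, while group-by keys missing from the click payload contribute nothing. A rough sketch of such a call (the argument shapes are simplified for illustration):

// Hypothetical invocation:
const filters = createGroupByFiltersForBarChart(
	[statusCodeGroupByAttribute], // a BaseAutocompleteData with key 'response_status_code'
	{ queryName: 'A', inFocusOrNot: true, response_status_code: '400-499' },
);
// filters -> [{ op: '>=', value: '400', ... }, { op: '<=', value: '499', ... }]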

export const getCustomFiltersForBarChart = (
	metric:
		| {
				[key: string]: string;
		  }
		| undefined,
): TagFilterItem[] => {
	if (!metric?.response_status_code) {
		return [];
	}
	const [startStatusCode, endStatusCode] = getStartAndEndStatusCode(
		metric.response_status_code,
	);
	return [
		{
			id: v4(),
			key: {
				dataType: DataTypes.String,
				id: 'response_status_code--string--tag--false',
				isColumn: false,
				isJSON: false,
				key: 'response_status_code',
				type: 'tag',
			},
			op: '>=',
			value: startStatusCode,
		},
		{
			id: v4(),
			key: {
				dataType: DataTypes.String,
				id: 'response_status_code--string--tag--false',
				isColumn: false,
				isJSON: false,
				key: 'response_status_code',
				type: 'tag',
			},
			op: '<=',
			value: endStatusCode,
		},
	];
};

@@ -49,6 +49,7 @@ function FullView({
	isDependedDataLoaded = false,
	onToggleModelHandler,
	onClickHandler,
	customOnDragSelect,
	setCurrentGraphRef,
}: FullViewProps): JSX.Element {
	const { safeNavigate } = useSafeNavigate();
@@ -252,7 +253,7 @@ function FullView({
	onToggleModelHandler={onToggleModelHandler}
	setGraphVisibility={setGraphsVisibilityStates}
	graphVisibility={graphsVisibilityStates}
	onDragSelect={onDragSelect}
	onDragSelect={customOnDragSelect ?? onDragSelect}
	tableProcessedDataRef={tableProcessedDataRef}
	searchTerm={searchTerm}
	onClickHandler={onClickHandler}

@@ -50,6 +50,7 @@ export interface FullViewProps {
	widget: Widgets;
	fullViewOptions?: boolean;
	onClickHandler?: OnClickPluginOpts['onClick'];
	customOnDragSelect?: (start: number, end: number) => void;
	name: string;
	tableProcessedDataRef: MutableRefObject<RowData[]>;
	version?: string;

@@ -50,6 +50,7 @@ function WidgetGraphComponent({
	setRequestData,
	onClickHandler,
	onDragSelect,
	customOnDragSelect,
	customTooltipElement,
	openTracesButton,
	onOpenTraceBtnClick,
@@ -327,6 +328,7 @@ function WidgetGraphComponent({
	onToggleModelHandler={onToggleModelHandler}
	tableProcessedDataRef={tableProcessedDataRef}
	onClickHandler={onClickHandler ?? graphClickHandler}
	customOnDragSelect={customOnDragSelect}
	setCurrentGraphRef={setCurrentGraphRef}
/>
</Modal>

@@ -36,6 +36,7 @@ function GridCardGraph({
	version,
	onClickHandler,
	onDragSelect,
	customOnDragSelect,
	customTooltipElement,
	dataAvailable,
	getGraphData,
@@ -272,6 +273,7 @@ function GridCardGraph({
	setRequestData={setRequestData}
	onClickHandler={onClickHandler}
	onDragSelect={onDragSelect}
	customOnDragSelect={customOnDragSelect}
	customTooltipElement={customTooltipElement}
	openTracesButton={openTracesButton}
	onOpenTraceBtnClick={onOpenTraceBtnClick}

@@ -33,6 +33,7 @@ export interface WidgetGraphComponentProps {
	setRequestData?: Dispatch<SetStateAction<GetQueryResultsProps>>;
	onClickHandler?: OnClickPluginOpts['onClick'];
	onDragSelect: (start: number, end: number) => void;
	customOnDragSelect?: (start: number, end: number) => void;
	customTooltipElement?: HTMLDivElement;
	openTracesButton?: boolean;
	onOpenTraceBtnClick?: (record: RowData) => void;
@@ -49,6 +50,7 @@ export interface GridCardGraphProps {
	variables?: Dashboard['data']['variables'];
	version?: string;
	onDragSelect: (start: number, end: number) => void;
	customOnDragSelect?: (start: number, end: number) => void;
	customTooltipElement?: HTMLDivElement;
	dataAvailable?: (isDataAvailable: boolean) => void;
	getGraphData?: (graphData?: MetricRangePayloadProps['data']) => void;

@@ -178,6 +178,7 @@ interface HandleGraphClickParams {
	navigateToExplorer: (props: NavigateToExplorerProps) => void;
	notifications: NotificationInstance;
	graphClick: (props: GraphClickProps) => void;
	customFilters?: TagFilterItem[];
}

export const handleGraphClick = async ({
@@ -192,6 +193,7 @@ export const handleGraphClick = async ({
	navigateToExplorer,
	notifications,
	graphClick,
	customFilters,
}: HandleGraphClickParams): Promise<void> => {
	const { stepInterval } = widget?.query?.builder?.queryData?.[0] ?? {};

@@ -221,7 +223,7 @@ export const handleGraphClick = async ({
	}: ${key}`,
	onClick: (): void =>
		navigateToExplorer({
			filters: result[key].filters,
			filters: [...result[key].filters, ...(customFilters || [])],
			dataSource: result[key].dataSource as DataSource,
			startTime: xValue,
			endTime: xValue + (stepInterval ?? 60),

@@ -44,7 +44,10 @@ import { EditMenuAction, ViewMenuAction } from './config';
import DashboardEmptyState from './DashboardEmptyState/DashboardEmptyState';
import GridCard from './GridCard';
import { Card, CardContainer, ReactGridLayout } from './styles';
import { removeUndefinedValuesFromLayout } from './utils';
import {
	hasColumnWidthsChanged,
	removeUndefinedValuesFromLayout,
} from './utils';
import { MenuItemKeys } from './WidgetHeader/contants';
import { WidgetRowHeader } from './WidgetRow';

@@ -68,6 +71,7 @@ function GraphLayout(props: GraphLayoutProps): JSX.Element {
	setDashboardQueryRangeCalled,
	setSelectedRowWidgetId,
	isDashboardFetching,
	columnWidths,
} = useDashboard();
const { data } = selectedDashboard || {};
const { pathname } = useLocation();
@@ -162,6 +166,7 @@ function GraphLayout(props: GraphLayoutProps): JSX.Element {
		logEventCalledRef.current = true;
	}
}, [data]);

const onSaveHandler = (): void => {
	if (!selectedDashboard) return;

@@ -171,6 +176,15 @@ function GraphLayout(props: GraphLayoutProps): JSX.Element {
	...selectedDashboard.data,
	panelMap: { ...currentPanelMap },
	layout: dashboardLayout.filter((e) => e.i !== PANEL_TYPES.EMPTY_WIDGET),
	widgets: selectedDashboard?.data?.widgets?.map((widget) => {
		if (columnWidths?.[widget.id]) {
			return {
				...widget,
				columnWidths: columnWidths[widget.id],
			};
		}
		return widget;
	}),
},
uuid: selectedDashboard.uuid,
};
@@ -227,20 +241,31 @@ function GraphLayout(props: GraphLayoutProps): JSX.Element {

useEffect(() => {
	if (
		isDashboardLocked ||
		!saveLayoutPermission ||
		updateDashboardMutation.isLoading ||
		isDashboardFetching
	) {
		return;
	}

	const shouldSaveLayout =
		dashboardLayout &&
		Array.isArray(dashboardLayout) &&
		dashboardLayout.length > 0 &&
		!isEqual(layouts, dashboardLayout) &&
		!isDashboardLocked &&
		saveLayoutPermission &&
		!updateDashboardMutation.isLoading &&
		!isDashboardFetching
	) {
		!isEqual(layouts, dashboardLayout);

	const shouldSaveColumnWidths =
		dashboardLayout &&
		Array.isArray(dashboardLayout) &&
		dashboardLayout.length > 0 &&
		hasColumnWidthsChanged(columnWidths, selectedDashboard);

	if (shouldSaveLayout || shouldSaveColumnWidths) {
		onSaveHandler();
	}

	// eslint-disable-next-line react-hooks/exhaustive-deps
}, [dashboardLayout]);
}, [dashboardLayout, columnWidths]);

const onSettingsModalSubmit = (): void => {
	const newTitle = form.getFieldValue('title');

@@ -12,7 +12,7 @@ import { v4 } from 'uuid';

import { extractQueryNamesFromExpression } from './utils';

type GraphClickMetaData = {
export type GraphClickMetaData = {
	[key: string]: string | boolean;
	queryName: string;
	inFocusOrNot: boolean;

@@ -1,5 +1,7 @@
import { FORMULA_REGEXP } from 'constants/regExp';
import { isEmpty, isEqual } from 'lodash-es';
import { Layout } from 'react-grid-layout';
import { Dashboard, Widgets } from 'types/api/dashboard/getAll';

export const removeUndefinedValuesFromLayout = (layout: Layout[]): Layout[] =>
	layout.map((obj) =>
@@ -25,3 +27,27 @@ export function extractQueryNamesFromExpression(expression: string): string[] {
	// Extract matches and deduplicate
	return [...new Set(expression.match(queryNameRegex) || [])];
}

export const hasColumnWidthsChanged = (
	columnWidths: Record<string, Record<string, number>>,
	selectedDashboard?: Dashboard,
): boolean => {
	// If no column widths stored, no changes
	if (isEmpty(columnWidths) || !selectedDashboard) return false;

	// Check each widget's column widths
	return Object.keys(columnWidths).some((widgetId) => {
		const dashboardWidget = selectedDashboard?.data?.widgets?.find(
			(widget) => widget.id === widgetId,
		) as Widgets;

		const newWidths = columnWidths[widgetId];
		const existingWidths = dashboardWidget?.columnWidths;

		// If both are empty/undefined, no change
		if (isEmpty(newWidths) || isEmpty(existingWidths)) return false;

		// Compare stored column widths with dashboard widget's column widths
		return !isEqual(newWidths, existingWidths);
	});
};
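Put differently, the dirty check only reports a change when a widget has widths recorded on both sides and they differ; missing widths on either side count as "no change", so first-time renders do not trigger a save. A condensed sketch of the per-widget predicate (illustrative, not part of the diff):

const widgetWidthsDirty = (
	saved?: Record<string, number>,
	stored?: Record<string, number>,
): boolean => !isEmpty(saved) && !isEmpty(stored) && !isEqual(saved, stored);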

@@ -43,6 +43,7 @@ function GridTableComponent({
	sticky,
	openTracesButton,
	onOpenTraceBtnClick,
	widgetId,
	...props
}: GridTableComponentProps): JSX.Element {
	const { t } = useTranslation(['valueGraph']);
@@ -229,6 +230,7 @@ function GridTableComponent({
	columns={openTracesButton ? columnDataWithOpenTracesButton : newColumnData}
	dataSource={dataSource}
	sticky={sticky}
	widgetId={widgetId}
	onRow={
		openTracesButton
			? (record): React.HTMLAttributes<HTMLElement> => ({

@@ -17,6 +17,7 @@ export type GridTableComponentProps = {
	searchTerm?: string;
	openTracesButton?: boolean;
	onOpenTraceBtnClick?: (record: RowData) => void;
	widgetId?: string;
} & Pick<LogsExplorerTableProps, 'data'> &
	Omit<TableProps<RowData>, 'columns' | 'dataSource'>;


@@ -1,9 +1,9 @@
import './LogsPanelComponent.styles.scss';

import { Table } from 'antd';
import LogDetail from 'components/LogDetail';
import { VIEW_TYPES } from 'components/LogDetail/constants';
import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
import { ResizeTable } from 'components/ResizeTable';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import { PANEL_TYPES } from 'constants/queryBuilder';
import Controls from 'container/Controls';
@@ -79,9 +79,14 @@ function LogsPanelComponent({

	const { formatTimezoneAdjustedTimestamp } = useTimezone();

	const columns = getLogPanelColumnsList(
		widget.selectedLogFields,
		formatTimezoneAdjustedTimestamp,
	const columns = useMemo(
		() =>
			getLogPanelColumnsList(
				widget.selectedLogFields,
				formatTimezoneAdjustedTimestamp,
			),
		// eslint-disable-next-line react-hooks/exhaustive-deps
		[widget.selectedLogFields],
	);

	const dataLength =
@@ -216,16 +221,18 @@ function LogsPanelComponent({
	<div className="logs-table">
		<div className="resize-table">
			<OverlayScrollbar>
				<Table
				<ResizeTable
					pagination={false}
					tableLayout="fixed"
					scroll={{ x: `calc(50vw - 10px)` }}
					scroll={{ x: `max-content` }}
					sticky
					loading={queryResponse.isFetching}
					style={tableStyles}
					dataSource={flattenLogData}
					columns={columns}
					onRow={handleRow}
					widgetId={widget.id}
					shouldPersistColumnWidths
				/>
			</OverlayScrollbar>
		</div>

@@ -74,6 +74,7 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
	setToScrollWidgetId,
	selectedRowWidgetId,
	setSelectedRowWidgetId,
	columnWidths,
} = useDashboard();

const { t } = useTranslation(['dashboard']);
@@ -238,8 +239,10 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
	selectedLogFields,
	selectedTracesFields,
	isLogScale,
	columnWidths: columnWidths?.[selectedWidget?.id],
	};
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
	columnUnits,
	currentQuery,
@@ -260,6 +263,7 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
	combineHistogram,
	stackedBarChart,
	isLogScale,
	columnWidths,
]);

const closeModal = (): void => {

@@ -26,6 +26,7 @@ function TablePanelWrapper({
	searchTerm={searchTerm}
	openTracesButton={openTracesButton}
	onOpenTraceBtnClick={onOpenTraceBtnClick}
	widgetId={widget.id}
	// eslint-disable-next-line react/jsx-props-no-spreading
	{...GRID_TABLE_CONFIG}
/>

@@ -9,6 +9,8 @@ exports[`Table panel wrappper tests table should render fine with the query resp
	width: 0.625rem;
	height: 100%;
	cursor: col-resize;
	margin-left: 4px;
	margin-right: 4px;
}

.c0 {
@@ -54,7 +56,7 @@ exports[`Table panel wrappper tests table should render fine with the query resp
	class="query-table"
>
	<div
		class="ant-table-wrapper css-dev-only-do-not-override-2i2tap"
		class="ant-table-wrapper resize-main-table css-dev-only-do-not-override-2i2tap"
	>
		<div
			class="ant-spin-nested-loading css-dev-only-do-not-override-2i2tap"
@@ -82,7 +84,7 @@ exports[`Table panel wrappper tests table should render fine with the query resp
<tr>
	<th
		aria-label="service_name"
		class="ant-table-cell ant-table-column-has-sorters react-resizable"
		class="resizable-header react-resizable"
		scope="col"
		tabindex="0"
	>
@@ -143,12 +145,12 @@ exports[`Table panel wrappper tests table should render fine with the query resp
		</span>
	</div>
	<span
		class="c1 react-resizable-handle"
		class="c1 resize-handle"
	/>
</th>
<th
	aria-label="latency-per-service"
	class="ant-table-cell ant-table-column-has-sorters react-resizable"
	class="resizable-header react-resizable"
	scope="col"
	tabindex="0"
>
@@ -209,7 +211,7 @@ exports[`Table panel wrappper tests table should render fine with the query resp
		</span>
	</div>
	<span
		class="c1 react-resizable-handle"
		class="c1 resize-handle"
	/>
</th>
</tr>
@@ -221,7 +223,7 @@ exports[`Table panel wrappper tests table should render fine with the query resp
	style="overflow-x: auto; overflow-y: hidden;"
>
	<table
		style="width: auto; min-width: 100%; table-layout: fixed;"
		style="min-width: 100%; table-layout: fixed;"
	>
		<colgroup>
			<col

@@ -453,7 +453,7 @@ export const Query = memo(function Query({
	</Col>
)}
<Col flex="1" className="qb-search-container">
	{query.dataSource === DataSource.LOGS ? (
	{[DataSource.LOGS, DataSource.TRACES].includes(query.dataSource) ? (
		<QueryBuilderSearchV2
			query={query}
			onChange={handleChangeTagFilters}

@@ -2,6 +2,7 @@
import './QueryBuilderSearchV2.styles.scss';

import { Typography } from 'antd';
import cx from 'classnames';
import {
	ArrowDown,
	ArrowUp,
@@ -25,6 +26,7 @@ interface ICustomDropdownProps {
	exampleQueries: TagFilter[];
	onChange: (value: TagFilter) => void;
	currentFilterItem?: ITag;
	isLogsDataSource: boolean;
}

export default function QueryBuilderSearchDropdown(
@@ -38,11 +40,14 @@ export default function QueryBuilderSearchDropdown(
	exampleQueries,
	options,
	onChange,
	isLogsDataSource,
} = props;
const userOs = getUserOperatingSystem();
return (
	<>
		<div className="content">
		<div
			className={cx('content', { 'non-logs-data-source': !isLogsDataSource })}
		>
			{!currentFilterItem?.key ? (
				<div className="suggested-filters">Suggested Filters</div>
			) : !currentFilterItem?.op ? (

@@ -11,6 +11,11 @@
	.rc-virtual-list-holder {
		height: 115px;
	}
	&.non-logs-data-source {
		.rc-virtual-list-holder {
			height: 256px;
		}
	}
}
}


@@ -689,12 +689,29 @@ function QueryBuilderSearchV2(
	})),
);
} else {
	setDropdownOptions(
		data?.payload?.attributeKeys?.map((key) => ({
	setDropdownOptions([
		// Add user typed option if it doesn't exist in the payload
		...(!isEmpty(tagKey) &&
		!data?.payload?.attributeKeys?.some((val) => isEqual(val.key, tagKey))
			? [
					{
						label: tagKey,
						value: {
							key: tagKey,
							dataType: DataTypes.EMPTY,
							type: '',
							isColumn: false,
							isJSON: false,
						},
					},
			  ]
			: []),
		// Map existing attribute keys from payload
		...(data?.payload?.attributeKeys?.map((key) => ({
			label: key.key,
			value: key,
		})) || [],
	);
		})) || []),
	]);
}
}
if (currentState === DropdownState.OPERATOR) {
@@ -964,6 +981,7 @@ function QueryBuilderSearchV2(
	exampleQueries={suggestionsData?.payload?.example_queries || []}
	tags={tags}
	currentFilterItem={currentFilterItem}
	isLogsDataSource={isLogsDataSource}
/>
)}
>

@@ -20,4 +20,5 @@ export type QueryTableProps = Omit<
	dataSource?: RowData[];
	sticky?: TableProps<RowData>['sticky'];
	searchTerm?: string;
	widgetId?: string;
};

@@ -24,6 +24,7 @@ export function QueryTable({
	dataSource,
	sticky,
	searchTerm,
	widgetId,
	...props
}: QueryTableProps): JSX.Element {
	const { isDownloadEnabled = false, fileName = '' } = downloadOption || {};
@@ -95,8 +96,10 @@ export function QueryTable({
	columns={tableColumns}
	tableLayout="fixed"
	dataSource={filterTable === null ? newDataSource : filterTable}
	scroll={{ x: true }}
	scroll={{ x: 'max-content' }}
	pagination={paginationConfig}
	widgetId={widgetId}
	shouldPersistColumnWidths
	sticky={sticky}
	// eslint-disable-next-line react/jsx-props-no-spreading
	{...props}

@@ -279,6 +279,17 @@ function SideNav(): JSX.Element {
	let updatedUserManagementItems: UserManagementMenuItems[] = [
		manageLicenseMenuItem,
	];

	const isApiMonitoringEnabled = featureFlags?.find(
		(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
	)?.active;

	if (!isApiMonitoringEnabled) {
		updatedMenuItems = updatedMenuItems.filter(
			(item) => item.key !== ROUTES.API_MONITORING,
		);
	}

	if (isCloudUserVal || isEECloudUserVal) {
		const isOnboardingEnabled =
			featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)

@@ -128,6 +128,7 @@ const menuItems: SidebarItem[] = [
	key: ROUTES.API_MONITORING,
	label: 'API Monitoring',
	icon: <Binoculars size={16} />,
	isNew: true,
},
{
	key: ROUTES.LIST_ALL_ALERT,

@@ -1,7 +1,7 @@
import './TracesTableComponent.styles.scss';

import { Table } from 'antd';
import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
import { ResizeTable } from 'components/ResizeTable';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import Controls from 'container/Controls';
import { PER_PAGE_OPTIONS } from 'container/TracesExplorer/ListView/configs';
@@ -54,9 +54,14 @@ function TracesTableComponent({

	const { formatTimezoneAdjustedTimestamp } = useTimezone();

	const columns = getListColumns(
		widget.selectedTracesFields || [],
		formatTimezoneAdjustedTimestamp,
	const columns = useMemo(
		() =>
			getListColumns(
				widget.selectedTracesFields || [],
				formatTimezoneAdjustedTimestamp,
			),
		// eslint-disable-next-line react-hooks/exhaustive-deps
		[widget.selectedTracesFields],
	);

	const dataLength =
@@ -116,16 +121,18 @@ function TracesTableComponent({
	<div className="traces-table">
		<div className="resize-table">
			<OverlayScrollbar>
				<Table
				<ResizeTable
					pagination={false}
					tableLayout="fixed"
					scroll={{ x: true }}
					scroll={{ x: 'max-content' }}
					loading={queryResponse.isFetching}
					style={tableStyles}
					dataSource={transformedQueryTableData}
					columns={columns}
					onRow={handleRow}
					sticky
					widgetId={widget.id}
					shouldPersistColumnWidths
				/>
			</OverlayScrollbar>
		</div>

@@ -170,11 +170,7 @@ export const useOptions = (
	(option, index, self) =>
		index ===
		self.findIndex(
			(o) =>
				// to remove duplicate & empty options from list
				o.label === option.label &&
				o.value === option.value &&
				o.dataType?.toLowerCase() === option.dataType?.toLowerCase(), // handle case sensitivity
			(o) => o.label === option.label && o.value === option.value, // to remove duplicate & empty options from list
		) && option.value !== '',
	) || []
).map((option) => {

@@ -2,18 +2,24 @@ import './TracesModulePage.styles.scss';

import RouteTab from 'components/RouteTab';
import { TabRoutes } from 'components/RouteTab/types';
import { FeatureKeys } from 'constants/features';
import history from 'lib/history';
import { useAppContext } from 'providers/App/App';
import { useLocation } from 'react-router-dom';

import { tracesExplorer, tracesFunnel, tracesSaveView } from './constants';

function TracesModulePage(): JSX.Element {
	const { pathname } = useLocation();
	const { featureFlags } = useAppContext();

	const isTraceFunnelsEnabled =
		featureFlags?.find((flag) => flag.name === FeatureKeys.TRACE_FUNNELS)
			?.active || false;

	const routes: TabRoutes[] = [
		tracesExplorer,
		// TODO(shaheer): remove this check after everything is ready
		process.env.NODE_ENV === 'development' ? tracesFunnel : null,
		isTraceFunnelsEnabled ? tracesFunnel : null,
		tracesSaveView,
	].filter(Boolean) as TabRoutes[];


@@ -40,7 +40,11 @@ import { Dashboard, IDashboardVariable } from 'types/api/dashboard/getAll';
import { GlobalReducer } from 'types/reducer/globalTime';
import { v4 as generateUUID } from 'uuid';

import { DashboardSortOrder, IDashboardContext } from './types';
import {
	DashboardSortOrder,
	IDashboardContext,
	WidgetColumnWidths,
} from './types';
import { sortLayout } from './util';

const DashboardContext = createContext<IDashboardContext>({
@@ -74,6 +78,8 @@ const DashboardContext = createContext<IDashboardContext>({
	selectedRowWidgetId: '',
	setSelectedRowWidgetId: () => {},
	isDashboardFetching: false,
	columnWidths: {},
	setColumnWidths: () => {},
});

interface Props {
@@ -408,6 +414,8 @@ export function DashboardProvider({
	}
};

const [columnWidths, setColumnWidths] = useState<WidgetColumnWidths>({});

const value: IDashboardContext = useMemo(
	() => ({
		toScrollWidgetId,
@@ -435,6 +443,8 @@ export function DashboardProvider({
		selectedRowWidgetId,
		setSelectedRowWidgetId,
		isDashboardFetching,
		columnWidths,
		setColumnWidths,
	}),
	// eslint-disable-next-line react-hooks/exhaustive-deps
	[
@@ -457,6 +467,8 @@ export function DashboardProvider({
		selectedRowWidgetId,
		setSelectedRowWidgetId,
		isDashboardFetching,
		columnWidths,
		setColumnWidths,
	],
);


@@ -10,6 +10,10 @@ export interface DashboardSortOrder {
	search: string;
}

export type WidgetColumnWidths = {
	[widgetId: string]: Record<string, number>;
};

export interface IDashboardContext {
	isDashboardSliderOpen: boolean;
	isDashboardLocked: boolean;
@@ -48,4 +52,6 @@ export interface IDashboardContext {
	selectedRowWidgetId: string | null;
	setSelectedRowWidgetId: React.Dispatch<React.SetStateAction<string | null>>;
	isDashboardFetching: boolean;
	columnWidths: WidgetColumnWidths;
	setColumnWidths: React.Dispatch<React.SetStateAction<WidgetColumnWidths>>;
}

@@ -764,10 +764,7 @@ export function QueryBuilderProvider({
);

const { safeNavigate } = useSafeNavigate({
	preventSameUrlNavigation: !(
		initialDataSource === DataSource.LOGS ||
		initialDataSource === DataSource.TRACES
	),
	preventSameUrlNavigation: false,
});

const redirectWithQueryBuilderData = useCallback(

@@ -109,6 +109,7 @@ export interface IBaseWidget {
	selectedLogFields: IField[] | null;
	selectedTracesFields: BaseAutocompleteData[] | null;
	isLogScale?: boolean;
	columnWidths?: Record<string, number>;
}
export interface Widgets extends IBaseWidget {
	query: Query;

6 go.mod
@@ -6,6 +6,7 @@ toolchain go1.22.7

require (
	dario.cat/mergo v1.0.1
	github.com/AfterShip/clickhouse-sql-parser v0.4.4
	github.com/ClickHouse/clickhouse-go/v2 v2.30.0
	github.com/DATA-DOG/go-sqlmock v1.5.2
	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
@@ -21,6 +22,7 @@ require (
	github.com/go-redis/redis/v8 v8.11.5
	github.com/go-redis/redismock/v8 v8.11.5
	github.com/go-viper/mapstructure/v2 v2.1.0
	github.com/gojek/heimdall/v7 v7.0.3
	github.com/golang-jwt/jwt/v5 v5.2.1
	github.com/google/uuid v1.6.0
	github.com/gorilla/handlers v1.5.1
@@ -61,6 +63,7 @@ require (
	go.opentelemetry.io/collector/pdata v1.17.0
	go.opentelemetry.io/collector/processor v0.111.0
	go.opentelemetry.io/contrib/config v0.10.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
	go.opentelemetry.io/otel v1.34.0
	go.opentelemetry.io/otel/metric v1.34.0
	go.opentelemetry.io/otel/sdk v1.34.0
@@ -128,6 +131,7 @@ require (
	github.com/goccy/go-json v0.10.3 // indirect
	github.com/gofrs/uuid v4.4.0+incompatible // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
@@ -201,6 +205,7 @@ require (
	github.com/smarty/assertions v1.15.0 // indirect
	github.com/spf13/cobra v1.8.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/tidwall/match v1.1.1 // indirect
	github.com/tidwall/pretty v1.2.0 // indirect
	github.com/tklauser/go-sysconf v0.3.13 // indirect
@@ -245,7 +250,6 @@ require (
	go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
	go.opentelemetry.io/collector/semconv v0.111.0 // indirect
	go.opentelemetry.io/collector/service v0.111.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
	go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect

19 go.sum
@@ -64,6 +64,8 @@ dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/AfterShip/clickhouse-sql-parser v0.4.4 h1:iLRwjzz1mWmUEf5UNrSYOceQ+PX9SdBJ8Xw0DNrL114=
github.com/AfterShip/clickhouse-sql-parser v0.4.4/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
@@ -88,6 +90,7 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.7.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -97,6 +100,7 @@ github.com/SigNoz/prometheus v1.13.0 h1:hsUql1zd83ifXtswO9Qk1rpCgVjE/ItQvgdNocBS
github.com/SigNoz/prometheus v1.13.0/go.mod h1:4PC0dxmx6y3kNI2d9oOTvEFTPkH6QnxDxERyqeL1hvI=
github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -142,6 +146,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -326,6 +331,10 @@ github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gojek/heimdall/v7 v7.0.3 h1:+5sAhl8S0m+qRRL8IVeHCJudFh/XkG3wyO++nvOg+gc=
github.com/gojek/heimdall/v7 v7.0.3/go.mod h1:Z43HtMid7ysSjmsedPTXAki6jcdcNVnjn5pmsTyiMic=
github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf h1:5xRGbUdOmZKoDXkGx5evVLehuCMpuO1hl701bEQqXOM=
github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf/go.mod h1:QzhUKaYKJmcbTnCYCAVQrroCOY7vOOI8cSQ4NbuhYf0=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -431,6 +440,7 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
@@ -800,6 +810,7 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -827,10 +838,14 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dul
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N++y4=
github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
@@ -848,6 +863,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
@@ -865,6 +882,7 @@ github.com/srikanthccv/ClickHouse-go-mock v0.11.0 h1:hKY9l7SbhI4IPPs7hjKAL1iDgKc
github.com/srikanthccv/ClickHouse-go-mock v0.11.0/go.mod h1:CzFC21J4tLn7cEYdU5k6hg7yyf052xtZXUY2e3UF6+I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
@@ -1353,6 +1371,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=

58 pkg/http/client/http.go (new file)
@@ -0,0 +1,58 @@
package client

import (
	"log/slog"
	"net/http"
	"time"

	"github.com/SigNoz/signoz/pkg/http/client/plugin"
	"github.com/gojek/heimdall/v7/httpclient"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

type Client struct {
	c    *httpclient.Client
	netc *http.Client
}

func New(logger *slog.Logger, tracerProvider trace.TracerProvider, meterProvider metric.MeterProvider, opts ...Option) (*Client, error) {
	clientOpts := options{
		retryCount:         3,
		requestResponseLog: false,
		timeout:            5 * time.Second,
	}

	for _, opt := range opts {
		opt(&clientOpts)
	}

	netc := &http.Client{
		Timeout:   clientOpts.timeout,
		Transport: otelhttp.NewTransport(http.DefaultTransport, otelhttp.WithTracerProvider(tracerProvider), otelhttp.WithMeterProvider(meterProvider)),
	}

	c := httpclient.NewClient(
		httpclient.WithHTTPClient(netc),
		httpclient.WithRetrier(clientOpts.retriable),
		httpclient.WithRetryCount(clientOpts.retryCount),
	)

	if clientOpts.requestResponseLog {
		c.AddPlugin(plugin.NewLog(logger))
	}

	return &Client{
		netc: netc,
		c:    c,
	}, nil
}

func (c *Client) Do(request *http.Request) (*http.Response, error) {
	return c.c.Do(request)
}

func (c *Client) Client() *http.Client {
	return c.netc
}

42 pkg/http/client/option.go (new file)
@@ -0,0 +1,42 @@
package client

import (
	"time"

	"github.com/gojek/heimdall/v7"
)

type Retriable = heimdall.Retriable

type options struct {
	retryCount         int
	requestResponseLog bool
	timeout            time.Duration
	retriable          Retriable
}

type Option func(*options)

func WithRetryCount(i int) Option {
	return func(o *options) {
		o.retryCount = i
	}
}

func WithTimeout(i time.Duration) Option {
	return func(o *options) {
		o.timeout = i
	}
}

func WithRequestResponseLog(b bool) Option {
	return func(o *options) {
		o.requestResponseLog = b
	}
}

func WithRetriable(retriable Retriable) Option {
	return func(o *options) {
		o.retriable = retriable
	}
}
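Taken together, the two new files give a functional-options HTTP client with an OTel-instrumented transport and heimdall-driven retries. A minimal usage sketch, assuming no-op OTel providers; the noop package aliases and the logger wiring here are stand-ins, not part of the diff:

// Illustrative wiring (assumed imports: context, net/http, os, time, log/slog,
// tracenoop "go.opentelemetry.io/otel/trace/noop", metricnoop "go.opentelemetry.io/otel/metric/noop").
logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
httpClient, err := client.New(
	logger,
	tracenoop.NewTracerProvider(),
	metricnoop.NewMeterProvider(),
	client.WithTimeout(10*time.Second),
	client.WithRetryCount(5),
	client.WithRequestResponseLog(true),
)
if err != nil {
	logger.Error("failed to construct http client", "error", err)
	return
}
req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com/health", nil)
resp, err := httpClient.Do(req)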
77 pkg/http/client/plugin/log.go (new file)
@@ -0,0 +1,77 @@
package plugin

import (
	"bytes"
	"io"
	"log/slog"
	"net"
	"net/http"

	"github.com/gojek/heimdall/v7"
	semconv "go.opentelemetry.io/otel/semconv/v1.27.0"
)

type reqResLog struct {
	logger *slog.Logger
}

func NewLog(logger *slog.Logger) heimdall.Plugin {
	return &reqResLog{
		logger: logger,
	}
}

func (plugin *reqResLog) OnRequestStart(request *http.Request) {
	host, port, _ := net.SplitHostPort(request.Host)
	fields := []any{
		string(semconv.HTTPRequestMethodKey), request.Method,
		string(semconv.URLPathKey), request.URL.Path,
		string(semconv.URLSchemeKey), request.URL.Scheme,
		string(semconv.UserAgentOriginalKey), request.UserAgent(),
		string(semconv.ServerAddressKey), host,
		string(semconv.ServerPortKey), port,
		string(semconv.HTTPRequestSizeKey), request.ContentLength,
		"http.request.headers", request.Header,
	}

	plugin.logger.InfoContext(request.Context(), "::SENT-REQUEST::", fields...)
}

func (plugin *reqResLog) OnRequestEnd(request *http.Request, response *http.Response) {
	fields := []any{
		string(semconv.HTTPResponseStatusCodeKey), response.StatusCode,
		string(semconv.HTTPResponseBodySizeKey), response.ContentLength,
	}

	bodybytes, err := io.ReadAll(response.Body)
	if err != nil {
		plugin.logger.DebugContext(request.Context(), "::UNABLE-TO-LOG-RESPONSE-BODY::", "error", err)
	} else {
		_ = response.Body.Close()
		response.Body = io.NopCloser(bytes.NewBuffer(bodybytes))

		if len(bodybytes) > 0 {
			fields = append(fields, "http.response.body", string(bodybytes))
		} else {
			fields = append(fields, "http.response.body", "(empty)")
		}
	}

	plugin.logger.InfoContext(request.Context(), "::RECEIVED-RESPONSE::", fields...)
}

func (plugin *reqResLog) OnError(request *http.Request, err error) {
	host, port, _ := net.SplitHostPort(request.Host)
	fields := []any{
		err,
		string(semconv.HTTPRequestMethodKey), request.Method,
		string(semconv.URLPathKey), request.URL.Path,
		string(semconv.URLSchemeKey), request.URL.Scheme,
		string(semconv.UserAgentOriginalKey), request.UserAgent(),
		string(semconv.ServerAddressKey), host,
		string(semconv.ServerPortKey), port,
		string(semconv.HTTPRequestSizeKey), request.ContentLength,
	}

	plugin.logger.ErrorContext(request.Context(), "::UNABLE-TO-SEND-REQUEST::", fields...)
}
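One design note on OnRequestEnd: logging the body consumes response.Body, so the plugin rewinds by re-wrapping the read bytes in an io.NopCloser, letting downstream callers read the response normally. The trade-off is that every logged response is buffered fully in memory, which is presumably why this logging is opt-in via WithRequestResponseLog rather than always on.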
|
||||
@@ -174,7 +174,7 @@ func (r *ClickHouseReader) getValuesForLogAttributes(
|
||||
from %s.%s
|
||||
where tag_key = $%d and (
|
||||
string_value != '' or number_value is not null
|
||||
)
|
||||
) and tag_type != 'logfield'
|
||||
limit %d
|
||||
)`, r.logsDB, r.logsTagAttributeTableV2, idx+1, limit))
|
||||
|
||||
|
||||
@@ -1143,7 +1143,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
|
||||
|
||||
func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams,
|
||||
smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
|
||||
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
|
||||
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
|
||||
searchSpansResult := []model.SearchSpansResult{
|
||||
{
|
||||
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
|
||||
@@ -1291,7 +1291,7 @@ func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.Sea
|
||||
|
||||
func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams,
|
||||
smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
|
||||
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
|
||||
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
|
||||
|
||||
if r.useTraceNewSchema {
|
||||
return r.SearchTracesV2(ctx, params, smartTraceAlgorithm)
|
||||
@@ -4065,7 +4065,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
|
||||
return nil, fmt.Errorf("unsupported aggregate operator")
|
||||
}
|
||||
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTableV2, where)
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type from %s.%s WHERE %s and tag_type != 'logfield' limit $2", r.logsDB, r.logsTagAttributeTableV2, where)
|
||||
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
|
||||
if err != nil {
|
||||
zap.L().Error("Error while executing query", zap.Error(err))
|
||||
@@ -4114,10 +4114,10 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
|
||||
var response v3.FilterAttributeKeyResponse
|
||||
|
||||
if len(req.SearchText) != 0 {
|
||||
query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where tag_key ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTableV2)
|
||||
query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where tag_type != 'logfield' and tag_key ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTableV2)
|
||||
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
|
||||
} else {
|
||||
query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s limit $1", r.logsDB, r.logsTagAttributeTableV2)
|
||||
query = fmt.Sprintf("select distinct tag_key, tag_type, tag_data_type from %s.%s where tag_type != 'logfield' limit $1", r.logsDB, r.logsTagAttributeTableV2)
|
||||
rows, err = r.db.Query(ctx, query, req.Limit)
|
||||
}
|
||||
|
||||
@@ -4838,7 +4838,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported aggregate operator")
|
||||
}
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE %s", r.TraceDB, r.spanAttributeTableV2, where)
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE %s and tag_type != 'spanfield'", r.TraceDB, r.spanAttributeTableV2, where)
|
||||
if req.Limit != 0 {
|
||||
query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
|
||||
}
|
||||
@@ -4900,7 +4900,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
|
||||
var rows driver.Rows
|
||||
var response v3.FilterAttributeKeyResponse
|
||||
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE tag_key ILIKE $1 LIMIT $2", r.TraceDB, r.spanAttributeTableV2)
|
||||
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type FROM %s.%s WHERE tag_key ILIKE $1 and tag_type != 'spanfield' LIMIT $2", r.TraceDB, r.spanAttributeTableV2)
|
||||
|
||||
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
|
||||
|
||||
@@ -5982,10 +5982,10 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
|
||||
}
|
||||
|
||||
firstQueryLimit := req.Limit
|
||||
dataPointsOrder := false
|
||||
samplesOrder := false
|
||||
var orderByClauseFirstQuery string
|
||||
if req.OrderBy.ColumnName == "samples" {
|
||||
dataPointsOrder = true
|
||||
samplesOrder = true
|
||||
orderByClauseFirstQuery = fmt.Sprintf("ORDER BY timeseries %s", req.OrderBy.Order)
|
||||
if req.Limit < 50 {
|
||||
firstQueryLimit = 50
|
||||
@@ -5995,8 +5995,8 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
|
||||
}
|
||||
|
||||
// Determine which tables to use
|
||||
start, end, tsTable, localTsTable := utils.WhichTSTableToUse(req.Start, req.EndD)
|
||||
sampleTable, countExp := utils.WhichSampleTableToUse(req.Start, req.EndD)
|
||||
start, end, tsTable, localTsTable := utils.WhichTSTableToUse(req.Start, req.End)
|
||||
sampleTable, countExp := utils.WhichSampleTableToUse(req.Start, req.End)
|
||||
|
||||
metricsQuery := fmt.Sprintf(
|
||||
`SELECT
|
||||
@@ -6018,7 +6018,10 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
|
||||
|
||||
args = append(args, start, end)
|
||||
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
|
||||
begin := time.Now()
|
||||
rows, err := r.db.Query(valueCtx, metricsQuery, args...)
|
||||
queryDuration := time.Since(begin)
|
||||
zap.L().Info("Time taken to execute metrics query to fetch metrics with high time series", zap.String("query", metricsQuery), zap.Any("args", args), zap.Duration("duration", queryDuration))
if err != nil {
zap.L().Error("Error executing metrics query", zap.Error(err))
return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
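valueCtx threads a max-threads hint to the query layer through a plain string context key. For reference, clickhouse-go v2 exposes the same knob through typed per-query settings; a hedged sketch of that alternative (this is the driver-level API, not the wrapper behind r.db):

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2"
)

// Sketch only: attach a per-query max_threads setting via the driver's
// typed context helper instead of a raw string key.
func withMaxThreads(ctx context.Context, n int) context.Context {
	return clickhouse.Context(ctx, clickhouse.WithSettings(clickhouse.Settings{
		"max_threads": n,
	}))
}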
@@ -6049,12 +6052,12 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
// Build a comma-separated list of quoted metric names.
metricsList := "'" + strings.Join(metricNames, "', '") + "'"
// If samples are being sorted by datapoints, update the ORDER clause.
if dataPointsOrder {
if samplesOrder {
orderByClauseFirstQuery = fmt.Sprintf("ORDER BY s.samples %s", req.OrderBy.Order)
} else {
orderByClauseFirstQuery = ""
}

args = make([]interface{}, 0)
var sampleQuery string
var sb strings.Builder
@@ -6062,13 +6065,11 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
sb.WriteString(fmt.Sprintf(
`SELECT
s.samples,
s.metric_name,
s.lastReceived
s.metric_name
FROM (
SELECT
dm.metric_name,
%s AS samples,
MAX(dm.unix_milli) AS lastReceived
%s AS samples
FROM %s.%s AS dm
WHERE dm.metric_name IN (%s)
AND dm.fingerprint IN (
@@ -6076,6 +6077,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
FROM %s.%s
WHERE metric_name IN (%s)
AND __normalized = true
AND unix_milli BETWEEN ? AND ?
%s
GROUP BY fingerprint
)
@@ -6089,18 +6091,18 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
metricsList,
whereClause,
))
args = append(args, start, end)
args = append(args, req.Start, req.End)
} else {
// If there are no filters, use a simpler query.
sb.WriteString(fmt.Sprintf(
`SELECT
s.samples,
s.metric_name,
s.lastReceived
s.metric_name
FROM (
SELECT
metric_name,
%s AS samples,
MAX(unix_milli) AS lastReceived
%s AS samples
FROM %s.%s
WHERE metric_name IN (%s)
AND unix_milli BETWEEN ? AND ?
@@ -6109,6 +6111,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
countExp,
signozMetricDBName, sampleTable,
metricsList))
args = append(args, req.Start, req.End)
}

// Append ORDER BY clause if provided.
@@ -6119,10 +6122,10 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
// Append LIMIT clause.
sb.WriteString(fmt.Sprintf("LIMIT %d;", req.Limit))
sampleQuery = sb.String()

// Append the time boundaries for sampleQuery.
args = append(args, start, end)
begin = time.Now()
rows, err = r.db.Query(valueCtx, sampleQuery, args...)
queryDuration = time.Since(begin)
zap.L().Info("Time taken to execute list summary query", zap.String("query", sampleQuery), zap.Any("args", args), zap.Duration("duration", queryDuration))
if err != nil {
zap.L().Error("Error executing samples query", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
@@ -6130,18 +6133,15 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
defer rows.Close()

samplesMap := make(map[string]uint64)
lastReceivedMap := make(map[string]int64)

for rows.Next() {
var samples uint64
var metricName string
var lastReceived int64
if err := rows.Scan(&samples, &metricName, &lastReceived); err != nil {
if err := rows.Scan(&samples, &metricName); err != nil {
zap.L().Error("Error scanning sample row", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
samplesMap[metricName] = samples
lastReceivedMap[metricName] = lastReceived
}
if err := rows.Err(); err != nil {
zap.L().Error("Error iterating over sample rows", zap.Error(err))
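The Scan fix above keeps the destination list in lockstep with the SELECT list; with a driver, a mismatch only surfaces as a runtime error. A defensive sketch that would catch the drift early (assuming rows is a clickhouse-go driver.Rows, whose Columns method returns the result column names):

// Sketch only: fail fast if the SELECT list and Scan targets disagree,
// which is the class of bug this hunk fixes.
if cols := rows.Columns(); len(cols) != 2 {
	return &response, &model.ApiError{Typ: "ClickHouseError",
		Err: fmt.Errorf("expected 2 columns (samples, metric_name), got %v", cols)}
}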
@@ -6167,16 +6167,13 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
}
if samples, exists := samplesMap[response.Metrics[i].MetricName]; exists {
response.Metrics[i].Samples = samples
if lastReceived, exists := lastReceivedMap[response.Metrics[i].MetricName]; exists {
response.Metrics[i].LastReceived = lastReceived
}
filteredMetrics = append(filteredMetrics, response.Metrics[i])
}
}
response.Metrics = filteredMetrics

// If ordering by samples, sort in-memory.
if dataPointsOrder {
if samplesOrder {
sort.Slice(response.Metrics, func(i, j int) bool {
return response.Metrics[i].Samples > response.Metrics[j].Samples
})
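Note the in-memory sort is hard-coded to descending regardless of req.OrderBy.Order. If direction ever needs to be honored here, the comparator can branch on it; a sketch (the element type name MetricDetail is an assumption about the response struct, not verified):

import "sort"

// Sketch only: direction-aware variant of the sort above. The type name
// metrics_explorer.MetricDetail is assumed.
func sortBySamples(metrics []metrics_explorer.MetricDetail, order string) {
	sort.Slice(metrics, func(i, j int) bool {
		if order == "asc" {
			return metrics[i].Samples < metrics[j].Samples
		}
		return metrics[i].Samples > metrics[j].Samples
	})
}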
@@ -6194,7 +6191,7 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
if len(conditions) > 0 {
whereClause = "AND " + strings.Join(conditions, " AND ")
}
start, end, tsTable, _ := utils.WhichTSTableToUse(req.Start, req.EndD)
start, end, tsTable, _ := utils.WhichTSTableToUse(req.Start, req.End)

// Construct the query without backticks
query := fmt.Sprintf(`
@@ -6224,26 +6221,29 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
)

args = append(args,
start, end, // For total_cardinality subquery
start, end, // For total_time_series subquery
start, end, // For main query
)

valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
begin := time.Now()
rows, err := r.db.Query(valueCtx, query, args...)
duration := time.Since(begin)
zap.L().Info("Time taken to execute time series percentage query", zap.String("query", query), zap.Any("args", args), zap.Duration("duration", duration))
if err != nil {
zap.L().Error("Error executing cardinality query", zap.Error(err), zap.String("query", query))
zap.L().Error("Error executing time series percentage query", zap.Error(err), zap.String("query", query))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()

var heatmap []metrics_explorer.TreeMapResponseItem
var treemap []metrics_explorer.TreeMapResponseItem
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.Percentage); err != nil {
zap.L().Error("Error scanning row", zap.Error(err))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
heatmap = append(heatmap, item)
treemap = append(treemap, item)
}

if err := rows.Err(); err != nil {
@@ -6251,11 +6251,10 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}

return &heatmap, nil
return &treemap, nil
}

func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError) {
var args []interface{}

conditions, _ := utils.BuildFilterConditions(&req.Filters, "ts")
whereClause := ""
@@ -6264,8 +6263,8 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
}

// Determine time range and tables to use
start, end, tsTable, localTsTable := utils.WhichTSTableToUse(req.Start, req.EndD)
sampleTable, countExp := utils.WhichSampleTableToUse(req.Start, req.EndD)
start, end, tsTable, localTsTable := utils.WhichTSTableToUse(req.Start, req.End)
sampleTable, countExp := utils.WhichSampleTableToUse(req.Start, req.End)

queryLimit := 50 + req.Limit
metricsQuery := fmt.Sprintf(
@@ -6284,9 +6283,12 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
)

valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
begin := time.Now()
rows, err := r.db.Query(valueCtx, metricsQuery, start, end)
duration := time.Since(begin)
zap.L().Info("Time taken to execute samples percentage metric name query to reduce search space", zap.String("query", metricsQuery), zap.Any("start", start), zap.Any("end", end), zap.Duration("duration", duration))
if err != nil {
zap.L().Error("Error executing metrics query", zap.Error(err))
zap.L().Error("Error executing samples percentage query", zap.Error(err))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6317,7 +6319,6 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req

// Build optimized query with JOIN but `unix_milli` filter only on the sample table
var sb strings.Builder

sb.WriteString(fmt.Sprintf(
`WITH TotalSamples AS (
SELECT %s AS total_samples
@@ -6338,8 +6339,14 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
countExp, signozMetricDBName, sampleTable, // Inner select samples
))

var args []interface{}
args = append(args,
req.Start, req.End, // For total_samples subquery
)

// Apply `unix_milli` filter **only** on the sample table (`dm`)
sb.WriteString(` WHERE dm.unix_milli BETWEEN ? AND ?`)
args = append(args, req.Start, req.End)

// Use JOIN instead of IN (subquery) when additional filters exist
if whereClause != "" {
@@ -6348,12 +6355,14 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
SELECT ts.fingerprint
FROM %s.%s AS ts
WHERE ts.metric_name IN (%s)
AND unix_milli BETWEEN ? AND ?
AND __normalized = true
%s
GROUP BY ts.fingerprint
)`,
signozMetricDBName, localTsTable, metricsList, whereClause,
))
args = append(args, start, end)
}

// Apply metric filtering after all conditions
@@ -6366,14 +6375,16 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
LIMIT ?;`,
metricsList,
))

args = append(args, req.Limit)
sampleQuery := sb.String()

// Add start and end time to args (only for sample table)
args = append(args, start, end, start, end, req.Limit)

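This rearrangement works because ClickHouse's ? placeholders bind strictly in the order they appear in the final SQL text, so every WriteString that emits a ? has to be paired with an append in the same relative position. A tiny illustration of the invariant (variable names hypothetical):

// Sketch only: interleave query assembly and argument collection so the
// positional placeholders and args stay aligned.
var sb strings.Builder
var args []interface{}
sb.WriteString("SELECT count() FROM samples WHERE unix_milli BETWEEN ? AND ?") // args 1, 2
args = append(args, start, end)
sb.WriteString(" AND metric_name = ?") // arg 3
args = append(args, metricName)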
begin = time.Now()
// Execute the sample percentage query
rows, err = r.db.Query(valueCtx, sampleQuery, args...)
duration = time.Since(begin)
zap.L().Info("Time taken to execute samples percentage query", zap.String("query", sampleQuery), zap.Any("args", args), zap.Duration("duration", duration))
if err != nil {
zap.L().Error("Error executing samples query", zap.Error(err))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
@@ -6381,21 +6392,21 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
defer rows.Close()

// Process the results into a response slice
var heatmap []metrics_explorer.TreeMapResponseItem
var treemap []metrics_explorer.TreeMapResponseItem
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.TotalValue, &item.MetricName, &item.Percentage); err != nil {
zap.L().Error("Error scanning row", zap.Error(err))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
heatmap = append(heatmap, item)
treemap = append(treemap, item)
}
if err := rows.Err(); err != nil {
zap.L().Error("Error iterating over sample rows", zap.Error(err))
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}

return &heatmap, nil
return &treemap, nil
}

func (r *ClickHouseReader) GetNameSimilarity(ctx context.Context, req *metrics_explorer.RelatedMetricsRequest) (map[string]metrics_explorer.RelatedMetricsScore, *model.ApiError) {

@@ -124,7 +124,7 @@ func (c *Controller) GenerateConnectionUrl(
}

// TODO(Raj): parameterize this in follow-up changes
agentVersion := "0.0.2"
agentVersion := "0.0.3"

connectionUrl := fmt.Sprintf(
"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?",
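Because the quick-create link appends several query parameters after the #/stacks/quickcreate? fragment, encoding them through net/url avoids hand-escaping mistakes. A hedged sketch (the parameter names below are illustrative, not the exact set this controller sends):

import "net/url"

// Sketch only: assemble the quick-create URL with escaped parameters.
// Parameter names are hypothetical.
params := url.Values{}
params.Set("stackName", "signoz-integration")
params.Set("param_SigNozAgentVersion", agentVersion)
connectionUrl := fmt.Sprintf(
	"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?%s",
	region, region, params.Encode(),
)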
(Binary image files added, 371 KiB and 6.0 KiB, not shown; several large file diffs suppressed.)
@@ -0,0 +1,3 @@
### Monitor Managed Streaming Kafka with SigNoz

Collect key MSK metrics and view them with an out-of-the-box dashboard.
@@ -145,11 +145,12 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
// todo(nitya): remove this once we fix agents in multitenancy
defaultOrgID, err := ic.GetDefaultOrgID(ctx)
if err != nil {
return nil, model.WrapApiError(err, "failed to get default org ID")
// we don't want to fail the request if we can't get the default org ID
// we will just return an empty list of pipelines
zap.L().Warn("failed to get default org ID", zap.Error(err))
return result, nil
}

fmt.Println("defaultOrgID", defaultOrgID)

if version >= 0 {
savedPipelines, errors := ic.getPipelinesByVersion(ctx, defaultOrgID, version)
if errors != nil {
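The hunk above trades a hard failure for graceful degradation: log a warning and hand back an empty result so one missing org ID cannot break the whole request. The reusable shape, as a sketch (function and type names hypothetical):

// Sketch only: degrade to an empty result on a non-critical lookup failure
// instead of propagating the error.
func effectivePipelines(ctx context.Context) ([]Pipeline, error) {
	orgID, err := getDefaultOrgID(ctx)
	if err != nil {
		zap.L().Warn("failed to get default org ID", zap.Error(err))
		return []Pipeline{}, nil // degrade, don't fail
	}
	return loadPipelines(ctx, orgID)
}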
@@ -51,7 +51,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative",
@@ -93,7 +93,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = avg, space aggregation = avg, temporality = unspecified, testing metrics and attribute name with dot",
@@ -148,7 +148,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1735295140000,
end: 1735554340000,
expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC",
expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC",
},
}

@@ -116,7 +116,7 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery)
return "", err
}

samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)
samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d AND bitAnd(flags, 1) = 0", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)

tableName := helpers.WhichSamplesTableToUse(start, end, mq)

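Every updated expectation in the surrounding tests adds the same predicate, bitAnd(flags, 1) = 0, to the samples-table filter: rows whose lowest flag bit is set are excluded from aggregation (what that bit encodes, e.g. a stale or no-recorded-value marker, is an assumption here, not confirmed by this diff). The predicate is just a bit test:

// Sketch only: bitAnd(flags, 1) = 0 keeps rows whose bit 0 is clear.
// The meaning of bit 0 is assumed.
const excludeBit uint64 = 1
keep := func(flags uint64) bool { return flags&excludeBit == 0 }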
@@ -66,7 +66,7 @@ func TestPrepareTimeAggregationSubQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'service_name') != 'payment_service' AND JSONExtractString(labels, 'endpoint') IN ['/paycallback','/payme','/paypal']) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'service_name') != 'payment_service' AND JSONExtractString(labels, 'endpoint') IN ['/paycallback','/payme','/paypal']) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
},
{
name: "test time aggregation = rate, temporality = cumulative",
@@ -107,7 +107,7 @@ func TestPrepareTimeAggregationSubQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)",
expectedQueryContains: "SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)",
},
}

@@ -168,7 +168,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative",
@@ -210,7 +210,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = avg, space aggregation = avg, temporality = unspecified, testing metrics and attribute name with dot",
@@ -265,7 +265,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1735295140000,
end: 1735554340000,
expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC",
expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC",
},
}

@@ -53,7 +53,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = delta",
@@ -95,7 +95,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = avg, temporality = delta, testing metrics and attribute name with dot",
@@ -143,7 +143,7 @@ func TestPrepareTableQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT ts, avg(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['signoz.latency.sum'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'host.name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz.latency.sum'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, avg(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['signoz.latency.sum'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'host.name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz.latency.sum'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
}

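In the delta-temporality expectations, rate is computed as sum(value)/60: delta samples already carry per-interval increments, so the per-second rate over a 60-second bucket is the bucket's total divided by the bucket width. The equivalent arithmetic in Go (sample values hypothetical):

// Sketch only: per-second rate from delta samples in one 60s bucket.
bucketSum := 0.0
for _, v := range samplesInBucket { // hypothetical []float64 of delta values
	bucketSum += v
}
ratePerSecond := bucketSum / 60.0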
@@ -66,7 +66,7 @@ func TestPrepareTimeAggregationSubQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'service_name') != 'payment_service' AND JSONExtractString(labels, 'endpoint') IN ['/paycallback','/payme','/paypal']) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'service_name') != 'payment_service' AND JSONExtractString(labels, 'endpoint') IN ['/paycallback','/payme','/paypal']) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
},
{
name: "test time aggregation = rate, temporality = delta",
@@ -107,7 +107,7 @@ func TestPrepareTimeAggregationSubQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
expectedQueryContains: "SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts",
},
}

@@ -168,7 +168,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'state') != 'idle') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = delta",
@@ -210,7 +210,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['http_requests'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['http_requests'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = rate, space aggregation percentile99, type = ExponentialHistogram",
@@ -244,7 +244,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1701794980000,
end: 1701796780000,
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, quantilesDDMerge(0.01, 0.990000)(sketch)[1] as value FROM signoz_metrics.distributed_exp_hist INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['signoz_latency'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, quantilesDDMerge(0.01, 0.990000)(sketch)[1] as value FROM signoz_metrics.distributed_exp_hist INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['signoz_latency'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency'] AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = max, temporality = delta, testing metrics and attribute name with dot",
@@ -299,7 +299,7 @@ func TestPrepareTimeseriesQuery(t *testing.T) {
},
start: 1735036101000,
end: 1735637901000,
expectedQueryContains: "SELECT `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz.latency.sum'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1734998400000 AND unix_milli < 1735637901000 AND JSONExtractString(labels, 'host_name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz.latency.sum'] AND unix_milli >= 1735036101000 AND unix_milli < 1735637901000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `host.name`, ts ORDER BY `host.name` ASC, ts ASC",
expectedQueryContains: "SELECT `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz.latency.sum'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1734998400000 AND unix_milli < 1735637901000 AND JSONExtractString(labels, 'host_name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz.latency.sum'] AND unix_milli >= 1735036101000 AND unix_milli < 1735637901000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `host.name`, ts ORDER BY `host.name` ASC, ts ASC",
},
}

@@ -24,7 +24,7 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery)
return "", err
}

samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)
samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d AND bitAnd(flags, 1) = 0", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)

tableName := helpers.WhichSamplesTableToUse(start, end, mq)

@@ -83,7 +83,7 @@ func prepareQueryOptimized(start, end, step int64, mq *v3.BuilderQuery) (string,
return "", err
}

samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)
samplesTableFilter := fmt.Sprintf("metric_name IN %s AND unix_milli >= %d AND unix_milli < %d AND bitAnd(flags, 1) = 0", utils.ClickHouseFormattedMetricNames(mq.AggregateAttribute.Key), start, end)

tableName := helpers.WhichSamplesTableToUse(start, end, mq)

@@ -49,7 +49,7 @@ func TestPrepareMetricQueryCumulativeRatePreAgg(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative, multiple group by",
@@ -82,7 +82,7 @@ func TestPrepareMetricQueryCumulativeRatePreAgg(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, endpoint, ts, sum(per_series_value) as value FROM (SELECT service_name, endpoint, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(endpoint) as endpoint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'endpoint') as endpoint, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, endpoint, ts ORDER BY service_name ASC, endpoint ASC, ts ASC",
expectedQueryContains: "SELECT service_name, endpoint, ts, sum(per_series_value) as value FROM (SELECT service_name, endpoint, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(endpoint) as endpoint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'endpoint') as endpoint, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, endpoint, ts ORDER BY service_name ASC, endpoint ASC, ts ASC",
},
}

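Note on the recurring change in these test fixtures: every expected query gains an "AND bitAnd(flags, 1) = 0" predicate. A minimal sketch of what that predicate does, assuming the lowest bit of the flags column marks samples that should be excluded from aggregation (the flag semantics themselves are not shown in this diff):

// Hypothetical illustration, not part of the diff: the new predicate keeps only
// rows whose lowest flag bit is unset.
for _, flags := range []uint64{0, 1, 2, 3} {
	fmt.Println(flags, flags&1 == 0) // 0 true, 1 false, 2 true, 3 false
}
// bitAnd(flags, 1) = 0 in ClickHouse applies the same mask: flagged samples
// (for example staleness markers, if that is what bit 0 encodes) are skipped.
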
@@ -123,7 +123,7 @@ func TestPrepareMetricQueryDeltaRatePreAgg(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = delta, group by service_name",
@@ -149,7 +149,7 @@ func TestPrepareMetricQueryDeltaRatePreAgg(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
}

@@ -204,7 +204,7 @@ func TestPrepreMetricQueryCumulativeQuantilePreAgg(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test temporality = cumulative, quantile = 0.99 without group by",
@@ -234,7 +234,7 @@ func TestPrepreMetricQueryCumulativeQuantilePreAgg(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
},
}

@@ -289,7 +289,7 @@ func TestPrepreMetricQueryDeltaQuantilePreAgg(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test temporality = delta, quantile = 0.99 no group by",
@@ -319,7 +319,7 @@ func TestPrepreMetricQueryDeltaQuantilePreAgg(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
},
}

@@ -360,7 +360,7 @@ func TestPrepareMetricQueryGaugePreAgg(t *testing.T) {
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test gauge query with group by host_name",
@@ -386,7 +386,7 @@ func TestPrepareMetricQueryGaugePreAgg(t *testing.T) {
Expression: "A",
Disabled: false,
},
expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC",
expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC",
},
}

@@ -194,7 +194,7 @@ func TestPrepareMetricQueryCumulativeRate(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative, multiple group by",
@@ -227,7 +227,7 @@ func TestPrepareMetricQueryCumulativeRate(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, endpoint, ts, sum(per_series_value) as value FROM (SELECT service_name, endpoint, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(endpoint) as endpoint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'endpoint') as endpoint, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, endpoint, ts ORDER BY service_name ASC, endpoint ASC, ts ASC",
expectedQueryContains: "SELECT service_name, endpoint, ts, sum(per_series_value) as value FROM (SELECT service_name, endpoint, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(endpoint) as endpoint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'endpoint') as endpoint, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, endpoint, ts ORDER BY service_name ASC, endpoint ASC, ts ASC",
},
}

@@ -268,7 +268,7 @@ func TestPrepareMetricQueryDeltaRate(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test time aggregation = rate, space aggregation = sum, temporality = delta, group by service_name",
@@ -294,7 +294,7 @@ func TestPrepareMetricQueryDeltaRate(t *testing.T) {
TimeAggregation: v3.TimeAggregationRate,
SpaceAggregation: v3.SpaceAggregationSum,
},
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_calls_total'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_calls_total'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
}

@@ -349,7 +349,7 @@ func TestPrepreMetricQueryCumulativeQuantile(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test temporality = cumulative, quantile = 0.99 without group by",
@@ -379,7 +379,7 @@ func TestPrepreMetricQueryCumulativeQuantile(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
},
}

@@ -434,7 +434,7 @@ func TestPrepreMetricQueryDeltaQuantile(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC",
},
{
name: "test temporality = delta, quantile = 0.99 no group by",
@@ -464,7 +464,7 @@ func TestPrepreMetricQueryDeltaQuantile(t *testing.T) {
Disabled: false,
SpaceAggregation: v3.SpaceAggregationPercentile99,
},
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['signoz_latency_bucket'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name IN ['signoz_latency_bucket'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC",
},
}

@@ -505,7 +505,7 @@ func TestPrepareMetricQueryGauge(t *testing.T) {
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC",
},
{
name: "test value filter with string value",
@@ -562,7 +562,7 @@ func TestPrepareMetricQueryGauge(t *testing.T) {
Expression: "A",
Disabled: false,
},
expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC",
expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system_cpu_usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name IN ['system_cpu_usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC",
},
{
name: "test gauge query with multiple group by with metric and attribute name containing dot",
@@ -631,7 +631,7 @@ func TestPrepareMetricQueryGauge(t *testing.T) {
ReduceTo: v3.ReduceToOperatorAvg,
Having: []v3.Having{},
},
expectedQueryContains: "SELECT `os.type`, state, `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`os.type`) as `os.type`, any(state) as state, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `os.type`, state, `host.name`, ts ORDER BY `os.type` desc, state asc, `host.name` ASC, ts ASC",
expectedQueryContains: "SELECT `os.type`, state, `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`os.type`) as `os.type`, any(state) as state, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name IN ['system.memory.usage'] AND temporality = 'Unspecified' AND __normalized = true AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name IN ['system.memory.usage'] AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `os.type`, state, `host.name`, ts ORDER BY `os.type` desc, state asc, `host.name` ASC, ts ASC",
},
}

@@ -711,7 +711,7 @@ func TestPrepareMetricQueryValueTypePanelWithGroupBY(t *testing.T) {
},
},
},
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC) GROUP BY ts ORDER BY ts",
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Delta' AND __normalized = true AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC) GROUP BY ts ORDER BY ts",
},
{
name: "test temporality = cumulative, panel = value, series agg = max group by state, host_name",
@@ -777,7 +777,7 @@ func TestPrepareMetricQueryValueTypePanelWithGroupBY(t *testing.T) {
},
},
},
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, host_name, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, host_name, ts ORDER BY state desc, host_name ASC, ts ASC) GROUP BY ts ORDER BY ts",
expectedQueryContains: "SELECT max(value) as aggregated_value, ts FROM (SELECT state, host_name, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, anyLast(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name IN ['system_memory_usage'] AND temporality = 'Cumulative' AND __normalized = true AND unix_milli >= 1735891200000 AND unix_milli < 1735894800000 AND JSONExtractString(labels, 'os_type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name IN ['system_memory_usage'] AND unix_milli >= 1735891800000 AND unix_milli < 1735894800000 AND bitAnd(flags, 1) = 0 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, host_name, ts ORDER BY state desc, host_name ASC, ts ASC) GROUP BY ts ORDER BY ts",
},
}

@@ -225,21 +225,21 @@ func (receiver *SummaryService) GetMetricsTreemap(ctx context.Context, params *m
	var response metrics_explorer.TreeMap
	switch params.Treemap {
	case metrics_explorer.TimeSeriesTeeMap:
		cardinality, apiError := receiver.reader.GetMetricsTimeSeriesPercentage(ctx, params)
		ts, apiError := receiver.reader.GetMetricsTimeSeriesPercentage(ctx, params)
		if apiError != nil {
			return nil, apiError
		}
		if cardinality != nil {
			response.TimeSeries = *cardinality
		if ts != nil {
			response.TimeSeries = *ts
		}
		return &response, nil
	case metrics_explorer.SamplesTreeMap:
		dataPoints, apiError := receiver.reader.GetMetricsSamplesPercentage(ctx, params)
		samples, apiError := receiver.reader.GetMetricsSamplesPercentage(ctx, params)
		if apiError != nil {
			return nil, apiError
		}
		if dataPoints != nil {
			response.Samples = *dataPoints
		if samples != nil {
			response.Samples = *samples
		}
		return &response, nil
	default:

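For context, a hedged sketch of how a caller is expected to drive this switch; the request type and its fields are taken from the TreeMapMetricsRequest hunk further down, while the summaryService variable and the literal values are invented for illustration:

params := &metrics_explorer.TreeMapMetricsRequest{
	Treemap: metrics_explorer.SamplesTreeMap, // or metrics_explorer.TimeSeriesTeeMap
	Limit:   10,
}
treemap, apiErr := summaryService.GetMetricsTreemap(ctx, params)
if apiErr == nil && treemap != nil {
	// treemap.Samples (or treemap.TimeSeries) now carries the per-metric percentages.
}
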
@@ -21,6 +21,7 @@ import (
	"github.com/gorilla/mux"
	promModel "github.com/prometheus/common/model"
	"go.uber.org/multierr"
	"go.uber.org/zap"

	"github.com/SigNoz/signoz/pkg/query-service/app/metrics"
	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -34,6 +35,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/utils"
	querytemplate "github.com/SigNoz/signoz/pkg/query-service/utils/queryTemplate"
	"github.com/SigNoz/signoz/pkg/types"
	chVariables "github.com/SigNoz/signoz/pkg/variables/clickhouse"
)

var allowedFunctions = []string{"count", "ratePerSec", "sum", "avg", "min", "max", "p50", "p90", "p95", "p99"}
@@ -841,6 +843,29 @@ func validateExpressions(expressions []string, funcs map[string]govaluate.Expres
	return errs
}

// chTransformQuery transforms the clickhouse query with the given variables.
// It is used to check what the query would be if variables are selected as __all__.
// For now, this is just a pass-through, but in the future it will be used to
// transform queries that use dashboard variables.
// TODO(srikanthccv): version based query replacement
func chTransformQuery(query string, variables map[string]interface{}) {
	varsForTransform := make([]chVariables.VariableValue, 0, len(variables))
	for name := range variables {
		varsForTransform = append(varsForTransform, chVariables.VariableValue{
			Name:        name,
			Values:      []string{"__all__"},
			IsSelectAll: true,
			FieldType:   "scalar",
		})
	}
	transformer := chVariables.NewQueryTransformer(query, varsForTransform)
	transformedQuery, err := transformer.Transform()
	if err != nil {
		zap.L().Warn("failed to transform clickhouse query", zap.Error(err))
	}
	zap.L().Info("transformed clickhouse query", zap.String("transformedQuery", transformedQuery), zap.String("originalQuery", query))
}

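A hedged usage sketch of the transformer this helper wraps; NewQueryTransformer, Transform, and VariableValue come from the new pkg/variables/clickhouse package added below, while the query string and variable name are invented for illustration:

q := "SELECT count() FROM logs WHERE service_name IN {{service}}"
t := chVariables.NewQueryTransformer(q, []chVariables.VariableValue{{
	Name:        "service",
	Values:      []string{"__all__"},
	IsSelectAll: true,
	FieldType:   "scalar",
}})
transformed, err := t.Transform()
// The intent is that a __all__ selection drops the filter instead of substituting
// a literal value; with the current pass-through wiring the result is only logged.
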
func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiError) {
var queryRangeParams *v3.QueryRangeParamsV3
@@ -979,6 +1004,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
		continue
	}

	chTransformQuery(chQuery.Query, queryRangeParams.Variables)
	for name, value := range queryRangeParams.Variables {
		chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1)
		chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1)
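The two Replace calls above are plain template substitution for dashboard variables. A minimal standalone sketch, with the variable names and values invented for illustration:

query := "SELECT count() FROM logs WHERE service = '{{service}}' AND env = '[[env]]'"
for name, value := range map[string]interface{}{"service": "frontend", "env": "prod"} {
	query = strings.Replace(query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1)
	query = strings.Replace(query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1)
}
// query == "SELECT count() FROM logs WHERE service = 'frontend' AND env = 'prod'"
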
@@ -89,13 +89,13 @@ func (aH *APIHandler) GetTreeMap(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	params, apiError := explorer.ParseTreeMapMetricsParams(r)
	if apiError != nil {
		zap.L().Error("error parsing heatmap metric params", zap.Error(apiError.Err))
		zap.L().Error("error parsing tree map metric params", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}
	result, apiError := aH.SummaryService.GetMetricsTreemap(ctx, params)
	if apiError != nil {
		zap.L().Error("error getting heatmap data", zap.Error(apiError.Err))
		zap.L().Error("error getting tree map data", zap.Error(apiError.Err))
		RespondError(w, apiError, nil)
		return
	}

@@ -24,6 +24,7 @@ const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE"
const AlertChannelEmail = "ALERT_CHANNEL_EMAIL"
const AnomalyDetection = "ANOMALY_DETECTION"
const HostsInfraMonitoring = "HOSTS_INFRA_MONITORING"
const TraceFunnels = "TRACE_FUNNELS"

var BasicPlan = FeatureSet{
	Feature{
@@ -124,4 +125,11 @@ var BasicPlan = FeatureSet{
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       TraceFunnels,
		Active:     false,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
}

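Since the new entry registers TraceFunnels as inactive for this plan, gating code has to consult the FeatureSet. A hypothetical lookup helper, not part of the diff, to show how the entry behaves:

// isActive reports whether a named feature is enabled in a FeatureSet.
func isActive(fs FeatureSet, name string) bool {
	for _, f := range fs {
		if f.Name == name {
			return f.Active
		}
	}
	return false
}

// isActive(BasicPlan, TraceFunnels) returns false, so the feature stays off here.
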
@@ -9,7 +9,7 @@ type SummaryListMetricsRequest struct {
	Limit   int          `json:"limit"`
	OrderBy v3.OrderBy   `json:"orderBy"`
	Start   int64        `json:"start"`
	EndD    int64        `json:"end"`
	End     int64        `json:"end"`
	Filters v3.FilterSet `json:"filters"`
}

@@ -24,7 +24,7 @@ type TreeMapMetricsRequest struct {
	Limit   int          `json:"limit"`
	Treemap TreeMapType  `json:"treemap"`
	Start   int64        `json:"start"`
	EndD    int64        `json:"end"`
	End     int64        `json:"end"`
	Filters v3.FilterSet `json:"filters"`
}

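Worth noting: the JSON tag on the misnamed EndD field was already "end", so request decoding never depended on the typo; the rename only fixes the Go-side identifier. A small hedged illustration, assuming the usual encoding/json decoding of the request body:

var req TreeMapMetricsRequest
_ = json.Unmarshal([]byte(`{"limit": 10, "start": 1650931200000, "end": 1651078380000}`), &req)
// req.End == 1651078380000; callers previously had to reach for the oddly named req.EndD.
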
@@ -740,7 +740,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma
	// zap.L().Info(data)
	properties := analytics.NewProperties()
	properties.Set("version", version.Info.Version)
	properties.Set("version", version.Info.Version())
	properties.Set("deploymentType", getDeploymentType())
	properties.Set("companyDomain", a.getCompanyDomain())

@@ -10,7 +10,6 @@ import (
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlmigration"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlitesqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/telemetrystore"

@@ -39,7 +38,6 @@ func NewSQLStoreProviderFactories() factory.NamedMap[factory.ProviderFactory[sql
	hook := sqlstorehook.NewLoggingFactory()
	return factory.MustNewNamedMap(
		sqlitesqlstore.NewFactory(hook),
		postgressqlstore.NewFactory(hook),
	)
}

511
pkg/variables/clickhouse/processor.go
Normal file
@@ -0,0 +1,511 @@
package clickhouse

import (
	"fmt"
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
)

// FilterAction represents what to do with a filter containing a variable
type FilterAction int

const (
	// KeepFilter maintains the original filter
	KeepFilter FilterAction = iota
	// RemoveFilter completely removes the filter
	RemoveFilter
	// ReplaceWithExistsCheck replaces filter with an EXISTS check
	ReplaceWithExistsCheck
)

// FilterTransformer defines the callback function that decides
// what to do with a filter containing a variable
type FilterTransformer func(variableName string, expr parser.Expr) FilterAction

// QueryProcessor handles ClickHouse query modifications
type QueryProcessor struct{}

// NewQueryProcessor creates a new processor
func NewQueryProcessor() *QueryProcessor {
	return &QueryProcessor{}
}

// ProcessQuery finds variables in WHERE clauses and modifies them according to the transformer function
func (qp *QueryProcessor) ProcessQuery(query string, transformer FilterTransformer) (string, error) {
	p := parser.NewParser(query)
	stmts, err := p.ParseStmts()
	if err != nil {
		return "", fmt.Errorf("failed to parse query: %w", err)
	}

	if len(stmts) == 0 {
		return query, nil
	}

	// Look for SELECT statements
	modified := false
	for i, stmt := range stmts {
		selectQuery, ok := stmt.(*parser.SelectQuery)
		if !ok {
			continue
		}

		whereModified, err := qp.processWhereClause(selectQuery, transformer)
		if err != nil {
			return "", err
		}
		if whereModified {
			modified = true
			stmts[i] = selectQuery
		}
	}

	if !modified {
		return query, nil
	}

	// Reconstruct the query
	var resultBuilder strings.Builder
	for _, stmt := range stmts {
		resultBuilder.WriteString(stmt.String())
		resultBuilder.WriteString(";")
	}

	return resultBuilder.String(), nil
}

// processWhereClause processes the WHERE clause in a SELECT statement
func (qp *QueryProcessor) processWhereClause(selectQuery *parser.SelectQuery, transformer FilterTransformer) (bool, error) {
	// First, process any subqueries in the FROM clause
	if selectQuery.From != nil {
		subQueryModified, err := qp.processFromClauseSubqueries(selectQuery.From, transformer)
		if err != nil {
			return false, err
		}
		if subQueryModified {
			// Mark as modified if any subqueries were modified
			return true, nil
		}
	}

	// Then process the main WHERE clause
	if selectQuery.Where == nil {
		return false, nil
	}

	// Process the WHERE expression, which may include subqueries
	modified := false
	newExpr, hasChanged, err := qp.transformExpr(selectQuery.Where.Expr, transformer)
	if err != nil {
		return false, err
	}

	if hasChanged {
		modified = true
		if newExpr == nil {
			// If the entire WHERE clause is removed
			selectQuery.Where = nil
		} else {
			selectQuery.Where.Expr = newExpr
		}
	}

	return modified, nil
}

// processFromClauseSubqueries recursively processes subqueries in the FROM clause
func (qp *QueryProcessor) processFromClauseSubqueries(fromClause *parser.FromClause, transformer FilterTransformer) (bool, error) {
	if fromClause == nil {
		return false, nil
	}

	return qp.processExprSubqueries(fromClause.Expr, transformer)
}

// processExprSubqueries processes subqueries found in expressions
func (qp *QueryProcessor) processExprSubqueries(expr parser.Expr, transformer FilterTransformer) (bool, error) {
	if expr == nil {
		return false, nil
	}

	modified := false

	switch e := expr.(type) {
	case *parser.SubQuery:
		// Process the subquery's SELECT statement
		if e.Select != nil {
			subQueryModified, err := qp.processWhereClause(e.Select, transformer)
			if err != nil {
				return false, err
			}
			if subQueryModified {
				modified = true
			}
		}

	case *parser.BinaryOperation:
		// Check left and right expressions for subqueries
		leftModified, err := qp.processExprSubqueries(e.LeftExpr, transformer)
		if err != nil {
			return false, err
		}

		rightModified, err := qp.processExprSubqueries(e.RightExpr, transformer)
		if err != nil {
			return false, err
		}

		if leftModified || rightModified {
			modified = true
		}

	case *parser.JoinExpr:
		// Process both sides of the join
		leftModified, err := qp.processExprSubqueries(e.Left, transformer)
		if err != nil {
			return false, err
		}

		rightModified, err := qp.processExprSubqueries(e.Right, transformer)
		if err != nil {
			return false, err
		}

		// Process join constraints if any
		constraintsModified, err := qp.processExprSubqueries(e.Constraints, transformer)
		if err != nil {
			return false, err
		}

		if leftModified || rightModified || constraintsModified {
			modified = true
		}

	case *parser.TableExpr:
		// Process any subqueries in the table expression
		return qp.processExprSubqueries(e.Expr, transformer)

	case *parser.AliasExpr:
		// Check if the aliased expression contains a subquery
		return qp.processExprSubqueries(e.Expr, transformer)

	case *parser.FunctionExpr:
		// Check function parameters for subqueries
		if e.Params != nil && e.Params.Items != nil {
			for _, item := range e.Params.Items.Items {
				itemModified, err := qp.processExprSubqueries(item, transformer)
				if err != nil {
					return false, err
				}
				if itemModified {
					modified = true
				}
			}
		}
	}

	return modified, nil
}

// transformExpr recursively processes expressions in the WHERE clause
func (qp *QueryProcessor) transformExpr(expr parser.Expr, transformer FilterTransformer) (parser.Expr, bool, error) {
	if expr == nil {
		return nil, false, nil
	}

	// Handle different expression types
	switch e := expr.(type) {
	case *parser.SubQuery:
		// Handle subqueries like "column IN (SELECT...)"
		if e.Select != nil {
			modified, err := qp.processWhereClause(e.Select, transformer)
			if err != nil {
				return nil, false, err
			}
			return expr, modified, nil
		}

	case *parser.BinaryOperation:
		// Handle IN with a subquery on the right
		if e.Operation == "IN" || e.Operation == "NOT IN" {
			_, rightIsSubQuery := e.RightExpr.(*parser.SubQuery)
			if rightIsSubQuery {
				// If right side is a subquery, check if left side has variables
				leftVars := qp.findVariables(e.LeftExpr)
				if len(leftVars) > 0 {
					// Apply action to the entire IN clause
					action := transformer(leftVars[0], expr)
					switch action {
					case RemoveFilter:
						return nil, true, nil
					case ReplaceWithExistsCheck:
						return qp.createExistsCheck(expr, leftVars[0])
					}
				}

				// Process the subquery separately (regardless of whether we modified based on left side)
				newRight, rightChanged, err := qp.transformExpr(e.RightExpr, transformer)
				if err != nil {
					return nil, false, err
				}

				if rightChanged {
					return &parser.BinaryOperation{
						LeftExpr:  e.LeftExpr,
						Operation: e.Operation,
						RightExpr: newRight,
						HasGlobal: e.HasGlobal,
						HasNot:    e.HasNot,
					}, true, nil
				}

				// If no changes, return the original
				return expr, false, nil
			}
		}

		// Check if this specific binary operation directly contains a variable
		leftVars := qp.findVariables(e.LeftExpr)
		rightVars := qp.findVariables(e.RightExpr)

		// If this is a direct filter with a variable (e.g., "column = $var")
		// and not a complex expression, handle it directly
		if len(leftVars) > 0 && len(rightVars) == 0 &&
			!qp.isComplexExpression(e.LeftExpr) && !qp.isComplexExpression(e.RightExpr) {
			action := transformer(leftVars[0], expr)
			switch action {
			case RemoveFilter:
				return nil, true, nil
			case ReplaceWithExistsCheck:
				return qp.createExistsCheck(expr, leftVars[0])
			}
		} else if len(rightVars) > 0 && len(leftVars) == 0 &&
			!qp.isComplexExpression(e.LeftExpr) && !qp.isComplexExpression(e.RightExpr) {
			action := transformer(rightVars[0], expr)
			switch action {
			case RemoveFilter:
				return nil, true, nil
			case ReplaceWithExistsCheck:
				return qp.createExistsCheck(expr, rightVars[0])
			}
		}

		// Otherwise, recursively process left and right sides
		newLeft, leftChanged, err := qp.transformExpr(e.LeftExpr, transformer)
		if err != nil {
			return nil, false, err
		}

		newRight, rightChanged, err := qp.transformExpr(e.RightExpr, transformer)
		if err != nil {
			return nil, false, err
		}

		if leftChanged || rightChanged {
			if e.Operation == "AND" {
				// For AND operations, if either side is nil (removed), we can simplify
				if newLeft == nil {
					return newRight, true, nil
				}
				if newRight == nil {
					return newLeft, true, nil
				}
			} else if (newLeft == nil || newRight == nil) &&
				(e.Operation == "=" || e.Operation == "IN" ||
					e.Operation == "<" || e.Operation == ">" ||
					e.Operation == "<=" || e.Operation == ">=") {
				// For direct comparison operations, if one side is removed, remove the entire expression
				return nil, true, nil
			}

			// Create a new binary operation with the modified sides
			return &parser.BinaryOperation{
				LeftExpr:  newLeft,
				Operation: e.Operation,
				RightExpr: newRight,
				HasGlobal: e.HasGlobal,
				HasNot:    e.HasNot,
			}, true, nil
		}
	}

	// For other expression types that may contain variables
	variables := qp.findVariables(expr)
	if len(variables) > 0 && !qp.isComplexExpression(expr) {
		action := transformer(variables[0], expr)
		switch action {
		case RemoveFilter:
			return nil, true, nil
		case ReplaceWithExistsCheck:
			return qp.createExistsCheck(expr, variables[0])
		}
	}

	return expr, false, nil
}

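// Worked example of the simplification rules above: if the transformer
// returns RemoveFilter for $service_name, then in
//
//	WHERE service_name = $service_name AND duration > 100
//
// the AND collapses to its surviving side, leaving
//
//	WHERE duration > 100
//
// while a bare comparison whose operand is removed disappears entirely.
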
// isComplexExpression checks if an expression contains nested operations
// that should not be treated as a simple variable reference
func (qp *QueryProcessor) isComplexExpression(expr parser.Expr) bool {
	switch e := expr.(type) {
	case *parser.BinaryOperation:
		// If it's a binary operation, it's complex
		return true
	case *parser.FunctionExpr:
		// If it's a function, examine its parameters
		if e.Params != nil && e.Params.Items != nil {
			for _, item := range e.Params.Items.Items {
				if qp.isComplexExpression(item) {
					return true
				}
			}
		}
	}
	return false
}

// findVariables finds all variables in an expression
func (qp *QueryProcessor) findVariables(expr parser.Expr) []string {
	var variables []string

	if expr == nil {
		return variables
	}

	switch e := expr.(type) {
	case *parser.Ident:
		// we should identify the following ways of using variables
		// (whitespace at the end or beginning of the variable name
		// should be trimmed):
		// {{.variable_name}}, {{ .variable_name }}, {{ .variable_name}}
		// $variable_name
		// [[variable_name]], [[ variable_name]], [[ variable_name ]]
		// {{variable_name}}, {{ variable_name }}, {{variable_name }}

		if strings.HasPrefix(e.Name, "$") {
			variables = append(variables, e.Name[1:]) // Remove the $ prefix
		}

	case *parser.BinaryOperation:
		variables = append(variables, qp.findVariables(e.LeftExpr)...)
		variables = append(variables, qp.findVariables(e.RightExpr)...)
	case *parser.FunctionExpr:
		if e.Params != nil && e.Params.Items != nil {
			for _, item := range e.Params.Items.Items {
				variables = append(variables, qp.findVariables(item)...)
			}
		}
	case *parser.ColumnExpr:
		variables = append(variables, qp.findVariables(e.Expr)...)
	case *parser.ParamExprList:
		if e.Items != nil {
			for _, item := range e.Items.Items {
				variables = append(variables, qp.findVariables(item)...)
			}
		}
	case *parser.SelectItem:
		variables = append(variables, qp.findVariables(e.Expr)...)
	case *parser.IndexOperation:
		variables = append(variables, qp.findVariables(e.Object)...)
		variables = append(variables, qp.findVariables(e.Index)...)
	}

	return variables
}

// createExistsCheck creates an EXISTS check for a column/map field
func (qp *QueryProcessor) createExistsCheck(expr parser.Expr, _ string) (parser.Expr, bool, error) {
	switch e := expr.(type) {
	case *parser.BinaryOperation:
		// Handle map field access like "attributes['http.method'] = $http_method"
		if indexOp, ok := e.LeftExpr.(*parser.IndexOperation); ok {
			// Create a "has" function check for maps
			functionName := &parser.Ident{
				Name: "has",
			}

			// Create function parameters with the map and the key
			params := &parser.ParamExprList{
				Items: &parser.ColumnExprList{
					Items: []parser.Expr{
						indexOp.Object, // The map name (e.g., "attributes")
						indexOp.Index,  // The key (e.g., "'http.method'")
					},
				},
			}

			return &parser.FunctionExpr{
				Name:   functionName,
				Params: params,
			}, true, nil
		}

		// Handle direct field comparisons like "field = $variable"
		if ident, ok := e.LeftExpr.(*parser.Ident); ok && !strings.HasPrefix(ident.Name, "$") {
			// For regular columns, we might want to check if the column exists or has a non-null value
			functionName := &parser.Ident{
				Name: "isNotNull",
			}

			// Create function parameters
			params := &parser.ParamExprList{
				Items: &parser.ColumnExprList{
					Items: []parser.Expr{
						ident, // The field name
					},
				},
			}

			return &parser.FunctionExpr{
				Name:   functionName,
				Params: params,
			}, true, nil
		} else if ident, ok := e.RightExpr.(*parser.Ident); ok && !strings.HasPrefix(ident.Name, "$") {
			// For regular columns, but the variable is on the left
			functionName := &parser.Ident{
				Name: "isNotNull",
			}

			params := &parser.ParamExprList{
				Items: &parser.ColumnExprList{
					Items: []parser.Expr{
						ident, // The field name
					},
				},
			}

			return &parser.FunctionExpr{
				Name:   functionName,
				Params: params,
			}, true, nil
		}

		// Handle IN clauses like "field IN ($variables)"
		if e.Operation == "IN" || e.Operation == "NOT IN" {
			if ident, ok := e.LeftExpr.(*parser.Ident); ok && !strings.HasPrefix(ident.Name, "$") {
				// For IN clauses, we might just check if the field exists
				functionName := &parser.Ident{
					Name: "isNotNull",
				}

				params := &parser.ParamExprList{
					Items: &parser.ColumnExprList{
						Items: []parser.Expr{
							ident, // The field name
						},
					},
				}

				return &parser.FunctionExpr{
					Name:   functionName,
					Params: params,
				}, true, nil
			}
		}
	}

	// If we couldn't transform it to an EXISTS check, keep the original
	return expr, false, nil
}
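Taken together, ProcessQuery is driven entirely by the caller's FilterTransformer callback. A minimal usage sketch (the query and the drop-every-variable policy are illustrative):

package main

import (
	"fmt"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
	"github.com/SigNoz/signoz/pkg/variables/clickhouse"
)

func main() {
	qp := clickhouse.NewQueryProcessor()

	// Remove every filter that references a $variable; keep everything else.
	out, err := qp.ProcessQuery(
		"SELECT name FROM signoz_traces.signoz_index_v3 WHERE service_name = $service_name AND duration > 100",
		func(variableName string, expr parser.Expr) clickhouse.FilterAction {
			fmt.Println("found variable:", variableName)
			return clickhouse.RemoveFilter
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
	// Should print something like:
	// SELECT name FROM signoz_traces.signoz_index_v3 WHERE duration > 100;
}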
pkg/variables/clickhouse/transfor_test.go (new file, 210 lines)
@@ -0,0 +1,210 @@
package clickhouse

import (
	"testing"
)

func TestTransform(t *testing.T) {

	testCases := []struct {
		name      string
		sql       string
		variables []VariableValue
		expected  string
	}{
		{
			name: "Example 1: Only service_name is __all__",
			sql: `SELECT trace_id, name, kind, status
			FROM signoz_traces.signoz_index_v3
			WHERE service_name = $service_name
			AND operation_name IN ($operation_names)
			AND duration >= $min_duration
			AND attributes['http.method'] = $http_method
			AND op IN (SELECT op FROM signoz_traces.operations WHERE service_name = $service_name AND kind = 'server')
			AND timestamp BETWEEN $start_time AND $end_time`,

			// Define our variables and their values/metadata
			variables: []VariableValue{
				{
					Name:        "service_name",
					Values:      []string{"__all__"}, // User selected "__all__"
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "operation_names",
					Values:      []string{"op1", "op2", "op3"},
					IsSelectAll: false, // User selected specific values
					FieldType:   "array",
				},
				{
					Name:        "min_duration",
					Values:      []string{"100"},
					IsSelectAll: false,
					FieldType:   "scalar",
				},
				{
					Name:        "http_method",
					Values:      []string{"GET", "POST"},
					IsSelectAll: false,
					FieldType:   "map", // This is a map lookup
				},
				{
					Name:        "start_time",
					Values:      []string{"2023-01-01 00:00:00"},
					IsSelectAll: false,
					FieldType:   "scalar",
				},
				{
					Name:        "end_time",
					Values:      []string{"2023-01-02 00:00:00"},
					IsSelectAll: false,
					FieldType:   "scalar",
				},
			},
			expected: `SELECT trace_id, name, kind, status FROM signoz_traces.signoz_index_v3 WHERE operation_name IN ($operation_names) AND duration >= $min_duration AND attributes['http.method'] = $http_method AND op IN (SELECT op FROM signoz_traces.operations WHERE kind = 'server') AND timestamp BETWEEN $start_time AND $end_time;`,
		},
		{
			name: "Example 2: Multiple __all__ selections and map lookups",
			sql: `SELECT trace_id, name, kind, status
			FROM signoz_traces.signoz_index_v3
			WHERE service_name = $service_name
			AND operation_name IN ($operation_names)
			AND duration >= $min_duration
			AND attributes['http.method'] = $http_method
			AND op IN (SELECT op FROM signoz_traces.operations WHERE service_name = $service_name AND kind = 'server')
			AND timestamp BETWEEN $start_time AND $end_time`,

			variables: []VariableValue{
				{
					Name:        "service_name",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "operation_names",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "array",
				},
				{
					Name:        "min_duration",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "http_method",
					Values:      []string{"GET", "POST"},
					IsSelectAll: false,
					FieldType:   "map",
				},
				{
					Name:        "start_time",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "end_time",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
			},
			expected: `SELECT trace_id, name, kind, status FROM signoz_traces.signoz_index_v3 WHERE attributes['http.method'] = $http_method AND op IN (SELECT op FROM signoz_traces.operations WHERE kind = 'server') AND timestamp BETWEEN $start_time AND $end_time;`,
		},
		{
			name: "Example 3: Multiple __all__ selections and map lookups",
			sql: `SELECT trace_id, name, kind, status
			FROM signoz_traces.signoz_index_v3
			WHERE service_name = $service_name
			AND operation_name IN ($operation_names)
			AND duration >= $min_duration
			AND attributes['http.method'] = $http_method
			AND op IN (SELECT op FROM signoz_traces.operations WHERE service_name = $service_name AND kind = 'server')
			AND timestamp BETWEEN $start_time AND $end_time`,

			variables: []VariableValue{
				{
					Name:        "service_name",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "operation_names",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "array",
				},
				{
					Name:        "min_duration",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "http_method",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "map",
				},
			},
			expected: `SELECT trace_id, name, kind, status FROM signoz_traces.signoz_index_v3 WHERE op IN (SELECT op FROM signoz_traces.operations WHERE kind = 'server') AND timestamp BETWEEN $start_time AND $end_time;`,
		},
		{
			name: "Example 4: Template-style variable syntax with __all__ selections",
			sql: `SELECT trace_id, name, kind, status
			FROM signoz_traces.signoz_index_v3
			WHERE service_name = {{service_name}}
			AND operation_name IN {{.operation_names}}
			AND duration >= {{min_duration}}
			AND attributes['http.method'] = $http_method
			AND op IN (SELECT op FROM signoz_traces.operations WHERE service_name = {{service_name}} AND kind = 'server')`,

			variables: []VariableValue{
				{
					Name:        "service_name",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "operation_names",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "array",
				},
				{
					Name:        "min_duration",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "scalar",
				},
				{
					Name:        "http_method",
					Values:      []string{"__all__"},
					IsSelectAll: true,
					FieldType:   "map",
				},
			},
			expected: `SELECT trace_id, name, kind, status FROM signoz_traces.signoz_index_v3 WHERE op IN (SELECT op FROM signoz_traces.operations WHERE kind = 'server');`,
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			transformer := NewQueryTransformer(testCase.sql, testCase.variables)
			modifiedQuery, err := transformer.Transform()
			if err != nil {
				t.Fatalf("Error transforming query: %v", err)
			}

			if modifiedQuery != testCase.expected {
				t.Errorf("Expected transformed query to be:\n%s\nBut got:\n%s", testCase.expected, modifiedQuery)
			}
		})
	}
}
pkg/variables/clickhouse/transform.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package clickhouse

import (
	"fmt"
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
)

// VariableValue represents a variable's assigned value
type VariableValue struct {
	Name        string
	Values      []string
	IsSelectAll bool
	FieldType   string // "scalar", "array", "map", etc.
}

// QueryTransformer handles the transformation of queries based on variable values
type QueryTransformer struct {
	processor   *QueryProcessor
	variables   map[string]VariableValue
	originalSQL string
}

// NewQueryTransformer creates a new transformer with the given SQL and variables
func NewQueryTransformer(sql string, variables []VariableValue) *QueryTransformer {
	varMap := make(map[string]VariableValue)
	for _, v := range variables {
		varMap[v.Name] = v
	}

	// for each variable, replace the {{variable_name}}, [[variable_name]],
	// {{ .variable_name }}, and {{.variable_name}} forms with $variable_name
	for name := range varMap {
		sql = strings.Replace(sql, fmt.Sprintf("{{%s}}", name), fmt.Sprintf("$%s", name), -1)
		sql = strings.Replace(sql, fmt.Sprintf("[[%s]]", name), fmt.Sprintf("$%s", name), -1)
		sql = strings.Replace(sql, fmt.Sprintf("{{ .%s }}", name), fmt.Sprintf("$%s", name), -1)
		sql = strings.Replace(sql, fmt.Sprintf("{{.%s}}", name), fmt.Sprintf("$%s", name), -1)
	}

	return &QueryTransformer{
		processor:   NewQueryProcessor(),
		variables:   varMap,
		originalSQL: sql,
	}
}

// Transform processes the query and returns a transformed version
func (t *QueryTransformer) Transform() (string, error) {
	return t.processor.ProcessQuery(t.originalSQL, t.transformFilter)
}

// transformFilter is the callback function that decides what to do with each filter
func (t *QueryTransformer) transformFilter(variableName string, expr parser.Expr) FilterAction {
	// Check if we have info about this variable
	varInfo, exists := t.variables[variableName]
	if !exists {
		// If we don't have info, keep the filter as is
		return KeepFilter
	}

	// If the user selected "__all__", we should remove the filter
	if varInfo.IsSelectAll {
		return RemoveFilter
	}

	// For maps, we might want to check for existence rather than equality
	if varInfo.FieldType == "map" {
		return ReplaceWithExistsCheck
	}

	// Otherwise keep the filter as is (it will be filled with the actual values)
	return KeepFilter
}
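End to end, the transformer first normalizes the template syntaxes to $name and then applies the decision rules above while walking the parsed query; a compact usage sketch mirroring the tests (the env column and values are illustrative):

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/variables/clickhouse"
)

func main() {
	vars := []clickhouse.VariableValue{
		// "__all__" selection: the service_name filter is dropped entirely.
		{Name: "service_name", Values: []string{"__all__"}, IsSelectAll: true, FieldType: "scalar"},
		// Concrete selection: the filter is kept for later value substitution.
		{Name: "env", Values: []string{"prod"}, IsSelectAll: false, FieldType: "scalar"},
	}

	tr := clickhouse.NewQueryTransformer(
		"SELECT count() FROM signoz_traces.signoz_index_v3 WHERE service_name = {{service_name}} AND env = $env",
		vars,
	)
	out, err := tr.Transform()
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
	// Should print something like:
	// SELECT count() FROM signoz_traces.signoz_index_v3 WHERE env = $env;
}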