Compare commits

...

54 Commits

Author SHA1 Message Date
Piyush Singariya
fc0853a8ae Merge branch 'main' into merge-json-col-fields 2026-02-18 10:56:34 +05:30
Abhi kumar
7f1d350ffe chore: hide chartmanager when queries are merged (#10332)
* chore: hide chartmanager when queries are merged

* chore: added histogram panel tests
2026-02-17 14:02:23 +00:00
Ashwin Bhatkal
1d3134959d refactor: dashboard header components (#10326)
* chore: dashboard header components

* chore: small format
2026-02-17 13:41:03 +00:00
Abhi kumar
b86bd24dd9 feat: added new histogram panel (#10328)
* chore: refactored the config builder and added base config builder

* chore: added a common chart wrapper

* chore: tsc fix

* fix: pr review changes

* fix: pr review changes

* chore: added different tooltips

* chore: removed dayjs extension

* feat: added new barpanel component

* fix: added fix for pr review changes

* chore: added support for bar alignment configuration

* feat: added new histogram panel

* chore: updated structure for bar panel

* fix: fixed merge histogram breaking issue

* chore: pr review changes

* chore: added fill mode + bar color when merged

* chore: code cleanup

* chore: added fill mode + bar color when merged

* feat: added new histogram panel
2026-02-17 13:26:39 +00:00
Srikanth Chekuri
4c49d45cbf fix: update rate/increase query and address several issues in builder… (#10299) 2026-02-17 13:09:58 +00:00
Abhi kumar
9b3d3453b1 feat: added new histogram panel component (#10278)
* chore: refactored the config builder and added base config builder

* chore: added a common chart wrapper

* chore: tsc fix

* fix: pr review changes

* fix: pr review changes

* chore: added different tooltips

* chore: removed dayjs extension

* feat: added new barpanel component

* fix: added fix for pr review changes

* chore: added support for bar alignment configuration

* feat: added new histogram panel

* chore: updated structure for bar panel

* fix: fixed merge histogram breaking issue

* chore: pr review changes

* chore: added fill mode + bar color when merged

* chore: code cleanup

* chore: added fill mode + bar color when merged
2026-02-17 12:38:54 +00:00
Karan Balani
9d981d8a13 feat: improve root user provisioning and restore users unique index (#10327)
* chore: pushing forward root user

* chore: fix message

* fix: config naming
2026-02-17 17:53:13 +05:30
Piyush Singariya
bb8c874755 fix: go lint 2026-02-17 17:19:24 +05:30
Piyush Singariya
13cbe03d64 fix: tests 2026-02-17 16:58:00 +05:30
Piyush Singariya
93621c29b7 fix: go mod changes 2026-02-17 16:29:00 +05:30
Piyush Singariya
2c691b5a75 fix: test fixed 2026-02-17 16:28:54 +05:30
Piyush Singariya
cd7e1bb114 Merge branch 'main' into merge-json-col-fields 2026-02-17 16:22:58 +05:30
Piyush Singariya
a1d2ec8b8a fix: remove unused function 2026-02-17 16:18:38 +05:30
Piyush Singariya
8bbafb52d5 fix: go.mod required changes 2026-02-17 16:16:17 +05:30
Pandey
6de4520a95 feat: add root user support (#10313)
## Summary

- Adds root user support with environment-based provisioning, protection guards, and automatic reconciliation. A root user is a special admin user that is provisioned via configuration (environment variables) rather than the UI, designed for automated/headless deployments.

## Key Features
- Environment-based provisioning: Configure the root user via the user.root.enabled, user.root.email, user.root.password, and user.root.org_name settings

- Automatic reconciliation: A background service runs on startup that:
    - Looks up the organization by configured org_name
    - If no matching org exists, creates the organization and root user via CreateFirstUser
    - If the org exists, reconciles the root user (creates it, promotes an existing user, or updates the email/password to match the config)
    - Retries every 10 seconds until successful

- Protection guards: Root users cannot be:
    - Updated or deleted through the API
    - Invited, or have their password changed, through the UI
    - Authenticated via SSO/SAML (password-only authentication is enforced)

- Self-registration disabled: When root user provisioning is enabled, the self-registration endpoint (/register) is blocked to prevent creating duplicate organizations

- Idempotent password sync: On every reconciliation, the root user's password is synced with the configured value — if it differs, it's updated; if it matches, no-op
2026-02-17 15:26:56 +05:30
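
A minimal TypeScript sketch of the reconciliation loop described in the commit above. The actual implementation is in Go; the Store interface and every method name below except CreateFirstUser are illustrative assumptions, not the real SigNoz API.

interface Store {
  getOrgByName(name: string): Promise<{ id: string } | null>;
  createFirstUser(orgName: string, email: string, password: string): Promise<void>;
  getUserByEmail(orgId: string, email: string): Promise<{ id: string; isRoot: boolean } | null>;
  createRootUser(orgId: string, email: string, password: string): Promise<{ id: string; isRoot: boolean }>;
  promoteToRoot(userId: string): Promise<void>;
  passwordMatches(userId: string, password: string): Promise<boolean>;
  updatePassword(userId: string, password: string): Promise<void>;
}

type RootConfig = { email: string; password: string; orgName: string };

// Runs on startup and retries every 10 seconds until one pass succeeds.
// Simplified: the real service also reconciles the stored email against config.
async function reconcileRootUser(cfg: RootConfig, store: Store): Promise<void> {
  for (;;) {
    try {
      const org = await store.getOrgByName(cfg.orgName);
      if (!org) {
        // No matching org: create the organization and root user together.
        await store.createFirstUser(cfg.orgName, cfg.email, cfg.password);
        return;
      }
      let user = await store.getUserByEmail(org.id, cfg.email);
      if (!user) {
        user = await store.createRootUser(org.id, cfg.email, cfg.password);
      } else if (!user.isRoot) {
        await store.promoteToRoot(user.id);
      }
      // Idempotent password sync: update only when the stored value differs.
      if (!(await store.passwordMatches(user.id, cfg.password))) {
        await store.updatePassword(user.id, cfg.password);
      }
      return;
    } catch {
      await new Promise((resolve) => setTimeout(resolve, 10_000));
    }
  }
}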
Abhishek Kumar Singh
f566909320 fix: added support for ucum based units in converter (#10284) 2026-02-17 14:09:43 +05:30
Piyush Singariya
075cfab463 feat: mapping body_v2.message:string map to body 2026-02-17 13:26:34 +05:30
Piyush Singariya
86bccaac0c test: blocked on pr #10153 2026-02-16 15:24:31 +05:30
Piyush Singariya
de1aac63c0 revert: more unrelated change 2026-02-16 13:19:49 +05:30
Piyush Singariya
14fe8745b5 Merge branch 'main' into merge-json-col-fields 2026-02-16 13:14:39 +05:30
Piyush Singariya
4013c7ee03 revert: few unrelated changes 2026-02-16 13:13:07 +05:30
Piyush Singariya
0d34360e0b fix: handle datatype collision 2026-01-30 12:17:28 +05:30
srikanthccv
d204c89dec Merge branch 'main' into merge-json-col-fields 2026-01-30 02:12:14 +05:30
Piyush Singariya
8dd33c1ab7 Merge branch 'main' into merge-json-col-fields 2026-01-29 19:57:22 +05:30
Piyush Singariya
8e5c3d5ae1 chore: merge json fields 2026-01-29 16:46:00 +05:30
Piyush Singariya
d45bb52f33 Merge branch 'has-jsonqb' into merge-json-col-fields 2026-01-29 13:21:13 +05:30
Piyush Singariya
e71818292d fix: go test flakiness 2026-01-29 10:17:53 +05:30
Piyush Singariya
37557f7f24 Merge branch 'main' into has-jsonqb 2026-01-29 09:12:22 +05:30
Piyush Singariya
27ff102660 Merge branch 'main' into has-jsonqb 2026-01-28 17:44:48 +05:30
Piyush Singariya
cb2aa4cffd fix: tests 2026-01-28 17:42:17 +05:30
Piyush Singariya
58d1d84ec7 test: fix 2026-01-28 15:34:22 +05:30
Piyush Singariya
d8e116a7bc fix: merge conflict 2026-01-28 15:15:50 +05:30
Piyush Singariya
6a48bdc37e Merge branch 'main' into has-jsonqb 2026-01-28 15:15:17 +05:30
Piyush Singariya
ffb62432f8 chore: var renamed 2026-01-28 14:42:51 +05:30
Piyush Singariya
57c51f070c fix: merge json body columns together 2026-01-28 14:36:15 +05:30
Piyush Singariya
36becfc7a2 fix: removed comment 2026-01-27 13:20:52 +05:30
Piyush Singariya
8e71de09f3 fix: remove unnecessary bool checking 2026-01-27 13:16:30 +05:30
Piyush Singariya
56de92de73 fix: changes based on review from Srikanth 2026-01-27 13:12:32 +05:30
Piyush Singariya
62b10f8e77 Merge branch 'main' into has-jsonqb 2026-01-27 10:04:32 +05:30
Piyush Singariya
20b53d7856 fix: review based on tushar 2026-01-27 10:04:15 +05:30
Piyush Singariya
8f2c506304 fix: json qb test fix 2026-01-22 22:00:06 +05:30
Srikanth Chekuri
7b5b9027dd Merge branch 'main' into has-jsonqb 2026-01-22 20:19:39 +05:30
Piyush Singariya
b77f97fcb7 fix: tests 2026-01-22 17:24:26 +05:30
Piyush Singariya
62942a4162 fix: tests 2026-01-22 15:47:20 +05:30
Piyush Singariya
349bbbbf1d Merge branch 'main' into has-jsonqb 2026-01-22 12:36:45 +05:30
Piyush Singariya
1966a7a5f6 fix: empty filteredArrays condition 2026-01-22 12:36:29 +05:30
Piyush Singariya
a4eed9ff13 fix: build json plans in metadata 2026-01-22 12:33:51 +05:30
Piyush Singariya
24d1ee33b5 revert: gitignore change 2026-01-22 10:39:02 +05:30
Srikanth Chekuri
3402203021 Merge branch 'main' into has-jsonqb 2026-01-21 13:47:39 +05:30
Piyush Singariya
e8e4897cc8 fix: tests GroupBy 2026-01-20 12:10:44 +05:30
Piyush Singariya
96fb88aaee fix: ignored .vscode in gitignore 2026-01-20 11:47:34 +05:30
Piyush Singariya
5a00e6c2cd Merge branch 'main' into has-jsonqb 2026-01-20 11:44:32 +05:30
Piyush Singariya
e2500cff7d fix: tests expected queries and values 2026-01-20 11:40:56 +05:30
Piyush Singariya
4864c3bc37 feat: has JSON QB 2026-01-20 11:23:50 +05:30
125 changed files with 5061 additions and 2508 deletions

View File

@@ -176,25 +176,6 @@ We have published benchmarks comparing Loki with SigNoz. Check
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.
Not sure how to get started? Just ping us on the #contributing channel in our [slack community](https://signoz.io/slack)
### Our project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
## Documentation

View File

@@ -221,34 +221,6 @@ We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)
#### Frontend
- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
- [Vibhu Pandey](https://github.com/therealpandey)
<br /><br />

View File

@@ -187,25 +187,6 @@ Jaeger is just a distributed tracing system. But SigNoz can provide metric
Not sure how to get started? Just reach out to us on the `#contributing` channel in our [slack community](https://signoz.io/slack).
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
## Documentation

View File

@@ -294,7 +294,6 @@ flagger:
  config:
    boolean:
      use_span_metrics: true
      interpolation_enabled: false
      kafka_span_eval: false
    string:
    float:
@@ -309,3 +308,14 @@ user:
  allow_self: true
  # The duration within which a user can reset their password.
  max_token_lifetime: 6h
  root:
    # Whether to enable the root user. When enabled, a root user is provisioned
    # on startup using the email and password below. The root user cannot be
    # deleted, updated, or have their password changed through the UI.
    enabled: false
    # The email address of the root user.
    email: ""
    # The password of the root user. Must meet password requirements.
    password: ""
    # The name of the organization to create or look up for the root user.
    org_name: default

View File

@@ -4678,6 +4678,8 @@ components:
        type: string
      id:
        type: string
      isRoot:
        type: boolean
      orgId:
        type: string
      role:

View File

@@ -45,7 +45,7 @@ type APIHandler struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.Config) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
RuleManager: opts.RulesManager,
@@ -58,7 +58,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
})
}, config)
if err != nil {
return nil, err

View File

@@ -175,7 +175,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
GlobalConfig: config.Global,
}
apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)
if err != nil {
return nil, err
}

View File

@@ -1542,6 +1542,10 @@ export interface TypesUserDTO {
* @type string
*/
id?: string;
/**
* @type boolean
*/
isRoot?: boolean;
/**
* @type string
*/

View File

@@ -9,74 +9,6 @@
padding: 0px;
}
.dashboard-header {
border-bottom: 1px solid var(--bg-slate-400);
display: flex;
justify-content: space-between;
gap: 16px;
align-items: center;
padding: 0 8px;
box-sizing: border-box;
.dashboard-breadcrumbs {
width: 100%;
height: 48px;
display: flex;
gap: 6px;
align-items: center;
max-width: 80%;
.dashboard-btn {
display: flex;
align-items: center;
color: var(--bg-vanilla-400);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px; /* 142.857% */
letter-spacing: -0.07px;
padding: 0px;
height: 20px;
}
.dashboard-btn:hover {
background-color: unset;
}
.id-btn {
display: flex;
align-items: center;
padding: 0px 2px;
border-radius: 2px;
background: rgba(113, 144, 249, 0.1);
color: var(--bg-robin-400);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px; /* 142.857% */
height: 20px;
max-width: calc(100% - 120px);
span {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.ant-btn-icon {
margin-inline-end: 4px;
}
}
.id-btn:hover {
background: rgba(113, 144, 249, 0.1);
color: var(--bg-robin-300);
}
}
}
.dashboard-details {
display: flex;
justify-content: space-between;
@@ -535,15 +467,6 @@
.dashboard-description-container {
color: var(--bg-ink-400);
.dashboard-header {
border-bottom: 1px solid var(--bg-vanilla-300);
.dashboard-breadcrumbs {
.dashboard-btn {
color: var(--bg-ink-400);
}
}
}
.dashboard-details {
.left-section {
.dashboard-title {

View File

@@ -16,9 +16,7 @@ import {
} from 'antd';
import logEvent from 'api/common/logEvent';
import ConfigureIcon from 'assets/Integrations/ConfigureIcon';
import HeaderRightSection from 'components/HeaderRightSection/HeaderRightSection';
import { PANEL_GROUP_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { DeleteButton } from 'container/ListOfDashboard/TableComponents/DeleteButton';
import DateTimeSelectionV2 from 'container/TopNav/DateTimeSelectionV2';
import { useDashboardVariables } from 'hooks/dashboard/useDashboardVariables';
@@ -27,7 +25,6 @@ import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
import useComponentPermission from 'hooks/useComponentPermission';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { useNotifications } from 'hooks/useNotifications';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { isEmpty } from 'lodash-es';
import {
Check,
@@ -37,7 +34,6 @@ import {
FolderKanban,
Fullscreen,
Globe,
LayoutGrid,
LockKeyhole,
PenLine,
X,
@@ -51,6 +47,7 @@ import { ROLES, USER_ROLES } from 'types/roles';
import { ComponentTypes } from 'utils/permission';
import { v4 as uuid } from 'uuid';
import DashboardHeader from '../components/DashboardHeader/DashboardHeader';
import DashboardGraphSlider from '../ComponentsSlider';
import DashboardSettings from '../DashboardSettings';
import { Base64Icons } from '../DashboardSettings/General/utils';
@@ -71,7 +68,6 @@ interface DashboardDescriptionProps {
// eslint-disable-next-line sonarjs/cognitive-complexity
function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
const { safeNavigate } = useSafeNavigate();
const { handle } = props;
const {
selectedDashboard,
@@ -80,7 +76,6 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
layouts,
setLayouts,
isDashboardLocked,
listSortOrder,
setSelectedDashboard,
handleToggleDashboardSlider,
setSelectedRowWidgetId,
@@ -292,17 +287,6 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
});
}
function goToListPage(): void {
const urlParams = new URLSearchParams();
urlParams.set('columnKey', listSortOrder.columnKey as string);
urlParams.set('order', listSortOrder.order as string);
urlParams.set('page', listSortOrder.pagination as string);
urlParams.set('search', listSortOrder.search as string);
const generatedUrl = `${ROUTES.ALL_DASHBOARD}?${urlParams.toString()}`;
safeNavigate(generatedUrl);
}
const {
data: publicDashboardResponse,
isLoading: isLoadingPublicDashboardData,
@@ -351,32 +335,7 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
return (
<Card className="dashboard-description-container">
<div className="dashboard-header">
<section className="dashboard-breadcrumbs">
<Button
type="text"
icon={<LayoutGrid size={14} />}
className="dashboard-btn"
onClick={(): void => goToListPage()}
>
Dashboard /
</Button>
<Button type="text" className="id-btn dashboard-name-btn">
<img
src={image}
alt="dashboard-icon"
style={{ height: '14px', width: '14px' }}
/>
{title}
</Button>
</section>
<HeaderRightSection
enableAnnouncements={false}
enableShare
enableFeedback
/>
</div>
<DashboardHeader />
<section className="dashboard-details">
<div className="left-section">
<img src={image} alt="dashboard-img" className="dashboard-img" />

View File

@@ -0,0 +1,71 @@
.dashboard-breadcrumbs {
width: 100%;
height: 48px;
display: flex;
gap: 6px;
align-items: center;
max-width: 80%;
.dashboard-btn {
display: flex;
align-items: center;
color: var(--bg-vanilla-400);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px; /* 142.857% */
letter-spacing: -0.07px;
padding: 0px;
height: 20px;
}
.dashboard-btn:hover {
background-color: unset;
}
.id-btn {
display: flex;
align-items: center;
gap: 4px;
padding: 0px 2px;
border-radius: 2px;
background: rgba(113, 144, 249, 0.1);
color: var(--bg-robin-400);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px; /* 142.857% */
height: 20px;
max-width: calc(100% - 120px);
span {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.ant-btn-icon {
margin-inline-end: 4px;
}
}
.id-btn:hover {
background: rgba(113, 144, 249, 0.1);
color: var(--bg-robin-300);
}
.dashboard-icon-image {
height: 14px;
width: 14px;
}
}
.lightMode {
.dashboard-breadcrumbs {
.dashboard-btn {
color: var(--bg-ink-400);
}
}
}

View File

@@ -0,0 +1,55 @@
import { useCallback } from 'react';
import { Button } from 'antd';
import ROUTES from 'constants/routes';
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import { LayoutGrid } from 'lucide-react';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { DashboardData } from 'types/api/dashboard/getAll';
import { Base64Icons } from '../../DashboardSettings/General/utils';
import './DashboardBreadcrumbs.styles.scss';
function DashboardBreadcrumbs(): JSX.Element {
const { safeNavigate } = useSafeNavigate();
const { selectedDashboard, listSortOrder } = useDashboard();
const selectedData = selectedDashboard
? {
...selectedDashboard.data,
uuid: selectedDashboard.id,
}
: ({} as DashboardData);
const { title = '', image = Base64Icons[0] } = selectedData || {};
const goToListPage = useCallback(() => {
const urlParams = new URLSearchParams();
urlParams.set('columnKey', listSortOrder.columnKey as string);
urlParams.set('order', listSortOrder.order as string);
urlParams.set('page', listSortOrder.pagination as string);
urlParams.set('search', listSortOrder.search as string);
const generatedUrl = `${ROUTES.ALL_DASHBOARD}?${urlParams.toString()}`;
safeNavigate(generatedUrl);
}, [listSortOrder, safeNavigate]);
return (
<div className="dashboard-breadcrumbs">
<Button
type="text"
icon={<LayoutGrid size={14} />}
className="dashboard-btn"
onClick={goToListPage}
>
Dashboard /
</Button>
<Button type="text" className="id-btn dashboard-name-btn">
<img src={image} alt="dashboard-icon" className="dashboard-icon-image" />
{title}
</Button>
</div>
);
}
export default DashboardBreadcrumbs;

View File

@@ -0,0 +1,15 @@
.dashboard-header {
border-bottom: 1px solid var(--bg-slate-400);
display: flex;
justify-content: space-between;
gap: 16px;
align-items: center;
padding: 0 8px;
box-sizing: border-box;
}
.lightMode {
.dashboard-header {
border-bottom: 1px solid var(--bg-vanilla-300);
}
}

View File

@@ -0,0 +1,17 @@
import { memo } from 'react';
import HeaderRightSection from 'components/HeaderRightSection/HeaderRightSection';
import DashboardBreadcrumbs from './DashboardBreadcrumbs';
import './DashboardHeader.styles.scss';
function DashboardHeader(): JSX.Element {
return (
<div className="dashboard-header">
<DashboardBreadcrumbs />
<HeaderRightSection enableAnnouncements={false} enableShare enableFeedback />
</div>
);
}
export default memo(DashboardHeader);

View File

@@ -23,6 +23,7 @@ export default function ChartWrapper({
width: containerWidth,
height: containerHeight,
showTooltip = true,
showLegend = true,
canPinTooltip = false,
syncMode,
syncKey,
@@ -36,6 +37,9 @@ export default function ChartWrapper({
const legendComponent = useCallback(
(averageLegendWidth: number): React.ReactNode => {
if (!showLegend) {
return null;
}
return (
<Legend
config={config}
@@ -44,7 +48,7 @@ export default function ChartWrapper({
/>
);
},
[config, legendConfig.position],
[config, legendConfig.position, showLegend],
);
const renderTooltipCallback = useCallback(
@@ -60,6 +64,7 @@ export default function ChartWrapper({
return (
<PlotContextProvider>
<ChartLayout
showLegend={showLegend}
config={config}
containerWidth={containerWidth}
containerHeight={containerHeight}

View File

@@ -0,0 +1,55 @@
import { useCallback } from 'react';
import ChartWrapper from 'container/DashboardContainer/visualization/charts/ChartWrapper/ChartWrapper';
import HistogramTooltip from 'lib/uPlotV2/components/Tooltip/HistogramTooltip';
import { buildTooltipContent } from 'lib/uPlotV2/components/Tooltip/utils';
import {
HistogramTooltipProps,
TooltipRenderArgs,
} from 'lib/uPlotV2/components/types';
import { HistogramChartProps } from '../types';
export default function Histogram(props: HistogramChartProps): JSX.Element {
const {
children,
renderTooltip: customRenderTooltip,
isQueriesMerged,
...rest
} = props;
const renderTooltip = useCallback(
(props: TooltipRenderArgs): React.ReactNode => {
if (customRenderTooltip) {
return customRenderTooltip(props);
}
const content = buildTooltipContent({
data: props.uPlotInstance.data,
series: props.uPlotInstance.series,
dataIndexes: props.dataIndexes,
activeSeriesIndex: props.seriesIndex,
uPlotInstance: props.uPlotInstance,
yAxisUnit: rest.yAxisUnit ?? '',
decimalPrecision: rest.decimalPrecision,
});
const tooltipProps: HistogramTooltipProps = {
...props,
timezone: rest.timezone,
yAxisUnit: rest.yAxisUnit,
decimalPrecision: rest.decimalPrecision,
content,
};
return <HistogramTooltip {...tooltipProps} />;
},
[customRenderTooltip, rest.timezone, rest.yAxisUnit, rest.decimalPrecision],
);
return (
<ChartWrapper
showLegend={!isQueriesMerged}
{...rest}
renderTooltip={renderTooltip}
>
{children}
</ChartWrapper>
);
}

View File

@@ -7,6 +7,7 @@ interface BaseChartProps {
width: number;
height: number;
showTooltip?: boolean;
showLegend?: boolean;
timezone: string;
canPinTooltip?: boolean;
yAxisUnit?: string;
@@ -17,6 +18,7 @@ interface BaseChartProps {
interface UPlotBasedChartProps {
config: UPlotConfigBuilder;
data: uPlot.AlignedData;
legendConfig: LegendConfig;
syncMode?: DashboardCursorSync;
syncKey?: string;
plotRef?: (plot: uPlot | null) => void;
@@ -26,14 +28,20 @@ interface UPlotBasedChartProps {
}
export interface TimeSeriesChartProps
extends BaseChartProps,
UPlotBasedChartProps {}
export interface HistogramChartProps
extends BaseChartProps,
UPlotBasedChartProps {
legendConfig: LegendConfig;
isQueriesMerged?: boolean;
}
export interface BarChartProps extends BaseChartProps, UPlotBasedChartProps {
legendConfig: LegendConfig;
isStackedBarChart?: boolean;
}
export type ChartProps = TimeSeriesChartProps | BarChartProps;
export type ChartProps =
| TimeSeriesChartProps
| BarChartProps
| HistogramChartProps;

View File

@@ -0,0 +1,69 @@
import { renderHook } from '@testing-library/react';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useScrollWidgetIntoView } from '../useScrollWidgetIntoView';
jest.mock('providers/Dashboard/Dashboard');
type MockHTMLElement = {
scrollIntoView: jest.Mock;
focus: jest.Mock;
};
function createMockElement(): MockHTMLElement {
return {
scrollIntoView: jest.fn(),
focus: jest.fn(),
};
}
describe('useScrollWidgetIntoView', () => {
const mockedUseDashboard = useDashboard as jest.MockedFunction<
typeof useDashboard
>;
beforeEach(() => {
jest.clearAllMocks();
});
it('scrolls into view and focuses when toScrollWidgetId matches widget id', () => {
const setToScrollWidgetId = jest.fn();
const mockElement = createMockElement();
const ref = ({
current: mockElement,
} as unknown) as React.RefObject<HTMLDivElement>;
mockedUseDashboard.mockReturnValue(({
toScrollWidgetId: 'widget-id',
setToScrollWidgetId,
} as unknown) as ReturnType<typeof useDashboard>);
renderHook(() => useScrollWidgetIntoView('widget-id', ref));
expect(mockElement.scrollIntoView).toHaveBeenCalledWith({
behavior: 'smooth',
block: 'center',
});
expect(mockElement.focus).toHaveBeenCalled();
expect(setToScrollWidgetId).toHaveBeenCalledWith('');
});
it('does nothing when toScrollWidgetId does not match widget id', () => {
const setToScrollWidgetId = jest.fn();
const mockElement = createMockElement();
const ref = ({
current: mockElement,
} as unknown) as React.RefObject<HTMLDivElement>;
mockedUseDashboard.mockReturnValue(({
toScrollWidgetId: 'other-widget',
setToScrollWidgetId,
} as unknown) as ReturnType<typeof useDashboard>);
renderHook(() => useScrollWidgetIntoView('widget-id', ref));
expect(mockElement.scrollIntoView).not.toHaveBeenCalled();
expect(mockElement.focus).not.toHaveBeenCalled();
expect(setToScrollWidgetId).not.toHaveBeenCalled();
});
});

View File

@@ -0,0 +1,26 @@
import { RefObject, useEffect } from 'react';
import { useDashboard } from 'providers/Dashboard/Dashboard';
/**
* Scrolls the given widget container into view when the dashboard
* requests it via `toScrollWidgetId`.
*
* Intended for use in panel components that render a single widget.
*/
export function useScrollWidgetIntoView<T extends HTMLElement>(
widgetId: string,
widgetContainerRef: RefObject<T>,
): void {
const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();
useEffect(() => {
if (toScrollWidgetId === widgetId) {
widgetContainerRef.current?.scrollIntoView({
behavior: 'smooth',
block: 'center',
});
widgetContainerRef.current?.focus();
setToScrollWidgetId('');
}
}, [toScrollWidgetId, setToScrollWidgetId, widgetId, widgetContainerRef]);
}

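A minimal usage sketch for the hook above. ExamplePanel is hypothetical; the real call sites (BarPanel, TimeSeriesPanel, HistogramPanel) appear in later diffs as useScrollWidgetIntoView(widget.id, graphRef).

import { useRef } from 'react';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';

function ExamplePanel({ widgetId }: { widgetId: string }): JSX.Element {
  const containerRef = useRef<HTMLDivElement>(null);
  // When the dashboard sets toScrollWidgetId to this widget's id, the container
  // scrolls into view, receives focus, and the id is cleared.
  useScrollWidgetIntoView(widgetId, containerRef);
  // tabIndex={-1} makes the div programmatically focusable for the focus() call.
  return <div ref={containerRef} tabIndex={-1} />;
}
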
View File

@@ -1,12 +1,14 @@
import { useMemo } from 'react';
import cx from 'classnames';
import { calculateChartDimensions } from 'container/DashboardContainer/visualization/charts/utils';
import { MAX_LEGEND_WIDTH } from 'lib/uPlotV2/components/Legend/Legend';
import { LegendConfig, LegendPosition } from 'lib/uPlotV2/components/types';
import { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';
import './ChartLayout.styles.scss';
export interface ChartLayoutProps {
showLegend?: boolean;
legendComponent: (legendPerSet: number) => React.ReactNode;
children: (props: {
chartWidth: number;
@@ -20,6 +22,7 @@ export interface ChartLayoutProps {
config: UPlotConfigBuilder;
}
export default function ChartLayout({
showLegend = true,
legendComponent,
children,
layoutChildren,
@@ -30,6 +33,15 @@ export default function ChartLayout({
}: ChartLayoutProps): JSX.Element {
const chartDimensions = useMemo(
() => {
if (!showLegend) {
return {
width: containerWidth,
height: containerHeight,
legendWidth: 0,
legendHeight: 0,
averageLegendWidth: MAX_LEGEND_WIDTH,
};
}
const legendItemsMap = config.getLegendItems();
const seriesLabels = Object.values(legendItemsMap)
.map((item) => item.label)
@@ -42,7 +54,7 @@ export default function ChartLayout({
});
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[containerWidth, containerHeight, legendConfig],
[containerWidth, containerHeight, legendConfig, showLegend],
);
return (
@@ -60,15 +72,17 @@ export default function ChartLayout({
averageLegendWidth: chartDimensions.averageLegendWidth,
})}
</div>
<div
className="chart-layout__legend-wrapper"
style={{
height: chartDimensions.legendHeight,
width: chartDimensions.legendWidth,
}}
>
{legendComponent(chartDimensions.averageLegendWidth)}
</div>
{showLegend && (
<div
className="chart-layout__legend-wrapper"
style={{
height: chartDimensions.legendHeight,
width: chartDimensions.legendWidth,
}}
>
{legendComponent(chartDimensions.averageLegendWidth)}
</div>
)}
</div>
{layoutChildren}
</div>

View File

@@ -1,10 +1,10 @@
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import ContextMenu from 'periscope/components/ContextMenu';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';
@@ -27,7 +27,6 @@ function BarPanel(props: PanelWrapperProps): JSX.Element {
onToggleModelHandler,
} = props;
const uPlotRef = useRef<uPlot | null>(null);
const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();
const graphRef = useRef<HTMLDivElement>(null);
const [minTimeScale, setMinTimeScale] = useState<number>();
const [maxTimeScale, setMaxTimeScale] = useState<number>();
@@ -36,16 +35,7 @@ function BarPanel(props: PanelWrapperProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const { timezone } = useTimezone();
useEffect(() => {
if (toScrollWidgetId === widget.id) {
graphRef.current?.scrollIntoView({
behavior: 'smooth',
block: 'center',
});
graphRef.current?.focus();
setToScrollWidgetId('');
}
}, [toScrollWidgetId, setToScrollWidgetId, widget.id]);
useScrollWidgetIntoView(widget.id, graphRef);
useEffect((): void => {
const { startTime, endTime } = getTimeRange(queryResponse);

View File

@@ -0,0 +1,114 @@
import { useMemo, useRef } from 'react';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { DashboardCursorSync } from 'lib/uPlotV2/plugins/TooltipPlugin/types';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';
import Histogram from '../../charts/Histogram/Histogram';
import ChartManager from '../../components/ChartManager/ChartManager';
import {
prepareHistogramPanelConfig,
prepareHistogramPanelData,
} from './utils';
import '../Panel.styles.scss';
function HistogramPanel(props: PanelWrapperProps): JSX.Element {
const {
panelMode,
queryResponse,
widget,
isFullViewMode,
onToggleModelHandler,
} = props;
const uPlotRef = useRef<uPlot | null>(null);
const graphRef = useRef<HTMLDivElement>(null);
const containerDimensions = useResizeObserver(graphRef);
const isDarkMode = useIsDarkMode();
const { timezone } = useTimezone();
useScrollWidgetIntoView(widget.id, graphRef);
const config = useMemo(() => {
return prepareHistogramPanelConfig({
widget,
isDarkMode,
apiResponse: queryResponse?.data?.payload as MetricRangePayloadProps,
panelMode,
});
}, [widget, isDarkMode, queryResponse?.data?.payload, panelMode]);
const chartData = useMemo(() => {
if (!queryResponse?.data?.payload) {
return [];
}
return prepareHistogramPanelData({
apiResponse: queryResponse?.data?.payload as MetricRangePayloadProps,
bucketWidth: widget?.bucketWidth,
bucketCount: widget?.bucketCount,
mergeAllActiveQueries: widget?.mergeAllActiveQueries,
});
}, [
queryResponse?.data?.payload,
widget?.bucketWidth,
widget?.bucketCount,
widget?.mergeAllActiveQueries,
]);
const layoutChildren = useMemo(() => {
if (!isFullViewMode || widget.mergeAllActiveQueries) {
return null;
}
return (
<ChartManager
config={config}
alignedData={chartData}
yAxisUnit={widget.yAxisUnit}
onCancel={onToggleModelHandler}
/>
);
}, [
isFullViewMode,
config,
chartData,
widget.yAxisUnit,
onToggleModelHandler,
widget.mergeAllActiveQueries,
]);
return (
<div className="panel-container" ref={graphRef}>
{containerDimensions.width > 0 && containerDimensions.height > 0 && (
<Histogram
config={config}
legendConfig={{
position: widget?.legendPosition ?? LegendPosition.BOTTOM,
}}
plotRef={(plot: uPlot | null): void => {
uPlotRef.current = plot;
}}
onDestroy={(): void => {
uPlotRef.current = null;
}}
isQueriesMerged={widget.mergeAllActiveQueries}
yAxisUnit={widget.yAxisUnit}
decimalPrecision={widget.decimalPrecision}
syncMode={DashboardCursorSync.Crosshair}
timezone={timezone.value}
data={chartData as uPlot.AlignedData}
width={containerDimensions.width}
height={containerDimensions.height}
layoutChildren={layoutChildren}
/>
)}
</div>
);
}
export default HistogramPanel;

View File

@@ -0,0 +1,223 @@
/* eslint-disable simple-import-sort/imports */
import type { UseQueryResult } from 'react-query';
import { render, screen } from 'tests/test-utils';
import { PanelMode } from 'container/DashboardContainer/visualization/panels/types';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { Widgets } from 'types/api/dashboard/getAll';
import {
MetricQueryRangeSuccessResponse,
MetricRangePayloadProps,
} from 'types/api/metrics/getQueryRange';
import HistogramPanel from '../HistogramPanel';
import { HistogramChartProps } from 'container/DashboardContainer/visualization/charts/types';
jest.mock('hooks/useDimensions', () => ({
useResizeObserver: jest.fn().mockReturnValue({ width: 800, height: 400 }),
}));
jest.mock('hooks/useDarkMode', () => ({
useIsDarkMode: jest.fn().mockReturnValue(false),
}));
jest.mock('providers/Timezone', () => ({
__esModule: true,
// Provide a no-op provider component so AllTheProviders can render
default: ({ children }: { children: React.ReactNode }): JSX.Element => (
<>{children}</>
),
// And mock the hook used by HistogramPanel
useTimezone: jest.fn().mockReturnValue({
timezone: { value: 'UTC' },
}),
}));
jest.mock(
'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView',
() => ({
useScrollWidgetIntoView: jest.fn(),
}),
);
jest.mock(
'container/DashboardContainer/visualization/charts/Histogram/Histogram',
() => ({
__esModule: true,
default: (props: HistogramChartProps): JSX.Element => (
<div data-testid="histogram-chart">
<div data-testid="histogram-props">
{JSON.stringify({
legendPosition: props.legendConfig?.position,
isQueriesMerged: props.isQueriesMerged,
yAxisUnit: props.yAxisUnit,
decimalPrecision: props.decimalPrecision,
})}
</div>
{props.layoutChildren}
</div>
),
}),
);
jest.mock(
'container/DashboardContainer/visualization/components/ChartManager/ChartManager',
() => ({
__esModule: true,
default: (): JSX.Element => (
<div data-testid="chart-manager">ChartManager</div>
),
}),
);
function createQueryResponse(
payloadOverrides: Partial<MetricRangePayloadProps> = {},
): { data: { payload: MetricRangePayloadProps } } {
const basePayload: MetricRangePayloadProps = {
data: {
result: [
{
metric: {},
queryName: 'A',
legend: 'Series A',
values: [
[1, '10'],
[2, '20'],
],
},
],
resultType: 'matrix',
newResult: {
data: {
result: [],
resultType: 'matrix',
},
},
},
};
return {
data: {
payload: {
...basePayload,
...payloadOverrides,
},
},
};
}
type WidgetLike = {
id: string;
yAxisUnit: string;
decimalPrecision: number;
legendPosition: LegendPosition;
mergeAllActiveQueries: boolean;
};
function createWidget(overrides: Partial<WidgetLike> = {}): WidgetLike {
return {
id: 'widget-id',
yAxisUnit: 'ms',
decimalPrecision: 2,
legendPosition: LegendPosition.BOTTOM,
mergeAllActiveQueries: false,
...overrides,
};
}
describe('HistogramPanel', () => {
it('renders Histogram when container has dimensions', () => {
const widget = (createWidget() as unknown) as Widgets;
const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
MetricQueryRangeSuccessResponse,
Error
>;
render(
<HistogramPanel
panelMode={PanelMode.DASHBOARD_VIEW}
widget={widget}
queryResponse={queryResponse}
isFullViewMode={false}
onToggleModelHandler={jest.fn()}
onDragSelect={jest.fn()}
/>,
);
expect(screen.getByTestId('histogram-chart')).toBeInTheDocument();
});
it('passes legend position and other props to Histogram', () => {
const widget = (createWidget({
legendPosition: LegendPosition.RIGHT,
}) as unknown) as Widgets;
const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
MetricQueryRangeSuccessResponse,
Error
>;
render(
<HistogramPanel
panelMode={PanelMode.DASHBOARD_VIEW}
widget={widget}
queryResponse={queryResponse}
isFullViewMode={false}
onToggleModelHandler={jest.fn()}
onDragSelect={jest.fn()}
/>,
);
const propsJson = screen.getByTestId('histogram-props').textContent || '{}';
const parsed = JSON.parse(propsJson);
expect(parsed.legendPosition).toBe(LegendPosition.RIGHT);
expect(parsed.yAxisUnit).toBe('ms');
expect(parsed.decimalPrecision).toBe(2);
});
it('renders ChartManager in full view when queries are not merged', () => {
const widget = (createWidget({
mergeAllActiveQueries: false,
}) as unknown) as Widgets;
const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
MetricQueryRangeSuccessResponse,
Error
>;
render(
<HistogramPanel
panelMode={PanelMode.DASHBOARD_VIEW}
widget={widget}
queryResponse={queryResponse}
isFullViewMode
onToggleModelHandler={jest.fn()}
onDragSelect={jest.fn()}
/>,
);
expect(screen.getByTestId('chart-manager')).toBeInTheDocument();
});
it('does not render ChartManager when queries are merged', () => {
const widget = (createWidget({
mergeAllActiveQueries: true,
}) as unknown) as Widgets;
const queryResponse = (createQueryResponse() as unknown) as UseQueryResult<
MetricQueryRangeSuccessResponse,
Error
>;
render(
<HistogramPanel
panelMode={PanelMode.DASHBOARD_VIEW}
widget={widget}
queryResponse={queryResponse}
isFullViewMode
onToggleModelHandler={jest.fn()}
onDragSelect={jest.fn()}
/>,
);
expect(screen.queryByTestId('chart-manager')).not.toBeInTheDocument();
});
});

View File

@@ -0,0 +1,231 @@
import { histogramBucketSizes } from '@grafana/data';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { DEFAULT_BUCKET_COUNT } from 'container/PanelWrapper/constants';
import { getLegend } from 'lib/dashboard/getQueryResults';
import getLabelName from 'lib/getLabelName';
import { DrawStyle } from 'lib/uPlotV2/config/types';
import { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';
import { Widgets } from 'types/api/dashboard/getAll';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { AlignedData } from 'uplot';
import { incrRoundDn, roundDecimals } from 'utils/round';
import { PanelMode } from '../types';
import { buildBaseConfig } from '../utils/baseConfigBuilder';
import {
buildHistogramBuckets,
mergeAlignedDataTables,
prependNullBinToFirstHistogramSeries,
replaceUndefinedWithNullInAlignedData,
} from '../utils/histogram';
export interface PrepareHistogramPanelDataParams {
apiResponse: MetricRangePayloadProps;
bucketWidth?: number;
bucketCount?: number;
mergeAllActiveQueries?: boolean;
}
const BUCKET_OFFSET = 0;
const HIST_SORT = (a: number, b: number): number => a - b;
function extractNumericValues(
result: MetricRangePayloadProps['data']['result'],
): number[] {
const values: number[] = [];
for (const item of result) {
for (const [, valueStr] of item.values) {
values.push(Number.parseFloat(valueStr) || 0);
}
}
return values;
}
function computeSmallestDelta(sortedValues: number[]): number {
if (sortedValues.length <= 1) {
return 0;
}
let smallest = Infinity;
for (let i = 1; i < sortedValues.length; i++) {
const delta = sortedValues[i] - sortedValues[i - 1];
if (delta > 0) {
smallest = Math.min(smallest, delta);
}
}
return smallest === Infinity ? 0 : smallest;
}
function selectBucketSize({
range,
bucketCount,
smallestDelta,
bucketWidthOverride,
}: {
range: number;
bucketCount: number;
smallestDelta: number;
bucketWidthOverride?: number;
}): number {
if (bucketWidthOverride != null && bucketWidthOverride > 0) {
return bucketWidthOverride;
}
const targetSize = range / bucketCount;
for (const candidate of histogramBucketSizes) {
if (targetSize < candidate && candidate >= smallestDelta) {
return candidate;
}
}
return 0;
}
function buildFrames(
result: MetricRangePayloadProps['data']['result'],
mergeAllActiveQueries: boolean,
): number[][] {
const frames: number[][] = result.map((item) =>
item.values.map(([, valueStr]) => Number.parseFloat(valueStr) || 0),
);
if (mergeAllActiveQueries && frames.length > 1) {
const first = frames[0];
for (let i = 1; i < frames.length; i++) {
first.push(...frames[i]);
frames[i] = [];
}
}
return frames;
}
export function prepareHistogramPanelData({
apiResponse,
bucketWidth,
bucketCount: bucketCountProp = DEFAULT_BUCKET_COUNT,
mergeAllActiveQueries = false,
}: PrepareHistogramPanelDataParams): AlignedData {
const bucketCount = bucketCountProp ?? DEFAULT_BUCKET_COUNT;
const result = apiResponse.data.result;
const seriesValues = extractNumericValues(result);
if (seriesValues.length === 0) {
return [[]];
}
const sorted = [...seriesValues].sort((a, b) => a - b);
const min = sorted[0];
const max = sorted[sorted.length - 1];
const range = max - min;
const smallestDelta = computeSmallestDelta(sorted);
let bucketSize = selectBucketSize({
range,
bucketCount,
smallestDelta,
bucketWidthOverride: bucketWidth,
});
if (bucketSize <= 0) {
bucketSize = range > 0 ? range / bucketCount : 1;
}
const getBucket = (v: number): number =>
roundDecimals(incrRoundDn(v - BUCKET_OFFSET, bucketSize) + BUCKET_OFFSET, 9);
const frames = buildFrames(result, mergeAllActiveQueries);
const histogramsPerSeries: AlignedData[] = frames
.filter((frame) => frame.length > 0)
.map((frame) => buildHistogramBuckets(frame, getBucket, HIST_SORT));
if (histogramsPerSeries.length === 0) {
return [[]];
}
const mergedHistogramData = mergeAlignedDataTables(histogramsPerSeries);
replaceUndefinedWithNullInAlignedData(mergedHistogramData);
prependNullBinToFirstHistogramSeries(mergedHistogramData, bucketSize);
return mergedHistogramData;
}
export function prepareHistogramPanelConfig({
widget,
apiResponse,
panelMode,
isDarkMode,
}: {
widget: Widgets;
apiResponse: MetricRangePayloadProps;
panelMode: PanelMode;
isDarkMode: boolean;
}): UPlotConfigBuilder {
const builder = buildBaseConfig({
widget,
isDarkMode,
apiResponse,
panelMode,
panelType: PANEL_TYPES.HISTOGRAM,
});
builder.setCursor({
drag: {
x: false,
y: false,
setScale: true,
},
focus: {
prox: 1e3,
},
});
builder.addScale({
scaleKey: 'x',
time: false,
auto: true,
});
builder.addScale({
scaleKey: 'y',
time: false,
auto: true,
min: 0,
});
const currentQuery = widget.query;
const mergeAllActiveQueries = widget?.mergeAllActiveQueries ?? false;
// When merged, data has only one y column; add one series to match. Otherwise add one per result.
if (mergeAllActiveQueries) {
builder.addSeries({
label: '',
scaleKey: 'y',
drawStyle: DrawStyle.Bar,
panelType: PANEL_TYPES.HISTOGRAM,
colorMapping: widget.customLegendColors ?? {},
spanGaps: false,
barWidthFactor: 1,
pointSize: 5,
lineColor: '#3f5ecc',
fillColor: '#4E74F8',
isDarkMode,
});
} else {
apiResponse.data.result.forEach((series) => {
const baseLabelName = getLabelName(
series.metric,
series.queryName || '', // query
series.legend || '',
);
const label = currentQuery
? getLegend(series, currentQuery, baseLabelName)
: baseLabelName;
builder.addSeries({
label: label,
scaleKey: 'y',
drawStyle: DrawStyle.Bar,
panelType: PANEL_TYPES.HISTOGRAM,
colorMapping: widget.customLegendColors ?? {},
spanGaps: false,
barWidthFactor: 1,
pointSize: 5,
isDarkMode,
});
});
}
return builder;
}

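A sketch of calling prepareHistogramPanelData above with a minimal payload. The fixture shape mirrors the HistogramPanel test earlier in this diff; exact bucket boundaries depend on histogramBucketSizes from @grafana/data, so no concrete output is asserted here.

import { AlignedData } from 'uplot';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { prepareHistogramPanelData } from './utils';

const payload = ({
  data: {
    result: [
      {
        metric: {},
        queryName: 'A',
        legend: '',
        values: [
          [1, '10'],
          [2, '20'],
          [3, '12'],
        ],
      },
    ],
    resultType: 'matrix',
  },
} as unknown) as MetricRangePayloadProps;

// Returns [[bucketStart0, bucketStart1, ...], [count0, count1, ...]], with a
// leading empty bin prepended so bars align visually across series.
const aligned: AlignedData = prepareHistogramPanelData({
  apiResponse: payload,
  bucketCount: 5,
});
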
View File

@@ -2,12 +2,12 @@ import { useEffect, useMemo, useRef, useState } from 'react';
import TimeSeries from 'container/DashboardContainer/visualization/charts/TimeSeries/TimeSeries';
import ChartManager from 'container/DashboardContainer/visualization/components/ChartManager/ChartManager';
import { usePanelContextMenu } from 'container/DashboardContainer/visualization/hooks/usePanelContextMenu';
import { useScrollWidgetIntoView } from 'container/DashboardContainer/visualization/hooks/useScrollWidgetIntoView';
import { PanelWrapperProps } from 'container/PanelWrapper/panelWrapper.types';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useResizeObserver } from 'hooks/useDimensions';
import { LegendPosition } from 'lib/uPlotV2/components/types';
import { ContextMenu } from 'periscope/components/ContextMenu';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { useTimezone } from 'providers/Timezone';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import uPlot from 'uplot';
@@ -26,7 +26,6 @@ function TimeSeriesPanel(props: PanelWrapperProps): JSX.Element {
isFullViewMode,
onToggleModelHandler,
} = props;
const { toScrollWidgetId, setToScrollWidgetId } = useDashboard();
const graphRef = useRef<HTMLDivElement>(null);
const [minTimeScale, setMinTimeScale] = useState<number>();
const [maxTimeScale, setMaxTimeScale] = useState<number>();
@@ -35,16 +34,7 @@ function TimeSeriesPanel(props: PanelWrapperProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const { timezone } = useTimezone();
useEffect(() => {
if (toScrollWidgetId === widget.id) {
graphRef.current?.scrollIntoView({
behavior: 'smooth',
block: 'center',
});
graphRef.current?.focus();
setToScrollWidgetId('');
}
}, [toScrollWidgetId, setToScrollWidgetId, widget.id]);
useScrollWidgetIntoView(widget.id, graphRef);
useEffect((): void => {
const { startTime, endTime } = getTimeRange(queryResponse);

View File

@@ -19,9 +19,9 @@ export interface BaseConfigBuilderProps {
widget: Widgets;
apiResponse: MetricRangePayloadProps;
isDarkMode: boolean;
onClick: OnClickPluginOpts['onClick'];
onDragSelect: (startTime: number, endTime: number) => void;
timezone: Timezone;
onClick?: OnClickPluginOpts['onClick'];
onDragSelect?: (startTime: number, endTime: number) => void;
timezone?: Timezone;
panelMode: PanelMode;
panelType: PANEL_TYPES;
minTimeScale?: number;
@@ -40,8 +40,10 @@ export function buildBaseConfig({
minTimeScale,
maxTimeScale,
}: BaseConfigBuilderProps): UPlotConfigBuilder {
const tzDate = (timestamp: number): Date =>
uPlot.tzDate(new Date(timestamp * 1e3), timezone.value);
const tzDate = timezone
? (timestamp: number): Date =>
uPlot.tzDate(new Date(timestamp * 1e3), timezone.value)
: undefined;
const builder = new UPlotConfigBuilder({
onDragSelect,

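The tzDate change above, isolated as a sketch. uPlot.tzDate is the real uPlot helper; Timezone here is a stand-in for the imported type.

import uPlot from 'uplot';

type Timezone = { value: string };

// With timezone now optional, tzDate is only constructed when a timezone is
// provided; when it is undefined, uPlot falls back to the local time zone.
const makeTzDate = (timezone?: Timezone): ((timestamp: number) => Date) | undefined =>
  timezone
    ? (timestamp: number): Date => uPlot.tzDate(new Date(timestamp * 1e3), timezone.value)
    : undefined;
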
View File

@@ -0,0 +1,225 @@
import {
NULL_EXPAND,
NULL_REMOVE,
NULL_RETAIN,
} from 'container/PanelWrapper/constants';
import { AlignedData } from 'uplot';
/**
* Expands contiguous runs of `null` values to the left and right of their
* original positions so that visual gaps in the series are continuous.
*
* This is used when `NULL_EXPAND` mode is selected while joining series.
*/
function propagateNullsAcrossNeighbors(
seriesValues: Array<number | null>,
nullIndices: number[],
alignedLength: number,
): void {
for (
let i = 0, currentIndex, lastExpandedNullIndex = -1;
i < nullIndices.length;
i++
) {
const nullIndex = nullIndices[i];
if (nullIndex > lastExpandedNullIndex) {
// expand left until we hit a non-null value
currentIndex = nullIndex - 1;
while (currentIndex >= 0 && seriesValues[currentIndex] == null) {
seriesValues[currentIndex--] = null;
}
// expand right until we hit a non-null value
currentIndex = nullIndex + 1;
while (currentIndex < alignedLength && seriesValues[currentIndex] == null) {
seriesValues[(lastExpandedNullIndex = currentIndex++)] = null;
}
}
}
}
/**
* Merges multiple uPlot `AlignedData` tables into a single aligned table.
*
* - Merges and sorts all distinct x-values from each table.
* - Re-aligns every series onto the merged x-axis.
* - Applies per-series null handling (`NULL_REMOVE`, `NULL_RETAIN`, `NULL_EXPAND`).
*/
/* eslint-disable sonarjs/cognitive-complexity */
export function mergeAlignedDataTables(
alignedTables: AlignedData[],
nullModes?: number[][],
): AlignedData {
const mergedXValues = new Set<number>();
// Collect all unique x-values from every table.
for (let tableIndex = 0; tableIndex < alignedTables.length; tableIndex++) {
const table = alignedTables[tableIndex];
const xValues = table[0];
const xLength = xValues.length;
for (let i = 0; i < xLength; i++) {
mergedXValues.add(xValues[i]);
}
}
// Sorted, merged x-axis used by the final result.
const alignedData: (number | null | undefined)[][] = [
Array.from(mergedXValues).sort((a, b) => a - b),
];
const alignedLength = alignedData[0].length;
// Map from x-value to its index in the merged x-axis.
const xValueToIndexMap = new Map<number, number>();
for (let i = 0; i < alignedLength; i++) {
xValueToIndexMap.set(alignedData[0][i] as number, i);
}
// Re-align all series from all tables onto the merged x-axis.
for (let tableIndex = 0; tableIndex < alignedTables.length; tableIndex++) {
const table = alignedTables[tableIndex];
const xValues = table[0];
for (let seriesIndex = 1; seriesIndex < table.length; seriesIndex++) {
const seriesValues = table[seriesIndex];
const alignedSeriesValues = Array(alignedLength).fill(undefined);
const nullHandlingMode = nullModes
? nullModes[tableIndex][seriesIndex]
: NULL_RETAIN;
const nullIndices: number[] = [];
for (let i = 0; i < seriesValues.length; i++) {
const valueAtPoint = seriesValues[i];
const alignedIndex = xValueToIndexMap.get(xValues[i]);
if (alignedIndex == null) {
continue;
}
if (valueAtPoint === null) {
if (nullHandlingMode !== NULL_REMOVE) {
alignedSeriesValues[alignedIndex] = valueAtPoint;
if (nullHandlingMode === NULL_EXPAND) {
nullIndices.push(alignedIndex);
}
}
} else {
alignedSeriesValues[alignedIndex] = valueAtPoint;
}
}
// Optionally expand nulls to visually preserve gaps.
propagateNullsAcrossNeighbors(
alignedSeriesValues,
nullIndices,
alignedLength,
);
alignedData.push(alignedSeriesValues);
}
}
return alignedData as AlignedData;
}
/**
* Builds histogram buckets from raw values.
*
* - Each value is mapped into a bucket via `getBucketForValue`.
* - Counts how many values fall into each bucket.
* - Optionally sorts buckets using the provided comparator.
*/
export function buildHistogramBuckets(
values: number[],
getBucketForValue: (value: number) => number,
sortBuckets?: ((a: number, b: number) => number) | null,
): AlignedData {
const bucketMap = new Map<number, { value: number; count: number }>();
for (let i = 0; i < values.length; i++) {
let value = values[i];
if (value != null) {
value = getBucketForValue(value);
}
const bucket = bucketMap.get(value);
if (bucket) {
bucket.count++;
} else {
bucketMap.set(value, { value, count: 1 });
}
}
const buckets = [...bucketMap.values()];
if (sortBuckets) {
buckets.sort((a, b) => sortBuckets(a.value, b.value));
}
const bucketValues = Array(buckets.length);
const bucketCounts = Array(buckets.length);
for (let i = 0; i < buckets.length; i++) {
bucketValues[i] = buckets[i].value;
bucketCounts[i] = buckets[i].count;
}
return [bucketValues, bucketCounts];
}
/**
* Mutates an `AlignedData` instance, replacing all `undefined` entries
* with explicit `null` values so uPlot treats them as gaps.
*/
export function replaceUndefinedWithNullInAlignedData(
data: AlignedData,
): AlignedData {
const seriesList = data as (number | null | undefined)[][];
for (let seriesIndex = 0; seriesIndex < seriesList.length; seriesIndex++) {
for (
let pointIndex = 0;
pointIndex < seriesList[seriesIndex].length;
pointIndex++
) {
if (seriesList[seriesIndex][pointIndex] === undefined) {
seriesList[seriesIndex][pointIndex] = null;
}
}
}
return data;
}
/**
* Ensures the first histogram series has a leading "empty" bin so that
* all series line up visually when rendered as bars.
*
* - Prepends a new x-value (first x - `bucketSize`) to the first series.
* - Prepends `null` to all subsequent series at the same index.
*/
export function prependNullBinToFirstHistogramSeries(
alignedData: AlignedData,
bucketSize: number,
): void {
const seriesList = alignedData as (number | null)[][];
if (
seriesList.length > 0 &&
seriesList[0].length > 0 &&
seriesList[0][0] !== null
) {
seriesList[0].unshift(seriesList[0][0] - bucketSize);
for (let seriesIndex = 1; seriesIndex < seriesList.length; seriesIndex++) {
seriesList[seriesIndex].unshift(null);
}
}
}

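A worked example for the helpers above, with values chosen for easy arithmetic. The import path is illustrative.

import { AlignedData } from 'uplot';
import {
  buildHistogramBuckets,
  mergeAlignedDataTables,
  replaceUndefinedWithNullInAlignedData,
} from './histogram';

// Bucket size 2, offset 0: 1 -> 0; 2, 2, 3 -> 2; 7 -> 6
const getBucket = (v: number): number => Math.floor(v / 2) * 2;
const buckets = buildHistogramBuckets([1, 2, 2, 3, 7], getBucket, (a, b) => a - b);
// buckets === [[0, 2, 6], [1, 3, 1]]

// Merging two tables with partially overlapping x-axes:
const merged = mergeAlignedDataTables([
  [[0, 2], [1, 1]] as AlignedData,
  [[2, 4], [5, 5]] as AlignedData,
]);
// Merged x-axis is [0, 2, 4]; missing points are left undefined:
//   series 1 -> [1, 1, undefined], series 2 -> [undefined, 5, 5]
replaceUndefinedWithNullInAlignedData(merged); // undefined -> null so uPlot renders gaps
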
View File

@@ -1,8 +1,8 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import BarPanel from 'container/DashboardContainer/visualization/panels/BarPanel/BarPanel';
import HistogramPanel from 'container/DashboardContainer/visualization/panels/HistogramPanel/HistogramPanel';
import TimeSeriesPanel from '../DashboardContainer/visualization/panels/TimeSeriesPanel/TimeSeriesPanel';
import HistogramPanelWrapper from './HistogramPanelWrapper';
import ListPanelWrapper from './ListPanelWrapper';
import PiePanelWrapper from './PiePanelWrapper';
import TablePanelWrapper from './TablePanelWrapper';
@@ -17,7 +17,7 @@ export const PanelTypeVsPanelWrapper = {
[PANEL_TYPES.EMPTY_WIDGET]: null,
[PANEL_TYPES.PIE]: PiePanelWrapper,
[PANEL_TYPES.BAR]: BarPanel,
[PANEL_TYPES.HISTOGRAM]: HistogramPanelWrapper,
[PANEL_TYPES.HISTOGRAM]: HistogramPanel,
};
export const DEFAULT_BUCKET_COUNT = 30;

View File

@@ -0,0 +1,8 @@
import { HistogramTooltipProps } from '../types';
import Tooltip from './Tooltip';
export default function HistogramTooltip(
props: HistogramTooltipProps,
): JSX.Element {
return <Tooltip {...props} showTooltipHeader={false} />;
}

View File

@@ -16,12 +16,16 @@ export default function Tooltip({
uPlotInstance,
timezone,
content,
showTooltipHeader = true,
}: TooltipProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const tooltipContent = content ?? [];
const headerTitle = useMemo(() => {
if (!showTooltipHeader) {
return null;
}
const data = uPlotInstance.data;
const cursorIdx = uPlotInstance.cursor.idx;
if (cursorIdx == null) {
@@ -30,7 +34,12 @@ export default function Tooltip({
return dayjs(data[0][cursorIdx] * 1000)
.tz(timezone)
.format(DATE_TIME_FORMATS.MONTH_DATETIME_SECONDS);
}, [timezone, uPlotInstance.data, uPlotInstance.cursor.idx]);
}, [
timezone,
uPlotInstance.data,
uPlotInstance.cursor.idx,
showTooltipHeader,
]);
return (
<div
@@ -39,9 +48,11 @@ export default function Tooltip({
isDarkMode ? 'darkMode' : 'lightMode',
)}
>
<div className="uplot-tooltip-header">
<span>{headerTitle}</span>
</div>
{showTooltipHeader && (
<div className="uplot-tooltip-header">
<span>{headerTitle}</span>
</div>
)}
<div
style={{
height: Math.min(

View File

@@ -60,6 +60,7 @@ export interface TooltipRenderArgs {
}
export interface BaseTooltipProps {
showTooltipHeader?: boolean;
timezone: string;
yAxisUnit?: string;
decimalPrecision?: PrecisionOption;
@@ -74,7 +75,14 @@ export interface BarTooltipProps extends BaseTooltipProps, TooltipRenderArgs {
isStackedBarChart?: boolean;
}
export type TooltipProps = TimeSeriesTooltipProps | BarTooltipProps;
export interface HistogramTooltipProps
extends BaseTooltipProps,
TooltipRenderArgs {}
export type TooltipProps =
| TimeSeriesTooltipProps
| BarTooltipProps
| HistogramTooltipProps;
export enum LegendPosition {
BOTTOM = 'bottom',

View File

@@ -387,7 +387,7 @@ export class UPlotConfigBuilder extends ConfigBuilder<
} = this.getVisibilityResolutionState();
config.series = [
{ value: (): string => '' }, // Base series for timestamp
{ value: (): string => '', label: 'Timestamp' }, // Base series for timestamp
...this.series.map((s) => {
const series = s.getConfig();
// Stored visibility[0] is x-axis/time; data series start at visibility[1]

View File

@@ -49,7 +49,7 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
}: {
resolvedLineColor: string;
}): Partial<Series> {
const { lineWidth, lineStyle, lineCap } = this.props;
const { lineWidth, lineStyle, lineCap, fillColor } = this.props;
const lineConfig: Partial<Series> = {
stroke: resolvedLineColor,
width: lineWidth ?? 2,
@@ -63,8 +63,12 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
lineConfig.cap = lineCap;
}
if (this.props.panelType === PANEL_TYPES.BAR) {
if (fillColor) {
lineConfig.fill = fillColor;
} else if (this.props.panelType === PANEL_TYPES.BAR) {
lineConfig.fill = resolvedLineColor;
} else if (this.props.panelType === PANEL_TYPES.HISTOGRAM) {
lineConfig.fill = `${resolvedLineColor}40`;
}
return lineConfig;
@@ -147,6 +151,8 @@ export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
pointsConfig.show = false;
} else if (showPoints === VisibilityMode.Always) {
pointsConfig.show = true;
} else {
pointsConfig.show = false; // default to hidden
}
return pointsConfig;

View File

@@ -175,6 +175,7 @@ export interface SeriesProps extends LineConfig, PointsConfig, BarConfig {
pointsBuilder?: Series.Points.Show;
show?: boolean;
spanGaps?: boolean;
fillColor?: string;
isDarkMode?: boolean;
stepInterval?: number;
}

View File

@@ -11,22 +11,6 @@ import { Threshold } from '../hooks/types';
import { findMinMaxThresholdValues } from './threshold';
import { LogScaleLimits, RangeFunctionParams } from './types';
/**
* Rounds a number down to the nearest multiple of incr.
* Used for linear scale min so the axis starts on a clean tick.
*/
export function incrRoundDn(num: number, incr: number): number {
return Math.floor(num / incr) * incr;
}
/**
* Rounds a number up to the nearest multiple of incr.
* Used for linear scale max so the axis ends on a clean tick.
*/
export function incrRoundUp(num: number, incr: number): number {
return Math.ceil(num / incr) * incr;
}
/**
* Snaps min/max/softMin/softMax to valid log-scale values (powers of logBase).
* Only applies when distribution is logarithmic; otherwise returns limits unchanged.
@@ -213,25 +197,6 @@ function getLogScaleRange(
);
}
/**
* Snaps linear scale min down and max up to whole numbers so axis bounds are clean.
*/
function roundLinearRange(minMax: Range.MinMax): Range.MinMax {
const [currentMin, currentMax] = minMax;
let roundedMin = currentMin;
let roundedMax = currentMax;
if (roundedMin != null) {
roundedMin = incrRoundDn(roundedMin, 1);
}
if (roundedMax != null) {
roundedMax = incrRoundUp(roundedMax, 1);
}
return [roundedMin, roundedMax];
}
/**
* Snaps log-scale [min, max] to exact powers of logBase (nearest magnitude below/above).
* If min and max would be equal after snapping, max is increased by one magnitude so the range is valid.
@@ -330,7 +295,6 @@ export function createRangeFunction(
if (scale.distr === 1) {
minMax = getLinearScaleRange(minMax, params, dataMin, dataMax);
minMax = roundLinearRange(minMax);
} else if (scale.distr === 3) {
minMax = getLogScaleRange(minMax, params, dataMin, dataMax, logBase);
const logFn = scale.log === 2 ? Math.log2 : Math.log10;

View File

@@ -3,3 +3,33 @@
* Example: 1.5 → 2, 1.49 → 1
*/
export const roundHalfUp = (value: number): number => Math.floor(value + 0.5);
/**
* Rounds a number down to the nearest multiple of incr.
* Used for linear scale min so the axis starts on a clean tick.
*/
export function incrRoundDn(num: number, incr: number): number {
return Math.floor(num / incr) * incr;
}
/**
* Rounds a number up to the nearest multiple of incr.
* Used for linear scale max so the axis ends on a clean tick.
*/
export function incrRoundUp(num: number, incr: number): number {
return Math.ceil(num / incr) * incr;
}
/**
* Rounds a number to the nearest multiple of 10^dec.
* Used for decimal precision.
*/
export function roundDecimals(val: number, dec = 0): number {
if (Number.isInteger(val)) {
return val;
}
const p = 10 ** dec;
const n = val * p * (1 + Number.EPSILON);
return Math.round(n) / p;
}
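
For illustration, a minimal Go transcription of the new roundDecimals helper (a sketch, not code from this diff) shows why the 1 + Number.EPSILON nudge matters: 1.005 is stored in binary floating point as 1.00499999999999989, so a naive round at two decimals lands on 1.00 instead of 1.01.

package main

import (
	"fmt"
	"math"
)

// roundDecimals mirrors the TypeScript helper above: round val to dec
// decimal places, nudging the scaled value up by one ULP so inputs that
// sit just below a half-step due to binary representation still round up.
func roundDecimals(val float64, dec int) float64 {
	if val == math.Trunc(val) {
		return val // integers pass through unchanged
	}
	p := math.Pow(10, float64(dec))
	n := val * p * (1 + 2.220446049250313e-16) // 2^-52, i.e. Number.EPSILON
	return math.Round(n) / p
}

func main() {
	fmt.Println(roundDecimals(1.005, 2))     // 1.01
	fmt.Println(math.Round(1.005*100) / 100) // 1 — the naive version rounds down
}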

go.mod
View File

@@ -3,23 +3,22 @@ module github.com/SigNoz/signoz
go 1.24.0
require (
dario.cat/mergo v1.0.1
dario.cat/mergo v1.0.2
github.com/AfterShip/clickhouse-sql-parser v0.4.16
github.com/ClickHouse/clickhouse-go/v2 v2.40.1
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
github.com/SigNoz/signoz-otel-collector v0.129.10-rc.9
github.com/SigNoz/signoz-otel-collector v0.142.1-rc.1
github.com/antlr4-go/antlr/v4 v4.13.1
github.com/antonmedv/expr v1.15.3
github.com/bytedance/sonic v1.14.1
github.com/cespare/xxhash/v2 v2.3.0
github.com/coreos/go-oidc/v3 v3.14.1
github.com/coreos/go-oidc/v3 v3.16.0
github.com/dgraph-io/ristretto/v2 v2.3.0
github.com/dustin/go-humanize v1.0.1
github.com/gin-gonic/gin v1.11.0
github.com/go-co-op/gocron v1.30.1
github.com/go-openapi/runtime v0.28.0
github.com/go-openapi/strfmt v0.23.0
github.com/go-openapi/strfmt v0.24.0
github.com/go-redis/redismock/v9 v9.2.0
github.com/go-viper/mapstructure/v2 v2.4.0
github.com/gojek/heimdall/v7 v7.0.3
@@ -30,22 +29,22 @@ require (
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
github.com/huandu/go-sqlbuilder v1.35.0
github.com/jackc/pgx/v5 v5.7.6
github.com/json-iterator/go v1.1.12
github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12
github.com/knadh/koanf v1.5.0
github.com/knadh/koanf/v2 v2.2.0
github.com/mailru/easyjson v0.7.7
github.com/open-telemetry/opamp-go v0.19.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.128.0
github.com/knadh/koanf/v2 v2.3.0
github.com/mailru/easyjson v0.9.0
github.com/open-telemetry/opamp-go v0.22.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.142.0
github.com/openfga/api/proto v0.0.0-20250909172242-b4b2a12f5c67
github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20250428093642-7aeebe78bbfe
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.66.1
github.com/prometheus/prometheus v0.304.1
github.com/prometheus/common v0.67.4
github.com/prometheus/prometheus v0.308.0
github.com/redis/go-redis/extra/redisotel/v9 v9.15.1
github.com/redis/go-redis/v9 v9.15.1
github.com/redis/go-redis/v9 v9.17.2
github.com/rs/cors v1.11.1
github.com/russellhaering/gosaml2 v0.9.0
github.com/russellhaering/goxmldsig v1.2.0
@@ -54,7 +53,7 @@ require (
github.com/sethvargo/go-password v0.2.0
github.com/smartystreets/goconvey v1.8.1
github.com/soheilhy/cmux v0.1.5
github.com/spf13/cobra v1.10.1
github.com/spf13/cobra v1.10.2
github.com/srikanthccv/ClickHouse-go-mock v0.13.0
github.com/stretchr/testify v1.11.1
github.com/swaggest/jsonschema-go v0.3.78
@@ -64,43 +63,59 @@ require (
github.com/uptrace/bun/dialect/pgdialect v1.2.9
github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
github.com/uptrace/bun/extra/bunotel v1.2.9
go.opentelemetry.io/collector/confmap v1.34.0
go.opentelemetry.io/collector/otelcol v0.128.0
go.opentelemetry.io/collector/pdata v1.34.0
go.opentelemetry.io/collector/confmap v1.48.0
go.opentelemetry.io/collector/otelcol v0.142.0
go.opentelemetry.io/collector/pdata v1.48.0
go.opentelemetry.io/contrib/config v0.10.0
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.63.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel/metric v1.38.0
go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
go.opentelemetry.io/otel v1.39.0
go.opentelemetry.io/otel/metric v1.39.0
go.opentelemetry.io/otel/sdk v1.39.0
go.opentelemetry.io/otel/trace v1.39.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
go.uber.org/zap v1.27.1
golang.org/x/crypto v0.46.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/net v0.47.0
golang.org/x/oauth2 v0.30.0
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
golang.org/x/net v0.48.0
golang.org/x/oauth2 v0.33.0
golang.org/x/sync v0.19.0
golang.org/x/text v0.32.0
google.golang.org/protobuf v1.36.9
google.golang.org/protobuf v1.36.10
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/apimachinery v0.34.0
k8s.io/apimachinery v0.35.0-alpha.0
modernc.org/sqlite v1.39.1
)
require (
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.32.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.1 // indirect
github.com/aws/smithy-go v1.23.2 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.1 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/goccy/go-yaml v1.19.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/swaggest/refl v1.4.0 // indirect
@@ -108,24 +123,27 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 // indirect
go.opentelemetry.io/collector/config/configretry v1.34.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.opentelemetry.io/collector/client v1.48.0 // indirect
go.opentelemetry.io/collector/config/configoptional v1.48.0 // indirect
go.opentelemetry.io/collector/config/configretry v1.48.0 // indirect
go.opentelemetry.io/collector/exporter/exporterhelper v0.142.0 // indirect
go.opentelemetry.io/collector/pdata/xpdata v0.142.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/tools/godoc v0.1.0-deprecated // indirect
modernc.org/libc v1.66.10 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
)
require (
cel.dev/expr v0.24.0 // indirect
cloud.google.com/go/auth v0.16.1 // indirect
cel.dev/expr v0.25.1 // indirect
cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.8.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
github.com/ClickHouse/ch-go v0.67.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Yiling-J/theine-go v0.6.2 // indirect
@@ -133,35 +151,35 @@ require (
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.55.7 // indirect
github.com/aws/aws-sdk-go v1.55.8 // indirect
github.com/beevik/etree v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/coder/quartz v0.1.2 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/coreos/go-systemd/v22 v22.6.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
github.com/edsrzf/mmap-go v1.2.0 // indirect
github.com/elastic/lunes v0.1.0 // indirect
github.com/elastic/lunes v0.2.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/expr-lang/expr v1.17.5
github.com/expr-lang/expr v1.17.7
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.7.1 // indirect
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/errors v0.22.3 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/loads v0.22.0 // indirect
@@ -178,19 +196,19 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.1 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/go-version v1.8.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/memberlist v0.5.1 // indirect
@@ -206,21 +224,21 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-syslog/v4 v4.2.0 // indirect
github.com/leodido/go-syslog/v4 v4.3.0 // indirect
github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/miekg/dns v1.1.65 // indirect
github.com/miekg/dns v1.1.68 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -229,14 +247,14 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/natefinch/wrap v0.2.0 // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/oklog/run v1.2.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/oklog/ulid/v2 v2.1.1
github.com/open-feature/go-sdk v1.17.0
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.142.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0 // indirect
github.com/openfga/openfga v1.10.1
github.com/paulmach/orb v0.11.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
@@ -246,10 +264,10 @@ require (
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/pressly/goose/v3 v3.25.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/exporter-toolkit v0.14.0 // indirect
github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/prometheus/sigv4 v0.1.2 // indirect
github.com/prometheus/exporter-toolkit v0.15.0 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/prometheus/sigv4 v0.3.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sagikazarmark/locafero v0.9.0 // indirect
@@ -257,7 +275,7 @@ require (
github.com/segmentio/asm v1.2.0 // indirect
github.com/segmentio/backo-go v1.0.1 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.5 // indirect
github.com/shirou/gopsutil/v4 v4.25.11 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
@@ -272,9 +290,9 @@ require (
github.com/subosito/gotenv v1.6.0 // indirect
github.com/swaggest/openapi-go v0.2.60
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/trivago/tgo v1.0.7 // indirect
github.com/valyala/fastjson v1.6.4 // indirect
@@ -283,83 +301,82 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.mongodb.org/mongo-driver v1.17.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v1.34.0 // indirect
go.opentelemetry.io/collector/component/componentstatus v0.128.0 // indirect
go.opentelemetry.io/collector/component/componenttest v0.128.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.128.0 // indirect
go.opentelemetry.io/collector/confmap/provider/envprovider v1.34.0 // indirect
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.34.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect
go.opentelemetry.io/collector/connector v0.128.0 // indirect
go.opentelemetry.io/collector/connector/connectortest v0.128.0 // indirect
go.opentelemetry.io/collector/connector/xconnector v0.128.0 // indirect
go.opentelemetry.io/collector/consumer v1.34.0 // indirect
go.opentelemetry.io/collector/consumer/consumererror v0.128.0 // indirect
go.opentelemetry.io/collector/consumer/consumertest v0.128.0 // indirect
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 // indirect
go.opentelemetry.io/collector/exporter v0.128.0 // indirect
go.opentelemetry.io/collector/exporter/exportertest v0.128.0 // indirect
go.opentelemetry.io/collector/exporter/xexporter v0.128.0 // indirect
go.opentelemetry.io/collector/extension v1.34.0 // indirect
go.opentelemetry.io/collector/extension/extensioncapabilities v0.128.0 // indirect
go.opentelemetry.io/collector/extension/extensiontest v0.128.0 // indirect
go.opentelemetry.io/collector/extension/xextension v0.128.0 // indirect
go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
go.opentelemetry.io/collector/internal/fanoutconsumer v0.128.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.128.0 // indirect
go.opentelemetry.io/collector/pdata/testdata v0.128.0 // indirect
go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
go.opentelemetry.io/collector/pipeline/xpipeline v0.128.0 // indirect
go.opentelemetry.io/collector/processor v1.34.0 // indirect
go.opentelemetry.io/collector/processor/processorhelper v0.128.0 // indirect
go.opentelemetry.io/collector/processor/processortest v0.128.0 // indirect
go.opentelemetry.io/collector/processor/xprocessor v0.128.0 // indirect
go.opentelemetry.io/collector/receiver v1.34.0 // indirect
go.opentelemetry.io/collector/receiver/receiverhelper v0.128.0 // indirect
go.opentelemetry.io/collector/receiver/receivertest v0.128.0 // indirect
go.opentelemetry.io/collector/receiver/xreceiver v0.128.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.0
go.opentelemetry.io/collector/service v0.128.0 // indirect
go.opentelemetry.io/collector/service/hostcapabilities v0.128.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
go.opentelemetry.io/contrib/otelconf v0.16.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.58.0
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect
go.opentelemetry.io/otel/sdk/metric v1.38.0
go.opentelemetry.io/proto/otlp v1.8.0 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/component v1.48.0 // indirect
go.opentelemetry.io/collector/component/componentstatus v0.142.0 // indirect
go.opentelemetry.io/collector/component/componenttest v0.142.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.142.0 // indirect
go.opentelemetry.io/collector/confmap/provider/envprovider v1.48.0 // indirect
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.48.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.142.0 // indirect
go.opentelemetry.io/collector/connector v0.142.0 // indirect
go.opentelemetry.io/collector/connector/connectortest v0.142.0 // indirect
go.opentelemetry.io/collector/connector/xconnector v0.142.0 // indirect
go.opentelemetry.io/collector/consumer v1.48.0 // indirect
go.opentelemetry.io/collector/consumer/consumererror v0.142.0 // indirect
go.opentelemetry.io/collector/consumer/consumertest v0.142.0 // indirect
go.opentelemetry.io/collector/consumer/xconsumer v0.142.0 // indirect
go.opentelemetry.io/collector/exporter v1.48.0 // indirect
go.opentelemetry.io/collector/exporter/exportertest v0.142.0 // indirect
go.opentelemetry.io/collector/exporter/xexporter v0.142.0 // indirect
go.opentelemetry.io/collector/extension v1.48.0 // indirect
go.opentelemetry.io/collector/extension/extensioncapabilities v0.142.0 // indirect
go.opentelemetry.io/collector/extension/extensiontest v0.142.0 // indirect
go.opentelemetry.io/collector/extension/xextension v0.142.0 // indirect
go.opentelemetry.io/collector/featuregate v1.48.0 // indirect
go.opentelemetry.io/collector/internal/fanoutconsumer v0.142.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.142.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.142.0 // indirect
go.opentelemetry.io/collector/pdata/testdata v0.142.0 // indirect
go.opentelemetry.io/collector/pipeline v1.48.0 // indirect
go.opentelemetry.io/collector/pipeline/xpipeline v0.142.0 // indirect
go.opentelemetry.io/collector/processor v1.48.0 // indirect
go.opentelemetry.io/collector/processor/processorhelper v0.142.0 // indirect
go.opentelemetry.io/collector/processor/processortest v0.142.0 // indirect
go.opentelemetry.io/collector/processor/xprocessor v0.142.0 // indirect
go.opentelemetry.io/collector/receiver v1.48.0 // indirect
go.opentelemetry.io/collector/receiver/receiverhelper v0.142.0 // indirect
go.opentelemetry.io/collector/receiver/receivertest v0.142.0 // indirect
go.opentelemetry.io/collector/receiver/xreceiver v0.142.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.1-0.20250610090210-188191247685
go.opentelemetry.io/collector/service v0.142.0 // indirect
go.opentelemetry.io/collector/service/hostcapabilities v0.142.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect
go.opentelemetry.io/contrib/otelconf v0.18.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.60.0
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 // indirect
go.opentelemetry.io/otel/log v0.15.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.39.0
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.39.0 // indirect
gonum.org/v1/gonum v0.16.0 // indirect
google.golang.org/api v0.236.0
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.75.1 // indirect
google.golang.org/api v0.257.0
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/grpc v1.77.0 // indirect
gopkg.in/telebot.v3 v3.3.8 // indirect
k8s.io/client-go v0.34.0 // indirect
k8s.io/client-go v0.34.2 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
replace github.com/expr-lang/expr => github.com/SigNoz/expr v1.17.7-beta

go.sum

File diff suppressed because it is too large

View File

@@ -3,9 +3,8 @@ package flagger
import "github.com/SigNoz/signoz/pkg/types/featuretypes"
var (
FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
FeatureInterpolationEnabled = featuretypes.MustNewName("interpolation_enabled")
FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
)
func MustNewRegistry() featuretypes.Registry {
@@ -18,14 +17,6 @@ func MustNewRegistry() featuretypes.Registry {
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureInterpolationEnabled,
Kind: featuretypes.KindBoolean,
Stage: featuretypes.StageExperimental,
Description: "Controls whether to enable interpolation",
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureKafkaSpanEval,
Kind: featuretypes.KindBoolean,

View File

@@ -30,3 +30,7 @@ func (module *getter) ListByOwnedKeyRange(ctx context.Context) ([]*types.Organiz
return module.store.ListByKeyRange(ctx, start, end)
}
func (module *getter) GetByName(ctx context.Context, name string) (*types.Organization, error) {
return module.store.GetByName(ctx, name)
}

View File

@@ -47,6 +47,22 @@ func (store *store) Get(ctx context.Context, id valuer.UUID) (*types.Organizatio
return organization, nil
}
func (store *store) GetByName(ctx context.Context, name string) (*types.Organization, error) {
organization := new(types.Organization)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(organization).
Where("name = ?", name).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrOrganizationNotFound, "organization with name %s does not exist", name)
}
return organization, nil
}
func (store *store) GetAll(ctx context.Context) ([]*types.Organization, error) {
organizations := make([]*types.Organization, 0)
err := store.

View File

@@ -14,6 +14,9 @@ type Getter interface {
// ListByOwnedKeyRange gets all the organizations owned by the instance
ListByOwnedKeyRange(context.Context) ([]*types.Organization, error)
// Gets the organization by name
GetByName(context.Context, string) (*types.Organization, error)
}
type Setter interface {

View File

@@ -76,7 +76,7 @@ func (m *module) ListPromotedAndIndexedPaths(ctx context.Context) ([]promotetype
// add the paths that are not promoted but have indexes
for path, indexes := range aggr {
path := strings.TrimPrefix(path, telemetrylogs.BodyJSONColumnPrefix)
path := strings.TrimPrefix(path, telemetrylogs.BodyV2ColumnPrefix)
path = telemetrytypes.BodyJSONStringSearchPrefix + path
response = append(response, promotetypes.PromotePath{
Path: path,
@@ -156,7 +156,7 @@ func (m *module) PromoteAndIndexPaths(
}
}
if len(it.Indexes) > 0 {
parentColumn := telemetrylogs.LogsV2BodyJSONColumn
parentColumn := telemetrylogs.LogsV2BodyV2Column
// if the path is already promoted or is being promoted, add it to the promoted column
if _, promoted := existingPromotedPaths[it.Path]; promoted || it.Promote {
parentColumn = telemetrylogs.LogsV2BodyPromotedColumn

View File

@@ -151,6 +151,10 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
return "", err
}
if err := user.ErrIfRoot(); err != nil {
return "", errors.WithAdditionalf(err, "root user can only authenticate via password")
}
token, err := module.tokenizer.CreateToken(ctx, authtypes.NewIdentity(user.ID, user.OrgID, user.Email, user.Role), map[string]string{})
if err != nil {
return "", err

View File

@@ -5,11 +5,26 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Config struct {
Password PasswordConfig `mapstructure:"password"`
Root RootConfig `mapstructure:"root"`
}
type RootConfig struct {
Enabled bool `mapstructure:"enabled"`
Email valuer.Email `mapstructure:"email"`
Password string `mapstructure:"password"`
Org OrgConfig `mapstructure:"org"`
}
type OrgConfig struct {
Name string `mapstructure:"name"`
}
type PasswordConfig struct {
Reset ResetConfig `mapstructure:"reset"`
}
@@ -31,6 +46,12 @@ func newConfig() factory.Config {
MaxTokenLifetime: 6 * time.Hour,
},
},
Root: RootConfig{
Enabled: false,
Org: OrgConfig{
Name: "default",
},
},
}
}
@@ -39,5 +60,17 @@ func (c Config) Validate() error {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::reset::max_token_lifetime must be positive")
}
if c.Root.Enabled {
if c.Root.Email.IsZero() {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::email is required when root user is enabled")
}
if c.Root.Password == "" {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::password is required when root user is enabled")
}
if !types.IsPasswordValid(c.Root.Password) {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::password does not meet password requirements")
}
}
return nil
}
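
A hedged sketch of exercising the new validation from inside the same package (the valuer.MustNewEmail helper and all literal values are assumptions for illustration, not taken from this diff):

// Sketch only: lives in the same package as Config; in a real
// deployment these values arrive via the user::root::* config keys.
cfg := Config{
	Password: PasswordConfig{
		Reset: ResetConfig{MaxTokenLifetime: 6 * time.Hour},
	},
	Root: RootConfig{
		Enabled:  true,
		Email:    valuer.MustNewEmail("root@example.com"), // assumed constructor
		Password: "a-Sufficiently-Str0ng-password!",
		Org:      OrgConfig{Name: "default"},
	},
}
if err := cfg.Validate(); err != nil {
	// fails when email/password are missing or the password is too weak
	log.Fatal(err)
}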

View File

@@ -16,6 +16,10 @@ func NewGetter(store types.UserStore) user.Getter {
return &getter{store: store}
}
func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
return module.store.GetRootUserByOrgID(ctx, orgID)
}
func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error) {
users, err := module.store.ListUsersByOrgID(ctx, orgID)
if err != nil {

View File

@@ -103,6 +103,12 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
return nil, err
}
if existingUser != nil {
if err := existingUser.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot send invite to root user")
}
}
if existingUser != nil {
return nil, errors.New(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "User already exists with the same email")
}
@@ -202,27 +208,21 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
return nil, err
}
if err := existingUser.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot update root user")
}
requestor, err := m.store.GetUser(ctx, valuer.MustNewUUID(updatedBy))
if err != nil {
return nil, err
}
// only displayName, role can be updated
if user.DisplayName == "" {
user.DisplayName = existingUser.DisplayName
}
if user.Role == "" {
user.Role = existingUser.Role
}
if user.Role != existingUser.Role && requestor.Role != types.RoleAdmin {
if user.Role != "" && user.Role != existingUser.Role && requestor.Role != types.RoleAdmin {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "only admins can change roles")
}
// Make sure that th e request is not demoting the last admin user.
// also an admin user can only change role of their own or other user
if user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin {
// Make sure that the request is not demoting the last admin user.
if user.Role != "" && user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin {
adminUsers, err := m.store.GetUsersByRoleAndOrgID(ctx, types.RoleAdmin, orgID)
if err != nil {
return nil, err
@@ -233,7 +233,7 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
}
}
if user.Role != existingUser.Role {
if user.Role != "" && user.Role != existingUser.Role {
err = m.authz.ModifyGrant(ctx,
orgID,
roletypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role),
@@ -245,23 +245,28 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
}
}
user.UpdatedAt = time.Now()
updatedUser, err := m.store.UpdateUser(ctx, orgID, id, user)
if err != nil {
existingUser.Update(user.DisplayName, user.Role)
if err := m.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return nil, err
}
traits := types.NewTraitsFromUser(updatedUser)
m.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
return existingUser, nil
}
traits["updated_by"] = updatedBy
m.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
if err := m.tokenizer.DeleteIdentity(ctx, valuer.MustNewUUID(id)); err != nil {
return nil, err
func (module *Module) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
if err := module.store.UpdateUser(ctx, orgID, user); err != nil {
return err
}
return updatedUser, nil
traits := types.NewTraitsFromUser(user)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
if err := module.tokenizer.DeleteIdentity(ctx, user.ID); err != nil {
return err
}
return nil
}
func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error {
@@ -270,6 +275,10 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot delete root user")
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
}
@@ -302,6 +311,15 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
}
func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID valuer.UUID) (*types.ResetPasswordToken, error) {
user, err := module.store.GetUser(ctx, userID)
if err != nil {
return nil, err
}
if err := user.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot reset password for root user")
}
password, err := module.store.GetPasswordByUserID(ctx, userID)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
@@ -364,6 +382,10 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot reset password for root user")
}
token, err := module.GetOrCreateResetPasswordToken(ctx, user.ID)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", "error", err)
@@ -407,6 +429,15 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
return err
}
user, err := module.store.GetUser(ctx, valuer.MustNewUUID(password.UserID))
if err != nil {
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot reset password for root user")
}
if err := password.Update(passwd); err != nil {
return err
}
@@ -415,6 +446,15 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
}
func (module *Module) UpdatePassword(ctx context.Context, userID valuer.UUID, oldpasswd string, passwd string) error {
user, err := module.store.GetUser(ctx, userID)
if err != nil {
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot change password for root user")
}
password, err := module.store.GetPasswordByUserID(ctx, userID)
if err != nil {
return err
@@ -476,7 +516,7 @@ func (m *Module) RevokeAPIKey(ctx context.Context, id, removedByUserID valuer.UU
}
func (module *Module) CreateFirstUser(ctx context.Context, organization *types.Organization, name string, email valuer.Email, passwd string) (*types.User, error) {
user, err := types.NewUser(name, email, types.RoleAdmin, organization.ID)
user, err := types.NewRootUser(name, email, organization.ID)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,187 @@
package impluser
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type service struct {
settings factory.ScopedProviderSettings
store types.UserStore
module user.Module
orgGetter organization.Getter
authz authz.AuthZ
config user.RootConfig
stopC chan struct{}
}
func NewService(
providerSettings factory.ProviderSettings,
store types.UserStore,
module user.Module,
orgGetter organization.Getter,
authz authz.AuthZ,
config user.RootConfig,
) user.Service {
return &service{
settings: factory.NewScopedProviderSettings(providerSettings, "go.signoz.io/pkg/modules/user"),
store: store,
module: module,
orgGetter: orgGetter,
authz: authz,
config: config,
stopC: make(chan struct{}),
}
}
func (s *service) Start(ctx context.Context) error {
if !s.config.Enabled {
<-s.stopC
return nil
}
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
err := s.reconcile(ctx)
if err == nil {
s.settings.Logger().InfoContext(ctx, "root user reconciliation completed successfully")
<-s.stopC
return nil
}
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", "error", err)
select {
case <-s.stopC:
return nil
case <-ticker.C:
continue
}
}
}
func (s *service) Stop(ctx context.Context) error {
close(s.stopC)
return nil
}
func (s *service) reconcile(ctx context.Context) error {
org, err := s.orgGetter.GetByName(ctx, s.config.Org.Name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
newOrg := types.NewOrganization(s.config.Org.Name, s.config.Org.Name)
_, err := s.module.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
return err
}
return err
}
return s.reconcileRootUser(ctx, org.ID)
}
func (s *service) reconcileRootUser(ctx context.Context, orgID valuer.UUID) error {
existingRoot, err := s.store.GetRootUserByOrgID(ctx, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingRoot == nil {
return s.createOrPromoteRootUser(ctx, orgID)
}
return s.updateExistingRootUser(ctx, orgID, existingRoot)
}
func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID) error {
existingUser, err := s.store.GetUserByEmailAndOrgID(ctx, s.config.Email, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingUser != nil {
oldRole := existingUser.Role
existingUser.PromoteToRoot()
if err := s.module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return err
}
if oldRole != types.RoleAdmin {
if err := s.authz.ModifyGrant(ctx,
orgID,
roletypes.MustGetSigNozManagedRoleFromExistingRole(oldRole),
roletypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin),
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
); err != nil {
return err
}
}
return s.setPassword(ctx, existingUser.ID)
}
// Create new root user
newUser, err := types.NewRootUser(s.config.Email.String(), s.config.Email, orgID)
if err != nil {
return err
}
factorPassword, err := types.NewFactorPassword(s.config.Password, newUser.ID.StringValue())
if err != nil {
return err
}
return s.module.CreateUser(ctx, newUser, user.WithFactorPassword(factorPassword))
}
func (s *service) updateExistingRootUser(ctx context.Context, orgID valuer.UUID, existingRoot *types.User) error {
existingRoot.PromoteToRoot()
if existingRoot.Email != s.config.Email {
existingRoot.UpdateEmail(s.config.Email)
if err := s.module.UpdateAnyUser(ctx, orgID, existingRoot); err != nil {
return err
}
}
return s.setPassword(ctx, existingRoot.ID)
}
func (s *service) setPassword(ctx context.Context, userID valuer.UUID) error {
password, err := s.store.GetPasswordByUserID(ctx, userID)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
return err
}
factorPassword, err := types.NewFactorPassword(s.config.Password, userID.StringValue())
if err != nil {
return err
}
return s.store.CreatePassword(ctx, factorPassword)
}
if !password.Equals(s.config.Password) {
if err := password.Update(s.config.Password); err != nil {
return err
}
return s.store.UpdatePassword(ctx, password)
}
return nil
}
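
Start blocks its goroutine: it retries reconcile on a 10-second ticker until one pass succeeds, then parks on stopC so a later Stop returns it cleanly. A hedged wiring sketch (the constructor arguments are illustrative names, not the real container wiring):

// Illustrative only — in the real application these dependencies come
// from the signoz container and config.
svc := NewService(providerSettings, store, module, orgGetter, authzClient, cfg.Root)

go func() {
	// Blocks until Stop: retries reconciliation every 10s, then waits.
	if err := svc.Start(context.Background()); err != nil {
		logger.Error("root user service exited", "error", err)
	}
}()

// ... during shutdown:
_ = svc.Stop(context.Background())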

View File

@@ -210,20 +210,24 @@ func (store *store) GetUsersByRoleAndOrgID(ctx context.Context, role types.Role,
return users, nil
}
func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User) (*types.User, error) {
user.UpdatedAt = time.Now()
_, err := store.sqlstore.BunDB().NewUpdate().
func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
NewUpdate().
Model(user).
Column("display_name").
Column("email").
Column("role").
Column("is_root").
Column("updated_at").
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("id = ?", user.ID).
Exec(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user with id: %s does not exist in org: %s", id, orgID)
return store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user does not exist in org: %s", orgID)
}
return user, nil
return nil
}
func (store *store) ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.GettableUser, error) {
@@ -602,6 +606,22 @@ func (store *store) RunInTx(ctx context.Context, cb func(ctx context.Context) er
})
}
func (store *store) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
user := new(types.User)
err := store.
sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(user).
Where("org_id = ?", orgID).
Where("is_root = ?", true).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "root user for org %s not found", orgID)
}
return user, nil
}
func (store *store) ListUsersByEmailAndOrgIDs(ctx context.Context, email valuer.Email, orgIDs []valuer.UUID) ([]*types.User, error) {
users := []*types.User{}
err := store.

View File

@@ -0,0 +1,7 @@
package user
import "github.com/SigNoz/signoz/pkg/factory"
type Service interface {
factory.Service
}

View File

@@ -34,6 +34,9 @@ type Module interface {
ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User, updatedBy string) (*types.User, error)
// UpdateAnyUser updates a user and persists the changes to the database along with the analytics and identity deletion.
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error
DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error
// invite
@@ -54,6 +57,9 @@ type Module interface {
}
type Getter interface {
// Get root user by org id.
GetRootUserByOrgID(context.Context, valuer.UUID) (*types.User, error)
// Get gets the users based on the given id
ListByOrgID(context.Context, valuer.UUID) ([]*types.User, error)

View File

@@ -87,6 +87,24 @@ func (client *client) Read(ctx context.Context, query *prompb.Query, sortSeries
return remote.FromQueryResult(sortSeries, res), nil
}
func (c *client) ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
if len(queries) == 0 {
return storage.EmptySeriesSet(), nil
}
if len(queries) == 1 {
return c.Read(ctx, queries[0], sortSeries)
}
sets := make([]storage.SeriesSet, 0, len(queries))
for _, q := range queries {
ss, err := c.Read(ctx, q, sortSeries)
if err != nil {
return nil, err
}
sets = append(sets, ss)
}
return storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge), nil
}
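
Because the merged result is an ordinary storage.SeriesSet, callers iterate it the same way whether one query or many went in. A small consumption sketch (reader and queries are illustrative names):

set, err := reader.ReadMultiple(ctx, queries, true) // sortSeries = true
if err != nil {
	return err
}
for set.Next() {
	s := set.At()
	fmt.Println(s.Labels().String()) // one line per series across all queries
}
if err := set.Err(); err != nil {
	return err
}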
func (client *client) queryToClickhouseQuery(_ context.Context, query *prompb.Query, metricName string, subQuery bool) (string, []any, error) {
var clickHouseQuery string
var conditions []string

View File

@@ -2,6 +2,7 @@ package prometheus
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
)
@@ -15,30 +16,24 @@ func RemoveExtraLabels(res *promql.Result, labelsToRemove ...string) error {
toRemove[l] = struct{}{}
}
dropLabels := func(metric labels.Labels) labels.Labels {
b := labels.NewBuilder(metric)
for name := range toRemove {
b.Del(name)
}
return b.Labels()
}
switch res.Value.(type) {
case promql.Vector:
value := res.Value.(promql.Vector)
for i := range value {
series := &(value)[i]
dst := series.Metric[:0]
for _, lbl := range series.Metric {
if _, drop := toRemove[lbl.Name]; !drop {
dst = append(dst, lbl)
}
}
series.Metric = dst
(value)[i].Metric = dropLabels((value)[i].Metric)
}
case promql.Matrix:
value := res.Value.(promql.Matrix)
for i := range value {
series := &(value)[i]
dst := series.Metric[:0]
for _, lbl := range series.Metric {
if _, drop := toRemove[lbl.Name]; !drop {
dst = append(dst, lbl)
}
}
series.Metric = dst
(value)[i].Metric = dropLabels((value)[i].Metric)
}
case promql.Scalar:
return nil
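
The rewrite replaces in-place slice surgery with labels.NewBuilder, the supported way to delete label names now that labels.Labels may use an immutable internal representation. A self-contained sketch of the same pattern:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	metric := labels.FromStrings("__name__", "http_requests_total", "job", "api", "le", "0.5")

	// Same shape as dropLabels above: delete via the builder rather
	// than rebuilding the label slice by hand.
	b := labels.NewBuilder(metric)
	b.Del("le")
	fmt.Println(b.Labels()) // {__name__="http_requests_total", job="api"}
}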

View File

@@ -10,11 +10,9 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/telemetrystore"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/bytedance/sonic"
)
type builderQuery[T any] struct {
@@ -250,40 +248,6 @@ func (q *builderQuery[T]) executeWithContext(ctx context.Context, query string,
return nil, err
}
// merge body_json and promoted into body
if q.spec.Signal == telemetrytypes.SignalLogs {
switch typedPayload := payload.(type) {
case *qbtypes.RawData:
for _, rr := range typedPayload.Rows {
seeder := func() error {
body, ok := rr.Data[telemetrylogs.LogsV2BodyJSONColumn].(map[string]any)
if !ok {
return nil
}
promoted, ok := rr.Data[telemetrylogs.LogsV2BodyPromotedColumn].(map[string]any)
if !ok {
return nil
}
seed(promoted, body)
str, err := sonic.MarshalString(body)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to marshal body")
}
rr.Data["body"] = str
return nil
}
err := seeder()
if err != nil {
return nil, err
}
delete(rr.Data, telemetrylogs.LogsV2BodyJSONColumn)
delete(rr.Data, telemetrylogs.LogsV2BodyPromotedColumn)
}
payload = typedPayload
}
}
return &qbtypes.Result{
Type: q.kind,
Value: payload,
@@ -411,18 +375,3 @@ func decodeCursor(cur string) (int64, error) {
}
return strconv.ParseInt(string(b), 10, 64)
}
func seed(promoted map[string]any, body map[string]any) {
for key, fromValue := range promoted {
if toValue, ok := body[key]; !ok {
body[key] = fromValue
} else {
if fromValue, ok := fromValue.(map[string]any); ok {
if toValue, ok := toValue.(map[string]any); ok {
seed(fromValue, toValue)
body[key] = toValue
}
}
}
}
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/bytedance/sonic"
)
var (
@@ -394,17 +393,11 @@ func readAsRaw(rows driver.Rows, queryName string) (*qbtypes.RawData, error) {
// de-reference the typed pointer to any
val := reflect.ValueOf(cellPtr).Elem().Interface()
// Post-process JSON columns: normalize into structured values
// Post-process JSON columns: normalize into String value
if strings.HasPrefix(strings.ToUpper(colTypes[i].DatabaseTypeName()), "JSON") {
switch x := val.(type) {
case []byte:
if len(x) > 0 {
var v any
if err := sonic.Unmarshal(x, &v); err == nil {
val = v
}
}
val = string(x)
default:
// already a structured type (map[string]any, []any, etc.)
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/SigNoz/signoz/pkg/querybuilder"
qbv5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
@@ -240,13 +241,13 @@ func (q *promqlQuery) Execute(ctx context.Context) (*qbv5.Result, error) {
var series []*qbv5.TimeSeries
for _, v := range matrix {
var s qbv5.TimeSeries
lbls := make([]*qbv5.Label, 0, len(v.Metric))
for name, value := range v.Metric.Copy().Map() {
lbls := make([]*qbv5.Label, 0, v.Metric.Len())
v.Metric.Range(func(l labels.Label) {
lbls = append(lbls, &qbv5.Label{
Key: telemetrytypes.TelemetryFieldKey{Name: name},
Value: value,
Key: telemetrytypes.TelemetryFieldKey{Name: l.Name},
Value: l.Value,
})
}
})
s.Labels = lbls

View File

@@ -183,7 +183,7 @@ type APIHandlerOpts struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, error) {
querierOpts := querier.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Signoz.Cache,
@@ -270,6 +270,11 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
}
}
// If the root user is enabled, the setup is complete
if config.User.Root.Enabled {
aH.SetupCompleted = true
}
aH.Upgrader = &websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
@@ -2045,7 +2050,7 @@ func (aH *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
return
}
organization := types.NewOrganization(req.OrgDisplayName)
organization := types.NewOrganization(req.OrgDisplayName, req.OrgName)
user, errv2 := aH.Signoz.Modules.User.CreateFirstUser(r.Context(), organization, req.Name, req.Email, req.Password)
if errv2 != nil {
render.Error(w, errv2)

View File

@@ -135,7 +135,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
})
}, config)
if err != nil {
return nil, err
}

View File

@@ -43,11 +43,11 @@ var (
// FromUnit returns a converter for the given unit
func FromUnit(u Unit) Converter {
switch u {
case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min":
case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min", "w", "wk":
return DurationConverter
case "bytes", "decbytes", "bits", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy":
case "bytes", "decbytes", "bits", "bit", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy", "EBy", "ZBy", "YBy", "KiBy", "MiBy", "GiBy", "TiBy", "PiBy", "EiBy", "ZiBy", "YiBy", "kbit", "Mbit", "Gbit", "Tbit", "Pbit", "Ebit", "Zbit", "Ybit", "Kibit", "Mibit", "Gibit", "Tibit", "Pibit":
return DataConverter
case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s":
case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "EBy/s", "ZBy/s", "YBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s", "Ebit/s", "Zbit/s", "Ybit/s", "KiBy/s", "MiBy/s", "GiBy/s", "TiBy/s", "PiBy/s", "EiBy/s", "ZiBy/s", "YiBy/s", "Kibit/s", "Mibit/s", "Gibit/s", "Tibit/s", "Pibit/s", "Eibit/s", "Zibit/s", "Yibit/s":
return DataRateConverter
case "percent", "percentunit", "%":
return PercentConverter
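With the expanded case lists, "wk" and the UCUM byte/bit names now resolve to a converter. A usage sketch inside the same package, relying only on the Value fields and the Convert signature exercised in the tests further down:

func ExampleFromUnit() {
	c := FromUnit("KiBy") // now routed to DataConverter
	fmt.Println(c.Convert(Value{F: 2048, U: "KiBy"}, "MiBy")) // {2 MiBy}, assuming Value's default formatting
}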

View File

@@ -58,36 +58,80 @@ func (*dataConverter) Name() string {
return "data"
}
+ // Notation followed by UCUM:
+ // https://ucum.org/ucum
+ // kibi = Ki, mebi = Mi, gibi = Gi, tebi = Ti, pibi = Pi
+ // kilo = k, mega = M, giga = G, tera = T, peta = P
+ // exa = E, zetta = Z, yotta = Y
+ // byte = By, bit = bit
func FromDataUnit(u Unit) float64 {
switch u {
case "bytes", "By": // base 2
return Byte
case "decbytes": // base 10
return Byte
case "bits": // base 2
case "bits", "bit": // base 2
return Bit
case "decbits": // base 10
return Bit
case "kbytes", "kBy": // base 2
case "kbytes", "KiBy": // base 2
return Kibibyte
case "decKbytes", "deckbytes": // base 10
case "decKbytes", "deckbytes", "kBy": // base 10
return Kilobyte
case "mbytes", "MBy": // base 2
case "mbytes", "MiBy": // base 2
return Mebibyte
case "decMbytes", "decmbytes": // base 10
case "decMbytes", "decmbytes", "MBy": // base 10
return Megabyte
case "gbytes", "GBy": // base 2
case "gbytes", "GiBy": // base 2
return Gibibyte
case "decGbytes", "decgbytes": // base 10
case "decGbytes", "decgbytes", "GBy": // base 10
return Gigabyte
case "tbytes", "TBy": // base 2
case "tbytes", "TiBy": // base 2
return Tebibyte
case "decTbytes", "dectbytes": // base 10
case "decTbytes", "dectbytes", "TBy": // base 10
return Terabyte
case "pbytes", "PBy": // base 2
case "pbytes", "PiBy": // base 2
return Pebibyte
case "decPbytes", "decpbytes": // base 10
case "decPbytes", "decpbytes", "PBy": // base 10
return Petabyte
case "EBy": // base 10
return Exabyte
case "ZBy": // base 10
return Zettabyte
case "YBy": // base 10
return Yottabyte
case "Kibit": // base 2
return Kibibit
case "Mibit": // base 2
return Mebibit
case "Gibit": // base 2
return Gibibit
case "Tibit": // base 2
return Tebibit
case "Pibit": // base 2
return Pebibit
case "EiBy": // base 2
return Exbibyte
case "ZiBy": // base 2
return Zebibyte
case "YiBy": // base 2
return Yobibyte
case "kbit": // base 10
return Kilobit
case "Mbit": // base 10
return Megabit
case "Gbit": // base 10
return Gigabit
case "Tbit": // base 10
return Terabit
case "Pbit": // base 10
return Petabit
case "Ebit": // base 10
return Exabit
case "Zbit": // base 10
return Zettabit
case "Ybit": // base 10
return Yottabit
default:
return 1
}
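Each case returns a multiplier relative to the base unit, so a conversion is presumably the ratio of two factors. A worked sketch under that assumption: 3 MiBy to KiBy is 3 * Mebibyte / Kibibyte = 3 * 1048576 / 1024 = 3072.

// convertData converts v between data units using the factors above
// (assumes Convert composes factors this way; the real method may differ).
func convertData(v float64, from, to Unit) float64 {
	return v * FromDataUnit(from) / FromDataUnit(to)
}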

View File

@@ -54,6 +54,12 @@ func (*dataRateConverter) Name() string {
return "data_rate"
}
+ // Notation followed by UCUM:
+ // https://ucum.org/ucum
+ // kibi = Ki, mebi = Mi, gibi = Gi, tebi = Ti, pibi = Pi
+ // kilo = k, mega = M, giga = G, tera = T, peta = P
+ // exa = E, zetta = Z, yotta = Y
+ // byte = By, bit = bit
func FromDataRateUnit(u Unit) float64 {
// See https://github.com/SigNoz/signoz/blob/5a81f5f90b34845f5b4b3bdd46acf29d04bf3987/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts#L62-L85
switch u {
@@ -65,46 +71,70 @@ func FromDataRateUnit(u Unit) float64 {
return BitPerSecond
case "bps", "bit/s": // bits/sec(SI)
return BitPerSecond
case "KiBs": // kibibytes/sec
case "KiBs", "KiBy/s": // kibibytes/sec
return KibibytePerSecond
case "Kibits": // kibibits/sec
case "Kibits", "Kibit/s": // kibibits/sec
return KibibitPerSecond
case "KBs", "kBy/s": // kilobytes/sec
return KilobytePerSecond
case "Kbits", "kbit/s": // kilobits/sec
return KilobitPerSecond
case "MiBs": // mebibytes/sec
case "MiBs", "MiBy/s": // mebibytes/sec
return MebibytePerSecond
case "Mibits": // mebibits/sec
case "Mibits", "Mibit/s": // mebibits/sec
return MebibitPerSecond
case "MBs", "MBy/s": // megabytes/sec
return MegabytePerSecond
case "Mbits", "Mbit/s": // megabits/sec
return MegabitPerSecond
case "GiBs": // gibibytes/sec
case "GiBs", "GiBy/s": // gibibytes/sec
return GibibytePerSecond
case "Gibits": // gibibits/sec
case "Gibits", "Gibit/s": // gibibits/sec
return GibibitPerSecond
case "GBs", "GBy/s": // gigabytes/sec
return GigabytePerSecond
case "Gbits", "Gbit/s": // gigabits/sec
return GigabitPerSecond
case "TiBs": // tebibytes/sec
case "TiBs", "TiBy/s": // tebibytes/sec
return TebibytePerSecond
case "Tibits": // tebibits/sec
case "Tibits", "Tibit/s": // tebibits/sec
return TebibitPerSecond
case "TBs", "TBy/s": // terabytes/sec
return TerabytePerSecond
case "Tbits", "Tbit/s": // terabits/sec
return TerabitPerSecond
case "PiBs": // pebibytes/sec
case "PiBs", "PiBy/s": // pebibytes/sec
return PebibytePerSecond
case "Pibits": // pebibits/sec
case "Pibits", "Pibit/s": // pebibits/sec
return PebibitPerSecond
case "PBs", "PBy/s": // petabytes/sec
return PetabytePerSecond
case "Pbits", "Pbit/s": // petabits/sec
return PetabitPerSecond
case "EBy/s": // exabytes/sec
return ExabytePerSecond
case "Ebit/s": // exabits/sec
return ExabitPerSecond
case "EiBy/s": // exbibytes/sec
return ExbibytePerSecond
case "Eibit/s": // exbibits/sec
return ExbibitPerSecond
case "ZBy/s": // zettabytes/sec
return ZettabytePerSecond
case "Zbit/s": // zettabits/sec
return ZettabitPerSecond
case "ZiBy/s": // zebibytes/sec
return ZebibytePerSecond
case "Zibit/s": // zebibits/sec
return ZebibitPerSecond
case "YBy/s": // yottabytes/sec
return YottabytePerSecond
case "Ybit/s": // yottabits/sec
return YottabitPerSecond
case "YiBy/s": // yobibytes/sec
return YobibytePerSecond
case "Yibit/s": // yobibits/sec
return YobibitPerSecond
default:
return 1
}
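The same ratio arithmetic explains the new byte/bit rate pairs: a byte factor is eight times the matching bit factor, so 1 KiBy/s converts to 8 Kibit/s (KibibytePerSecond / KibibitPerSecond = 8192 / 1024 = 8). A sketch under the same composition assumption:

func convertDataRate(v float64, from, to Unit) float64 {
	return v * FromDataRateUnit(from) / FromDataRateUnit(to)
}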

View File

@@ -75,3 +75,83 @@ func TestDataRate(t *testing.T) {
// 1024 * 1024 * 1024 bytes = 1 gbytes
assert.Equal(t, Value{F: 1, U: "GiBs"}, dataRateConverter.Convert(Value{F: 1024 * 1024 * 1024, U: "binBps"}, "GiBs"))
}
func TestDataRateConversionUCUMUnit(t *testing.T) {
dataRateConverter := NewDataRateConverter()
tests := []struct {
name string
input Value
toUnit Unit
expected Value
}{
// Binary byte scaling
{name: "Binary byte scaling: 1024 By/s = 1 KiBy/s", input: Value{F: 1024, U: "By/s"}, toUnit: "KiBy/s", expected: Value{F: 1, U: "KiBy/s"}},
{name: "Kibibyte to bytes: 1 KiBy/s = 1024 By/s", input: Value{F: 1, U: "KiBy/s"}, toUnit: "By/s", expected: Value{F: 1024, U: "By/s"}},
{name: "Binary byte scaling: 1024 KiBy/s = 1 MiBy/s", input: Value{F: 1024, U: "KiBy/s"}, toUnit: "MiBy/s", expected: Value{F: 1, U: "MiBy/s"}},
{name: "Gibibyte to bytes: 1 GiBy/s = 1073741824 By/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "By/s", expected: Value{F: 1024 * 1024 * 1024, U: "By/s"}},
{name: "Binary byte scaling: 1024 MiBy/s = 1 GiBy/s", input: Value{F: 1024, U: "MiBy/s"}, toUnit: "GiBy/s", expected: Value{F: 1, U: "GiBy/s"}},
{name: "Gibibyte to mebibyte: 1 GiBy/s = 1024 MiBy/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "MiBy/s", expected: Value{F: 1024, U: "MiBy/s"}},
{name: "Binary byte scaling: 1024 GiBy/s = 1 TiBy/s", input: Value{F: 1024, U: "GiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1, U: "TiBy/s"}},
{name: "Tebibyte to bytes: 1 TiBy/s = 1099511627776 By/s", input: Value{F: 1, U: "TiBy/s"}, toUnit: "By/s", expected: Value{F: 1024 * 1024 * 1024 * 1024, U: "By/s"}},
{name: "Binary byte scaling: 1024 TiBy/s = 1 PiBy/s", input: Value{F: 1024, U: "TiBy/s"}, toUnit: "PiBy/s", expected: Value{F: 1, U: "PiBy/s"}},
{name: "Pebibyte to tebibyte: 1 PiBy/s = 1024 TiBy/s", input: Value{F: 1, U: "PiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1024, U: "TiBy/s"}},
// Binary bit scaling
{name: "Binary bit scaling: 1024 bit/s = 1 Kibit/s", input: Value{F: 1024, U: "bit/s"}, toUnit: "Kibit/s", expected: Value{F: 1, U: "Kibit/s"}},
{name: "Kibibit to bits: 1 Kibit/s = 1024 bit/s", input: Value{F: 1, U: "Kibit/s"}, toUnit: "bit/s", expected: Value{F: 1024, U: "bit/s"}},
{name: "Binary bit scaling: 1024 Kibit/s = 1 Mibit/s", input: Value{F: 1024, U: "Kibit/s"}, toUnit: "Mibit/s", expected: Value{F: 1, U: "Mibit/s"}},
{name: "Gibibit to bits: 1 Gibit/s = 1073741824 bit/s", input: Value{F: 1, U: "Gibit/s"}, toUnit: "bit/s", expected: Value{F: 1024 * 1024 * 1024, U: "bit/s"}},
{name: "Binary bit scaling: 1024 Mibit/s = 1 Gibit/s", input: Value{F: 1024, U: "Mibit/s"}, toUnit: "Gibit/s", expected: Value{F: 1, U: "Gibit/s"}},
{name: "Gibibit to mebibit: 1 Gibit/s = 1024 Mibit/s", input: Value{F: 1, U: "Gibit/s"}, toUnit: "Mibit/s", expected: Value{F: 1024, U: "Mibit/s"}},
{name: "Binary bit scaling: 1024 Gibit/s = 1 Tibit/s", input: Value{F: 1024, U: "Gibit/s"}, toUnit: "Tibit/s", expected: Value{F: 1, U: "Tibit/s"}},
{name: "Tebibit to gibibit: 1 Tibit/s = 1024 Gibit/s", input: Value{F: 1, U: "Tibit/s"}, toUnit: "Gibit/s", expected: Value{F: 1024, U: "Gibit/s"}},
{name: "Binary bit scaling: 1024 Tibit/s = 1 Pibit/s", input: Value{F: 1024, U: "Tibit/s"}, toUnit: "Pibit/s", expected: Value{F: 1, U: "Pibit/s"}},
{name: "Pebibit to tebibit: 1 Pibit/s = 1024 Tibit/s", input: Value{F: 1, U: "Pibit/s"}, toUnit: "Tibit/s", expected: Value{F: 1024, U: "Tibit/s"}},
// Bytes to bits
{name: "Bytes to bits: 1 KiBy/s = 8 Kibit/s", input: Value{F: 1, U: "KiBy/s"}, toUnit: "Kibit/s", expected: Value{F: 8, U: "Kibit/s"}},
{name: "Bytes to bits: 1 MiBy/s = 8 Mibit/s", input: Value{F: 1, U: "MiBy/s"}, toUnit: "Mibit/s", expected: Value{F: 8, U: "Mibit/s"}},
{name: "Bytes to bits: 1 GiBy/s = 8 Gibit/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "Gibit/s", expected: Value{F: 8, U: "Gibit/s"}},
// Unit alias
{name: "Unit alias: 1 KiBs = 1 KiBy/s", input: Value{F: 1, U: "KiBs"}, toUnit: "KiBy/s", expected: Value{F: 1, U: "KiBy/s"}},
{name: "Unit alias: 1 Kibits = 1 Kibit/s", input: Value{F: 1, U: "Kibits"}, toUnit: "Kibit/s", expected: Value{F: 1, U: "Kibit/s"}},
// SI byte scaling (Exa, Zetta, Yotta)
{name: "SI byte scaling: 1000 PBy/s = 1 EBy/s", input: Value{F: 1000, U: "PBy/s"}, toUnit: "EBy/s", expected: Value{F: 1, U: "EBy/s"}},
{name: "Exabyte to bytes: 1 EBy/s = 1e18 By/s", input: Value{F: 1, U: "EBy/s"}, toUnit: "By/s", expected: Value{F: 1e18, U: "By/s"}},
{name: "SI byte scaling: 1000 EBy/s = 1 ZBy/s", input: Value{F: 1000, U: "EBy/s"}, toUnit: "ZBy/s", expected: Value{F: 1, U: "ZBy/s"}},
{name: "Zettabyte to petabytes: 1 ZBy/s = 1000000 PBy/s", input: Value{F: 1, U: "ZBy/s"}, toUnit: "PBy/s", expected: Value{F: 1e6, U: "PBy/s"}},
{name: "SI byte scaling: 1000 ZBy/s = 1 YBy/s", input: Value{F: 1000, U: "ZBy/s"}, toUnit: "YBy/s", expected: Value{F: 1, U: "YBy/s"}},
{name: "Yottabyte to zettabyte: 1 YBy/s = 1000 ZBy/s", input: Value{F: 1, U: "YBy/s"}, toUnit: "ZBy/s", expected: Value{F: 1000, U: "ZBy/s"}},
// Binary byte scaling (Exbi, Zebi, Yobi)
{name: "Binary byte scaling: 1024 PiBy/s = 1 EiBy/s", input: Value{F: 1024, U: "PiBy/s"}, toUnit: "EiBy/s", expected: Value{F: 1, U: "EiBy/s"}},
{name: "Exbibyte to tebibytes: 1 EiBy/s = 1048576 TiBy/s", input: Value{F: 1, U: "EiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1024 * 1024, U: "TiBy/s"}},
{name: "Binary byte scaling: 1024 EiBy/s = 1 ZiBy/s", input: Value{F: 1024, U: "EiBy/s"}, toUnit: "ZiBy/s", expected: Value{F: 1, U: "ZiBy/s"}},
{name: "Zebibyte to exbibyte: 1 ZiBy/s = 1024 EiBy/s", input: Value{F: 1, U: "ZiBy/s"}, toUnit: "EiBy/s", expected: Value{F: 1024, U: "EiBy/s"}},
{name: "Binary byte scaling: 1024 ZiBy/s = 1 YiBy/s", input: Value{F: 1024, U: "ZiBy/s"}, toUnit: "YiBy/s", expected: Value{F: 1, U: "YiBy/s"}},
{name: "Yobibyte to zebibyte: 1 YiBy/s = 1024 ZiBy/s", input: Value{F: 1, U: "YiBy/s"}, toUnit: "ZiBy/s", expected: Value{F: 1024, U: "ZiBy/s"}},
// SI bit scaling (Exa, Zetta, Yotta)
{name: "SI bit scaling: 1000 Pbit/s = 1 Ebit/s", input: Value{F: 1000, U: "Pbit/s"}, toUnit: "Ebit/s", expected: Value{F: 1, U: "Ebit/s"}},
{name: "Exabit to gigabits: 1 Ebit/s = 1e9 Gbit/s", input: Value{F: 1, U: "Ebit/s"}, toUnit: "Gbit/s", expected: Value{F: 1e9, U: "Gbit/s"}},
{name: "SI bit scaling: 1000 Ebit/s = 1 Zbit/s", input: Value{F: 1000, U: "Ebit/s"}, toUnit: "Zbit/s", expected: Value{F: 1, U: "Zbit/s"}},
{name: "Zettabit to exabit: 1 Zbit/s = 1000 Ebit/s", input: Value{F: 1, U: "Zbit/s"}, toUnit: "Ebit/s", expected: Value{F: 1000, U: "Ebit/s"}},
{name: "SI bit scaling: 1000 Zbit/s = 1 Ybit/s", input: Value{F: 1000, U: "Zbit/s"}, toUnit: "Ybit/s", expected: Value{F: 1, U: "Ybit/s"}},
{name: "Yottabit to zettabit: 1 Ybit/s = 1000 Zbit/s", input: Value{F: 1, U: "Ybit/s"}, toUnit: "Zbit/s", expected: Value{F: 1000, U: "Zbit/s"}},
// Binary bit scaling (Exbi, Zebi, Yobi)
{name: "Binary bit scaling: 1024 Pibit/s = 1 Eibit/s", input: Value{F: 1024, U: "Pibit/s"}, toUnit: "Eibit/s", expected: Value{F: 1, U: "Eibit/s"}},
{name: "Exbibit to pebibit: 1 Eibit/s = 1024 Pibit/s", input: Value{F: 1, U: "Eibit/s"}, toUnit: "Pibit/s", expected: Value{F: 1024, U: "Pibit/s"}},
{name: "Binary bit scaling: 1024 Eibit/s = 1 Zibit/s", input: Value{F: 1024, U: "Eibit/s"}, toUnit: "Zibit/s", expected: Value{F: 1, U: "Zibit/s"}},
{name: "Zebibit to exbibit: 1 Zibit/s = 1024 Eibit/s", input: Value{F: 1, U: "Zibit/s"}, toUnit: "Eibit/s", expected: Value{F: 1024, U: "Eibit/s"}},
{name: "Binary bit scaling: 1024 Zibit/s = 1 Yibit/s", input: Value{F: 1024, U: "Zibit/s"}, toUnit: "Yibit/s", expected: Value{F: 1, U: "Yibit/s"}},
{name: "Yobibit to zebibit: 1 Yibit/s = 1024 Zibit/s", input: Value{F: 1, U: "Yibit/s"}, toUnit: "Zibit/s", expected: Value{F: 1024, U: "Zibit/s"}},
// Bytes to bits (Exbi, Zebi, Yobi)
{name: "Bytes to bits: 1 EiBy/s = 8 Eibit/s", input: Value{F: 1, U: "EiBy/s"}, toUnit: "Eibit/s", expected: Value{F: 8, U: "Eibit/s"}},
{name: "Bytes to bits: 1 ZiBy/s = 8 Zibit/s", input: Value{F: 1, U: "ZiBy/s"}, toUnit: "Zibit/s", expected: Value{F: 8, U: "Zibit/s"}},
{name: "Bytes to bits: 1 YiBy/s = 8 Yibit/s", input: Value{F: 1, U: "YiBy/s"}, toUnit: "Yibit/s", expected: Value{F: 8, U: "Yibit/s"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := dataRateConverter.Convert(tt.input, tt.toUnit)
assert.Equal(t, tt.expected, got)
})
}
}

View File

@@ -13,7 +13,7 @@ func TestData(t *testing.T) {
assert.Equal(t, Value{F: 1, U: "By"}, dataConverter.Convert(Value{F: 8, U: "bits"}, "By"))
// 1024 bytes = 1 kbytes
assert.Equal(t, Value{F: 1, U: "kbytes"}, dataConverter.Convert(Value{F: 1024, U: "bytes"}, "kbytes"))
assert.Equal(t, Value{F: 1, U: "kBy"}, dataConverter.Convert(Value{F: 1024, U: "bytes"}, "kBy"))
assert.Equal(t, Value{F: 1, U: "kBy"}, dataConverter.Convert(Value{F: 1000, U: "bytes"}, "kBy"))
// 1 byte = 8 bits
assert.Equal(t, Value{F: 8, U: "bits"}, dataConverter.Convert(Value{F: 1, U: "bytes"}, "bits"))
// 1 mbytes = 1024 kbytes
@@ -22,7 +22,7 @@ func TestData(t *testing.T) {
assert.Equal(t, Value{F: 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "kbytes"}, "bytes"))
// 1024 kbytes = 1 mbytes
assert.Equal(t, Value{F: 1, U: "mbytes"}, dataConverter.Convert(Value{F: 1024, U: "kbytes"}, "mbytes"))
assert.Equal(t, Value{F: 1, U: "MBy"}, dataConverter.Convert(Value{F: 1024, U: "kbytes"}, "MBy"))
assert.Equal(t, Value{F: 1, U: "MBy"}, dataConverter.Convert(Value{F: 1000, U: "kBy"}, "MBy"))
// 1 mbytes = 1024 * 1024 bytes
assert.Equal(t, Value{F: 1024 * 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "mbytes"}, "bytes"))
// 1024 mbytes = 1 gbytes
@@ -45,10 +45,90 @@ func TestData(t *testing.T) {
assert.Equal(t, Value{F: 1024 * 1024 * 1024 * 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "tbytes"}, "bytes"))
// 1024 tbytes = 1 pbytes
assert.Equal(t, Value{F: 1, U: "pbytes"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "pbytes"))
- // 1024 tbytes = 1 pbytes
- assert.Equal(t, Value{F: 1, U: "PBy"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "PBy"))
+ // 1024 tbytes = 1 PiBy
+ assert.Equal(t, Value{F: 1, U: "PiBy"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "PiBy"))
// 1 pbytes = 1024 tbytes
assert.Equal(t, Value{F: 1024, U: "tbytes"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "tbytes"))
- // 1024 pbytes = 1 tbytes
- assert.Equal(t, Value{F: 1024, U: "TBy"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "TBy"))
+ // 1024 TiBy = 1 pbytes
+ assert.Equal(t, Value{F: 1024, U: "TiBy"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "TiBy"))
}
func TestDataConversionUCUMUnit(t *testing.T) {
dataConverter := NewDataConverter()
tests := []struct {
name string
input Value
toUnit Unit
expected Value
}{
// Bits to bytes
{name: "Bits to bytes: 8 bit = 1 By", input: Value{F: 8, U: "bit"}, toUnit: "By", expected: Value{F: 1, U: "By"}},
{name: "Byte to bits: 1 By = 8 bit", input: Value{F: 1, U: "By"}, toUnit: "bit", expected: Value{F: 8, U: "bit"}},
// Binary byte scaling
{name: "Binary byte scaling: 1024 By = 1 KiBy", input: Value{F: 1024, U: "By"}, toUnit: "KiBy", expected: Value{F: 1, U: "KiBy"}},
{name: "Kibibyte to bytes: 1 KiBy = 1024 By", input: Value{F: 1, U: "KiBy"}, toUnit: "By", expected: Value{F: 1024, U: "By"}},
{name: "Binary byte scaling: 1024 KiBy = 1 MiBy", input: Value{F: 1024, U: "KiBy"}, toUnit: "MiBy", expected: Value{F: 1, U: "MiBy"}},
{name: "Binary byte scaling: 1024 MiBy = 1 GiBy", input: Value{F: 1024, U: "MiBy"}, toUnit: "GiBy", expected: Value{F: 1, U: "GiBy"}},
{name: "Gibibyte to mebibyte: 1 GiBy = 1024 MiBy", input: Value{F: 1, U: "GiBy"}, toUnit: "MiBy", expected: Value{F: 1024, U: "MiBy"}},
{name: "Binary byte scaling: 1024 GiBy = 1 TiBy", input: Value{F: 1024, U: "GiBy"}, toUnit: "TiBy", expected: Value{F: 1, U: "TiBy"}},
{name: "Binary byte scaling: 1024 TiBy = 1 PiBy", input: Value{F: 1024, U: "TiBy"}, toUnit: "PiBy", expected: Value{F: 1, U: "PiBy"}},
{name: "Pebibyte to tebibyte: 1 PiBy = 1024 TiBy", input: Value{F: 1, U: "PiBy"}, toUnit: "TiBy", expected: Value{F: 1024, U: "TiBy"}},
{name: "Gibibyte to bytes: 1 GiBy = 1073741824 By", input: Value{F: 1, U: "GiBy"}, toUnit: "By", expected: Value{F: 1024 * 1024 * 1024, U: "By"}},
{name: "Tebibyte to bytes: 1 TiBy = 1099511627776 By", input: Value{F: 1, U: "TiBy"}, toUnit: "By", expected: Value{F: 1024 * 1024 * 1024 * 1024, U: "By"}},
// SI bit scaling
{name: "SI bit scaling: 1000 bit = 1 kbit", input: Value{F: 1000, U: "bit"}, toUnit: "kbit", expected: Value{F: 1, U: "kbit"}},
{name: "Kilobit to bits: 1 kbit = 1000 bit", input: Value{F: 1, U: "kbit"}, toUnit: "bit", expected: Value{F: 1000, U: "bit"}},
{name: "SI bit scaling: 1000 kbit = 1 Mbit", input: Value{F: 1000, U: "kbit"}, toUnit: "Mbit", expected: Value{F: 1, U: "Mbit"}},
{name: "Gigabit to bits: 1 Gbit = 1000000000 bit", input: Value{F: 1, U: "Gbit"}, toUnit: "bit", expected: Value{F: 1000 * 1000 * 1000, U: "bit"}},
{name: "SI bit scaling: 1000 Mbit = 1 Gbit", input: Value{F: 1000, U: "Mbit"}, toUnit: "Gbit", expected: Value{F: 1, U: "Gbit"}},
{name: "Gigabit to megabit: 1 Gbit = 1000 Mbit", input: Value{F: 1, U: "Gbit"}, toUnit: "Mbit", expected: Value{F: 1000, U: "Mbit"}},
{name: "SI bit scaling: 1000 Gbit = 1 Tbit", input: Value{F: 1000, U: "Gbit"}, toUnit: "Tbit", expected: Value{F: 1, U: "Tbit"}},
{name: "Terabit to gigabit: 1 Tbit = 1000 Gbit", input: Value{F: 1, U: "Tbit"}, toUnit: "Gbit", expected: Value{F: 1000, U: "Gbit"}},
{name: "SI bit scaling: 1000 Tbit = 1 Pbit", input: Value{F: 1000, U: "Tbit"}, toUnit: "Pbit", expected: Value{F: 1, U: "Pbit"}},
{name: "Petabit to terabit: 1 Pbit = 1000 Tbit", input: Value{F: 1, U: "Pbit"}, toUnit: "Tbit", expected: Value{F: 1000, U: "Tbit"}},
// Binary bit scaling
{name: "Binary bit scaling: 1024 bit = 1 Kibit", input: Value{F: 1024, U: "bit"}, toUnit: "Kibit", expected: Value{F: 1, U: "Kibit"}},
{name: "Kibibit to bits: 1 Kibit = 1024 bit", input: Value{F: 1, U: "Kibit"}, toUnit: "bit", expected: Value{F: 1024, U: "bit"}},
{name: "Binary bit scaling: 1024 Kibit = 1 Mibit", input: Value{F: 1024, U: "Kibit"}, toUnit: "Mibit", expected: Value{F: 1, U: "Mibit"}},
{name: "Mebibit to kibibit: 1 Mibit = 1024 Kibit", input: Value{F: 1, U: "Mibit"}, toUnit: "Kibit", expected: Value{F: 1024, U: "Kibit"}},
{name: "Binary bit scaling: 1024 Mibit = 1 Gibit", input: Value{F: 1024, U: "Mibit"}, toUnit: "Gibit", expected: Value{F: 1, U: "Gibit"}},
{name: "Gibibit to mebibit: 1 Gibit = 1024 Mibit", input: Value{F: 1, U: "Gibit"}, toUnit: "Mibit", expected: Value{F: 1024, U: "Mibit"}},
{name: "Binary bit scaling: 1024 Gibit = 1 Tibit", input: Value{F: 1024, U: "Gibit"}, toUnit: "Tibit", expected: Value{F: 1, U: "Tibit"}},
{name: "Tebibit to gibibit: 1 Tibit = 1024 Gibit", input: Value{F: 1, U: "Tibit"}, toUnit: "Gibit", expected: Value{F: 1024, U: "Gibit"}},
{name: "Binary bit scaling: 1024 Tibit = 1 Pibit", input: Value{F: 1024, U: "Tibit"}, toUnit: "Pibit", expected: Value{F: 1, U: "Pibit"}},
{name: "Pebibit to tebibit: 1 Pibit = 1024 Tibit", input: Value{F: 1, U: "Pibit"}, toUnit: "Tibit", expected: Value{F: 1024, U: "Tibit"}},
// Bytes to bits
{name: "Bytes to bits: 1 KiBy = 8 Kibit", input: Value{F: 1, U: "KiBy"}, toUnit: "Kibit", expected: Value{F: 8, U: "Kibit"}},
{name: "Bytes to bits: 1 MiBy = 8 Mibit", input: Value{F: 1, U: "MiBy"}, toUnit: "Mibit", expected: Value{F: 8, U: "Mibit"}},
{name: "Bytes to bits: 1 GiBy = 8 Gibit", input: Value{F: 1, U: "GiBy"}, toUnit: "Gibit", expected: Value{F: 8, U: "Gibit"}},
// SI byte scaling (Exa, Zetta, Yotta)
{name: "SI byte scaling: 1000 PBy = 1 EBy", input: Value{F: 1000, U: "PBy"}, toUnit: "EBy", expected: Value{F: 1, U: "EBy"}},
{name: "Exabyte to bytes: 1 EBy = 1e18 By", input: Value{F: 1, U: "EBy"}, toUnit: "By", expected: Value{F: 1e18, U: "By"}},
{name: "SI byte scaling: 1000 EBy = 1 ZBy", input: Value{F: 1000, U: "EBy"}, toUnit: "ZBy", expected: Value{F: 1, U: "ZBy"}},
{name: "Zettabyte to petabytes: 1 ZBy = 1000000 PBy", input: Value{F: 1, U: "ZBy"}, toUnit: "PBy", expected: Value{F: 1e6, U: "PBy"}},
{name: "SI byte scaling: 1000 ZBy = 1 YBy", input: Value{F: 1000, U: "ZBy"}, toUnit: "YBy", expected: Value{F: 1, U: "YBy"}},
{name: "Yottabyte to zettabyte: 1 YBy = 1000 ZBy", input: Value{F: 1, U: "YBy"}, toUnit: "ZBy", expected: Value{F: 1000, U: "ZBy"}},
// Binary byte scaling (Exbi, Zebi, Yobi)
{name: "Binary byte scaling: 1024 PiBy = 1 EiBy", input: Value{F: 1024, U: "PiBy"}, toUnit: "EiBy", expected: Value{F: 1, U: "EiBy"}},
{name: "Exbibyte to tebibytes: 1 EiBy = 1048576 TiBy", input: Value{F: 1, U: "EiBy"}, toUnit: "TiBy", expected: Value{F: 1024 * 1024, U: "TiBy"}},
{name: "Binary byte scaling: 1024 EiBy = 1 ZiBy", input: Value{F: 1024, U: "EiBy"}, toUnit: "ZiBy", expected: Value{F: 1, U: "ZiBy"}},
{name: "Zebibyte to exbibyte: 1 ZiBy = 1024 EiBy", input: Value{F: 1, U: "ZiBy"}, toUnit: "EiBy", expected: Value{F: 1024, U: "EiBy"}},
{name: "Binary byte scaling: 1024 ZiBy = 1 YiBy", input: Value{F: 1024, U: "ZiBy"}, toUnit: "YiBy", expected: Value{F: 1, U: "YiBy"}},
{name: "Yobibyte to zebibyte: 1 YiBy = 1024 ZiBy", input: Value{F: 1, U: "YiBy"}, toUnit: "ZiBy", expected: Value{F: 1024, U: "ZiBy"}},
// SI bit scaling (Exa, Zetta, Yotta)
{name: "SI bit scaling: 1000 Pbit = 1 Ebit", input: Value{F: 1000, U: "Pbit"}, toUnit: "Ebit", expected: Value{F: 1, U: "Ebit"}},
{name: "Exabit to gigabits: 1 Ebit = 1e9 Gbit", input: Value{F: 1, U: "Ebit"}, toUnit: "Gbit", expected: Value{F: 1e9, U: "Gbit"}},
{name: "SI bit scaling: 1000 Ebit = 1 Zbit", input: Value{F: 1000, U: "Ebit"}, toUnit: "Zbit", expected: Value{F: 1, U: "Zbit"}},
{name: "Zettabit to exabit: 1 Zbit = 1000 Ebit", input: Value{F: 1, U: "Zbit"}, toUnit: "Ebit", expected: Value{F: 1000, U: "Ebit"}},
{name: "SI bit scaling: 1000 Zbit = 1 Ybit", input: Value{F: 1000, U: "Zbit"}, toUnit: "Ybit", expected: Value{F: 1, U: "Ybit"}},
{name: "Yottabit to zettabit: 1 Ybit = 1000 Zbit", input: Value{F: 1, U: "Ybit"}, toUnit: "Zbit", expected: Value{F: 1000, U: "Zbit"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := dataConverter.Convert(tt.input, tt.toUnit)
assert.Equal(t, tt.expected, got)
})
}
}

View File

@@ -47,7 +47,7 @@ func FromTimeUnit(u Unit) Duration {
return Hour
case "d":
return Day
case "w":
case "w", "wk":
return Week
default:
return Second
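Worked example for the new week alias: FromTimeUnit("wk") now returns Week, so 2 wk in seconds is 2 * Week / Second = 2 * 604800 = 1209600. A sketch assuming the Duration factors divide this way (the tests below exercise exactly these ratios):

func weeksToSeconds(w float64) float64 {
	return w * float64(Week) / float64(Second)
}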

View File

@@ -54,4 +54,13 @@ func TestDurationConvert(t *testing.T) {
assert.Equal(t, Value{F: 1, U: "ms"}, timeConverter.Convert(Value{F: 1000, U: "us"}, "ms"))
// 1000000000 ns = 1 s
assert.Equal(t, Value{F: 1, U: "s"}, timeConverter.Convert(Value{F: 1000000000, U: "ns"}, "s"))
// 7 d = 1 wk
assert.Equal(t, Value{F: 1, U: "wk"}, timeConverter.Convert(Value{F: 7, U: "d"}, "wk"))
// 1 wk = 7 d
assert.Equal(t, Value{F: 7, U: "d"}, timeConverter.Convert(Value{F: 1, U: "wk"}, "d"))
// 1 wk = 168 h
assert.Equal(t, Value{F: 168, U: "h"}, timeConverter.Convert(Value{F: 1, U: "wk"}, "h"))
// 604800 s = 1 wk
assert.Equal(t, Value{F: 1, U: "wk"}, timeConverter.Convert(Value{F: 604800, U: "s"}, "wk"))
}

View File

@@ -24,30 +24,62 @@ func (f *dataFormatter) Format(value float64, unit string) string {
return humanize.IBytes(uint64(value))
case "decbytes":
return humanize.Bytes(uint64(value))
case "bits":
return humanize.IBytes(uint64(value * converter.Bit))
case "bits", "bit":
// humanize.IBytes/Bytes doesn't support bits
// and returns 0 B for values less than a byte
if value < 8 {
return fmt.Sprintf("%v b", value)
}
return humanize.IBytes(uint64(value / 8))
case "decbits":
return humanize.Bytes(uint64(value * converter.Bit))
case "kbytes", "kBy":
if value < 8 {
return fmt.Sprintf("%v b", value)
}
return humanize.Bytes(uint64(value / 8))
case "kbytes", "KiBy":
return humanize.IBytes(uint64(value * converter.Kibibit))
case "decKbytes", "deckbytes":
return humanize.IBytes(uint64(value * converter.Kilobit))
case "mbytes", "MBy":
case "Kibit":
return humanize.IBytes(uint64(value * converter.Kibibit / 8))
case "decKbytes", "deckbytes", "kBy":
return humanize.Bytes(uint64(value * converter.Kilobit))
case "kbit":
return humanize.Bytes(uint64(value * converter.Kilobit / 8))
case "mbytes", "MiBy":
return humanize.IBytes(uint64(value * converter.Mebibit))
case "decMbytes", "decmbytes":
case "Mibit":
return humanize.IBytes(uint64(value * converter.Mebibit / 8))
case "decMbytes", "decmbytes", "MBy":
return humanize.Bytes(uint64(value * converter.Megabit))
case "gbytes", "GBy":
case "Mbit":
return humanize.Bytes(uint64(value * converter.Megabit / 8))
case "gbytes", "GiBy":
return humanize.IBytes(uint64(value * converter.Gibibit))
case "decGbytes", "decgbytes":
case "Gibit":
return humanize.IBytes(uint64(value * converter.Gibibit / 8))
case "decGbytes", "decgbytes", "GBy":
return humanize.Bytes(uint64(value * converter.Gigabit))
case "tbytes", "TBy":
case "Gbit":
return humanize.Bytes(uint64(value * converter.Gigabit / 8))
case "tbytes", "TiBy":
return humanize.IBytes(uint64(value * converter.Tebibit))
case "decTbytes", "dectbytes":
case "Tibit":
return humanize.IBytes(uint64(value * converter.Tebibit / 8))
case "decTbytes", "dectbytes", "TBy":
return humanize.Bytes(uint64(value * converter.Terabit))
case "pbytes", "PBy":
case "Tbit":
return humanize.Bytes(uint64(value * converter.Terabit / 8))
case "pbytes", "PiBy":
return humanize.IBytes(uint64(value * converter.Pebibit))
case "decPbytes", "decpbytes":
case "Pbit":
return humanize.Bytes(uint64(value * converter.Petabit / 8))
case "decPbytes", "decpbytes", "PBy":
return humanize.Bytes(uint64(value * converter.Petabit))
case "EiBy":
return humanize.IBytes(uint64(value * converter.Exbibit))
case "Ebit":
return humanize.Bytes(uint64(value * converter.Exabit / 8))
case "EBy":
return humanize.Bytes(uint64(value * converter.Exabit))
}
// When unit is not matched, return the value as it is.
return fmt.Sprintf("%v", value)

View File

@@ -25,49 +25,66 @@ func (f *dataRateFormatter) Format(value float64, unit string) string {
case "Bps", "By/s":
return humanize.Bytes(uint64(value)) + "/s"
case "binbps":
- return humanize.IBytes(uint64(value*converter.BitPerSecond)) + "/s"
+ // humanize.IBytes/Bytes doesn't support bits
+ // and returns 0 B for values less than a byte
+ if value < 8 {
+ return fmt.Sprintf("%v b/s", value)
+ }
+ return humanize.IBytes(uint64(value/8)) + "/s"
case "bps", "bit/s":
- return humanize.Bytes(uint64(value*converter.BitPerSecond)) + "/s"
- case "KiBs":
+ if value < 8 {
+ return fmt.Sprintf("%v b/s", value)
+ }
+ return humanize.Bytes(uint64(value/8)) + "/s"
+ case "KiBs", "KiBy/s":
return humanize.IBytes(uint64(value*converter.KibibitPerSecond)) + "/s"
- case "Kibits":
- return humanize.IBytes(uint64(value*converter.KibibytePerSecond)) + "/s"
+ case "Kibits", "Kibit/s":
+ return humanize.IBytes(uint64(value*converter.KibibitPerSecond/8)) + "/s"
case "KBs", "kBy/s":
return humanize.IBytes(uint64(value*converter.KilobitPerSecond)) + "/s"
case "Kbits", "kbit/s":
- return humanize.IBytes(uint64(value*converter.KilobytePerSecond)) + "/s"
- case "MiBs":
+ return humanize.Bytes(uint64(value*converter.KilobitPerSecond/8)) + "/s"
+ case "MiBs", "MiBy/s":
return humanize.IBytes(uint64(value*converter.MebibitPerSecond)) + "/s"
- case "Mibits":
- return humanize.IBytes(uint64(value*converter.MebibytePerSecond)) + "/s"
+ case "Mibits", "Mibit/s":
+ return humanize.IBytes(uint64(value*converter.MebibitPerSecond/8)) + "/s"
case "MBs", "MBy/s":
return humanize.IBytes(uint64(value*converter.MegabitPerSecond)) + "/s"
case "Mbits", "Mbit/s":
- return humanize.IBytes(uint64(value*converter.MegabytePerSecond)) + "/s"
- case "GiBs":
+ return humanize.Bytes(uint64(value*converter.MegabitPerSecond/8)) + "/s"
+ case "GiBs", "GiBy/s":
return humanize.IBytes(uint64(value*converter.GibibitPerSecond)) + "/s"
- case "Gibits":
- return humanize.IBytes(uint64(value*converter.GibibytePerSecond)) + "/s"
+ case "Gibits", "Gibit/s":
+ return humanize.IBytes(uint64(value*converter.GibibitPerSecond/8)) + "/s"
case "GBs", "GBy/s":
return humanize.IBytes(uint64(value*converter.GigabitPerSecond)) + "/s"
case "Gbits", "Gbit/s":
- return humanize.IBytes(uint64(value*converter.GigabytePerSecond)) + "/s"
- case "TiBs":
+ return humanize.Bytes(uint64(value*converter.GigabitPerSecond/8)) + "/s"
+ case "TiBs", "TiBy/s":
return humanize.IBytes(uint64(value*converter.TebibitPerSecond)) + "/s"
- case "Tibits":
- return humanize.IBytes(uint64(value*converter.TebibytePerSecond)) + "/s"
+ case "Tibits", "Tibit/s":
+ return humanize.IBytes(uint64(value*converter.TebibitPerSecond/8)) + "/s"
case "TBs", "TBy/s":
return humanize.IBytes(uint64(value*converter.TerabitPerSecond)) + "/s"
case "Tbits", "Tbit/s":
- return humanize.IBytes(uint64(value*converter.TerabytePerSecond)) + "/s"
- case "PiBs":
+ return humanize.Bytes(uint64(value*converter.TerabitPerSecond/8)) + "/s"
+ case "PiBs", "PiBy/s":
return humanize.IBytes(uint64(value*converter.PebibitPerSecond)) + "/s"
- case "Pibits":
- return humanize.IBytes(uint64(value*converter.PebibytePerSecond)) + "/s"
+ case "Pibits", "Pibit/s":
+ return humanize.IBytes(uint64(value*converter.PebibitPerSecond/8)) + "/s"
case "PBs", "PBy/s":
return humanize.IBytes(uint64(value*converter.PetabitPerSecond)) + "/s"
case "Pbits", "Pbit/s":
- return humanize.IBytes(uint64(value*converter.PetabytePerSecond)) + "/s"
+ return humanize.Bytes(uint64(value*converter.PetabitPerSecond/8)) + "/s"
+ // Exa units
+ case "EBy/s":
+ return humanize.Bytes(uint64(value*converter.ExabitPerSecond)) + "/s"
+ case "Ebit/s":
+ return humanize.Bytes(uint64(value*converter.ExabitPerSecond/8)) + "/s"
+ case "EiBy/s":
+ return humanize.IBytes(uint64(value*converter.ExbibitPerSecond)) + "/s"
+ case "Eibit/s":
+ return humanize.IBytes(uint64(value*converter.ExbibitPerSecond/8)) + "/s"
}
// When unit is not matched, return the value as it is.
return fmt.Sprintf("%v", value)

View File

@@ -0,0 +1,150 @@
package formatter
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestDataRateFormatterComprehensive(t *testing.T) {
dataRateFormatter := NewDataRateFormatter()
tests := []struct {
name string
value float64
unit string
expected string
}{
// IEC Bits/sec - binbps, bps
{name: "binbps as Bps", value: 7, unit: "binbps", expected: "7 b/s"},
{name: "100 binbps as 12 Bps", value: 100, unit: "binbps", expected: "12 B/s"},
{name: "binbps as 23 GiBs", value: 8 * 1024 * 1024 * 1024 * 23, unit: "binbps", expected: "23 GiB/s"},
// SI Bits/sec - bps, bit/s
{name: "bps as Bps", value: 5, unit: "bps", expected: "5 b/s"},
{name: "200 bitps as 25 Bps", value: 200, unit: "bit/s", expected: "25 B/s"},
{name: "bitps as MBs", value: 8 * 1000 * 1000 * 7, unit: "bit/s", expected: "7.0 MB/s"},
// IEC Base bytes/sec - binBps
{name: "binBps as Bps", value: 0, unit: "binBps", expected: "0 B/s"},
{name: "1 binBps as 1 Bps", value: 1, unit: "binBps", expected: "1 B/s"},
{name: "binBps as Kibps", value: 1024, unit: "binBps", expected: "1.0 KiB/s"},
{name: "binBps as Mibps", value: 1024 * 1024, unit: "binBps", expected: "1.0 MiB/s"},
{name: "binBps as Gibps", value: 1024 * 1024 * 1024, unit: "binBps", expected: "1.0 GiB/s"},
// SI Base bytes/sec - Bps, By/s
{name: "Bps as Bps", value: 1, unit: "Bps", expected: "1 B/s"},
{name: "Bps as kbps", value: 1000, unit: "Bps", expected: "1.0 kB/s"},
{name: "Bps as Mbps", value: 1000 * 1000, unit: "Bps", expected: "1.0 MB/s"},
{name: "Byps as kbps", value: 1000, unit: "By/s", expected: "1.0 kB/s"},
// Kibibytes/sec - KiBs, KiBy/s
{name: "Kibs as Bps", value: 0, unit: "KiBs", expected: "0 B/s"},
{name: "Kibs as Kibps", value: 1, unit: "KiBs", expected: "1.0 KiB/s"},
{name: "Kibs as Mibps", value: 1024, unit: "KiBs", expected: "1.0 MiB/s"},
{name: "Kibs as Gibps", value: 3 * 1024 * 1024, unit: "KiBs", expected: "3.0 GiB/s"},
{name: "KiByps as Kibps", value: 1, unit: "KiBy/s", expected: "1.0 KiB/s"},
{name: "KiByps as Mibps", value: 1024, unit: "KiBy/s", expected: "1.0 MiB/s"},
// Kibibits/sec - Kibits, Kibit/s
{name: "Kibitps as Kibps", value: 1, unit: "Kibits", expected: "128 B/s"},
{name: "Kibitps as Mibps", value: 42 * 1024, unit: "Kibits", expected: "5.3 MiB/s"},
{name: "Kibitps as Kibps 10", value: 10, unit: "Kibit/s", expected: "1.3 KiB/s"},
// Kilobytes/sec (SI) - KBs, kBy/s
{name: "Kbs as Bps", value: 0.5, unit: "KBs", expected: "500 B/s"},
{name: "Kbs as Mibps", value: 1048.6, unit: "KBs", expected: "1.0 MiB/s"},
{name: "kByps as Bps", value: 1, unit: "kBy/s", expected: "1000 B/s"},
// Kilobits/sec (SI) - Kbits, kbit/s
{name: "Kbitps as Bps", value: 1, unit: "Kbits", expected: "125 B/s"},
{name: "kbitps as Bps", value: 1, unit: "kbit/s", expected: "125 B/s"},
// Mebibytes/sec - MiBs, MiBy/s
{name: "Mibs as Mibps", value: 1, unit: "MiBs", expected: "1.0 MiB/s"},
{name: "Mibs as Gibps", value: 1024, unit: "MiBs", expected: "1.0 GiB/s"},
{name: "Mibs as Tibps", value: 1024 * 1024, unit: "MiBs", expected: "1.0 TiB/s"},
{name: "MiByps as Mibps", value: 1, unit: "MiBy/s", expected: "1.0 MiB/s"},
// Mebibits/sec - Mibits, Mibit/s
{name: "Mibitps as Mibps", value: 40, unit: "Mibits", expected: "5.0 MiB/s"},
{name: "Mibitps as Mibps per second variant", value: 10, unit: "Mibit/s", expected: "1.3 MiB/s"},
// Megabytes/sec (SI) - MBs, MBy/s
{name: "Mbs as Kibps", value: 1, unit: "MBs", expected: "977 KiB/s"},
{name: "MByps as Kibps", value: 1, unit: "MBy/s", expected: "977 KiB/s"},
// Megabits/sec (SI) - Mbits, Mbit/s
{name: "Mbitps as Kibps", value: 1, unit: "Mbits", expected: "125 kB/s"},
{name: "Mbitps as Kibps per second variant", value: 1, unit: "Mbit/s", expected: "125 kB/s"},
// Gibibytes/sec - GiBs, GiBy/s
{name: "Gibs as Gibps", value: 1, unit: "GiBs", expected: "1.0 GiB/s"},
{name: "Gibs as Tibps", value: 1024, unit: "GiBs", expected: "1.0 TiB/s"},
{name: "GiByps as Tibps", value: 42 * 1024, unit: "GiBy/s", expected: "42 TiB/s"},
// Gibibits/sec - Gibits, Gibit/s
{name: "Gibitps as Tibps", value: 42 * 1024, unit: "Gibits", expected: "5.3 TiB/s"},
{name: "Gibitps as Tibps per second variant", value: 42 * 1024, unit: "Gibit/s", expected: "5.3 TiB/s"},
// Gigabytes/sec (SI) - GBs, GBy/s
{name: "Gbs as Tibps", value: 42 * 1000, unit: "GBs", expected: "38 TiB/s"},
{name: "GByps as Tibps", value: 42 * 1000, unit: "GBy/s", expected: "38 TiB/s"},
// Gigabits/sec (SI) - Gbits, Gbit/s
{name: "Gbitps as Tibps", value: 42 * 1000, unit: "Gbits", expected: "5.3 TB/s"},
{name: "Gbitps as Tibps per second variant", value: 42 * 1000, unit: "Gbit/s", expected: "5.3 TB/s"},
// Tebibytes/sec - TiBs, TiBy/s
{name: "Tibs as Tibps", value: 1, unit: "TiBs", expected: "1.0 TiB/s"},
{name: "Tibs as Pibps", value: 1024, unit: "TiBs", expected: "1.0 PiB/s"},
{name: "TiByps as Pibps", value: 42 * 1024, unit: "TiBy/s", expected: "42 PiB/s"},
// Tebibits/sec - Tibits, Tibit/s
{name: "Tibitps as Pibps", value: 42 * 1024, unit: "Tibits", expected: "5.3 PiB/s"},
{name: "Tibitps as Pibps per second variant", value: 42 * 1024, unit: "Tibit/s", expected: "5.3 PiB/s"},
// Terabytes/sec (SI) - TBs, TBy/s
{name: "Tbs as Pibps", value: 42 * 1000, unit: "TBs", expected: "37 PiB/s"},
{name: "TByps as Pibps", value: 42 * 1000, unit: "TBy/s", expected: "37 PiB/s"},
// Terabits/sec (SI) - Tbits, Tbit/s
{name: "Tbitps as Pibps", value: 42 * 1000, unit: "Tbits", expected: "5.3 PB/s"},
{name: "Tbitps as Pibps per second variant", value: 42 * 1000, unit: "Tbit/s", expected: "5.3 PB/s"},
// Pebibytes/sec - PiBs, PiBy/s
{name: "Pibs as Eibps", value: 10 * 1024, unit: "PiBs", expected: "10 EiB/s"},
{name: "PiByps as Eibps", value: 10 * 1024, unit: "PiBy/s", expected: "10 EiB/s"},
// Pebibits/sec - Pibits, Pibit/s
{name: "Pibitps as Eibps", value: 10 * 1024, unit: "Pibits", expected: "1.3 EiB/s"},
{name: "Pibitps as Eibps per second variant", value: 10 * 1024, unit: "Pibit/s", expected: "1.3 EiB/s"},
// Petabytes/sec (SI) - PBs, PBy/s
{name: "Pbs as Pibps", value: 42, unit: "PBs", expected: "37 PiB/s"},
{name: "PByps as Pibps", value: 42, unit: "PBy/s", expected: "37 PiB/s"},
// Petabits/sec (SI) - Pbits, Pbit/s
{name: "Pbitps as Pibps", value: 42, unit: "Pbits", expected: "5.3 PB/s"},
{name: "Pbitps as Pibps per second variant", value: 42, unit: "Pbit/s", expected: "5.3 PB/s"},
// Exabytes/sec (SI) - EBy/s
{name: "EByps as Ebps", value: 10, unit: "EBy/s", expected: "10 EB/s"},
// Exabits/sec (SI) - Ebit/s
{name: "Ebitps as Ebps", value: 10, unit: "Ebit/s", expected: "1.3 EB/s"},
// Exbibytes/sec (IEC) - EiBy/s
{name: "EiByps as Eibps", value: 10, unit: "EiBy/s", expected: "10 EiB/s"},
// Exbibits/sec (IEC) - Eibit/s
{name: "Eibitps as Eibps", value: 10, unit: "Eibit/s", expected: "1.3 EiB/s"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := dataRateFormatter.Format(tt.value, tt.unit)
assert.Equal(t, tt.expected, got)
})
}
}

View File

@@ -14,21 +14,174 @@ func TestData(t *testing.T) {
assert.Equal(t, "1.0 KiB", dataFormatter.Format(1024, "bytes"))
assert.Equal(t, "1.0 KiB", dataFormatter.Format(1024, "By"))
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "mbytes"))
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "MBy"))
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "MiBy"))
assert.Equal(t, "1.0 MiB", dataFormatter.Format(1024*1024, "bytes"))
assert.Equal(t, "1.0 MiB", dataFormatter.Format(1024*1024, "By"))
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "mbytes"))
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "MBy"))
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "MiBy"))
assert.Equal(t, "102 KiB", dataFormatter.Format(102*1024, "bytes"))
assert.Equal(t, "102 KiB", dataFormatter.Format(102*1024, "By"))
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "kbytes"))
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "kBy"))
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "KiBy"))
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "kbytes"))
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "kBy"))
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "KiBy"))
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "kbytes"))
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "kBy"))
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "KiBy"))
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "kbytes"))
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "kBy"))
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "KiBy"))
assert.Equal(t, "24 MiB", dataFormatter.Format(24, "mbytes"))
assert.Equal(t, "24 MiB", dataFormatter.Format(24, "MBy"))
assert.Equal(t, "24 MiB", dataFormatter.Format(24, "MiBy"))
}
func TestDataFormatterComprehensive(t *testing.T) {
dataFormatter := NewDataFormatter()
tests := []struct {
name string
value float64
unit string
expected string
}{
// IEC Bits - bits, bit
{name: "bits: 1", value: 1, unit: "bits", expected: "1 b"},
{name: "bits: 7", value: 7, unit: "bit", expected: "7 b"},
{name: "bits: to MiB = 8 * 1024 * 1024 * 9", value: 8 * 1024 * 1024 * 9, unit: "bits", expected: "9.0 MiB"},
// SI Bits - decbits, bit
{name: "decbits: 1", value: 1, unit: "decbits", expected: "1 b"},
{name: "decbits: 7", value: 7, unit: "decbits", expected: "7 b"},
{name: "decbits: to MB = 8 * 1000 * 1000 * 4", value: 8 * 1000 * 1000 * 4, unit: "decbits", expected: "4.0 MB"},
// IEC Base bytes - bytes, By
{name: "bytes: 0", value: 0, unit: "bytes", expected: "0 B"},
{name: "bytes: 1", value: 1, unit: "bytes", expected: "1 B"},
{name: "bytes: 512", value: 512, unit: "bytes", expected: "512 B"},
{name: "bytes: 1023", value: 1023, unit: "bytes", expected: "1023 B"},
{name: "bytes: 1024 = 1 KiB", value: 1024, unit: "bytes", expected: "1.0 KiB"},
{name: "bytes: 1536", value: 1536, unit: "bytes", expected: "1.5 KiB"},
{name: "bytes: 1024*1024 = 1 MiB", value: 1024 * 1024, unit: "bytes", expected: "1.0 MiB"},
{name: "bytes: 1024*1024*1024 = 1 GiB", value: 1024 * 1024 * 1024, unit: "bytes", expected: "1.0 GiB"},
{name: "By: same as bytes", value: 1024, unit: "By", expected: "1.0 KiB"},
// SI Base bytes - decbytes
{name: "decbytes: 1", value: 1, unit: "decbytes", expected: "1 B"},
{name: "decbytes: 1000 = 1 kB", value: 1000, unit: "decbytes", expected: "1.0 kB"},
{name: "decbytes: 1000*1000 = 1 MB", value: 1000 * 1000, unit: "decbytes", expected: "1.0 MB"},
{name: "decbytes: 1000*1000*1000 = 1 GB", value: 1000 * 1000 * 1000, unit: "decbytes", expected: "1.0 GB"},
// Kibibytes - kbytes, KiBy (IEC)
{name: "kbytes: 0", value: 0, unit: "kbytes", expected: "0 B"},
{name: "kbytes: 1 = 1 KiB", value: 1, unit: "kbytes", expected: "1.0 KiB"},
{name: "kbytes: 512", value: 512, unit: "kbytes", expected: "512 KiB"},
{name: "kbytes: 1024 = 1 MiB", value: 1024, unit: "kbytes", expected: "1.0 MiB"},
{name: "kbytes: 1024*1024 = 1 GiB", value: 1024 * 1024, unit: "kbytes", expected: "1.0 GiB"},
{name: "kbytes: 2.3*1024 = 2.3 MiB", value: 2.3 * 1024, unit: "kbytes", expected: "2.3 MiB"},
{name: "KiBy: 1 = 1 KiB", value: 1, unit: "KiBy", expected: "1.0 KiB"},
{name: "KiBy: 1024 = 1 MiB", value: 1024, unit: "KiBy", expected: "1.0 MiB"},
{name: "kbytes and KiBy alias", value: 240 * 1024, unit: "KiBy", expected: "240 MiB"},
// SI Kilobytes - decKbytes, deckbytes, kBy
{name: "decKbytes: 1 = 1 kB", value: 1, unit: "decKbytes", expected: "1.0 kB"},
{name: "decKbytes: 1000 = 1 MB", value: 1000, unit: "decKbytes", expected: "1.0 MB"},
{name: "deckbytes: 1 = 1 kB", value: 1, unit: "deckbytes", expected: "1.0 kB"},
{name: "kBy: 1 = 1 kB", value: 1, unit: "kBy", expected: "1.0 kB"},
{name: "kBy: 1000 = 1 MB", value: 1000, unit: "kBy", expected: "1.0 MB"},
// Mebibytes - mbytes, MiBy (IEC)
{name: "mbytes: 1 = 1 MiB", value: 1, unit: "mbytes", expected: "1.0 MiB"},
{name: "mbytes: 24", value: 24, unit: "mbytes", expected: "24 MiB"},
{name: "mbytes: 1024 = 1 GiB", value: 1024, unit: "mbytes", expected: "1.0 GiB"},
{name: "mbytes: 1024*1024 = 1 TiB", value: 1024 * 1024, unit: "mbytes", expected: "1.0 TiB"},
{name: "mbytes: 69*1024 = 69 GiB", value: 69 * 1024, unit: "mbytes", expected: "69 GiB"},
{name: "mbytes: 69*1024*1024 = 69 TiB", value: 69 * 1024 * 1024, unit: "mbytes", expected: "69 TiB"},
{name: "MiBy: 1 = 1 MiB", value: 1, unit: "MiBy", expected: "1.0 MiB"},
{name: "MiBy: 1024 = 1 GiB", value: 1024, unit: "MiBy", expected: "1.0 GiB"},
// SI Megabytes - decMbytes, decmbytes, MBy
{name: "decMbytes: 1 = 1 MB", value: 1, unit: "decMbytes", expected: "1.0 MB"},
{name: "decMbytes: 1000 = 1 GB", value: 1000, unit: "decMbytes", expected: "1.0 GB"},
{name: "decmbytes: 1 = 1 MB", value: 1, unit: "decmbytes", expected: "1.0 MB"},
{name: "MBy: 1 = 1 MB", value: 1, unit: "MBy", expected: "1.0 MB"},
// Gibibytes - gbytes, GiBy (IEC)
{name: "gbytes: 1 = 1 GiB", value: 1, unit: "gbytes", expected: "1.0 GiB"},
{name: "gbytes: 1024 = 1 TiB", value: 1024, unit: "gbytes", expected: "1.0 TiB"},
{name: "GiBy: 42*1024 = 42 TiB", value: 42 * 1024, unit: "GiBy", expected: "42 TiB"},
// SI Gigabytes - decGbytes, decgbytes, GBy
{name: "decGbytes: 42*1000 = 42 TB", value: 42 * 1000, unit: "decGbytes", expected: "42 TB"},
{name: "GBy: 42*1000 = 42 TB", value: 42 * 1000, unit: "GBy", expected: "42 TB"},
// Tebibytes - tbytes, TiBy (IEC)
{name: "tbytes: 1 = 1 TiB", value: 1, unit: "tbytes", expected: "1.0 TiB"},
{name: "tbytes: 1024 = 1 PiB", value: 1024, unit: "tbytes", expected: "1.0 PiB"},
{name: "TiBy: 42*1024 = 42 PiB", value: 42 * 1024, unit: "TiBy", expected: "42 PiB"},
// SI Terabytes - decTbytes, dectbytes, TBy
{name: "decTbytes: 42*1000 = 42 PB", value: 42 * 1000, unit: "decTbytes", expected: "42 PB"},
{name: "dectbytes: 42*1000 = 42 PB", value: 42 * 1000, unit: "dectbytes", expected: "42 PB"},
{name: "TBy: 42*1000 = 42 PB", value: 42 * 1000, unit: "TBy", expected: "42 PB"},
// Pebibytes - pbytes, PiBy (IEC)
{name: "pbytes: 10*1024 = 10 EiB", value: 10 * 1024, unit: "pbytes", expected: "10 EiB"},
{name: "PiBy: 10*1024 = 10 EiB", value: 10 * 1024, unit: "PiBy", expected: "10 EiB"},
// SI Petabytes - decPbytes, decpbytes, PBy
{name: "decPbytes: 42 = 42 PB", value: 42, unit: "decPbytes", expected: "42 PB"},
{name: "decpbytes: 42 = 42 PB", value: 42, unit: "decpbytes", expected: "42 PB"},
{name: "PBy: 42 = 42 PB", value: 42, unit: "PBy", expected: "42 PB"},
// Exbibytes - EiBy (IEC)
{name: "EiBy: 10 = 10 EiB", value: 10, unit: "EiBy", expected: "10 EiB"},
// Exabytes - EBy (SI)
{name: "EBy: 10 = 10 EB", value: 10, unit: "EBy", expected: "10 EB"},
// Kibibits - Kibit (IEC): 1 Kibit = 1024 bits = 128 bytes
{name: "Kibit: 1 = 128 B", value: 1, unit: "Kibit", expected: "128 B"},
{name: "Kibit: 1024 = 128 KiB", value: 1024, unit: "Kibit", expected: "128 KiB"},
// Mebibits - Mibit (IEC): 1 Mibit = 1024 Kibit = 128 KiB
{name: "Mibit: 1 = 128 KiB", value: 1, unit: "Mibit", expected: "128 KiB"},
{name: "Mibit: 1024 = 128 MiB", value: 1024, unit: "Mibit", expected: "128 MiB"},
// Gibibits - Gibit (IEC): 1 Gibit = 1024 Mibit = 128 MiB
{name: "Gibit: 1 = 128 MiB", value: 1, unit: "Gibit", expected: "128 MiB"},
{name: "Gibit: 42*1024 = 5.3 TiB", value: 42 * 1024, unit: "Gibit", expected: "5.3 TiB"},
// Tebibits - Tibit (IEC): 1 Tibit = 1024 Gibit = 128 GiB
{name: "Tibit: 1 = 128 GiB", value: 1, unit: "Tibit", expected: "128 GiB"},
{name: "Tibit: 42*1024 = 5.3 PiB", value: 42 * 1024, unit: "Tibit", expected: "5.3 PiB"},
// Kilobits - kbit (SI): 1 kbit = 1000 bits = 125 bytes
{name: "kbit: 1 = 125 B", value: 1, unit: "kbit", expected: "125 B"},
{name: "kbit: 1000 = 125 kB", value: 1000, unit: "kbit", expected: "125 kB"},
// Megabits - Mbit (SI): 1 Mbit = 1000 kbit = 125 kB
{name: "Mbit: 1 = 125 kB", value: 1, unit: "Mbit", expected: "125 kB"},
{name: "Mbit: 1000 = 125 MB", value: 1000, unit: "Mbit", expected: "125 MB"},
// Gigabits - Gbit (SI): 1 Gbit = 1000 Mbit = 125 MB
{name: "Gbit: 1 = 125 MB", value: 1, unit: "Gbit", expected: "125 MB"},
{name: "Gbit: 42*1000 = 5.3 TB", value: 42 * 1000, unit: "Gbit", expected: "5.3 TB"},
// Terabits - Tbit (SI): 1 Tbit = 1000 Gbit = 125 GB
{name: "Tbit: 1 = 125 GB", value: 1, unit: "Tbit", expected: "125 GB"},
{name: "Tbit: 42*1000 = 5.3 PB", value: 42 * 1000, unit: "Tbit", expected: "5.3 PB"},
// Petabits - Pbit (SI): 1 Pbit = 1000 Tbit = 125 TB
{name: "Pbit: 1 = 125 TB", value: 1, unit: "Pbit", expected: "125 TB"},
{name: "Pbit: 42 = 5.3 PB", value: 42, unit: "Pbit", expected: "5.3 PB"},
// Exabits - Ebit (SI): 1 Ebit = 1000 Pbit = 125 PB
{name: "Ebit: 1 = 125 PB", value: 1, unit: "Ebit", expected: "125 PB"},
{name: "Ebit: 10 = 1.3 EB", value: 10, unit: "Ebit", expected: "1.3 EB"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := dataFormatter.Format(tt.value, tt.unit)
assert.Equal(t, tt.expected, got)
})
}
}

View File

@@ -18,11 +18,11 @@ var (
func FromUnit(u string) Formatter {
switch u {
case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min":
case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min", "w", "wk":
return DurationFormatter
case "bytes", "decbytes", "bits", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy":
case "bytes", "decbytes", "bits", "bit", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy", "EBy", "KiBy", "MiBy", "GiBy", "TiBy", "PiBy", "EiBy", "kbit", "Mbit", "Gbit", "Tbit", "Pbit", "Ebit", "Kibit", "Mibit", "Gibit", "Tibit", "Pibit":
return DataFormatter
case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s":
case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "EBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s", "Ebit/s", "KiBy/s", "MiBy/s", "GiBy/s", "TiBy/s", "PiBy/s", "EiBy/s", "Kibit/s", "Mibit/s", "Gibit/s", "Tibit/s", "Pibit/s", "Eibit/s":
return DataRateFormatter
case "percent", "percentunit", "%":
return PercentFormatter

View File

@@ -32,7 +32,7 @@ func (f *durationFormatter) Format(value float64, unit string) string {
return toHours(value)
case "d":
return toDays(value)
case "w":
case "w", "wk":
return toWeeks(value)
}
// When unit is not matched, return the value as it is.

View File

@@ -19,6 +19,7 @@ import (
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
)
@@ -461,12 +462,12 @@ func toCommonSeries(series promql.Series) v3.Series {
Points: make([]v3.Point, 0),
}
- for _, lbl := range series.Metric {
- commonSeries.Labels[lbl.Name] = lbl.Value
+ series.Metric.Range(func(l labels.Label) {
+ commonSeries.Labels[l.Name] = l.Value
commonSeries.LabelsArray = append(commonSeries.LabelsArray, map[string]string{
- lbl.Name: lbl.Value,
+ l.Name: l.Value,
})
- }
+ })
for _, f := range series.Floats {
commonSeries.Points = append(commonSeries.Points, v3.Point{

View File

@@ -205,7 +205,7 @@ func AdjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemet
key.Indexes = matchingKey.Indexes
key.Materialized = matchingKey.Materialized
key.JSONPlan = matchingKey.JSONPlan
return actions
} else {
// multiple matching keys, set materialized only if all the keys are materialized

View File

@@ -204,7 +204,10 @@ func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, va
// While we expect users not to send mixed data types, it inevitably happens,
// so we handle data type collisions here
switch key.FieldDataType {
- case telemetrytypes.FieldDataTypeString, telemetrytypes.FieldDataTypeArrayString:
+ case telemetrytypes.FieldDataTypeString, telemetrytypes.FieldDataTypeArrayString, telemetrytypes.FieldDataTypeJSON:
+ if key.FieldDataType == telemetrytypes.FieldDataTypeJSON {
+ tblFieldName = fmt.Sprintf("toString(%s)", tblFieldName)
+ }
switch v := value.(type) {
case float64:
// try to convert the string value to a number
@@ -219,7 +222,6 @@ func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, va
// we don't have a toBoolOrNull in ClickHouse, so we need to convert the bool to a string
value = fmt.Sprintf("%t", v)
}
case telemetrytypes.FieldDataTypeInt64,
telemetrytypes.FieldDataTypeArrayInt64,
telemetrytypes.FieldDataTypeNumber,
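The JSON branch added above widens the string collision path: when the stored field is a ClickHouse JSON subcolumn, the column expression is wrapped in ClickHouse's toString() so string operators compare text rather than a JSON value. The same idea in isolation, with a hypothetical column name:

// jsonAsText wraps a ClickHouse JSON subcolumn for string comparison;
// "body_json.status" is a made-up example, not a real SigNoz column.
func jsonAsText(col string) string {
	return fmt.Sprintf("toString(%s)", col) // e.g. toString(body_json.status)
}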

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"log/slog"
"slices"
"strconv"
"strings"
@@ -383,6 +384,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, nil, v.builder, v.startNs, v.endNs)
if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error()))
return ""
}
conds = append(conds, condition)
@@ -483,6 +485,22 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
value1 := v.Visit(values[0])
value2 := v.Visit(values[1])
switch value1.(type) {
case float64:
if _, ok := value2.(float64); !ok {
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected number for both operands", keys[0].Name))
return ""
}
case string:
if _, ok := value2.(string); !ok {
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected string for both operands", keys[0].Name))
return ""
}
default:
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: operands must be number or string", keys[0].Name))
return ""
}
var conds []string
for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs)
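The guard added above rejects mixed operand types for between-style comparisons before any SQL condition is built. The same check in isolation, with shortened error strings:

import "errors"

// checkOperandTypes enforces that both comparison operands share a scalar type.
func checkOperandTypes(a, b any) error {
	switch a.(type) {
	case float64:
		if _, ok := b.(float64); !ok {
			return errors.New("expected number for both operands")
		}
	case string:
		if _, ok := b.(string); !ok {
			return errors.New("expected string for both operands")
		}
	default:
		return errors.New("operands must be number or string")
	}
	return nil
}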
@@ -855,7 +873,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
// 1. either user meant key ( this is already handled above in fieldKeysForName )
// 2. or user meant `attribute.key` we look up in the map for all possible field keys with name 'attribute.key'
// Note:
// If user only wants to search `attribute.key`, then they have to use `attribute.attribute.key`
// If user only wants to search `key`, then they have to use `key`
// If user wants to search both, they can use `attribute.key` and we will resolve the ambiguity
@@ -891,6 +909,13 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
}
}
// TODO(Piyush): Handle this better; Use constants for the strings later
if BodyJSONQueryEnabled && fieldKey.Name == "body" && slices.ContainsFunc(fieldKeysForName, func(key *telemetrytypes.TelemetryFieldKey) bool {
return key.Name == "message" && key.FieldDataType == telemetrytypes.FieldDataTypeString && key.FieldContext == telemetrytypes.FieldContextBody
}) {
v.warnings = append(v.warnings, "You searched for body. Query Builder now defaults this to body.message:string. Check that this matches what you want to search.")
}
if len(fieldKeysForName) > 1 {
warnMsg := fmt.Sprintf(
"Key `%s` is ambiguous, found %d different combinations of field context / data type: %v.",

View File

@@ -375,13 +375,6 @@ func mergeAndEnsureBackwardCompatibility(ctx context.Context, logger *slog.Logge
config.Flagger.Config.Boolean[flagger.FeatureKafkaSpanEval.String()] = os.Getenv("KAFKA_SPAN_EVAL") == "true"
}
if os.Getenv("INTERPOLATION_ENABLED") != "" {
logger.WarnContext(ctx, "[Deprecated] env INTERPOLATION_ENABLED is deprecated and scheduled for removal. Please use SIGNOZ_FLAGGER_CONFIG_BOOLEAN_INTERPOLATION__ENABLED instead.")
if config.Flagger.Config.Boolean == nil {
config.Flagger.Config.Boolean = make(map[string]bool)
}
config.Flagger.Config.Boolean[flagger.FeatureInterpolationEnabled.String()] = os.Getenv("INTERPOLATION_ENABLED") == "true"
}
}
func (config Config) Collect(_ context.Context, _ valuer.UUID) (map[string]any, error) {

View File

@@ -167,6 +167,8 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewMigrateRbacToAuthzFactory(sqlstore),
sqlmigration.NewMigratePublicDashboardsFactory(sqlstore),
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
)
}

View File

@@ -389,6 +389,8 @@ func New(
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard)
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)
// Initialize all handlers for the modules
handlers := NewHandlers(modules, providerSettings, querier, licensing, global, flagger, gateway, telemetryMetadataStore, authz)
@@ -438,6 +440,7 @@ func New(
factory.NewNamedService(factory.MustNewName("statsreporter"), statsReporter),
factory.NewNamedService(factory.MustNewName("tokenizer"), tokenizer),
factory.NewNamedService(factory.MustNewName("authz"), authz),
factory.NewNamedService(factory.MustNewName("user"), userService),
)
if err != nil {
return nil, err

View File

@@ -0,0 +1,80 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addRootUser struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddRootUserFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_root_user"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return &addRootUser{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
})
}
func (migration *addRootUser) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addRootUser) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("users"))
if err != nil {
return err
}
column := &sqlschema.Column{
Name: sqlschema.ColumnName("is_root"),
DataType: sqlschema.DataTypeBoolean,
Nullable: false,
}
sqls := migration.sqlschema.Operator().AddColumn(table, uniqueConstraints, column, false)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *addRootUser) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File
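Note on the add_root_user migration above: Up disables FK enforcement, applies the generated ALTER statements inside a transaction with a deferred rollback, commits, then re-enables enforcement. A stripped-down sketch of the same transaction pattern against database/sql; the DDL string is illustrative, since the real statements come from sqlschema.Operator().AddColumn:

package sketch

import (
	"context"
	"database/sql"
)

// addIsRootColumn applies a single ALTER inside a transaction. The deferred
// Rollback is a no-op once Commit succeeds, so every early return rolls the
// change back.
func addIsRootColumn(ctx context.Context, db *sql.DB) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		_ = tx.Rollback()
	}()
	// Illustrative DDL; the migration derives the exact SQL from the current
	// schema of the users table.
	if _, err := tx.ExecContext(ctx, `ALTER TABLE users ADD COLUMN is_root BOOLEAN NOT NULL DEFAULT false`); err != nil {
		return err
	}
	return tx.Commit()
}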

@@ -0,0 +1,61 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addUserEmailOrgIDIndex struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddUserEmailOrgIDIndexFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_user_email_org_id_index"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return &addUserEmailOrgIDIndex{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
})
}
func (migration *addUserEmailOrgIDIndex) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addUserEmailOrgIDIndex) Up(ctx context.Context, db *bun.DB) error {
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
sqls := migration.sqlschema.Operator().CreateIndex(&sqlschema.UniqueIndex{TableName: "users", ColumnNames: []sqlschema.ColumnName{"email", "org_id"}})
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (migration *addUserEmailOrgIDIndex) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File
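For context, the restored (email, org_id) unique index means an email may repeat across organizations but appears at most once within one. Illustrative DDL for what CreateIndex enforces here (the actual statement and index name are produced by the schema operator):

package sketch

// One row per (email, org_id) pair; the same email in two different orgs
// remains valid.
const usersEmailOrgIDUniqueIndex = `CREATE UNIQUE INDEX users_email_org_id_idx ON users (email, org_id)`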

@@ -35,13 +35,19 @@ func (c *conditionBuilder) conditionFor(
return "", err
}
if column.IsJSONColumn() && querybuilder.BodyJSONQueryEnabled {
valueType, value := InferDataType(value, operator, key)
cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb)
if err != nil {
return "", err
if column.Type.GetType() == schema.ColumnTypeEnumJSON {
// If the field is not the JSON column itself, then we need to build a JSON condition
if querybuilder.BodyJSONQueryEnabled && key.MustBuildJSONCondition() {
valueType, value := InferDataType(value, operator, key)
cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb)
if err != nil {
return "", err
}
return cond, nil
} else if key.FieldDataType == telemetrytypes.FieldDataTypeJSON && !operator.IsOpValidForJSON() {
// Skip building condition for invalid operators on JSON columns
return "", nil
}
return cond, nil
}
if operator.IsStringSearchOperator() {
@@ -108,7 +114,6 @@ func (c *conditionBuilder) conditionFor(
return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
case qbtypes.FilterOperatorNotContains:
return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
case qbtypes.FilterOperatorRegexp:
// Note: Escape $$ to $$$$ to avoid sqlbuilder interpreting materialized $ signs
// Only needed because we are using sprintf instead of sb.Match (not implemented in sqlbuilder)
@@ -176,9 +181,16 @@ func (c *conditionBuilder) conditionFor(
var value any
switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON:
if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(tblFieldName), nil
} else {
switch key.FieldDataType {
case telemetrytypes.FieldDataTypeJSON:
if operator == qbtypes.FilterOperatorExists {
return sb.EQ(fmt.Sprintf("empty(%s)", tblFieldName), false), nil
}
return sb.EQ(fmt.Sprintf("empty(%s)", tblFieldName), true), nil
default:
if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(tblFieldName), nil
}
return sb.IsNull(tblFieldName), nil
}
case schema.ColumnTypeEnumLowCardinality:

View File

@@ -1,7 +1,10 @@
package telemetrylogs
import (
"fmt"
"github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/querybuilder"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -17,7 +20,7 @@ const (
LogsV2TimestampColumn = "timestamp"
LogsV2ObservedTimestampColumn = "observed_timestamp"
LogsV2BodyColumn = "body"
LogsV2BodyJSONColumn = constants.BodyJSONColumn
LogsV2BodyV2Column = constants.BodyV2Column
LogsV2BodyPromotedColumn = constants.BodyPromotedColumn
LogsV2TraceIDColumn = "trace_id"
LogsV2SpanIDColumn = "span_id"
@@ -34,11 +37,20 @@ const (
LogsV2ResourcesStringColumn = "resources_string"
LogsV2ScopeStringColumn = "scope_string"
BodyJSONColumnPrefix = constants.BodyJSONColumnPrefix
BodyV2ColumnPrefix = constants.BodyV2ColumnPrefix
BodyPromotedColumnPrefix = constants.BodyPromotedColumnPrefix
MessageSubColumn = "message"
)
var (
// mapping for body logical field to message sub column
BodyLogicalFieldJSONMapping = &telemetrytypes.TelemetryFieldKey{
Name: MessageSubColumn,
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextBody,
FieldDataType: telemetrytypes.FieldDataTypeString,
JSONDataType: &telemetrytypes.String,
}
DefaultFullTextColumn = &telemetrytypes.TelemetryFieldKey{
Name: "body",
Signal: telemetrytypes.SignalLogs,
@@ -118,3 +130,29 @@ var (
},
}
)
func bodyAliasExpression() string {
if !querybuilder.BodyJSONQueryEnabled {
return LogsV2BodyColumn
}
return fmt.Sprintf("%s as body", LogsV2BodyV2Column)
}
func init() {
// the body logical field is mapped to the message field in the body context, and only with the String data type
err := BodyLogicalFieldJSONMapping.SetJSONAccessPlan(telemetrytypes.JSONColumnMetadata{
BaseColumn: LogsV2BodyV2Column,
PromotedColumn: LogsV2BodyPromotedColumn,
}, map[string][]telemetrytypes.JSONDataType{
MessageSubColumn: {telemetrytypes.String},
})
if err != nil {
panic(err)
}
if querybuilder.BodyJSONQueryEnabled {
DefaultFullTextColumn = BodyLogicalFieldJSONMapping
IntrinsicFields["body"] = *BodyLogicalFieldJSONMapping
}
}

View File
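Side note on the constants and init hunk above: with BodyJSONQueryEnabled set, a bare `body` reference resolves through BodyLogicalFieldJSONMapping to the String-typed message sub-column of body_v2, which is why the tests later in this diff expect dynamicElement(body_v2.`message`, 'String'). A small sketch of the two expression shapes, with an illustrative helper name:

package sketch

import "fmt"

// bodyFieldExpr shows what a `body` reference compiles to: the legacy string
// column when JSON body querying is off, or the String variant of the
// body_v2 message sub-column when it is on.
func bodyFieldExpr(jsonEnabled bool) string {
	if !jsonEnabled {
		return "body"
	}
	return fmt.Sprintf("dynamicElement(%s.`%s`, '%s')", "body_v2", "message", "String")
}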

@@ -30,7 +30,7 @@ var (
"severity_text": {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
"severity_number": {Name: "severity_number", Type: schema.ColumnTypeUInt8},
"body": {Name: "body", Type: schema.ColumnTypeString},
LogsV2BodyJSONColumn: {Name: LogsV2BodyJSONColumn, Type: schema.JSONColumnType{
LogsV2BodyV2Column: {Name: LogsV2BodyV2Column, Type: schema.JSONColumnType{
MaxDynamicTypes: utils.ToPointer(uint(32)),
MaxDynamicPaths: utils.ToPointer(uint(0)),
}},
@@ -91,9 +91,9 @@ func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.Telemetry
}
case telemetrytypes.FieldContextBody:
// Body context is for JSON body fields
// Use body_json if feature flag is enabled
// Use body_v2 if feature flag is enabled
if querybuilder.BodyJSONQueryEnabled {
return logsV2Columns[LogsV2BodyJSONColumn], nil
return logsV2Columns[LogsV2BodyV2Column], nil
}
// Fall back to legacy body column
return logsV2Columns["body"], nil
@@ -102,9 +102,9 @@ func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.Telemetry
if !ok {
// check if the key has body JSON search
if strings.HasPrefix(key.Name, telemetrytypes.BodyJSONStringSearchPrefix) {
// Use body_json if feature flag is enabled and we have a body condition builder
// Use body_v2 if feature flag is enabled and we have a body condition builder
if querybuilder.BodyJSONQueryEnabled {
return logsV2Columns[LogsV2BodyJSONColumn], nil
return logsV2Columns[LogsV2BodyV2Column], nil
}
// Fall back to legacy body column
return logsV2Columns["body"], nil
@@ -149,6 +149,9 @@ func (m *fieldMapper) FieldFor(ctx context.Context, key *telemetrytypes.Telemetr
}
return m.buildFieldForJSON(key)
case telemetrytypes.FieldContextLog:
// return the column name as is for log context fields
return column.Name, nil
default:
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource/body context fields are supported for json columns, got %s", key.FieldContext.String)
}
@@ -254,7 +257,7 @@ func (m *fieldMapper) buildFieldForJSON(key *telemetrytypes.TelemetryFieldKey) (
"plan length is less than 2 for promoted path: %s", key.Name)
}
// promoted column first then body_json column
// promoted column first then body_v2 column
// TODO(Piyush): Change this in future for better performance
expr = fmt.Sprintf("coalesce(%s, %s)",
fmt.Sprintf("dynamicElement(%s, '%s')", plan[1].FieldPath(), plan[1].TerminalConfig.ElemType.StringValue()),

View File

@@ -30,9 +30,8 @@ func NewJSONConditionBuilder(key *telemetrytypes.TelemetryFieldKey, valueType te
return &jsonConditionBuilder{key: key, valueType: telemetrytypes.MappingFieldDataTypeToJSONDataType[valueType]}
}
// BuildCondition builds the full WHERE condition for body_json JSON paths
// BuildCondition builds the full WHERE condition for body_v2 JSON paths
func (c *jsonConditionBuilder) buildJSONCondition(operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
conditions := []string{}
for _, node := range c.key.JSONPlan {
condition, err := c.emitPlannedCondition(node, operator, value, sb)
@@ -41,6 +40,7 @@ func (c *jsonConditionBuilder) buildJSONCondition(operator qbtypes.FilterOperato
}
conditions = append(conditions, condition)
}
return sb.Or(conditions...), nil
}
@@ -297,9 +297,9 @@ func (c *jsonConditionBuilder) applyOperator(sb *sqlbuilder.SelectBuilder, field
}
return sb.And(conds...), nil
case qbtypes.FilterOperatorExists:
return fmt.Sprintf("%s IS NOT NULL", fieldExpr), nil
return sb.IsNotNull(fieldExpr), nil
case qbtypes.FilterOperatorNotExists:
return fmt.Sprintf("%s IS NULL", fieldExpr), nil
return sb.IsNull(fieldExpr), nil
default:
return "", qbtypes.ErrUnsupportedOperator
}
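Aside on the applyOperator change above: the hand-formatted "IS [NOT] NULL" strings are swapped for sb.IsNotNull/sb.IsNull, and buildJSONCondition ORs one condition per JSON plan node. A compact sketch of that combination using the same go-sqlbuilder calls; existsAcrossPlanNodes and its inputs are illustrative:

package sketch

import "github.com/huandu/go-sqlbuilder"

// existsAcrossPlanNodes builds one Exists condition per candidate column
// expression (for example the promoted column and the base body_v2 path)
// and ORs them together, as buildJSONCondition does above.
func existsAcrossPlanNodes(fieldExprs ...string) string {
	sb := sqlbuilder.NewSelectBuilder()
	conditions := []string{}
	for _, expr := range fieldExprs {
		conditions = append(conditions, sb.IsNotNull(expr))
	}
	return sb.Or(conditions...)
}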

File diff suppressed because one or more lines are too long

View File

@@ -203,7 +203,6 @@ func (b *logQueryStatementBuilder) adjustKeys(ctx context.Context, keys map[stri
}
func (b *logQueryStatementBuilder) adjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey) []string {
// First check if it matches with any intrinsic fields
var intrinsicOrCalculatedField telemetrytypes.TelemetryFieldKey
if _, ok := IntrinsicFields[key.Name]; ok {
@@ -212,7 +211,6 @@ func (b *logQueryStatementBuilder) adjustKey(key *telemetrytypes.TelemetryFieldK
}
return querybuilder.AdjustKey(key, keys, nil)
}
// buildListQuery builds a query for list panel type
@@ -249,11 +247,7 @@ func (b *logQueryStatementBuilder) buildListQuery(
sb.SelectMore(LogsV2SeverityNumberColumn)
sb.SelectMore(LogsV2ScopeNameColumn)
sb.SelectMore(LogsV2ScopeVersionColumn)
sb.SelectMore(LogsV2BodyColumn)
if querybuilder.BodyJSONQueryEnabled {
sb.SelectMore(LogsV2BodyJSONColumn)
sb.SelectMore(LogsV2BodyPromotedColumn)
}
sb.SelectMore(bodyAliasExpression())
sb.SelectMore(LogsV2AttributesStringColumn)
sb.SelectMore(LogsV2AttributesNumberColumn)
sb.SelectMore(LogsV2AttributesBoolColumn)

View File

@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/querybuilder/resourcefilter"
@@ -854,3 +855,154 @@ func TestAdjustKey(t *testing.T) {
})
}
}
func TestStmtBuilderBodyField(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
enableBodyJSONQuery bool
expected qbtypes.Statement
expectedErr error
}{
{
name: "body_exists",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body Exists"},
Limit: 10,
},
enableBodyJSONQuery: true,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (dynamicElement(body_v2.`message`, 'String') IS NOT NULL) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{"You searched for body. Query Builder now defaults this to body.message:string. Check that this matches what you want to search."},
},
expectedErr: nil,
},
{
name: "body_exists_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body Exists"},
Limit: 10,
},
enableBodyJSONQuery: false,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND body <> ? AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
{
name: "body_empty",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body == ''"},
Limit: 10,
},
enableBodyJSONQuery: true,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (dynamicElement(body_v2.`message`, 'String') = ?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{"You searched for body. Query Builder now defaults this to body.message:string. Check that this matches what you want to search."},
},
expectedErr: nil,
},
{
name: "body_empty_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body == ''"},
Limit: 10,
},
enableBodyJSONQuery: false,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND body = ? AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
{
name: "body_contains",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body CONTAINS 'error'"},
Limit: 10,
},
enableBodyJSONQuery: true,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (LOWER(dynamicElement(body_v2.`message`, 'String')) LIKE LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "%error%", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{"You searched for body. Query Builder now defaults this to body.message:string. Check that this matches what you want to search."},
},
expectedErr: nil,
},
{
name: "body_contains_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "body CONTAINS 'error'"},
Limit: 10,
},
enableBodyJSONQuery: false,
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND LOWER(body) LIKE LOWER(?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "%error%", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
}
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
enable, disable := jsonQueryTestUtil(t)
defer disable()
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
if c.enableBodyJSONQuery {
enable()
} else {
disable()
}
// build the key map after enabling/disabling body JSON query
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil)
resourceFilterStmtBuilder := resourceFilterStmtBuilder()
statementBuilder := NewLogQueryStatementBuilder(
instrumentationtest.New().ToProviderSettings(),
mockMetadataStore,
fm,
cb,
resourceFilterStmtBuilder,
aggExprRewriter,
DefaultFullTextColumn,
GetBodyJSONKey,
)
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
if err != nil {
_, _, _, _, _, add := errors.Unwrapb(err)
t.Logf("error additionals: %v", add)
}
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
}
}

View File

@@ -27,13 +27,6 @@ func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
"body": {
{
Name: "body",
FieldContext: telemetrytypes.FieldContextLog,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
"http.status_code": {
{
Name: "http.status_code",
@@ -945,6 +938,11 @@ func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
key.Signal = telemetrytypes.SignalLogs
}
}
// add intrinsic fields to the map
for fieldName, key := range IntrinsicFields {
keysMap[fieldName] = append(keysMap[fieldName], &key)
}
return keysMap
}

View File

@@ -253,7 +253,7 @@ func buildListLogsJSONIndexesQuery(cluster string, filters ...string) (string, [
sb.Where(sb.Equal("database", telemetrylogs.DBName))
sb.Where(sb.Equal("table", telemetrylogs.LogsV2LocalTableName))
sb.Where(sb.Or(
sb.ILike("expr", fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyJSONColumnPrefix))),
sb.ILike("expr", fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyV2ColumnPrefix))),
sb.ILike("expr", fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyPromotedColumnPrefix))),
))
@@ -337,7 +337,7 @@ func (t *telemetryMetaStore) ListJSONValues(ctx context.Context, path string, li
if promoted {
path = telemetrylogs.BodyPromotedColumnPrefix + path
} else {
path = telemetrylogs.BodyJSONColumnPrefix + path
path = telemetrylogs.BodyV2ColumnPrefix + path
}
from := fmt.Sprintf("%s.%s", telemetrylogs.DBName, telemetrylogs.LogsV2TableName)
@@ -527,7 +527,7 @@ func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...stri
// TODO(Piyush): Remove this function
func CleanPathPrefixes(path string) string {
path = strings.TrimPrefix(path, telemetrytypes.BodyJSONStringSearchPrefix)
path = strings.TrimPrefix(path, telemetrylogs.BodyJSONColumnPrefix)
path = strings.TrimPrefix(path, telemetrylogs.BodyV2ColumnPrefix)
path = strings.TrimPrefix(path, telemetrylogs.BodyPromotedColumnPrefix)
return path
}

View File

@@ -117,7 +117,7 @@ func TestBuildListLogsJSONIndexesQuery(t *testing.T) {
expectedArgs: []any{
telemetrylogs.DBName,
telemetrylogs.LogsV2LocalTableName,
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyJSONColumnPrefix)),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyV2ColumnPrefix)),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyPromotedColumnPrefix)),
},
},
@@ -130,7 +130,7 @@ func TestBuildListLogsJSONIndexesQuery(t *testing.T) {
expectedArgs: []any{
telemetrylogs.DBName,
telemetrylogs.LogsV2LocalTableName,
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyJSONColumnPrefix)),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyV2ColumnPrefix)),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyPromotedColumnPrefix)),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains("foo")),
fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains("bar")),

View File

@@ -100,7 +100,7 @@ func NewTelemetryMetaStore(
jsonColumnMetadata: map[telemetrytypes.Signal]map[telemetrytypes.FieldContext]telemetrytypes.JSONColumnMetadata{
telemetrytypes.SignalLogs: {
telemetrytypes.FieldContextBody: telemetrytypes.JSONColumnMetadata{
BaseColumn: telemetrylogs.LogsV2BodyJSONColumn,
BaseColumn: telemetrylogs.LogsV2BodyV2Column,
PromotedColumn: telemetrylogs.LogsV2BodyPromotedColumn,
},
},

View File

@@ -7,6 +7,6 @@ const (
AttributesMetadataTableName = "distributed_attributes_metadata"
AttributesMetadataLocalTableName = "attributes_metadata"
PathTypesTableName = otelcollectorconst.DistributedPathTypesTable
PromotedPathsTableName = otelcollectorconst.DistributedPromotedPathsTable
PromotedPathsTableName = "distributed_json_promoted_paths"
SkipIndexTableName = "system.data_skipping_indices"
)

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
@@ -73,7 +74,7 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
cteArgs [][]any
)
if b.metricsStatementBuilder.CanShortCircuitDelta(query) {
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
// spatial_aggregation_cte directly for certain delta queries
if frag, args, err := b.buildTemporalAggDeltaFastPath(ctx, start, end, query, keys, variables); err != nil {
return nil, err
@@ -91,8 +92,9 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
}
// spatial_aggregation_cte
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
if frag != "" {
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -122,13 +124,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", []any{}, err
return "", nil, err
}
sb.SelectMore(col)
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -150,7 +155,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
Variables: variables,
}, start, end)
if err != nil {
return "", []any{}, err
return "", nil, err
}
}
if filterWhere != nil {
@@ -208,8 +213,11 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -278,7 +286,10 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
baseSb.From(fmt.Sprintf("%s.%s AS points", DBName, tbl))
@@ -315,25 +326,23 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
rateExpr := fmt.Sprintf(telemetrymetrics.RateWithoutNegative, start, start)
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.RateTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
case metrictypes.TimeAggregationIncrease:
incExpr := fmt.Sprintf(telemetrymetrics.IncreaseWithoutNegative, start, start)
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.IncreaseTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -348,7 +357,15 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
_ uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any) {
) (string, []any, error) {
if query.Aggregations[0].SpaceAggregation.IsZero() {
return "", nil, errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`]",
)
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("ts")
@@ -365,5 +382,5 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}

View File

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,

View File

@@ -3,6 +3,7 @@ package telemetrymeter
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
)
@@ -63,7 +64,7 @@ func AggregationColumnForSamplesTable(
temporality metrictypes.Temporality,
timeAggregation metrictypes.TimeAggregation,
tableHints *metrictypes.MetricTableHints,
) string {
) (string, error) {
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
var aggregationColumn string
switch temporality {
@@ -190,5 +191,13 @@ func AggregationColumnForSamplesTable(
}
}
return aggregationColumn
if aggregationColumn == "" {
return "", errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
)
}
return aggregationColumn, nil
}

View File

@@ -29,13 +29,7 @@ func (c *conditionBuilder) conditionFor(
sb *sqlbuilder.SelectBuilder,
) (string, error) {
switch operator {
case qbtypes.FilterOperatorContains,
qbtypes.FilterOperatorNotContains,
qbtypes.FilterOperatorILike,
qbtypes.FilterOperatorNotILike,
qbtypes.FilterOperatorLike,
qbtypes.FilterOperatorNotLike:
if operator.IsStringSearchOperator() {
value = querybuilder.FormatValueForContains(value)
}
@@ -44,6 +38,18 @@ func (c *conditionBuilder) conditionFor(
return "", err
}
// TODO(srikanthccv): use the same data type collision handling when metrics schemas are updated
switch v := value.(type) {
case float64:
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
case []any:
if len(v) > 0 && (operator == qbtypes.FilterOperatorBetween || operator == qbtypes.FilterOperatorNotBetween) {
if _, ok := v[0].(float64); ok {
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
}
}
}
switch operator {
case qbtypes.FilterOperatorEqual:
return sb.E(tblFieldName, value), nil

View File
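Note on the metrics conditionFor hunk above: when the comparison value is a number, or a BETWEEN pair whose first element is a number, the label expression is wrapped in toFloat64OrNull so ClickHouse compares numerically rather than lexically. A sketch of that coercion rule with plain strings standing in for the operator enum:

package sketch

import "fmt"

// coerceNumericField wraps the field in toFloat64OrNull for numeric values,
// mirroring the switch added above.
func coerceNumericField(tblFieldName string, value any, operator string) string {
	switch v := value.(type) {
	case float64:
		return fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
	case []any:
		if len(v) > 0 && (operator == "between" || operator == "notbetween") {
			if _, ok := v[0].(float64); ok {
				return fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
			}
		}
	}
	return tblFieldName
}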

@@ -5,67 +5,27 @@ import (
"fmt"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
"golang.org/x/exp/slices"
)
const (
RateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
IncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, ((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
RateTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))`
RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s, ((%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
IncreaseTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value, per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)`
RateWithInterpolation = `
CASE
WHEN row_number() OVER rate_window = 1 THEN
-- First row: try to interpolate using next value
CASE
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
-- Assume linear growth to next point
(leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
(leadInFrame(ts, 1) OVER rate_window - ts)
ELSE
0 -- No next value either, can't interpolate
END
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
-- Counter reset detected
per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window)
ELSE
-- Normal case: calculate rate
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) /
(ts - lagInFrame(ts, 1) OVER rate_window)
END`
RateMultiTemporalityTmpl = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1) OVER rate_window), (%s - lagInFrame(%s, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))) AS per_series_value`
IncreaseWithInterpolation = `
CASE
WHEN row_number() OVER rate_window = 1 THEN
-- First row: try to interpolate using next value
CASE
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
-- Calculate the interpolated increase for this interval
((leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
(leadInFrame(ts, 1) OVER rate_window - ts)) *
(leadInFrame(ts, 1) OVER rate_window - ts)
ELSE
0 -- No next value either, can't interpolate
END
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
-- Counter reset detected: the increase is the current value
per_series_value
ELSE
-- Normal case: calculate increase
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)
END`
IncreaseMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s, (%s - lagInFrame(%s, 1) OVER rate_window))) AS per_series_value`
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
)
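For context on RateTmpl and IncreaseTmpl above: instead of seeding lagInFrame with 0 and the query start time, the first row of each series now yields nan via row_number(), and a counter reset (a value drop) falls back to dividing the raw value by the time gap. A tiny Go model of RateTmpl's per-series arithmetic under those rules; point and rates are illustrative:

package sketch

import "math"

type point struct {
	ts    float64 // seconds
	value float64 // cumulative counter value
}

// rates models RateTmpl per series: the first row yields NaN, a counter
// reset yields value/gap, and the normal case yields delta/gap.
func rates(series []point) []float64 {
	out := make([]float64, len(series))
	for i, p := range series {
		if i == 0 {
			out[i] = math.NaN() // row_number() OVER rate_window = 1
			continue
		}
		prev := series[i-1]
		gap := p.ts - prev.ts
		if p.value-prev.value < 0 {
			out[i] = p.value / gap // counter reset detected
		} else {
			out[i] = (p.value - prev.value) / gap
		}
	}
	return out
}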
type MetricQueryStatementBuilder struct {
@@ -147,54 +107,6 @@ func (b *MetricQueryStatementBuilder) Build(
return b.buildPipelineStatement(ctx, start, end, query, keys, variables)
}
// Fastpath (no fingerprint grouping)
// canShortCircuitDelta returns true if we can use the optimized query
// for the given query
// This is used to avoid the group by fingerprint thus improving the performance
// for certain queries
// cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
// - rate = sum(value)/step, increase = sum(value) - sum of sums is same as sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
// - sum of sums is same as sum of all values
//
// 3. time aggregation = min and space aggregation = min
// - min of mins is same as min of all values
//
// 4. time aggregation = max and space aggregation = max
// - max of maxs is same as max of all values
//
// 5. special case exphist, there is no need for per series/fingerprint aggregation
// we can directly use the quantilesDDMerge function
//
// all of this is true only for delta metrics
func (b *MetricQueryStatementBuilder) CanShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
if q.Aggregations[0].Temporality != metrictypes.Delta {
return false
}
ta := q.Aggregations[0].TimeAggregation
sa := q.Aggregations[0].SpaceAggregation
if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
return true
}
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
return true
}
if q.Aggregations[0].Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
return true
}
return false
}
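Aside: the doc comment above (the logic now lives in qbtypes.CanShortCircuitDelta) reduces to a small decision table. A condensed sketch with plain strings standing in for the metrictypes enums:

package sketch

// canShortCircuitDelta captures the cases listed above: for delta metrics,
// skip the per-fingerprint pass whenever combining per-series aggregates is
// equivalent to aggregating all values directly.
func canShortCircuitDelta(temporality, timeAgg, spaceAgg string, expHistPercentile bool) bool {
	if temporality != "delta" {
		return false
	}
	switch {
	case (timeAgg == "rate" || timeAgg == "increase") && spaceAgg == "sum":
		return true // rate = sum/step, increase = sum; a sum of sums is the sum of all values
	case timeAgg == "sum" && spaceAgg == "sum":
		return true
	case timeAgg == "min" && spaceAgg == "min":
		return true
	case timeAgg == "max" && spaceAgg == "max":
		return true
	case expHistPercentile:
		return true // exp. histogram sketches merge directly via quantilesDDMerge
	}
	return false
}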
func (b *MetricQueryStatementBuilder) buildPipelineStatement(
ctx context.Context,
start, end uint64,
@@ -256,10 +168,11 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
return nil, err
}
if b.CanShortCircuitDelta(query) {
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
// spatial_aggregation_cte directly for certain delta queries
frag, args := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs)
if frag != "" {
if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -273,8 +186,9 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
}
// spatial_aggregation_cte
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
if frag != "" {
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -294,7 +208,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
timeSeriesCTE string,
timeSeriesCTEArgs []any,
) (string, []any) {
) (string, []any, error) {
stepSec := int64(query.StepInterval.Seconds())
sb := sqlbuilder.NewSelectBuilder()
@@ -307,11 +221,15 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(
aggCol, err := AggregationColumnForSamplesTable(
start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -334,7 +252,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}
func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
@@ -437,8 +355,12 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -461,7 +383,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
}
func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
ctx context.Context,
_ context.Context,
start, end uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
timeSeriesCTE string,
@@ -479,7 +401,10 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
baseSb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
@@ -496,36 +421,25 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
innerQuery, innerArgs := baseSb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
// ! TODO (balanikaran) Get OrgID via function parameter instead of valuer.GenerateUUID()
interpolationEnabled := b.flagger.BooleanOrEmpty(ctx, flagger.FeatureInterpolationEnabled, featuretypes.NewFlaggerEvaluationContext(valuer.GenerateUUID()))
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
rateExpr := fmt.Sprintf(RateWithoutNegative, start, start)
if interpolationEnabled {
rateExpr = RateWithInterpolation
}
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", RateTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
case metrictypes.TimeAggregationIncrease:
incExpr := fmt.Sprintf(IncreaseWithoutNegative, start, start)
if interpolationEnabled {
incExpr = IncreaseWithInterpolation
}
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", IncreaseTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -534,7 +448,6 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
}
}
// because RateInterpolation is not enabled anywhere due to some gaps in the logic wrt cache handling, it hasn't been considered for the multi temporality
func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
_ context.Context,
start, end uint64,
@@ -553,18 +466,32 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggForDeltaTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggForCumulativeTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggForDeltaTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
aggForCumulativeTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggForDeltaTemporality = fmt.Sprintf("%s/%d", aggForDeltaTemporality, stepSec)
}
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
- rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, aggForCumulativeTemporality, aggForCumulativeTemporality, start)
+ rateExpr := fmt.Sprintf(RateMultiTemporalityTmpl,
+ aggForDeltaTemporality,
+ aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
+ aggForCumulativeTemporality, aggForCumulativeTemporality,
+ )
sb.SelectMore(rateExpr)
case metrictypes.TimeAggregationIncrease:
- increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, start)
+ increaseExpr := fmt.Sprintf(IncreaseMultiTemporality,
+ aggForDeltaTemporality,
+ aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
+ aggForCumulativeTemporality, aggForCumulativeTemporality,
+ )
sb.SelectMore(increaseExpr)
default:
expr := fmt.Sprintf(OthersMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality)
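The multi-temporality templates pick the delta or cumulative aggregation expression per series at query time. A hypothetical sketch of the simplest one, OthersMultiTemporality, which takes exactly the two arguments passed above (the real constant, like the rate/increase variants, is defined elsewhere in this diff, and the temporality column name is an assumption based on the time-series filters in the tests below):

// Hypothetical sketch: branch on the stored temporality so a single statement
// can serve series reported with either temporality.
const OthersMultiTemporality = "if(temporality = 'Delta', %s, %s) AS per_series_value"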
@@ -592,7 +519,14 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
_ uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
_ map[string][]*telemetrytypes.TelemetryFieldKey,
- ) (string, []any) {
+ ) (string, []any, error) {
+ if query.Aggregations[0].SpaceAggregation.IsZero() {
+ return "", nil, errors.Newf(
+ errors.TypeInvalidInput,
+ errors.CodeInvalidInput,
+ "invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
+ )
+ }
sb := sqlbuilder.NewSelectBuilder()
sb.Select("ts")
@@ -609,7 +543,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}
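With the new three-value signature, callers of buildSpatialAggregationCTE have to propagate the error. A minimal sketch of a call site (the variable names and the caller's return shape are illustrative, not taken from this diff):

// Illustrative call site for the changed signature.
spatialCTE, spatialArgs, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
if err != nil {
	return nil, err
}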
func (b *MetricQueryStatementBuilder) BuildFinalSelect(
@@ -641,9 +575,7 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
quantile,
))
sb.From("__spatial_aggregation_cte")
- for _, g := range query.GroupBy {
- sb.GroupBy(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
- }
+ sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
@@ -659,6 +591,8 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
sb.Where(rewrittenExpr)
}
}
+ sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
+ sb.OrderBy("ts")
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil


@@ -50,7 +50,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -83,7 +83,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -116,7 +116,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -148,7 +148,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -181,7 +181,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -210,7 +210,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,


@@ -3,6 +3,7 @@ package telemetrymetrics
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
)
@@ -168,7 +169,7 @@ func AggregationColumnForSamplesTable(
temporality metrictypes.Temporality,
timeAggregation metrictypes.TimeAggregation,
tableHints *metrictypes.MetricTableHints,
- ) string {
+ ) (string, error) {
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
var aggregationColumn string
switch temporality {
@@ -298,5 +299,12 @@ func AggregationColumnForSamplesTable(
}
}
}
- return aggregationColumn
+ if aggregationColumn == "" {
+ return "", errors.Newf(
+ errors.TypeInvalidInput,
+ errors.CodeInvalidInput,
+ "invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
+ )
+ }
+ return aggregationColumn, nil
}
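Call sites now receive an error instead of a silently empty aggregation column. A minimal usage sketch mirroring the builder code updated above (the surrounding return shape is illustrative):

// Illustrative: an unrecognized time aggregation now fails fast here instead
// of producing an empty SQL fragment downstream.
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
	return "", nil, err
}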


@@ -35,13 +35,7 @@ func (c *conditionBuilder) conditionFor(
sb *sqlbuilder.SelectBuilder,
) (string, error) {
switch operator {
- case qbtypes.FilterOperatorContains,
- qbtypes.FilterOperatorNotContains,
- qbtypes.FilterOperatorILike,
- qbtypes.FilterOperatorNotILike,
- qbtypes.FilterOperatorLike,
- qbtypes.FilterOperatorNotLike:
+ if operator.IsStringSearchOperator() {
value = querybuilder.FormatValueForContains(value)
}
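The explicit operator list collapses into a helper. A hypothetical reconstruction of what IsStringSearchOperator covers, inferred from the case list it replaces (the real method is defined on qbtypes.FilterOperator elsewhere in the repository):

// Hypothetical sketch, not the actual implementation.
func (op FilterOperator) IsStringSearchOperator() bool {
	switch op {
	case FilterOperatorContains, FilterOperatorNotContains,
		FilterOperatorILike, FilterOperatorNotILike,
		FilterOperatorLike, FilterOperatorNotLike:
		return true
	default:
		return false
	}
}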

Some files were not shown because too many files have changed in this diff.