Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-09 11:12:21 +00:00)

Compare commits: 1 commit, feat/cloud ... feat/infra

Commit SHA1: 1332098b7d
@@ -3928,9 +3928,19 @@ components:
|
||||
isMonotonic:
|
||||
type: boolean
|
||||
temporality:
|
||||
$ref: '#/components/schemas/MetrictypesTemporality'
|
||||
enum:
|
||||
- delta
|
||||
- cumulative
|
||||
- unspecified
|
||||
type: string
|
||||
type:
|
||||
$ref: '#/components/schemas/MetrictypesType'
|
||||
enum:
|
||||
- gauge
|
||||
- sum
|
||||
- histogram
|
||||
- summary
|
||||
- exponentialhistogram
|
||||
type: string
|
||||
unit:
|
||||
type: string
|
||||
required:
|
||||
@@ -3953,7 +3963,13 @@ components:
|
||||
minimum: 0
|
||||
type: integer
|
||||
type:
|
||||
$ref: '#/components/schemas/MetrictypesType'
|
||||
enum:
|
||||
- gauge
|
||||
- sum
|
||||
- histogram
|
||||
- summary
|
||||
- exponentialhistogram
|
||||
type: string
|
||||
unit:
|
||||
type: string
|
||||
required:
|
||||
@@ -4014,11 +4030,6 @@ components:
|
||||
- percentage
|
||||
- totalValue
|
||||
type: object
|
||||
MetricsexplorertypesTreemapMode:
|
||||
enum:
|
||||
- timeseries
|
||||
- samples
|
||||
type: string
|
||||
MetricsexplorertypesTreemapRequest:
|
||||
properties:
|
||||
end:
|
||||
@@ -4029,7 +4040,10 @@ components:
|
||||
limit:
|
||||
type: integer
|
||||
mode:
|
||||
$ref: '#/components/schemas/MetricsexplorertypesTreemapMode'
|
||||
enum:
|
||||
- timeseries
|
||||
- samples
|
||||
type: string
|
||||
start:
|
||||
format: int64
|
||||
type: integer
|
||||
@@ -4064,9 +4078,19 @@ components:
|
||||
metricName:
|
||||
type: string
|
||||
temporality:
|
||||
$ref: '#/components/schemas/MetrictypesTemporality'
|
||||
enum:
|
||||
- delta
|
||||
- cumulative
|
||||
- unspecified
|
||||
type: string
|
||||
type:
|
||||
$ref: '#/components/schemas/MetrictypesType'
|
||||
enum:
|
||||
- gauge
|
||||
- sum
|
||||
- histogram
|
||||
- summary
|
||||
- exponentialhistogram
|
||||
type: string
|
||||
unit:
|
||||
type: string
|
||||
required:
|
||||
@@ -4077,20 +4101,6 @@ components:
|
||||
- temporality
|
||||
- isMonotonic
|
||||
type: object
|
||||
MetrictypesTemporality:
|
||||
enum:
|
||||
- delta
|
||||
- cumulative
|
||||
- unspecified
|
||||
type: string
|
||||
MetrictypesType:
|
||||
enum:
|
||||
- gauge
|
||||
- sum
|
||||
- histogram
|
||||
- summary
|
||||
- exponentialhistogram
|
||||
type: string
|
||||
PreferencetypesPreference:
|
||||
properties:
|
||||
allowedScopes:
|
||||
|
||||
@@ -762,6 +762,18 @@ export interface MetricsexplorertypesMetricHighlightsResponseDTO {
|
||||
totalTimeSeries: number;
|
||||
}
|
||||
|
||||
export enum MetricsexplorertypesMetricMetadataDTOTemporality {
|
||||
delta = 'delta',
|
||||
cumulative = 'cumulative',
|
||||
unspecified = 'unspecified',
|
||||
}
|
||||
export enum MetricsexplorertypesMetricMetadataDTOType {
|
||||
gauge = 'gauge',
|
||||
sum = 'sum',
|
||||
histogram = 'histogram',
|
||||
summary = 'summary',
|
||||
exponentialhistogram = 'exponentialhistogram',
|
||||
}
|
||||
export interface MetricsexplorertypesMetricMetadataDTO {
|
||||
/**
|
||||
* @type string
|
||||
@@ -771,14 +783,29 @@ export interface MetricsexplorertypesMetricMetadataDTO {
|
||||
* @type boolean
|
||||
*/
|
||||
isMonotonic: boolean;
|
||||
temporality: MetrictypesTemporalityDTO;
|
||||
type: MetrictypesTypeDTO;
|
||||
/**
|
||||
* @enum delta,cumulative,unspecified
|
||||
* @type string
|
||||
*/
|
||||
temporality: MetricsexplorertypesMetricMetadataDTOTemporality;
|
||||
/**
|
||||
* @enum gauge,sum,histogram,summary,exponentialhistogram
|
||||
* @type string
|
||||
*/
|
||||
type: MetricsexplorertypesMetricMetadataDTOType;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
unit: string;
|
||||
}
|
||||
|
||||
export enum MetricsexplorertypesStatDTOType {
|
||||
gauge = 'gauge',
|
||||
sum = 'sum',
|
||||
histogram = 'histogram',
|
||||
summary = 'summary',
|
||||
exponentialhistogram = 'exponentialhistogram',
|
||||
}
|
||||
export interface MetricsexplorertypesStatDTO {
|
||||
/**
|
||||
* @type string
|
||||
@@ -798,7 +825,11 @@ export interface MetricsexplorertypesStatDTO {
|
||||
* @minimum 0
|
||||
*/
|
||||
timeseries: number;
|
||||
type: MetrictypesTypeDTO;
|
||||
/**
|
||||
* @enum gauge,sum,histogram,summary,exponentialhistogram
|
||||
* @type string
|
||||
*/
|
||||
type: MetricsexplorertypesStatDTOType;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -858,7 +889,7 @@ export interface MetricsexplorertypesTreemapEntryDTO {
|
||||
totalValue: number;
|
||||
}
|
||||
|
||||
export enum MetricsexplorertypesTreemapModeDTO {
|
||||
export enum MetricsexplorertypesTreemapRequestDTOMode {
|
||||
timeseries = 'timeseries',
|
||||
samples = 'samples',
|
||||
}
|
||||
@@ -873,7 +904,11 @@ export interface MetricsexplorertypesTreemapRequestDTO {
|
||||
* @type integer
|
||||
*/
|
||||
limit: number;
|
||||
mode: MetricsexplorertypesTreemapModeDTO;
|
||||
/**
|
||||
* @enum timeseries,samples
|
||||
* @type string
|
||||
*/
|
||||
mode: MetricsexplorertypesTreemapRequestDTOMode;
|
||||
/**
|
||||
* @type integer
|
||||
* @format int64
|
||||
@@ -894,6 +929,18 @@ export interface MetricsexplorertypesTreemapResponseDTO {
|
||||
timeseries: MetricsexplorertypesTreemapEntryDTO[] | null;
|
||||
}
|
||||
|
||||
export enum MetricsexplorertypesUpdateMetricMetadataRequestDTOTemporality {
|
||||
delta = 'delta',
|
||||
cumulative = 'cumulative',
|
||||
unspecified = 'unspecified',
|
||||
}
|
||||
export enum MetricsexplorertypesUpdateMetricMetadataRequestDTOType {
|
||||
gauge = 'gauge',
|
||||
sum = 'sum',
|
||||
histogram = 'histogram',
|
||||
summary = 'summary',
|
||||
exponentialhistogram = 'exponentialhistogram',
|
||||
}
|
||||
export interface MetricsexplorertypesUpdateMetricMetadataRequestDTO {
|
||||
/**
|
||||
* @type string
|
||||
@@ -907,26 +954,22 @@ export interface MetricsexplorertypesUpdateMetricMetadataRequestDTO {
|
||||
* @type string
|
||||
*/
|
||||
metricName: string;
|
||||
temporality: MetrictypesTemporalityDTO;
|
||||
type: MetrictypesTypeDTO;
|
||||
/**
|
||||
* @enum delta,cumulative,unspecified
|
||||
* @type string
|
||||
*/
|
||||
temporality: MetricsexplorertypesUpdateMetricMetadataRequestDTOTemporality;
|
||||
/**
|
||||
* @enum gauge,sum,histogram,summary,exponentialhistogram
|
||||
* @type string
|
||||
*/
|
||||
type: MetricsexplorertypesUpdateMetricMetadataRequestDTOType;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
unit: string;
|
||||
}
|
||||
|
||||
export enum MetrictypesTemporalityDTO {
|
||||
delta = 'delta',
|
||||
cumulative = 'cumulative',
|
||||
unspecified = 'unspecified',
|
||||
}
|
||||
export enum MetrictypesTypeDTO {
|
||||
gauge = 'gauge',
|
||||
sum = 'sum',
|
||||
histogram = 'histogram',
|
||||
summary = 'summary',
|
||||
exponentialhistogram = 'exponentialhistogram',
|
||||
}
|
||||
export interface PreferencetypesPreferenceDTO {
|
||||
/**
|
||||
* @type array
|
||||
@@ -1345,7 +1388,7 @@ export interface TypesPostableForgotPasswordDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
email: string;
|
||||
email?: string;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
@@ -1353,7 +1396,7 @@ export interface TypesPostableForgotPasswordDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
orgId: string;
|
||||
orgId?: string;
|
||||
}
|
||||
|
||||
export interface TypesPostableInviteDTO {
|
||||
|
||||
@@ -39,6 +39,7 @@ export interface HostData {
|
||||
waitTimeSeries: TimeSeries;
|
||||
load15: number;
|
||||
load15TimeSeries: TimeSeries;
|
||||
filesystem: number;
|
||||
}
|
||||
|
||||
export interface HostListResponse {
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
.labels-row,
|
||||
.values-row {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr;
|
||||
grid-template-columns: 1fr 1.5fr 1.5fr 1.5fr 1.5fr;
|
||||
gap: 30px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
@@ -426,6 +426,12 @@ function HostMetricsDetails({
|
||||
>
|
||||
MEMORY USAGE
|
||||
</Typography.Text>
|
||||
<Typography.Text
|
||||
type="secondary"
|
||||
className="host-details-metadata-label"
|
||||
>
|
||||
DISK USAGE
|
||||
</Typography.Text>
|
||||
</div>
|
||||
|
||||
<div className="values-row">
|
||||
@@ -478,6 +484,23 @@ function HostMetricsDetails({
|
||||
className="progress-bar"
|
||||
/>
|
||||
</div>
|
||||
<div className="progress-container">
|
||||
<Progress
|
||||
percent={Number((host.filesystem * 100).toFixed(1))}
|
||||
size="small"
|
||||
strokeColor={((): string => {
|
||||
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
|
||||
if (filesystemPercent >= 90) {
|
||||
return Color.BG_CHERRY_500;
|
||||
}
|
||||
if (filesystemPercent >= 60) {
|
||||
return Color.BG_AMBER_500;
|
||||
}
|
||||
return Color.BG_FOREST_500;
|
||||
})()}
|
||||
className="progress-bar"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -27,6 +27,7 @@ export interface HostRowData {
|
||||
hostName: string;
|
||||
cpu: React.ReactNode;
|
||||
memory: React.ReactNode;
|
||||
filesystem: React.ReactNode;
|
||||
wait: string;
|
||||
load15: number;
|
||||
active: React.ReactNode;
|
||||
@@ -108,6 +109,14 @@ export const getHostsListColumns = (): ColumnType<HostRowData>[] => [
|
||||
sorter: true,
|
||||
align: 'right',
|
||||
},
|
||||
{
|
||||
title: <div className="column-header-right">Disk Usage</div>,
|
||||
dataIndex: 'filesystem',
|
||||
key: 'filesystem',
|
||||
width: 100,
|
||||
sorter: true,
|
||||
align: 'right',
|
||||
},
|
||||
{
|
||||
title: <div className="column-header-right">IOWait</div>,
|
||||
dataIndex: 'wait',
|
||||
@@ -178,6 +187,26 @@ export const formatDataForTable = (data: HostData[]): HostRowData[] =>
|
||||
/>
|
||||
</div>
|
||||
),
|
||||
filesystem: (
|
||||
<div className="progress-container">
|
||||
<Progress
|
||||
percent={Number((host.filesystem * 100).toFixed(1))}
|
||||
strokeLinecap="butt"
|
||||
size="small"
|
||||
strokeColor={((): string => {
|
||||
const filesystemPercent = Number((host.filesystem * 100).toFixed(1));
|
||||
if (filesystemPercent >= 90) {
|
||||
return Color.BG_CHERRY_500;
|
||||
}
|
||||
if (filesystemPercent >= 60) {
|
||||
return Color.BG_AMBER_500;
|
||||
}
|
||||
return Color.BG_FOREST_500;
|
||||
})()}
|
||||
className="progress-bar"
|
||||
/>
|
||||
</div>
|
||||
),
|
||||
wait: `${Number((host.wait * 100).toFixed(1))}%`,
|
||||
load15: host.load15,
|
||||
}));
|
||||
|
||||
@@ -1623,6 +1623,9 @@ export const getHostQueryPayload = (
|
||||
const diskPendingKey = dotMetricsEnabled
|
||||
? 'system.disk.pending_operations'
|
||||
: 'system_disk_pending_operations';
|
||||
const fsUsageKey = dotMetricsEnabled
|
||||
? 'system.filesystem.usage'
|
||||
: 'system_filesystem_usage';
|
||||
|
||||
return [
|
||||
{
|
||||
@@ -2558,6 +2561,143 @@ export const getHostQueryPayload = (
|
||||
start,
|
||||
end,
|
||||
},
|
||||
{
|
||||
selectedTime: 'GLOBAL_TIME',
|
||||
graphType: PANEL_TYPES.TIME_SERIES,
|
||||
query: {
|
||||
builder: {
|
||||
queryData: [
|
||||
{
|
||||
aggregateAttribute: {
|
||||
dataType: DataTypes.Float64,
|
||||
id: 'system_filesystem_usage--float64--Gauge--true',
|
||||
|
||||
key: fsUsageKey,
|
||||
type: 'Gauge',
|
||||
},
|
||||
aggregateOperator: 'avg',
|
||||
dataSource: DataSource.METRICS,
|
||||
disabled: true,
|
||||
expression: 'A',
|
||||
filters: {
|
||||
items: [
|
||||
{
|
||||
id: 'fs_f1',
|
||||
key: {
|
||||
dataType: DataTypes.String,
|
||||
id: 'host_name--string--tag--false',
|
||||
|
||||
key: hostNameKey,
|
||||
type: 'tag',
|
||||
},
|
||||
op: '=',
|
||||
value: hostName,
|
||||
},
|
||||
{
|
||||
id: 'fs_f2',
|
||||
key: {
|
||||
dataType: DataTypes.String,
|
||||
id: 'state--string--tag--false',
|
||||
|
||||
key: 'state',
|
||||
type: 'tag',
|
||||
},
|
||||
op: '=',
|
||||
value: 'used',
|
||||
},
|
||||
],
|
||||
op: 'AND',
|
||||
},
|
||||
functions: [],
|
||||
groupBy: [
|
||||
{
|
||||
dataType: DataTypes.String,
|
||||
id: 'mountpoint--string--tag--false',
|
||||
|
||||
key: 'mountpoint',
|
||||
type: 'tag',
|
||||
},
|
||||
],
|
||||
having: [],
|
||||
legend: '{{mountpoint}}',
|
||||
limit: null,
|
||||
orderBy: [],
|
||||
queryName: 'A',
|
||||
reduceTo: ReduceOperators.AVG,
|
||||
spaceAggregation: 'sum',
|
||||
stepInterval: 60,
|
||||
timeAggregation: 'avg',
|
||||
},
|
||||
{
|
||||
aggregateAttribute: {
|
||||
dataType: DataTypes.Float64,
|
||||
id: 'system_filesystem_usage--float64--Gauge--true',
|
||||
|
||||
key: fsUsageKey,
|
||||
type: 'Gauge',
|
||||
},
|
||||
aggregateOperator: 'avg',
|
||||
dataSource: DataSource.METRICS,
|
||||
disabled: true,
|
||||
expression: 'B',
|
||||
filters: {
|
||||
items: [
|
||||
{
|
||||
id: 'fs_f3',
|
||||
key: {
|
||||
dataType: DataTypes.String,
|
||||
id: 'host_name--string--tag--false',
|
||||
|
||||
key: hostNameKey,
|
||||
type: 'tag',
|
||||
},
|
||||
op: '=',
|
||||
value: hostName,
|
||||
},
|
||||
],
|
||||
op: 'AND',
|
||||
},
|
||||
functions: [],
|
||||
groupBy: [
|
||||
{
|
||||
dataType: DataTypes.String,
|
||||
id: 'mountpoint--string--tag--false',
|
||||
|
||||
key: 'mountpoint',
|
||||
type: 'tag',
|
||||
},
|
||||
],
|
||||
having: [],
|
||||
legend: '{{mountpoint}}',
|
||||
limit: null,
|
||||
orderBy: [],
|
||||
queryName: 'B',
|
||||
reduceTo: ReduceOperators.AVG,
|
||||
spaceAggregation: 'sum',
|
||||
stepInterval: 60,
|
||||
timeAggregation: 'avg',
|
||||
},
|
||||
],
|
||||
queryFormulas: [
|
||||
{
|
||||
disabled: false,
|
||||
expression: 'A/B',
|
||||
legend: '{{mountpoint}}',
|
||||
queryName: 'F1',
|
||||
},
|
||||
],
|
||||
queryTraceOperator: [],
|
||||
},
|
||||
clickhouse_sql: [{ disabled: false, legend: '', name: 'A', query: '' }],
|
||||
id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
|
||||
promql: [{ disabled: false, legend: '', name: 'A', query: '' }],
|
||||
queryType: EQueryType.QUERY_BUILDER,
|
||||
},
|
||||
variables: {},
|
||||
formatForWeb: false,
|
||||
start,
|
||||
end,
|
||||
},
|
||||
];
|
||||
};
|
||||
|
||||
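Editor's note: the hunk above adds a disk-usage panel to the host details view. Query A averages `system.filesystem.usage` with `state = used`, query B averages it across all states, both grouped by `mountpoint`, and the formula F1 = A/B is what gets plotted. As an illustration only (the real division happens in the query service, not in this file), a minimal Go sketch of the per-mountpoint ratio the formula produces:

```go
package main

import "fmt"

// usedFractionByMountpoint illustrates what the F1 = A/B formula yields:
// per mountpoint, average used bytes divided by average bytes across all
// filesystem states, i.e. the used fraction of each mount (0..1).
func usedFractionByMountpoint(used, total map[string]float64) map[string]float64 {
	out := make(map[string]float64, len(used))
	for mountpoint, u := range used {
		if t := total[mountpoint]; t > 0 {
			out[mountpoint] = u / t
		}
	}
	return out
}

func main() {
	used := map[string]float64{"/": 40e9, "/data": 900e9}
	total := map[string]float64{"/": 100e9, "/data": 1e12}
	fmt.Println(usedFractionByMountpoint(used, total)) // map[/:0.4 /data:0.9]
}
```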
@@ -2632,5 +2772,5 @@ export const hostWidgetInfo = [
|
||||
{ title: 'System disk io (bytes transferred)', yAxisUnit: 'bytes' },
|
||||
{ title: 'System disk operations/s', yAxisUnit: 'short' },
|
||||
{ title: 'Queue size', yAxisUnit: 'short' },
|
||||
{ title: 'Disk operations time', yAxisUnit: 's' },
|
||||
{ title: 'Disk Usage', yAxisUnit: 'percentunit' },
|
||||
];
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
var dotMetricMap = map[string]string{
|
||||
"system_filesystem_usage": "system.filesystem.usage",
|
||||
"system_cpu_time": "system.cpu.time",
|
||||
"system_memory_usage": "system.memory.usage",
|
||||
"system_cpu_load_average_15m": "system.cpu.load_average.15m",
|
||||
|
||||
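Editor's note: adding `system_filesystem_usage` to `dotMetricMap` is what lets the hosts code resolve the filesystem metric under both naming conventions. The diff does not show `GetDotMetrics` itself; the sketch below is a minimal illustration of how such a lookup might behave, with `dotMetricsEnabled` as an assumed stand-in for whatever toggle the real code consults (the frontend passes a flag of that name):

```go
package main

import "fmt"

// dotMetricMap mirrors the mapping in the diff: underscore-style metric
// names mapped to their dot-separated OTel equivalents.
var dotMetricMap = map[string]string{
	"system_filesystem_usage":     "system.filesystem.usage",
	"system_cpu_time":             "system.cpu.time",
	"system_memory_usage":         "system.memory.usage",
	"system_cpu_load_average_15m": "system.cpu.load_average.15m",
}

// dotMetricsEnabled is an assumed toggle; the real switch may live elsewhere.
var dotMetricsEnabled = true

// getDotMetrics resolves an underscore metric name to its dot form when dot
// metrics are enabled; unknown names fall through unchanged. This sketches
// the lookup, not the actual GetDotMetrics implementation.
func getDotMetrics(name string) string {
	if dotMetricsEnabled {
		if dotted, ok := dotMetricMap[name]; ok {
			return dotted
		}
	}
	return name
}

func main() {
	fmt.Println(getDotMetrics("system_filesystem_usage")) // system.filesystem.usage
}
```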
@@ -2,6 +2,7 @@ package inframetrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
@@ -53,10 +54,11 @@ var (
|
||||
}
|
||||
|
||||
queryNamesForTopHosts = map[string][]string{
|
||||
"cpu": {"A", "B", "F1"},
|
||||
"memory": {"C", "D", "F2"},
|
||||
"wait": {"E", "F", "F3"},
|
||||
"load15": {"G"},
|
||||
"cpu": {"A", "B", "F1"},
|
||||
"memory": {"C", "D", "F2"},
|
||||
"wait": {"E", "F", "F3"},
|
||||
"load15": {"G"},
|
||||
"filesystem": {"H", "I", "F4"},
|
||||
}
|
||||
|
||||
// TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric
|
||||
@@ -67,10 +69,11 @@ var (
|
||||
GetDotMetrics("os_type"),
|
||||
}
|
||||
metricNamesForHosts = map[string]string{
|
||||
"cpu": GetDotMetrics("system_cpu_time"),
|
||||
"memory": GetDotMetrics("system_memory_usage"),
|
||||
"load15": GetDotMetrics("system_cpu_load_average_15m"),
|
||||
"wait": GetDotMetrics("system_cpu_time"),
|
||||
"cpu": GetDotMetrics("system_cpu_time"),
|
||||
"memory": GetDotMetrics("system_memory_usage"),
|
||||
"load15": GetDotMetrics("system_cpu_load_average_15m"),
|
||||
"wait": GetDotMetrics("system_cpu_time"),
|
||||
"filesystem": GetDotMetrics("system_filesystem_usage"),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -386,6 +389,11 @@ func (h *HostsRepo) IsSendingK8SAgentMetrics(ctx context.Context, req model.Host
|
||||
return maps.Keys(clusterNames), maps.Keys(nodeNames), nil
|
||||
}
|
||||
|
||||
func marshalInterface(inter any) string {
|
||||
b, _ := json.Marshal(inter)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req model.HostListRequest) (model.HostListResponse, error) {
|
||||
resp := model.HostListResponse{}
|
||||
|
||||
@@ -479,11 +487,15 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("======> printing query: ", marshalInterface(query.CompositeQuery.BuilderQueries))
|
||||
|
||||
queryResponse, _, err := h.querierV2.QueryRange(ctx, orgID, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// fmt.Println("====> queryResponse:", marshalInterface(queryResponse))
|
||||
|
||||
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
@@ -494,10 +506,11 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
|
||||
for _, result := range formattedResponse {
|
||||
for _, row := range result.Table.Rows {
|
||||
record := model.HostListRecord{
|
||||
CPU: -1,
|
||||
Memory: -1,
|
||||
Wait: -1,
|
||||
Load15: -1,
|
||||
CPU: -1,
|
||||
Memory: -1,
|
||||
Wait: -1,
|
||||
Load15: -1,
|
||||
Filesystem: -1,
|
||||
}
|
||||
|
||||
if hostName, ok := row.Data[hostNameAttrKey].(string); ok {
|
||||
@@ -516,6 +529,9 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
|
||||
if load15, ok := row.Data["G"].(float64); ok {
|
||||
record.Load15 = load15
|
||||
}
|
||||
if filesystem, ok := row.Data["F4"].(float64); ok {
|
||||
record.Filesystem = filesystem
|
||||
}
|
||||
record.Meta = map[string]string{}
|
||||
if _, ok := hostAttrs[record.HostName]; ok {
|
||||
record.Meta = hostAttrs[record.HostName]
|
||||
|
||||
@@ -269,42 +269,130 @@ var HostsTableListQuery = v3.QueryRangeParamsV3{
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
},
|
||||
"G": {
|
||||
QueryName: "G",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForHosts["load15"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Unspecified,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "G",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: false,
|
||||
Legend: "CPU Load Average (15m)",
|
||||
"G": {
|
||||
QueryName: "G",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForHosts["load15"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Unspecified,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "G",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: false,
|
||||
Legend: "CPU Load Average (15m)",
|
||||
},
|
||||
"H": {
|
||||
QueryName: "H",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForHosts["filesystem"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
},
|
||||
Operator: v3.FilterOperatorEqual,
|
||||
Value: "used",
|
||||
},
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "H",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"I": {
|
||||
QueryName: "I",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForHosts["filesystem"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "I",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F4": {
|
||||
QueryName: "F4",
|
||||
Expression: "H/I",
|
||||
Legend: "Disk Usage (%)",
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
},
|
||||
},
|
||||
PanelType: v3.PanelTypeTable,
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
|
||||
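Editor's note: in the new table query, H averages `system.filesystem.usage` filtered to `state = used`, I averages the same metric across all states, and the formula F4 = H/I gives the per-host used fraction labelled "Disk Usage (%)". A small hedged sketch of what that ratio means (the actual division is done by the query formula, not by code like this), including the -1 sentinel the host records use for missing data:

```go
package main

import "fmt"

// diskUsedFraction illustrates the F4 = H/I semantics per host: bytes in
// state "used" over bytes across all filesystem states. The -1 sentinel
// mirrors the HostListRecord default when no data is available.
func diskUsedFraction(usedBytes, totalBytes float64) float64 {
	if totalBytes <= 0 {
		return -1
	}
	return usedBytes / totalBytes
}

func main() {
	fmt.Printf("%.2f\n", diskUsedFraction(80e9, 100e9)) // 0.80
}
```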
@@ -26,14 +26,15 @@ type HostListRequest struct {
|
||||
}
|
||||
|
||||
type HostListRecord struct {
|
||||
HostName string `json:"hostName"`
|
||||
Active bool `json:"active"`
|
||||
OS string `json:"os"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
Wait float64 `json:"wait"`
|
||||
Load15 float64 `json:"load15"`
|
||||
Meta map[string]string `json:"meta"`
|
||||
HostName string `json:"hostName"`
|
||||
Active bool `json:"active"`
|
||||
OS string `json:"os"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
Wait float64 `json:"wait"`
|
||||
Load15 float64 `json:"load15"`
|
||||
Filesystem float64 `json:"filesystem"`
|
||||
Meta map[string]string `json:"meta"`
|
||||
}
|
||||
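Editor's note: with the new `Filesystem` field, a host record now carries the used-disk fraction under the `filesystem` JSON key. A minimal sketch of the wire shape with placeholder values and a trimmed copy of the struct (the real one also has Active, OS, Wait, Load15 and Meta):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hostListRecord is a trimmed stand-in; only the fields relevant here.
type hostListRecord struct {
	HostName   string  `json:"hostName"`
	CPU        float64 `json:"cpu"`
	Memory     float64 `json:"memory"`
	Filesystem float64 `json:"filesystem"`
}

func main() {
	// -1 is the sentinel the hosts repo sets before a query result fills a value.
	rec := hostListRecord{HostName: "example-host", CPU: 0.42, Memory: 0.63, Filesystem: -1}
	out, _ := json.Marshal(rec)
	fmt.Println(string(out)) // {"hostName":"example-host","cpu":0.42,"memory":0.63,"filesystem":-1}
}
```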
|
||||
type HostListResponse struct {
|
||||
@@ -64,6 +65,10 @@ func (r *HostListResponse) SortBy(orderBy *v3.OrderBy) {
|
||||
sort.Slice(r.Records, func(i, j int) bool {
|
||||
return r.Records[i].Wait > r.Records[j].Wait
|
||||
})
|
||||
case "filesystem":
|
||||
sort.Slice(r.Records, func(i, j int) bool {
|
||||
return r.Records[i].Filesystem > r.Records[j].Filesystem
|
||||
})
|
||||
}
|
||||
// the default is descending
|
||||
if orderBy.Order == v3.DirectionAsc {
|
||||
|
||||
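Editor's note: the hunk above stops inside the ascending-order branch, so the exact handling of `DirectionAsc` is not visible here; presumably the descending result is reversed (or the comparison flipped). A hedged sketch of the new filesystem ordering under that assumption:

```go
package main

import (
	"fmt"
	"sort"
)

type record struct {
	HostName   string
	Filesystem float64
}

// sortByFilesystem mirrors the new "filesystem" case: descending by default,
// reversed when ascending order is requested. The reversal step is an
// assumption about the code that follows the hunk shown above.
func sortByFilesystem(records []record, ascending bool) {
	sort.Slice(records, func(i, j int) bool {
		return records[i].Filesystem > records[j].Filesystem
	})
	if ascending {
		for i, j := 0, len(records)-1; i < j; i, j = i+1, j-1 {
			records[i], records[j] = records[j], records[i]
		}
	}
}

func main() {
	recs := []record{{"a", 0.2}, {"b", 0.9}, {"c", -1}}
	sortByFilesystem(recs, false)
	fmt.Println(recs) // [{b 0.9} {a 0.2} {c -1}]
}
```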
@@ -151,8 +151,7 @@ func (c *AccountConfig) Value() (driver.Value, error) {
|
||||
if err != nil {
|
||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize cloud account config to JSON")
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
type AgentReport struct {
|
||||
@@ -187,8 +186,7 @@ func (r *AgentReport) Value() (driver.Value, error) {
|
||||
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
|
||||
)
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
type CloudIntegrationService struct {
|
||||
@@ -242,6 +240,5 @@ func (c *CloudServiceConfig) Value() (driver.Value, error) {
|
||||
err, errors.CodeInternal, "couldn't serialize cloud service config to JSON",
|
||||
)
|
||||
}
|
||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
||||
return string(serialized), nil
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
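Editor's note: all three `Value()` implementations go back to returning the marshalled JSON as a `[]byte` rather than wrapping it in `string(...)`; the dropped comment had explained the string form as a way to make PostgreSQL store text instead of bytea. A minimal sketch of the pattern with placeholder fields, and plain `fmt.Errorf` standing in for `errors.WrapInternalf`:

```go
package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// accountConfig is a stand-in with placeholder fields; the real struct lives
// in the cloud integration types package.
type accountConfig struct {
	Regions []string `json:"regions"`
}

// Value implements driver.Valuer by serialising the config to JSON. After
// this change the raw []byte is returned; the earlier code wrapped it in
// string(...) so PostgreSQL would store text rather than bytea.
func (c accountConfig) Value() (driver.Value, error) {
	serialized, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf("couldn't serialize cloud account config to JSON: %w", err)
	}
	return serialized, nil
}

func main() {
	v, _ := accountConfig{Regions: []string{"us-east-1"}}.Value()
	fmt.Printf("%s\n", v) // {"regions":["us-east-1"]}
}
```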
@@ -31,13 +31,6 @@ var (
|
||||
TreemapModeSamples = TreemapMode{valuer.NewString("samples")}
|
||||
)
|
||||
|
||||
func (TreemapMode) Enum() []any {
|
||||
return []any{
|
||||
TreemapModeTimeSeries,
|
||||
TreemapModeSamples,
|
||||
}
|
||||
}
|
||||
|
||||
// StatsRequest represents the payload accepted by the metrics stats endpoint.
|
||||
type StatsRequest struct {
|
||||
Filter *qbtypes.Filter `json:"filter,omitempty"`
|
||||
@@ -105,7 +98,7 @@ func (req *StatsRequest) UnmarshalJSON(data []byte) error {
|
||||
type Stat struct {
|
||||
MetricName string `json:"metricName" required:"true"`
|
||||
Description string `json:"description" required:"true"`
|
||||
MetricType metrictypes.Type `json:"type" required:"true"`
|
||||
MetricType metrictypes.Type `json:"type" required:"true" enum:"gauge,sum,histogram,summary,exponentialhistogram"`
|
||||
MetricUnit string `json:"unit" required:"true"`
|
||||
TimeSeries uint64 `json:"timeseries" required:"true"`
|
||||
Samples uint64 `json:"samples" required:"true"`
|
||||
@@ -119,9 +112,9 @@ type StatsResponse struct {
|
||||
|
||||
type MetricMetadata struct {
|
||||
Description string `json:"description" required:"true"`
|
||||
MetricType metrictypes.Type `json:"type" required:"true"`
|
||||
MetricType metrictypes.Type `json:"type" required:"true" enum:"gauge,sum,histogram,summary,exponentialhistogram"`
|
||||
MetricUnit string `json:"unit" required:"true"`
|
||||
Temporality metrictypes.Temporality `json:"temporality" required:"true"`
|
||||
Temporality metrictypes.Temporality `json:"temporality" required:"true" enum:"delta,cumulative,unspecified"`
|
||||
IsMonotonic bool `json:"isMonotonic" required:"true"`
|
||||
}
|
||||
|
||||
@@ -138,10 +131,10 @@ func (m *MetricMetadata) UnmarshalBinary(data []byte) error {
|
||||
// UpdateMetricMetadataRequest represents the payload for updating metric metadata.
|
||||
type UpdateMetricMetadataRequest struct {
|
||||
MetricName string `json:"metricName" required:"true"`
|
||||
Type metrictypes.Type `json:"type" required:"true"`
|
||||
Type metrictypes.Type `json:"type" required:"true" enum:"gauge,sum,histogram,summary,exponentialhistogram"`
|
||||
Description string `json:"description" required:"true"`
|
||||
Unit string `json:"unit" required:"true"`
|
||||
Temporality metrictypes.Temporality `json:"temporality" required:"true"`
|
||||
Temporality metrictypes.Temporality `json:"temporality" required:"true" enum:"delta,cumulative,unspecified"`
|
||||
IsMonotonic bool `json:"isMonotonic" required:"true"`
|
||||
}
|
||||
|
||||
@@ -151,7 +144,7 @@ type TreemapRequest struct {
|
||||
Start int64 `json:"start" required:"true"`
|
||||
End int64 `json:"end" required:"true"`
|
||||
Limit int `json:"limit" required:"true"`
|
||||
Mode TreemapMode `json:"mode" required:"true"`
|
||||
Mode TreemapMode `json:"mode" required:"true" enum:"timeseries,samples"`
|
||||
}
|
||||
|
||||
// Validate enforces basic constraints on TreemapRequest.
|
||||
|
||||
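Editor's note: the new `enum:"..."` struct tags on Stat, MetricMetadata, UpdateMetricMetadataRequest and TreemapRequest appear to be what produces the inline enum values in the OpenAPI hunks at the top of this diff (and, via codegen, the new TypeScript enums), which would make the removed `Enum()` methods in metrictypes redundant; that chain is an inference from the diff, not something it states. A hedged sketch of how a generator could read such tags with reflection:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// updateMetricMetadataRequest is a stand-in for the annotated request type;
// only the struct tags matter for this illustration.
type updateMetricMetadataRequest struct {
	Type        string `json:"type" required:"true" enum:"gauge,sum,histogram,summary,exponentialhistogram"`
	Temporality string `json:"temporality" required:"true" enum:"delta,cumulative,unspecified"`
}

// enumValues extracts the comma-separated enum tag per JSON field name,
// roughly what a spec generator would need to emit inline enum values.
func enumValues(v any) map[string][]string {
	out := map[string][]string{}
	t := reflect.TypeOf(v)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if tag, ok := f.Tag.Lookup("enum"); ok {
			name := strings.Split(f.Tag.Get("json"), ",")[0]
			out[name] = strings.Split(tag, ",")
		}
	}
	return out
}

func main() {
	fmt.Println(enumValues(updateMetricMetadataRequest{}))
	// map[temporality:[delta cumulative unspecified] type:[gauge sum histogram summary exponentialhistogram]]
}
```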
@@ -36,7 +36,7 @@ func (t Temporality) Value() (driver.Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Temporality) Scan(src any) error {
|
||||
func (t *Temporality) Scan(src interface{}) error {
|
||||
if src == nil {
|
||||
*t = Unknown
|
||||
return nil
|
||||
@@ -66,14 +66,6 @@ func (t *Temporality) Scan(src any) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Temporality) Enum() []any {
|
||||
return []any{
|
||||
Delta,
|
||||
Cumulative,
|
||||
Unspecified,
|
||||
}
|
||||
}
|
||||
|
||||
// Type is the type of the metric in OTLP data model
|
||||
// Read more here https://opentelemetry.io/docs/specs/otel/metrics/data-model/#metric-points
|
||||
type Type struct {
|
||||
@@ -142,16 +134,6 @@ var (
|
||||
UnspecifiedType = Type{valuer.NewString("")}
|
||||
)
|
||||
|
||||
func (Type) Enum() []any {
|
||||
return []any{
|
||||
GaugeType,
|
||||
SumType,
|
||||
HistogramType,
|
||||
SummaryType,
|
||||
ExpHistogramType,
|
||||
}
|
||||
}
|
||||
|
||||
type TimeAggregation struct {
|
||||
valuer.String
|
||||
}
|
||||
@@ -169,21 +151,6 @@ var (
|
||||
TimeAggregationIncrease = TimeAggregation{valuer.NewString("increase")}
|
||||
)
|
||||
|
||||
func (TimeAggregation) Enum() []any {
|
||||
return []any{
|
||||
TimeAggregationUnspecified,
|
||||
TimeAggregationLatest,
|
||||
TimeAggregationSum,
|
||||
TimeAggregationAvg,
|
||||
TimeAggregationMin,
|
||||
TimeAggregationMax,
|
||||
TimeAggregationCount,
|
||||
TimeAggregationCountDistinct,
|
||||
TimeAggregationRate,
|
||||
TimeAggregationIncrease,
|
||||
}
|
||||
}
|
||||
|
||||
type SpaceAggregation struct {
|
||||
valuer.String
|
||||
}
|
||||
@@ -202,22 +169,6 @@ var (
|
||||
SpaceAggregationPercentile99 = SpaceAggregation{valuer.NewString("p99")}
|
||||
)
|
||||
|
||||
func (SpaceAggregation) Enum() []any {
|
||||
return []any{
|
||||
SpaceAggregationUnspecified,
|
||||
SpaceAggregationSum,
|
||||
SpaceAggregationAvg,
|
||||
SpaceAggregationMin,
|
||||
SpaceAggregationMax,
|
||||
SpaceAggregationCount,
|
||||
SpaceAggregationPercentile50,
|
||||
SpaceAggregationPercentile75,
|
||||
SpaceAggregationPercentile90,
|
||||
SpaceAggregationPercentile95,
|
||||
SpaceAggregationPercentile99,
|
||||
}
|
||||
}
|
||||
|
||||
func (s SpaceAggregation) IsPercentile() bool {
|
||||
return s == SpaceAggregationPercentile50 ||
|
||||
s == SpaceAggregationPercentile75 ||
|
||||
|
||||
@@ -1,200 +0,0 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Callable
|
||||
|
||||
import requests
|
||||
from sqlalchemy import text
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def cleanup_cloud_accounts(postgres: types.TestContainerSQL) -> None:
|
||||
try:
|
||||
with postgres.conn.connect() as conn:
|
||||
conn.execute(text("TRUNCATE TABLE cloud_integration CASCADE"))
|
||||
conn.commit()
|
||||
logger.info("Cleaned up cloud_integration table")
|
||||
except Exception: # pylint: disable=broad-except
|
||||
logger.info("Cleanup skipped or table does not exist")
|
||||
|
||||
|
||||
def test_generate_connection_url(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test to generate connection URL for AWS CloudFormation stack deployment."""
|
||||
# Clean up any corrupted data from previous test runs
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
# Get authentication token for admin user
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
cloud_provider = "aws"
|
||||
|
||||
# Mock the deployment info query (for license validation)
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
|
||||
|
||||
# Prepare request payload
|
||||
request_payload = {
|
||||
"account_config": {"regions": ["us-east-1", "us-west-2"]},
|
||||
"agent_config": {
|
||||
"region": "us-east-1",
|
||||
"ingestion_url": "https://ingest.test.signoz.cloud",
|
||||
"ingestion_key": "test-ingestion-key-123456",
|
||||
"signoz_api_url": "https://test-deployment.test.signoz.cloud",
|
||||
"signoz_api_key": "test-api-key-789",
|
||||
"version": "v0.0.8",
|
||||
},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=request_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
# Assert successful response
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Expected 200, got {response.status_code}: {response.text}"
|
||||
|
||||
# Parse response JSON
|
||||
response_data = response.json()
|
||||
|
||||
# Assert response structure contains expected data
|
||||
assert "data" in response_data, "Response should contain 'data' field"
|
||||
|
||||
# Assert required fields in the response data
|
||||
expected_fields = ["account_id", "connection_url"]
|
||||
|
||||
for field in expected_fields:
|
||||
assert (
|
||||
field in response_data["data"]
|
||||
), f"Response data should contain '{field}' field"
|
||||
|
||||
data = response_data["data"]
|
||||
|
||||
# Assert account_id is a valid UUID format
|
||||
assert (
|
||||
len(data["account_id"]) > 0
|
||||
), "account_id should be a non-empty string (UUID)"
|
||||
|
||||
# Assert connection_url contains expected CloudFormation parameters
|
||||
connection_url = data["connection_url"]
|
||||
|
||||
# Verify it's an AWS CloudFormation URL
|
||||
assert (
|
||||
"console.aws.amazon.com/cloudformation" in connection_url
|
||||
), "connection_url should be an AWS CloudFormation URL"
|
||||
|
||||
# Verify region is included
|
||||
assert (
|
||||
"region=us-east-1" in connection_url
|
||||
), "connection_url should contain the specified region"
|
||||
|
||||
# Verify required parameters are in the URL
|
||||
required_params = [
|
||||
"param_SigNozIntegrationAgentVersion=v0.0.8",
|
||||
"param_SigNozApiUrl=https%3A%2F%2Ftest-deployment.test.signoz.cloud",
|
||||
"param_SigNozApiKey=test-api-key-789",
|
||||
"param_SigNozAccountId=", # Will be a UUID
|
||||
"param_IngestionUrl=https%3A%2F%2Fingest.test.signoz.cloud",
|
||||
"param_IngestionKey=test-ingestion-key-123456",
|
||||
"stackName=signoz-integration",
|
||||
"templateURL=https%3A%2F%2Fsignoz-integrations.s3.us-east-1.amazonaws.com%2Faws-quickcreate-template-v0.0.8.json",
|
||||
]
|
||||
|
||||
for param in required_params:
|
||||
assert (
|
||||
param in connection_url
|
||||
), f"connection_url should contain parameter: {param}"
|
||||
|
||||
def test_generate_connection_url_unsupported_provider(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
) -> None:
|
||||
"""Test that unsupported cloud providers return an error."""
|
||||
# Get authentication token for admin user
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
# Try with GCP (unsupported)
|
||||
cloud_provider = "gcp"
|
||||
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
|
||||
|
||||
request_payload = {
|
||||
"account_config": {"regions": ["us-central1"]},
|
||||
"agent_config": {
|
||||
"region": "us-central1",
|
||||
"ingestion_url": "https://ingest.test.signoz.cloud",
|
||||
"ingestion_key": "test-ingestion-key-123456",
|
||||
"signoz_api_url": "https://test-deployment.test.signoz.cloud",
|
||||
"signoz_api_key": "test-api-key-789",
|
||||
},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=request_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
# Should return Bad Request for unsupported provider
|
||||
assert (
|
||||
response.status_code == HTTPStatus.BAD_REQUEST
|
||||
), f"Expected 400 for unsupported provider, got {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
assert "error" in response_data, "Response should contain 'error' field"
|
||||
assert (
|
||||
"unsupported cloud provider" in response_data["error"].lower()
|
||||
), "Error message should indicate unsupported provider"
|
||||
@@ -1,574 +0,0 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Callable
|
||||
import uuid
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from sqlalchemy import text
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def cleanup_cloud_accounts(postgres: types.TestContainerSQL) -> None:
|
||||
try:
|
||||
with postgres.conn.connect() as conn:
|
||||
# Try to delete all records instead of truncate in case table exists
|
||||
conn.execute(text("DELETE FROM cloud_integration"))
|
||||
conn.commit()
|
||||
logger.info("Cleaned up cloud_integration table")
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# Table might not exist, which is fine
|
||||
logger.info("Cleanup skipped or partial")
|
||||
|
||||
|
||||
def generate_unique_cloud_account_id() -> str:
|
||||
"""Generate a unique cloud account ID for testing."""
|
||||
# Use last 12 digits of UUID to simulate AWS account ID format
|
||||
return str(uuid.uuid4().int)[:12]
|
||||
|
||||
|
||||
def simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
) -> dict:
|
||||
"""Simulate an agent check-in to mark the account as connected.
|
||||
|
||||
Returns:
|
||||
dict with the response from check-in
|
||||
"""
|
||||
endpoint = (
|
||||
f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
|
||||
)
|
||||
|
||||
checkin_payload = {
|
||||
"account_id": account_id,
|
||||
"cloud_account_id": cloud_account_id,
|
||||
"data": {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if response.status_code != HTTPStatus.OK:
|
||||
logger.error("Agent check-in failed: %s, response: %s", response.status_code, response.text)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Agent check-in failed: {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
return response_data.get("data", response_data)
|
||||
|
||||
|
||||
def create_test_account(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str = "aws",
|
||||
) -> dict:
|
||||
"""Create a test account via generate-connection-url.
|
||||
|
||||
Returns the data as-is from the API response. Caller is responsible for
|
||||
doing agent check-in if needed to mark the account as connected.
|
||||
|
||||
Returns:
|
||||
dict with account_id and connection_url from the API
|
||||
"""
|
||||
endpoint = (
|
||||
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
|
||||
)
|
||||
|
||||
request_payload = {
|
||||
"account_config": {"regions": ["us-east-1"]},
|
||||
"agent_config": {
|
||||
"region": "us-east-1",
|
||||
"ingestion_url": "https://ingest.test.signoz.cloud",
|
||||
"ingestion_key": "test-ingestion-key-123456",
|
||||
"signoz_api_url": "https://test-deployment.test.signoz.cloud",
|
||||
"signoz_api_key": "test-api-key-789",
|
||||
"version": "v0.0.8",
|
||||
},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=request_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Failed to create test account: {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
# API returns data wrapped in {'status': 'success', 'data': {...}}
|
||||
data = response_data.get("data", response_data)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def test_list_connected_accounts_empty(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test listing connected accounts when there are none."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
cloud_provider = "aws"
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"
|
||||
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Expected 200, got {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
# API returns data wrapped in {'status': 'success', 'data': {...}}
|
||||
data = response_data.get("data", response_data)
|
||||
assert "accounts" in data, "Response should contain 'accounts' field"
|
||||
assert isinstance(data["accounts"], list), "Accounts should be a list"
|
||||
|
||||
|
||||
def test_list_connected_accounts_with_account(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test listing connected accounts after creating one."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
# Mock the deployment info query
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# Create a test account
|
||||
cloud_provider = "aws"
|
||||
account_data = create_test_account(signoz, admin_token, cloud_provider)
|
||||
account_id = account_data["account_id"]
|
||||
|
||||
# Simulate agent check-in to mark as connected
|
||||
cloud_account_id = generate_unique_cloud_account_id()
|
||||
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
|
||||
|
||||
# List accounts
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Expected 200, got {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
data = response_data.get("data", response_data)
|
||||
assert "accounts" in data, "Response should contain 'accounts' field"
|
||||
assert isinstance(data["accounts"], list), "Accounts should be a list"
|
||||
|
||||
# Find our account in the list (there may be leftover accounts from previous test runs)
|
||||
account = next((a for a in data["accounts"] if a["id"] == account_id), None)
|
||||
assert account is not None, f"Account {account_id} should be found in list"
|
||||
assert account["id"] == account_id, "Account ID should match"
|
||||
assert "config" in account, "Account should have config field"
|
||||
assert "status" in account, "Account should have status field"
|
||||
|
||||
|
||||
def test_get_account_status(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test getting the status of a specific account."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
# Mock the deployment info query
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# Create a test account (no check-in needed for status check)
|
||||
cloud_provider = "aws"
|
||||
account_data = create_test_account(signoz, admin_token, cloud_provider)
|
||||
account_id = account_data["account_id"]
|
||||
|
||||
# Get account status
|
||||
endpoint = (
|
||||
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{account_id}/status"
|
||||
)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Expected 200, got {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
data = response_data.get("data", response_data)
|
||||
assert "id" in data, "Response should contain 'id' field"
|
||||
assert data["id"] == account_id, "Account ID should match"
|
||||
assert "status" in data, "Response should contain 'status' field"
|
||||
assert "integration" in data["status"], "Status should contain 'integration' field"
|
||||
|
||||
|
||||
def test_get_account_status_not_found(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test getting status for a non-existent account."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
cloud_provider = "aws"
|
||||
fake_account_id = "00000000-0000-0000-0000-000000000000"
|
||||
|
||||
endpoint = (
|
||||
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{fake_account_id}/status"
|
||||
)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.NOT_FOUND
|
||||
), f"Expected 404, got {response.status_code}"
|
||||
|
||||
|
||||
def test_update_account_config(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test updating account configuration."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
# Mock the deployment info query
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# Create a test account
|
||||
cloud_provider = "aws"
|
||||
account_data = create_test_account(signoz, admin_token, cloud_provider)
|
||||
account_id = account_data["account_id"]
|
||||
|
||||
# Simulate agent check-in to mark as connected
|
||||
cloud_account_id = generate_unique_cloud_account_id()
|
||||
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
|
||||
|
||||
# Update account configuration
|
||||
endpoint = (
|
||||
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{account_id}/config"
|
||||
)
|
||||
|
||||
updated_config = {
|
||||
"config": {"regions": ["us-east-1", "us-west-2", "eu-west-1"]}
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=updated_config,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Expected 200, got {response.status_code}"
|
||||
|
||||
response_data = response.json()
|
||||
data = response_data.get("data", response_data)
|
||||
assert "id" in data, "Response should contain 'id' field"
|
||||
assert data["id"] == account_id, "Account ID should match"
|
||||
|
||||
# Verify the update by listing accounts
|
||||
list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"
|
||||
list_response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(list_endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
|
||||
list_response_data = list_response.json()
|
||||
list_data = list_response_data.get("data", list_response_data)
|
||||
account = next((a for a in list_data["accounts"] if a["id"] == account_id), None)
|
||||
assert account is not None, "Account should be found in list"
|
||||
assert "config" in account, "Account should have config"
|
||||
assert "regions" in account["config"], "Config should have regions"
|
||||
assert len(account["config"]["regions"]) == 3, "Should have 3 regions"
|
||||
assert set(account["config"]["regions"]) == {
|
||||
"us-east-1",
|
||||
"us-west-2",
|
||||
"eu-west-1",
|
||||
}, "Regions should match updated config"
|
||||
|
||||
|
||||
def test_disconnect_account(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, list], None],
|
||||
get_token: Callable[[str, str], str],
|
||||
postgres: types.TestContainerSQL,
|
||||
) -> None:
|
||||
"""Test disconnecting an account."""
|
||||
cleanup_cloud_accounts(postgres)
|
||||
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
add_license(signoz, make_http_mocks, get_token)
|
||||
|
||||
# Mock the deployment info query
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    # Simulate agent check-in to mark as connected
    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)

    # Disconnect the account
    endpoint = (
        f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{account_id}/disconnect"
    )

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    # Verify our specific account is no longer in the connected list
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_response_data = list_response.json()
    list_data = list_response_data.get("data", list_response_data)

    # Check that our specific account is not in the list
    disconnected_account = next(
        (a for a in list_data["accounts"] if a["id"] == account_id), None
    )
    assert disconnected_account is None, f"Account {account_id} should be removed from connected accounts"


def test_disconnect_account_not_found(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test disconnecting a non-existent account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "aws"
    fake_account_id = "00000000-0000-0000-0000-000000000000"

    endpoint = (
        f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{fake_account_id}/disconnect"
    )

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.NOT_FOUND
    ), f"Expected 404, got {response.status_code}"


def test_list_accounts_unsupported_provider(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test listing accounts for an unsupported cloud provider."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "gcp" # Unsupported provider
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"

    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.BAD_REQUEST
    ), f"Expected 400, got {response.status_code}"
@@ -1,759 +0,0 @@
from http import HTTPStatus
from typing import Callable
import uuid

import requests
from sqlalchemy import text
from wiremock.client import (
    HttpMethods,
    Mapping,
    MappingRequest,
    MappingResponse,
    WireMockMatchers,
)

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
from fixtures.logger import setup_logger

logger = setup_logger(__name__)


def cleanup_cloud_accounts(postgres: types.TestContainerSQL) -> None:
    """Clean up cloud_integration table to avoid corrupted data issues."""
    try:
        with postgres.conn.connect() as conn:
            # Try to delete all records instead of truncate in case table exists
            conn.execute(text("DELETE FROM cloud_integration"))
            conn.commit()
            logger.info("Cleaned up cloud_integration table")
    except Exception: # pylint: disable=broad-except
        # Table might not exist, which is fine
        logger.info("Cleanup skipped or partial")


def generate_unique_cloud_account_id() -> str:
    """Generate a unique cloud account ID for testing."""
    # Use the first 12 digits of a UUID integer to simulate the AWS account ID format
    return str(uuid.uuid4().int)[:12]


def simulate_agent_checkin(
    signoz: types.SigNoz,
    admin_token: str,
    cloud_provider: str,
    account_id: str,
    cloud_account_id: str,
) -> dict:
    """Simulate an agent check-in to mark the account as connected.

    Returns:
        dict with the response from check-in
    """
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
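
    # An empty "data" blob appears to be enough for the check-in to register the
    # account as connected; these tests only care about the connection status.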
    checkin_payload = {
        "account_id": account_id,
        "cloud_account_id": cloud_account_id,
        "data": {},
    }

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=checkin_payload,
        timeout=10,
    )

    if response.status_code != HTTPStatus.OK:
        logger.error(
            "Agent check-in failed: %s, response: %s", response.status_code, response.text
        )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Agent check-in failed: {response.status_code}"

    response_data = response.json()
    return response_data.get("data", response_data)


def create_test_account(
    signoz: types.SigNoz,
    admin_token: str,
    cloud_provider: str = "aws",
) -> dict:
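    """Create a cloud account via the generate-connection-url endpoint.

    Returns:
        dict with the unwrapped response data (includes the new "account_id").
    """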
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"

    request_payload = {
        "account_config": {"regions": ["us-east-1"]},
        "agent_config": {
            "region": "us-east-1",
            "ingestion_url": "https://ingest.test.signoz.cloud",
            "ingestion_key": "test-ingestion-key-123456",
            "signoz_api_url": "https://test-deployment.test.signoz.cloud",
            "signoz_api_key": "test-api-key-789",
            "version": "v0.0.8",
        },
    }

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=request_payload,
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Failed to create test account: {response.status_code}"

    response_data = response.json()
    # API returns data wrapped in {'status': 'success', 'data': {...}}
    data = response_data.get("data", response_data)

    return data
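

# Typical flow for the connected-account tests below: create_test_account()
# registers the account via generate-connection-url, then simulate_agent_checkin()
# marks it as connected so the account- and service-level endpoints can be exercised.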


def test_list_services_without_account(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test listing available services without specifying an account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "aws"
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"

    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    response_data = response.json()
    data = response_data.get("data", response_data)
    assert "services" in data, "Response should contain 'services' field"
    assert isinstance(data["services"], list), "Services should be a list"
    assert len(data["services"]) > 0, "Should have at least one service available"

    # Verify service structure
    service = data["services"][0]
    assert "id" in service, "Service should have 'id' field"
    assert "title" in service, "Service should have 'title' field"
    assert "icon" in service, "Service should have 'icon' field"


def test_list_services_with_account(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test listing services for a specific connected account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    # Mock the deployment info query
    make_http_mocks(
        signoz.zeus,
        [
            Mapping(
                request=MappingRequest(
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account and do check-in
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(
        signoz, admin_token, cloud_provider, account_id, cloud_account_id
    )

    # List services for the account
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services?cloud_account_id={cloud_account_id}"

    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    response_data = response.json()
    data = response_data.get("data", response_data)
    assert "services" in data, "Response should contain 'services' field"
    assert isinstance(data["services"], list), "Services should be a list"
    assert len(data["services"]) > 0, "Should have at least one service available"

    # Services should include config field (may be null if not configured)
    service = data["services"][0]
    assert "id" in service, "Service should have 'id' field"
    assert "title" in service, "Service should have 'title' field"
    assert "icon" in service, "Service should have 'icon' field"


def test_get_service_details_without_account(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test getting service details without specifying an account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "aws"
    # First get the list of services to get a valid service ID
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_data = list_response.json().get("data", list_response.json())
    assert len(list_data["services"]) > 0, "Should have at least one service"
    service_id = list_data["services"][0]["id"]

    # Get service details
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}"
    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    response_data = response.json()
    data = response_data.get("data", response_data)

    # Verify service details structure
    assert "id" in data, "Service details should have 'id' field"
    assert data["id"] == service_id, "Service ID should match requested ID"
assert "title" in data, "Service details should have 'name' field"
|
||||
assert "overview" in data, "Service details should have 'overview' field"
|
||||
# assert assets to had list of dashboards
    assert "assets" in data, "Service details should have 'assets' field"
    assert isinstance(data["assets"], dict), "Assets should be a dictionary"


def test_get_service_details_with_account(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test getting service details for a specific connected account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    # Mock the deployment info query
    make_http_mocks(
        signoz.zeus,
        [
            Mapping(
                request=MappingRequest(
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account and do check-in
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(
        signoz, admin_token, cloud_provider, account_id, cloud_account_id
    )

    # Get list of services first
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_data = list_response.json().get("data", list_response.json())
    assert len(list_data["services"]) > 0, "Should have at least one service"
    service_id = list_data["services"][0]["id"]

    # Get service details with account
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}?cloud_account_id={cloud_account_id}"
    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    response_data = response.json()
    data = response_data.get("data", response_data)

    # Verify service details structure
    assert "id" in data, "Service details should have 'id' field"
    assert data["id"] == service_id, "Service ID should match requested ID"
    assert "title" in data, "Service details should have 'title' field"
    assert "overview" in data, "Service details should have 'overview' field"
    assert "assets" in data, "Service details should have 'assets' field"
    assert "config" in data, "Service details should have 'config' field"
assert "status" in data, "Config should have 'status' field"


def test_get_service_details_invalid_service(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test getting details for a non-existent service."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "aws"
    fake_service_id = "non-existent-service"

    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{fake_service_id}"
    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.NOT_FOUND
    ), f"Expected 404, got {response.status_code}"


def test_list_services_unsupported_provider(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test listing services for an unsupported cloud provider."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "gcp" # Unsupported provider
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"

    response = requests.get(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.BAD_REQUEST
    ), f"Expected 400, got {response.status_code}"


def test_update_service_config(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test updating service configuration for a connected account."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    # Mock the deployment info query
    make_http_mocks(
        signoz.zeus,
        [
            Mapping(
                request=MappingRequest(
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account and do check-in
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(
        signoz, admin_token, cloud_provider, account_id, cloud_account_id
    )

    # Get list of services to pick a valid service ID
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_data = list_response.json().get("data", list_response.json())
    assert len(list_data["services"]) > 0, "Should have at least one service"
    service_id = list_data["services"][0]["id"]

    # Update service configuration
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"

    config_payload = {
        "cloud_account_id": cloud_account_id,
        "config": {
            "metrics": {"enabled": True},
            "logs": {"enabled": True},
        },
    }

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=config_payload,
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {response.status_code}"

    response_data = response.json()
    data = response_data.get("data", response_data)

    # Verify response structure
    assert "id" in data, "Response should contain 'id' field"
    assert data["id"] == service_id, "Service ID should match"
    assert "config" in data, "Response should contain 'config' field"
    assert "metrics" in data["config"], "Config should contain 'metrics' field"
    assert "logs" in data["config"], "Config should contain 'logs' field"


def test_update_service_config_without_account(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
"""Test updating service config without a connected account should fail."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    cloud_provider = "aws"

    # Get a valid service ID
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_data = list_response.json().get("data", list_response.json())
    service_id = list_data["services"][0]["id"]

    # Try to update config with non-existent account
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"

    fake_cloud_account_id = generate_unique_cloud_account_id()
    config_payload = {
        "cloud_account_id": fake_cloud_account_id,
        "config": {
            "metrics": {"enabled": True},
        },
    }

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=config_payload,
        timeout=10,
    )
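
    # The API currently reports an unknown cloud account as a 500 rather than a
    # 404, which is what the assertion below pins down.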
    assert (
        response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
    ), f"Expected 500 for non-existent account, got {response.status_code}"


def test_update_service_config_invalid_service(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
"""Test updating config for a non-existent service should fail."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    # Mock the deployment info query
    make_http_mocks(
        signoz.zeus,
        [
            Mapping(
                request=MappingRequest(
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account and do check-in
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(
        signoz, admin_token, cloud_provider, account_id, cloud_account_id
    )

    # Try to update config for invalid service
    fake_service_id = "non-existent-service"
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{fake_service_id}/config"

    config_payload = {
        "cloud_account_id": cloud_account_id,
        "config": {
            "metrics": {"enabled": True},
        },
    }

    response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=config_payload,
        timeout=10,
    )

    assert (
        response.status_code == HTTPStatus.NOT_FOUND
    ), f"Expected 404 for invalid service, got {response.status_code}"


def test_update_service_config_disable_service(
    signoz: types.SigNoz,
    create_user_admin: types.Operation, # pylint: disable=unused-argument
    make_http_mocks: Callable[[types.TestContainerDocker, list], None],
    get_token: Callable[[str, str], str],
    postgres: types.TestContainerSQL,
) -> None:
    """Test disabling a service by updating config with enabled=false."""
    cleanup_cloud_accounts(postgres)

    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    add_license(signoz, make_http_mocks, get_token)

    # Mock the deployment info query
    make_http_mocks(
        signoz.zeus,
        [
            Mapping(
                request=MappingRequest(
                    method=HttpMethods.GET,
                    url="/v2/deployments/me",
                    headers={
                        "X-Signoz-Cloud-Api-Key": {
                            WireMockMatchers.EQUAL_TO: "secret-key"
                        }
                    },
                ),
                response=MappingResponse(
                    status=200,
                    json_body={
                        "status": "success",
                        "data": {
                            "name": "test-deployment",
                            "cluster": {"region": {"dns": "test.signoz.cloud"}},
                        },
                    },
                ),
                persistent=False,
            )
        ],
    )

    # Create a test account and do check-in
    cloud_provider = "aws"
    account_data = create_test_account(signoz, admin_token, cloud_provider)
    account_id = account_data["account_id"]

    cloud_account_id = generate_unique_cloud_account_id()
    simulate_agent_checkin(
        signoz, admin_token, cloud_provider, account_id, cloud_account_id
    )

    # Get a valid service
    list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
    list_response = requests.get(
        signoz.self.host_configs["8080"].get(list_endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=10,
    )

    list_data = list_response.json().get("data", list_response.json())
    service_id = list_data["services"][0]["id"]

    # First enable the service
    endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"

    enable_payload = {
        "cloud_account_id": cloud_account_id,
        "config": {
            "metrics": {"enabled": True},
        },
    }

    enable_response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=enable_payload,
        timeout=10,
    )

    assert enable_response.status_code == HTTPStatus.OK, "Failed to enable service"

    # Now disable the service
    disable_payload = {
        "cloud_account_id": cloud_account_id,
        "config": {
            "metrics": {"enabled": False},
            "logs": {"enabled": False},
        },
    }

    disable_response = requests.post(
        signoz.self.host_configs["8080"].get(endpoint),
        headers={"Authorization": f"Bearer {admin_token}"},
        json=disable_payload,
        timeout=10,
    )

    assert (
        disable_response.status_code == HTTPStatus.OK
    ), f"Expected 200, got {disable_response.status_code}"

    response_data = disable_response.json()
    data = response_data.get("data", response_data)

    # Verify service is disabled
    assert data["config"]["metrics"]["enabled"] is False, "Metrics should be disabled"
    assert data["config"]["logs"]["enabled"] is False, "Logs should be disabled"