Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-15 21:52:36 +00:00)

Compare commits: fix/unit_c...fix-issues (4 commits)
| Author | SHA1 | Date | |
|---|---|---|---|
| | 4b117c85e6 | | |
| | 3e1a48acf9 | | |
| | 6d282dfe68 | | |
| | e3c04b378a | | |
@@ -176,25 +176,6 @@ We have published benchmarks comparing Loki with SigNoz. Check out…

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.

Not sure how to get started? Just ping us on the #contributing channel in our [slack community](https://signoz.io/slack)

### Our project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
README.md (28 changed lines)
@@ -221,34 +221,6 @@ We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)

#### Frontend

- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)
- [Vibhu Pandey](https://github.com/therealpandey)

<br /><br />
@@ -187,25 +187,6 @@ Jaeger is only a distributed tracing system, but SigNoz can provide metric…

Not sure how to get started? Just reach out to us on the `#contributing` channel in our [slack community](https://signoz.io/slack).

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
@@ -285,7 +285,6 @@ flagger:
  config:
    boolean:
      use_span_metrics: true
      interpolation_enabled: false
      kafka_span_eval: false
    string:
    float:
@@ -3,9 +3,8 @@ package flagger

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

var (
    FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
    FeatureInterpolationEnabled = featuretypes.MustNewName("interpolation_enabled")
    FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
    FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
    FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
)

func MustNewRegistry() featuretypes.Registry {
@@ -18,14 +17,6 @@ func MustNewRegistry() featuretypes.Registry {
            DefaultVariant: featuretypes.MustNewName("disabled"),
            Variants: featuretypes.NewBooleanVariants(),
        },
        &featuretypes.Feature{
            Name: FeatureInterpolationEnabled,
            Kind: featuretypes.KindBoolean,
            Stage: featuretypes.StageExperimental,
            Description: "Controls whether to enable interpolation",
            DefaultVariant: featuretypes.MustNewName("disabled"),
            Variants: featuretypes.NewBooleanVariants(),
        },
        &featuretypes.Feature{
            Name: FeatureKafkaSpanEval,
            Kind: featuretypes.KindBoolean,
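The two hunks above retire the interpolation flag from both the shipped config and the Go registry. For orientation, here is a minimal sketch of declaring a flag in this shape, using only the featuretypes identifiers visible in the diff; the flag name and description below are hypothetical, and the full wiring inside MustNewRegistry is not shown here, so treat this as illustrative only:

```go
package flagger

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

// Hypothetical flag, declared the same way as the flags in the diff above.
var FeatureExampleFlag = featuretypes.MustNewName("example_flag")

// A boolean feature in the shape MustNewRegistry registers: experimental,
// disabled by default, with the standard boolean variant pair.
var exampleFeature = &featuretypes.Feature{
	Name:           FeatureExampleFlag,
	Kind:           featuretypes.KindBoolean,
	Stage:          featuretypes.StageExperimental,
	Description:    "Example flag for illustration only",
	DefaultVariant: featuretypes.MustNewName("disabled"),
	Variants:       featuretypes.NewBooleanVariants(),
}
```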
@@ -43,11 +43,11 @@ var (

// FromUnit returns a converter for the given unit
func FromUnit(u Unit) Converter {
    switch u {
    case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min", "w", "wk":
    case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min":
        return DurationConverter
    case "bytes", "decbytes", "bits", "bit", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy", "EBy", "ZBy", "YBy", "KiBy", "MiBy", "GiBy", "TiBy", "PiBy", "EiBy", "ZiBy", "YiBy", "kbit", "Mbit", "Gbit", "Tbit", "Pbit", "Ebit", "Zbit", "Ybit", "Kibit", "Mibit", "Gibit", "Tibit", "Pibit":
    case "bytes", "decbytes", "bits", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy":
        return DataConverter
    case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "EBy/s", "ZBy/s", "YBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s", "Ebit/s", "Zbit/s", "Ybit/s", "KiBy/s", "MiBy/s", "GiBy/s", "TiBy/s", "PiBy/s", "EiBy/s", "ZiBy/s", "YiBy/s", "Kibit/s", "Mibit/s", "Gibit/s", "Tibit/s", "Pibit/s", "Eibit/s", "Zibit/s", "Yibit/s":
    case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s":
        return DataRateConverter
    case "percent", "percentunit", "%":
        return PercentConverter
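Downstream use of this switch is a two-step dispatch: pick the family converter by unit string, then convert a value. A sketch inside the converter package, with the Value and Convert shapes taken from the tests later in this diff:

```go
// Sketch only (converter package assumed). The 1024-bytes-to-kbytes
// expectation matches the TestData assertion further down in this diff.
func exampleDispatch() Value {
	c := FromUnit("bytes") // data family, so DataConverter
	return c.Convert(Value{F: 1024, U: "bytes"}, "kbytes") // Value{F: 1, U: "kbytes"}
}
```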
@@ -58,80 +58,36 @@ func (*dataConverter) Name() string {
    return "data"
}

// Notation followed by UCUM:
// https://ucum.org/ucum
// kibi = Ki, mebi = Mi, gibi = Gi, tebi = Ti, pibi = Pi
// kilo = k, mega = M, giga = G, tera = T, peta = P
// exa = E, zetta = Z, yotta = Y
// byte = By, bit = bit
func FromDataUnit(u Unit) float64 {
    switch u {
    case "bytes", "By": // base 2
        return Byte
    case "decbytes": // base 10
        return Byte
    case "bits", "bit": // base 2
    case "bits": // base 2
        return Bit
    case "decbits": // base 10
        return Bit
    case "kbytes", "KiBy": // base 2
    case "kbytes", "kBy": // base 2
        return Kibibyte
    case "decKbytes", "deckbytes", "kBy": // base 10
    case "decKbytes", "deckbytes": // base 10
        return Kilobyte
    case "mbytes", "MiBy": // base 2
    case "mbytes", "MBy": // base 2
        return Mebibyte
    case "decMbytes", "decmbytes", "MBy": // base 10
    case "decMbytes", "decmbytes": // base 10
        return Megabyte
    case "gbytes", "GiBy": // base 2
    case "gbytes", "GBy": // base 2
        return Gibibyte
    case "decGbytes", "decgbytes", "GBy": // base 10
    case "decGbytes", "decgbytes": // base 10
        return Gigabyte
    case "tbytes", "TiBy": // base 2
    case "tbytes", "TBy": // base 2
        return Tebibyte
    case "decTbytes", "dectbytes", "TBy": // base 10
    case "decTbytes", "dectbytes": // base 10
        return Terabyte
    case "pbytes", "PiBy": // base 2
    case "pbytes", "PBy": // base 2
        return Pebibyte
    case "decPbytes", "decpbytes", "PBy": // base 10
    case "decPbytes", "decpbytes": // base 10
        return Petabyte
    case "EBy": // base 10
        return Exabyte
    case "ZBy": // base 10
        return Zettabyte
    case "YBy": // base 10
        return Yottabyte
    case "Kibit": // base 2
        return Kibibit
    case "Mibit": // base 2
        return Mebibit
    case "Gibit": // base 2
        return Gibibit
    case "Tibit": // base 2
        return Tebibit
    case "Pibit": // base 2
        return Pebibit
    case "EiBy": // base 2
        return Exbibyte
    case "ZiBy": // base 2
        return Zebibyte
    case "YiBy": // base 2
        return Yobibyte
    case "kbit": // base 10
        return Kilobit
    case "Mbit": // base 10
        return Megabit
    case "Gbit": // base 10
        return Gigabit
    case "Tbit": // base 10
        return Terabit
    case "Pbit": // base 10
        return Petabit
    case "Ebit": // base 10
        return Exabit
    case "Zbit": // base 10
        return Zettabit
    case "Ybit": // base 10
        return Yottabit
    default:
        return 1
    }
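Each factor above is a scale relative to a common base, so conversion presumably reduces to multiply by the source factor and divide by the target factor. Convert's body is not part of this diff, so the following is an assumption, though it is consistent with every test below:

```go
// Assumed mechanism, not the repository's actual Convert implementation.
// Example: 1024 bytes -> kbytes is 1024 * Byte / Kibibyte = 1.
func convertByFactor(v Value, to Unit) Value {
	return Value{F: v.F * FromDataUnit(v.U) / FromDataUnit(to), U: to}
}
```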
@@ -54,12 +54,6 @@ func (*dataRateConverter) Name() string {
    return "data_rate"
}

// Notation followed by UCUM:
// https://ucum.org/ucum
// kibi = Ki, mebi = Mi, gibi = Gi, tebi = Ti, pibi = Pi
// kilo = k, mega = M, giga = G, tera = T, peta = P
// exa = E, zetta = Z, yotta = Y
// byte = By, bit = bit
func FromDataRateUnit(u Unit) float64 {
    // See https://github.com/SigNoz/signoz/blob/5a81f5f90b34845f5b4b3bdd46acf29d04bf3987/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts#L62-L85
    switch u {
@@ -71,70 +65,46 @@ func FromDataRateUnit(u Unit) float64 {
        return BitPerSecond
    case "bps", "bit/s": // bits/sec(SI)
        return BitPerSecond
    case "KiBs", "KiBy/s": // kibibytes/sec
    case "KiBs": // kibibytes/sec
        return KibibytePerSecond
    case "Kibits", "Kibit/s": // kibibits/sec
    case "Kibits": // kibibits/sec
        return KibibitPerSecond
    case "KBs", "kBy/s": // kilobytes/sec
        return KilobytePerSecond
    case "Kbits", "kbit/s": // kilobits/sec
        return KilobitPerSecond
    case "MiBs", "MiBy/s": // mebibytes/sec
    case "MiBs": // mebibytes/sec
        return MebibytePerSecond
    case "Mibits", "Mibit/s": // mebibits/sec
    case "Mibits": // mebibits/sec
        return MebibitPerSecond
    case "MBs", "MBy/s": // megabytes/sec
        return MegabytePerSecond
    case "Mbits", "Mbit/s": // megabits/sec
        return MegabitPerSecond
    case "GiBs", "GiBy/s": // gibibytes/sec
    case "GiBs": // gibibytes/sec
        return GibibytePerSecond
    case "Gibits", "Gibit/s": // gibibits/sec
    case "Gibits": // gibibits/sec
        return GibibitPerSecond
    case "GBs", "GBy/s": // gigabytes/sec
        return GigabytePerSecond
    case "Gbits", "Gbit/s": // gigabits/sec
        return GigabitPerSecond
    case "TiBs", "TiBy/s": // tebibytes/sec
    case "TiBs": // tebibytes/sec
        return TebibytePerSecond
    case "Tibits", "Tibit/s": // tebibits/sec
    case "Tibits": // tebibits/sec
        return TebibitPerSecond
    case "TBs", "TBy/s": // terabytes/sec
        return TerabytePerSecond
    case "Tbits", "Tbit/s": // terabits/sec
        return TerabitPerSecond
    case "PiBs", "PiBy/s": // pebibytes/sec
    case "PiBs": // pebibytes/sec
        return PebibytePerSecond
    case "Pibits", "Pibit/s": // pebibits/sec
    case "Pibits": // pebibits/sec
        return PebibitPerSecond
    case "PBs", "PBy/s": // petabytes/sec
        return PetabytePerSecond
    case "Pbits", "Pbit/s": // petabits/sec
        return PetabitPerSecond
    case "EBy/s": // exabytes/sec
        return ExabytePerSecond
    case "Ebit/s": // exabits/sec
        return ExabitPerSecond
    case "EiBy/s": // exbibytes/sec
        return ExbibytePerSecond
    case "Eibit/s": // exbibits/sec
        return ExbibitPerSecond
    case "ZBy/s": // zettabytes/sec
        return ZettabytePerSecond
    case "Zbit/s": // zettabits/sec
        return ZettabitPerSecond
    case "ZiBy/s": // zebibytes/sec
        return ZebibytePerSecond
    case "Zibit/s": // zebibits/sec
        return ZebibitPerSecond
    case "YBy/s": // yottabytes/sec
        return YottabytePerSecond
    case "Ybit/s": // yottabits/sec
        return YottabitPerSecond
    case "YiBy/s": // yobibytes/sec
        return YobibytePerSecond
    case "Yibit/s": // yobibits/sec
        return YobibitPerSecond
    default:
        return 1
    }
@@ -75,83 +75,3 @@ func TestDataRate(t *testing.T) {
    // 1024 * 1024 * 1024 bytes = 1 gbytes
    assert.Equal(t, Value{F: 1, U: "GiBs"}, dataRateConverter.Convert(Value{F: 1024 * 1024 * 1024, U: "binBps"}, "GiBs"))
}

func TestDataRateConversionUCUMUnit(t *testing.T) {
    dataRateConverter := NewDataRateConverter()

    tests := []struct {
        name     string
        input    Value
        toUnit   Unit
        expected Value
    }{
        // Binary byte scaling
        {name: "Binary byte scaling: 1024 By/s = 1 KiBy/s", input: Value{F: 1024, U: "By/s"}, toUnit: "KiBy/s", expected: Value{F: 1, U: "KiBy/s"}},
        {name: "Kibibyte to bytes: 1 KiBy/s = 1024 By/s", input: Value{F: 1, U: "KiBy/s"}, toUnit: "By/s", expected: Value{F: 1024, U: "By/s"}},
        {name: "Binary byte scaling: 1024 KiBy/s = 1 MiBy/s", input: Value{F: 1024, U: "KiBy/s"}, toUnit: "MiBy/s", expected: Value{F: 1, U: "MiBy/s"}},
        {name: "Gibibyte to bytes: 1 GiBy/s = 1073741824 By/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "By/s", expected: Value{F: 1024 * 1024 * 1024, U: "By/s"}},
        {name: "Binary byte scaling: 1024 MiBy/s = 1 GiBy/s", input: Value{F: 1024, U: "MiBy/s"}, toUnit: "GiBy/s", expected: Value{F: 1, U: "GiBy/s"}},
        {name: "Gibibyte to mebibyte: 1 GiBy/s = 1024 MiBy/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "MiBy/s", expected: Value{F: 1024, U: "MiBy/s"}},
        {name: "Binary byte scaling: 1024 GiBy/s = 1 TiBy/s", input: Value{F: 1024, U: "GiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1, U: "TiBy/s"}},
        {name: "Tebibyte to bytes: 1 TiBy/s = 1099511627776 By/s", input: Value{F: 1, U: "TiBy/s"}, toUnit: "By/s", expected: Value{F: 1024 * 1024 * 1024 * 1024, U: "By/s"}},
        {name: "Binary byte scaling: 1024 TiBy/s = 1 PiBy/s", input: Value{F: 1024, U: "TiBy/s"}, toUnit: "PiBy/s", expected: Value{F: 1, U: "PiBy/s"}},
        {name: "Pebibyte to tebibyte: 1 PiBy/s = 1024 TiBy/s", input: Value{F: 1, U: "PiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1024, U: "TiBy/s"}},
        // Binary bit scaling
        {name: "Binary bit scaling: 1024 bit/s = 1 Kibit/s", input: Value{F: 1024, U: "bit/s"}, toUnit: "Kibit/s", expected: Value{F: 1, U: "Kibit/s"}},
        {name: "Kibibit to bits: 1 Kibit/s = 1024 bit/s", input: Value{F: 1, U: "Kibit/s"}, toUnit: "bit/s", expected: Value{F: 1024, U: "bit/s"}},
        {name: "Binary bit scaling: 1024 Kibit/s = 1 Mibit/s", input: Value{F: 1024, U: "Kibit/s"}, toUnit: "Mibit/s", expected: Value{F: 1, U: "Mibit/s"}},
        {name: "Gibibit to bits: 1 Gibit/s = 1073741824 bit/s", input: Value{F: 1, U: "Gibit/s"}, toUnit: "bit/s", expected: Value{F: 1024 * 1024 * 1024, U: "bit/s"}},
        {name: "Binary bit scaling: 1024 Mibit/s = 1 Gibit/s", input: Value{F: 1024, U: "Mibit/s"}, toUnit: "Gibit/s", expected: Value{F: 1, U: "Gibit/s"}},
        {name: "Gibibit to mebibit: 1 Gibit/s = 1024 Mibit/s", input: Value{F: 1, U: "Gibit/s"}, toUnit: "Mibit/s", expected: Value{F: 1024, U: "Mibit/s"}},
        {name: "Binary bit scaling: 1024 Gibit/s = 1 Tibit/s", input: Value{F: 1024, U: "Gibit/s"}, toUnit: "Tibit/s", expected: Value{F: 1, U: "Tibit/s"}},
        {name: "Tebibit to gibibit: 1 Tibit/s = 1024 Gibit/s", input: Value{F: 1, U: "Tibit/s"}, toUnit: "Gibit/s", expected: Value{F: 1024, U: "Gibit/s"}},
        {name: "Binary bit scaling: 1024 Tibit/s = 1 Pibit/s", input: Value{F: 1024, U: "Tibit/s"}, toUnit: "Pibit/s", expected: Value{F: 1, U: "Pibit/s"}},
        {name: "Pebibit to tebibit: 1 Pibit/s = 1024 Tibit/s", input: Value{F: 1, U: "Pibit/s"}, toUnit: "Tibit/s", expected: Value{F: 1024, U: "Tibit/s"}},
        // Bytes to bits
        {name: "Bytes to bits: 1 KiBy/s = 8 Kibit/s", input: Value{F: 1, U: "KiBy/s"}, toUnit: "Kibit/s", expected: Value{F: 8, U: "Kibit/s"}},
        {name: "Bytes to bits: 1 MiBy/s = 8 Mibit/s", input: Value{F: 1, U: "MiBy/s"}, toUnit: "Mibit/s", expected: Value{F: 8, U: "Mibit/s"}},
        {name: "Bytes to bits: 1 GiBy/s = 8 Gibit/s", input: Value{F: 1, U: "GiBy/s"}, toUnit: "Gibit/s", expected: Value{F: 8, U: "Gibit/s"}},
        // Unit alias
        {name: "Unit alias: 1 KiBs = 1 KiBy/s", input: Value{F: 1, U: "KiBs"}, toUnit: "KiBy/s", expected: Value{F: 1, U: "KiBy/s"}},
        {name: "Unit alias: 1 Kibits = 1 Kibit/s", input: Value{F: 1, U: "Kibits"}, toUnit: "Kibit/s", expected: Value{F: 1, U: "Kibit/s"}},
        // SI byte scaling (Exa, Zetta, Yotta)
        {name: "SI byte scaling: 1000 PBy/s = 1 EBy/s", input: Value{F: 1000, U: "PBy/s"}, toUnit: "EBy/s", expected: Value{F: 1, U: "EBy/s"}},
        {name: "Exabyte to bytes: 1 EBy/s = 1e18 By/s", input: Value{F: 1, U: "EBy/s"}, toUnit: "By/s", expected: Value{F: 1e18, U: "By/s"}},
        {name: "SI byte scaling: 1000 EBy/s = 1 ZBy/s", input: Value{F: 1000, U: "EBy/s"}, toUnit: "ZBy/s", expected: Value{F: 1, U: "ZBy/s"}},
        {name: "Zettabyte to petabytes: 1 ZBy/s = 1000000 PBy/s", input: Value{F: 1, U: "ZBy/s"}, toUnit: "PBy/s", expected: Value{F: 1e6, U: "PBy/s"}},
        {name: "SI byte scaling: 1000 ZBy/s = 1 YBy/s", input: Value{F: 1000, U: "ZBy/s"}, toUnit: "YBy/s", expected: Value{F: 1, U: "YBy/s"}},
        {name: "Yottabyte to zettabyte: 1 YBy/s = 1000 ZBy/s", input: Value{F: 1, U: "YBy/s"}, toUnit: "ZBy/s", expected: Value{F: 1000, U: "ZBy/s"}},
        // Binary byte scaling (Exbi, Zebi, Yobi)
        {name: "Binary byte scaling: 1024 PiBy/s = 1 EiBy/s", input: Value{F: 1024, U: "PiBy/s"}, toUnit: "EiBy/s", expected: Value{F: 1, U: "EiBy/s"}},
        {name: "Exbibyte to tebibytes: 1 EiBy/s = 1048576 TiBy/s", input: Value{F: 1, U: "EiBy/s"}, toUnit: "TiBy/s", expected: Value{F: 1024 * 1024, U: "TiBy/s"}},
        {name: "Binary byte scaling: 1024 EiBy/s = 1 ZiBy/s", input: Value{F: 1024, U: "EiBy/s"}, toUnit: "ZiBy/s", expected: Value{F: 1, U: "ZiBy/s"}},
        {name: "Zebibyte to exbibyte: 1 ZiBy/s = 1024 EiBy/s", input: Value{F: 1, U: "ZiBy/s"}, toUnit: "EiBy/s", expected: Value{F: 1024, U: "EiBy/s"}},
        {name: "Binary byte scaling: 1024 ZiBy/s = 1 YiBy/s", input: Value{F: 1024, U: "ZiBy/s"}, toUnit: "YiBy/s", expected: Value{F: 1, U: "YiBy/s"}},
        {name: "Yobibyte to zebibyte: 1 YiBy/s = 1024 ZiBy/s", input: Value{F: 1, U: "YiBy/s"}, toUnit: "ZiBy/s", expected: Value{F: 1024, U: "ZiBy/s"}},
        // SI bit scaling (Exa, Zetta, Yotta)
        {name: "SI bit scaling: 1000 Pbit/s = 1 Ebit/s", input: Value{F: 1000, U: "Pbit/s"}, toUnit: "Ebit/s", expected: Value{F: 1, U: "Ebit/s"}},
        {name: "Exabit to gigabits: 1 Ebit/s = 1e9 Gbit/s", input: Value{F: 1, U: "Ebit/s"}, toUnit: "Gbit/s", expected: Value{F: 1e9, U: "Gbit/s"}},
        {name: "SI bit scaling: 1000 Ebit/s = 1 Zbit/s", input: Value{F: 1000, U: "Ebit/s"}, toUnit: "Zbit/s", expected: Value{F: 1, U: "Zbit/s"}},
        {name: "Zettabit to exabit: 1 Zbit/s = 1000 Ebit/s", input: Value{F: 1, U: "Zbit/s"}, toUnit: "Ebit/s", expected: Value{F: 1000, U: "Ebit/s"}},
        {name: "SI bit scaling: 1000 Zbit/s = 1 Ybit/s", input: Value{F: 1000, U: "Zbit/s"}, toUnit: "Ybit/s", expected: Value{F: 1, U: "Ybit/s"}},
        {name: "Yottabit to zettabit: 1 Ybit/s = 1000 Zbit/s", input: Value{F: 1, U: "Ybit/s"}, toUnit: "Zbit/s", expected: Value{F: 1000, U: "Zbit/s"}},
        // Binary bit scaling (Exbi, Zebi, Yobi)
        {name: "Binary bit scaling: 1024 Pibit/s = 1 Eibit/s", input: Value{F: 1024, U: "Pibit/s"}, toUnit: "Eibit/s", expected: Value{F: 1, U: "Eibit/s"}},
        {name: "Exbibit to pebibit: 1 Eibit/s = 1024 Pibit/s", input: Value{F: 1, U: "Eibit/s"}, toUnit: "Pibit/s", expected: Value{F: 1024, U: "Pibit/s"}},
        {name: "Binary bit scaling: 1024 Eibit/s = 1 Zibit/s", input: Value{F: 1024, U: "Eibit/s"}, toUnit: "Zibit/s", expected: Value{F: 1, U: "Zibit/s"}},
        {name: "Zebibit to exbibit: 1 Zibit/s = 1024 Eibit/s", input: Value{F: 1, U: "Zibit/s"}, toUnit: "Eibit/s", expected: Value{F: 1024, U: "Eibit/s"}},
        {name: "Binary bit scaling: 1024 Zibit/s = 1 Yibit/s", input: Value{F: 1024, U: "Zibit/s"}, toUnit: "Yibit/s", expected: Value{F: 1, U: "Yibit/s"}},
        {name: "Yobibit to zebibit: 1 Yibit/s = 1024 Zibit/s", input: Value{F: 1, U: "Yibit/s"}, toUnit: "Zibit/s", expected: Value{F: 1024, U: "Zibit/s"}},
        // Bytes to bits (Exbi, Zebi, Yobi)
        {name: "Bytes to bits: 1 EiBy/s = 8 Eibit/s", input: Value{F: 1, U: "EiBy/s"}, toUnit: "Eibit/s", expected: Value{F: 8, U: "Eibit/s"}},
        {name: "Bytes to bits: 1 ZiBy/s = 8 Zibit/s", input: Value{F: 1, U: "ZiBy/s"}, toUnit: "Zibit/s", expected: Value{F: 8, U: "Zibit/s"}},
        {name: "Bytes to bits: 1 YiBy/s = 8 Yibit/s", input: Value{F: 1, U: "YiBy/s"}, toUnit: "Yibit/s", expected: Value{F: 8, U: "Yibit/s"}},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := dataRateConverter.Convert(tt.input, tt.toUnit)
            assert.Equal(t, tt.expected, got)
        })
    }
}
@@ -13,7 +13,7 @@ func TestData(t *testing.T) {
    assert.Equal(t, Value{F: 1, U: "By"}, dataConverter.Convert(Value{F: 8, U: "bits"}, "By"))
    // 1024 bytes = 1 kbytes
    assert.Equal(t, Value{F: 1, U: "kbytes"}, dataConverter.Convert(Value{F: 1024, U: "bytes"}, "kbytes"))
    assert.Equal(t, Value{F: 1, U: "kBy"}, dataConverter.Convert(Value{F: 1000, U: "bytes"}, "kBy"))
    assert.Equal(t, Value{F: 1, U: "kBy"}, dataConverter.Convert(Value{F: 1024, U: "bytes"}, "kBy"))
    // 1 byte = 8 bits
    assert.Equal(t, Value{F: 8, U: "bits"}, dataConverter.Convert(Value{F: 1, U: "bytes"}, "bits"))
    // 1 mbytes = 1024 kbytes
@@ -22,7 +22,7 @@ func TestData(t *testing.T) {
    assert.Equal(t, Value{F: 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "kbytes"}, "bytes"))
    // 1024 kbytes = 1 mbytes
    assert.Equal(t, Value{F: 1, U: "mbytes"}, dataConverter.Convert(Value{F: 1024, U: "kbytes"}, "mbytes"))
    assert.Equal(t, Value{F: 1, U: "MBy"}, dataConverter.Convert(Value{F: 1000, U: "kBy"}, "MBy"))
    assert.Equal(t, Value{F: 1, U: "MBy"}, dataConverter.Convert(Value{F: 1024, U: "kbytes"}, "MBy"))
    // 1 mbytes = 1024 * 1024 bytes
    assert.Equal(t, Value{F: 1024 * 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "mbytes"}, "bytes"))
    // 1024 mbytes = 1 gbytes
@@ -45,90 +45,10 @@ func TestData(t *testing.T) {
    assert.Equal(t, Value{F: 1024 * 1024 * 1024 * 1024, U: "bytes"}, dataConverter.Convert(Value{F: 1, U: "tbytes"}, "bytes"))
    // 1024 tbytes = 1 pbytes
    assert.Equal(t, Value{F: 1, U: "pbytes"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "pbytes"))
    // 1024 tbytes = 1 PiBy
    assert.Equal(t, Value{F: 1, U: "PiBy"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "PiBy"))
    // 1024 tbytes = 1 pbytes
    assert.Equal(t, Value{F: 1, U: "PBy"}, dataConverter.Convert(Value{F: 1024, U: "tbytes"}, "PBy"))
    // 1 pbytes = 1024 tbytes
    assert.Equal(t, Value{F: 1024, U: "tbytes"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "tbytes"))
    // 1024 TiBy = 1 pbytes
    assert.Equal(t, Value{F: 1024, U: "TiBy"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "TiBy"))
}

func TestDataConversionUCUMUnit(t *testing.T) {
    dataConverter := NewDataConverter()

    tests := []struct {
        name     string
        input    Value
        toUnit   Unit
        expected Value
    }{
        // Bits to bytes
        {name: "Bits to bytes: 8 bit = 1 By", input: Value{F: 8, U: "bit"}, toUnit: "By", expected: Value{F: 1, U: "By"}},
        {name: "Byte to bits: 1 By = 8 bit", input: Value{F: 1, U: "By"}, toUnit: "bit", expected: Value{F: 8, U: "bit"}},
        // Binary byte scaling
        {name: "Binary byte scaling: 1024 By = 1 KiBy", input: Value{F: 1024, U: "By"}, toUnit: "KiBy", expected: Value{F: 1, U: "KiBy"}},
        {name: "Kibibyte to bytes: 1 KiBy = 1024 By", input: Value{F: 1, U: "KiBy"}, toUnit: "By", expected: Value{F: 1024, U: "By"}},
        {name: "Binary byte scaling: 1024 KiBy = 1 MiBy", input: Value{F: 1024, U: "KiBy"}, toUnit: "MiBy", expected: Value{F: 1, U: "MiBy"}},
        {name: "Binary byte scaling: 1024 MiBy = 1 GiBy", input: Value{F: 1024, U: "MiBy"}, toUnit: "GiBy", expected: Value{F: 1, U: "GiBy"}},
        {name: "Gibibyte to mebibyte: 1 GiBy = 1024 MiBy", input: Value{F: 1, U: "GiBy"}, toUnit: "MiBy", expected: Value{F: 1024, U: "MiBy"}},
        {name: "Binary byte scaling: 1024 GiBy = 1 TiBy", input: Value{F: 1024, U: "GiBy"}, toUnit: "TiBy", expected: Value{F: 1, U: "TiBy"}},
        {name: "Binary byte scaling: 1024 TiBy = 1 PiBy", input: Value{F: 1024, U: "TiBy"}, toUnit: "PiBy", expected: Value{F: 1, U: "PiBy"}},
        {name: "Pebibyte to tebibyte: 1 PiBy = 1024 TiBy", input: Value{F: 1, U: "PiBy"}, toUnit: "TiBy", expected: Value{F: 1024, U: "TiBy"}},
        {name: "Gibibyte to bytes: 1 GiBy = 1073741824 By", input: Value{F: 1, U: "GiBy"}, toUnit: "By", expected: Value{F: 1024 * 1024 * 1024, U: "By"}},
        {name: "Tebibyte to bytes: 1 TiBy = 1099511627776 By", input: Value{F: 1, U: "TiBy"}, toUnit: "By", expected: Value{F: 1024 * 1024 * 1024 * 1024, U: "By"}},
        // SI bit scaling
        {name: "SI bit scaling: 1000 bit = 1 kbit", input: Value{F: 1000, U: "bit"}, toUnit: "kbit", expected: Value{F: 1, U: "kbit"}},
        {name: "Kilobit to bits: 1 kbit = 1000 bit", input: Value{F: 1, U: "kbit"}, toUnit: "bit", expected: Value{F: 1000, U: "bit"}},
        {name: "SI bit scaling: 1000 kbit = 1 Mbit", input: Value{F: 1000, U: "kbit"}, toUnit: "Mbit", expected: Value{F: 1, U: "Mbit"}},
        {name: "Gigabit to bits: 1 Gbit = 1000000000 bit", input: Value{F: 1, U: "Gbit"}, toUnit: "bit", expected: Value{F: 1000 * 1000 * 1000, U: "bit"}},
        {name: "SI bit scaling: 1000 Mbit = 1 Gbit", input: Value{F: 1000, U: "Mbit"}, toUnit: "Gbit", expected: Value{F: 1, U: "Gbit"}},
        {name: "Gigabit to megabit: 1 Gbit = 1000 Mbit", input: Value{F: 1, U: "Gbit"}, toUnit: "Mbit", expected: Value{F: 1000, U: "Mbit"}},
        {name: "SI bit scaling: 1000 Gbit = 1 Tbit", input: Value{F: 1000, U: "Gbit"}, toUnit: "Tbit", expected: Value{F: 1, U: "Tbit"}},
        {name: "Terabit to gigabit: 1 Tbit = 1000 Gbit", input: Value{F: 1, U: "Tbit"}, toUnit: "Gbit", expected: Value{F: 1000, U: "Gbit"}},
        {name: "SI bit scaling: 1000 Tbit = 1 Pbit", input: Value{F: 1000, U: "Tbit"}, toUnit: "Pbit", expected: Value{F: 1, U: "Pbit"}},
        {name: "Petabit to terabit: 1 Pbit = 1000 Tbit", input: Value{F: 1, U: "Pbit"}, toUnit: "Tbit", expected: Value{F: 1000, U: "Tbit"}},
        // Binary bit scaling
        {name: "Binary bit scaling: 1024 bit = 1 Kibit", input: Value{F: 1024, U: "bit"}, toUnit: "Kibit", expected: Value{F: 1, U: "Kibit"}},
        {name: "Kibibit to bits: 1 Kibit = 1024 bit", input: Value{F: 1, U: "Kibit"}, toUnit: "bit", expected: Value{F: 1024, U: "bit"}},
        {name: "Binary bit scaling: 1024 Kibit = 1 Mibit", input: Value{F: 1024, U: "Kibit"}, toUnit: "Mibit", expected: Value{F: 1, U: "Mibit"}},
        {name: "Mebibit to kibibit: 1 Mibit = 1024 Kibit", input: Value{F: 1, U: "Mibit"}, toUnit: "Kibit", expected: Value{F: 1024, U: "Kibit"}},
        {name: "Binary bit scaling: 1024 Mibit = 1 Gibit", input: Value{F: 1024, U: "Mibit"}, toUnit: "Gibit", expected: Value{F: 1, U: "Gibit"}},
        {name: "Gibibit to mebibit: 1 Gibit = 1024 Mibit", input: Value{F: 1, U: "Gibit"}, toUnit: "Mibit", expected: Value{F: 1024, U: "Mibit"}},
        {name: "Binary bit scaling: 1024 Gibit = 1 Tibit", input: Value{F: 1024, U: "Gibit"}, toUnit: "Tibit", expected: Value{F: 1, U: "Tibit"}},
        {name: "Tebibit to gibibit: 1 Tibit = 1024 Gibit", input: Value{F: 1, U: "Tibit"}, toUnit: "Gibit", expected: Value{F: 1024, U: "Gibit"}},
        {name: "Binary bit scaling: 1024 Tibit = 1 Pibit", input: Value{F: 1024, U: "Tibit"}, toUnit: "Pibit", expected: Value{F: 1, U: "Pibit"}},
        {name: "Pebibit to tebibit: 1 Pibit = 1024 Tibit", input: Value{F: 1, U: "Pibit"}, toUnit: "Tibit", expected: Value{F: 1024, U: "Tibit"}},
        // Bytes to bits
        {name: "Bytes to bits: 1 KiBy = 8 Kibit", input: Value{F: 1, U: "KiBy"}, toUnit: "Kibit", expected: Value{F: 8, U: "Kibit"}},
        {name: "Bytes to bits: 1 MiBy = 8 Mibit", input: Value{F: 1, U: "MiBy"}, toUnit: "Mibit", expected: Value{F: 8, U: "Mibit"}},
        {name: "Bytes to bits: 1 GiBy = 8 Gibit", input: Value{F: 1, U: "GiBy"}, toUnit: "Gibit", expected: Value{F: 8, U: "Gibit"}},
        // SI byte scaling (Exa, Zetta, Yotta)
        {name: "SI byte scaling: 1000 PBy = 1 EBy", input: Value{F: 1000, U: "PBy"}, toUnit: "EBy", expected: Value{F: 1, U: "EBy"}},
        {name: "Exabyte to bytes: 1 EBy = 1e18 By", input: Value{F: 1, U: "EBy"}, toUnit: "By", expected: Value{F: 1e18, U: "By"}},
        {name: "SI byte scaling: 1000 EBy = 1 ZBy", input: Value{F: 1000, U: "EBy"}, toUnit: "ZBy", expected: Value{F: 1, U: "ZBy"}},
        {name: "Zettabyte to petabytes: 1 ZBy = 1000000 PBy", input: Value{F: 1, U: "ZBy"}, toUnit: "PBy", expected: Value{F: 1e6, U: "PBy"}},
        {name: "SI byte scaling: 1000 ZBy = 1 YBy", input: Value{F: 1000, U: "ZBy"}, toUnit: "YBy", expected: Value{F: 1, U: "YBy"}},
        {name: "Yottabyte to zettabyte: 1 YBy = 1000 ZBy", input: Value{F: 1, U: "YBy"}, toUnit: "ZBy", expected: Value{F: 1000, U: "ZBy"}},
        // Binary byte scaling (Exbi, Zebi, Yobi)
        {name: "Binary byte scaling: 1024 PiBy = 1 EiBy", input: Value{F: 1024, U: "PiBy"}, toUnit: "EiBy", expected: Value{F: 1, U: "EiBy"}},
        {name: "Exbibyte to tebibytes: 1 EiBy = 1048576 TiBy", input: Value{F: 1, U: "EiBy"}, toUnit: "TiBy", expected: Value{F: 1024 * 1024, U: "TiBy"}},
        {name: "Binary byte scaling: 1024 EiBy = 1 ZiBy", input: Value{F: 1024, U: "EiBy"}, toUnit: "ZiBy", expected: Value{F: 1, U: "ZiBy"}},
        {name: "Zebibyte to exbibyte: 1 ZiBy = 1024 EiBy", input: Value{F: 1, U: "ZiBy"}, toUnit: "EiBy", expected: Value{F: 1024, U: "EiBy"}},
        {name: "Binary byte scaling: 1024 ZiBy = 1 YiBy", input: Value{F: 1024, U: "ZiBy"}, toUnit: "YiBy", expected: Value{F: 1, U: "YiBy"}},
        {name: "Yobibyte to zebibyte: 1 YiBy = 1024 ZiBy", input: Value{F: 1, U: "YiBy"}, toUnit: "ZiBy", expected: Value{F: 1024, U: "ZiBy"}},
        // SI bit scaling (Exa, Zetta, Yotta)
        {name: "SI bit scaling: 1000 Pbit = 1 Ebit", input: Value{F: 1000, U: "Pbit"}, toUnit: "Ebit", expected: Value{F: 1, U: "Ebit"}},
        {name: "Exabit to gigabits: 1 Ebit = 1e9 Gbit", input: Value{F: 1, U: "Ebit"}, toUnit: "Gbit", expected: Value{F: 1e9, U: "Gbit"}},
        {name: "SI bit scaling: 1000 Ebit = 1 Zbit", input: Value{F: 1000, U: "Ebit"}, toUnit: "Zbit", expected: Value{F: 1, U: "Zbit"}},
        {name: "Zettabit to exabit: 1 Zbit = 1000 Ebit", input: Value{F: 1, U: "Zbit"}, toUnit: "Ebit", expected: Value{F: 1000, U: "Ebit"}},
        {name: "SI bit scaling: 1000 Zbit = 1 Ybit", input: Value{F: 1000, U: "Zbit"}, toUnit: "Ybit", expected: Value{F: 1, U: "Ybit"}},
        {name: "Yottabit to zettabit: 1 Ybit = 1000 Zbit", input: Value{F: 1, U: "Ybit"}, toUnit: "Zbit", expected: Value{F: 1000, U: "Zbit"}},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := dataConverter.Convert(tt.input, tt.toUnit)
            assert.Equal(t, tt.expected, got)
        })
    }
    // 1024 pbytes = 1 tbytes
    assert.Equal(t, Value{F: 1024, U: "TBy"}, dataConverter.Convert(Value{F: 1, U: "pbytes"}, "TBy"))
}
@@ -47,7 +47,7 @@ func FromTimeUnit(u Unit) Duration {
        return Hour
    case "d":
        return Day
    case "w", "wk":
    case "w":
        return Week
    default:
        return Second
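One behavioral consequence worth noting: with "wk" dropped both here and from the FromUnit dispatch earlier, a value tagged "wk" now hits the Second default. A sketch inside the converter package:

```go
// Sketch only (converter package assumed): week handling after this change.
func exampleWeekUnits() (Duration, Duration) {
	w := FromTimeUnit("w")   // still Week
	wk := FromTimeUnit("wk") // now falls through to the Second default
	return w, wk
}
```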
@@ -54,13 +54,4 @@ func TestDurationConvert(t *testing.T) {
    assert.Equal(t, Value{F: 1, U: "ms"}, timeConverter.Convert(Value{F: 1000, U: "us"}, "ms"))
    // 1000000000 ns = 1 s
    assert.Equal(t, Value{F: 1, U: "s"}, timeConverter.Convert(Value{F: 1000000000, U: "ns"}, "s"))

    // 7 d = 1 wk
    assert.Equal(t, Value{F: 1, U: "wk"}, timeConverter.Convert(Value{F: 7, U: "d"}, "wk"))
    // 1 wk = 7 d
    assert.Equal(t, Value{F: 7, U: "d"}, timeConverter.Convert(Value{F: 1, U: "wk"}, "d"))
    // 1 wk = 168 h
    assert.Equal(t, Value{F: 168, U: "h"}, timeConverter.Convert(Value{F: 1, U: "wk"}, "h"))
    // 604800 s = 1 wk
    assert.Equal(t, Value{F: 1, U: "wk"}, timeConverter.Convert(Value{F: 604800, U: "s"}, "wk"))
}
@@ -24,50 +24,30 @@ func (f *dataFormatter) Format(value float64, unit string) string {
        return humanize.IBytes(uint64(value))
    case "decbytes":
        return humanize.Bytes(uint64(value))
    case "kbytes", "KiBy":
    case "bits":
        return humanize.IBytes(uint64(value * converter.Bit))
    case "decbits":
        return humanize.Bytes(uint64(value * converter.Bit))
    case "kbytes", "kBy":
        return humanize.IBytes(uint64(value * converter.Kibibit))
    case "Kibit":
        return humanize.IBytes(uint64(value * converter.Kibibit / 8))
    case "decKbytes", "deckbytes", "kBy":
        return humanize.Bytes(uint64(value * converter.Kilobit))
    case "kbit":
        return humanize.Bytes(uint64(value * converter.Kilobit / 8))
    case "mbytes", "MiBy":
    case "decKbytes", "deckbytes":
        return humanize.IBytes(uint64(value * converter.Kilobit))
    case "mbytes", "MBy":
        return humanize.IBytes(uint64(value * converter.Mebibit))
    case "Mibit":
        return humanize.IBytes(uint64(value * converter.Mebibit / 8))
    case "decMbytes", "decmbytes", "MBy":
    case "decMbytes", "decmbytes":
        return humanize.Bytes(uint64(value * converter.Megabit))
    case "Mbit":
        return humanize.Bytes(uint64(value * converter.Megabit / 8))
    case "gbytes", "GiBy":
    case "gbytes", "GBy":
        return humanize.IBytes(uint64(value * converter.Gibibit))
    case "Gibit":
        return humanize.IBytes(uint64(value * converter.Gibibit / 8))
    case "decGbytes", "decgbytes", "GBy":
    case "decGbytes", "decgbytes":
        return humanize.Bytes(uint64(value * converter.Gigabit))
    case "Gbit":
        return humanize.Bytes(uint64(value * converter.Gigabit / 8))
    case "tbytes", "TiBy":
    case "tbytes", "TBy":
        return humanize.IBytes(uint64(value * converter.Tebibit))
    case "Tibit":
        return humanize.IBytes(uint64(value * converter.Tebibit / 8))
    case "decTbytes", "dectbytes", "TBy":
    case "decTbytes", "dectbytes":
        return humanize.Bytes(uint64(value * converter.Terabit))
    case "Tbit":
        return humanize.Bytes(uint64(value * converter.Terabit / 8))
    case "pbytes", "PiBy":
    case "pbytes", "PBy":
        return humanize.IBytes(uint64(value * converter.Pebibit))
    case "Pbit":
        return humanize.Bytes(uint64(value * converter.Petabit / 8))
    case "decPbytes", "decpbytes", "PBy":
    case "decPbytes", "decpbytes":
        return humanize.Bytes(uint64(value * converter.Petabit))
    case "EiBy":
        return humanize.IBytes(uint64(value * converter.Exbibit))
    case "Ebit":
        return humanize.Bytes(uint64(value * converter.Exabit / 8))
    case "EBy":
        return humanize.Bytes(uint64(value * converter.Exabit))
    }
    // When unit is not matched, return the value as it is.
    return fmt.Sprintf("%v", value)
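The whole formatter switch above leans on go-humanize's two byte renderers: IBytes for IEC (base-2, KiB/MiB) and Bytes for SI (base-10, kB/MB). A self-contained demo of that distinction:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.IBytes(1024)) // "1.0 KiB" (IEC, base 2)
	fmt.Println(humanize.Bytes(1000))  // "1.0 kB" (SI, base 10)
}
```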
@@ -24,55 +24,50 @@ func (f *dataRateFormatter) Format(value float64, unit string) string {
        return humanize.IBytes(uint64(value)) + "/s"
    case "Bps", "By/s":
        return humanize.Bytes(uint64(value)) + "/s"
    case "KiBs", "KiBy/s":
    case "binbps":
        return humanize.IBytes(uint64(value*converter.BitPerSecond)) + "/s"
    case "bps", "bit/s":
        return humanize.Bytes(uint64(value*converter.BitPerSecond)) + "/s"
    case "KiBs":
        return humanize.IBytes(uint64(value*converter.KibibitPerSecond)) + "/s"
    case "Kibits", "Kibit/s":
        return humanize.IBytes(uint64(value*converter.KibibitPerSecond/8)) + "/s"
    case "Kibits":
        return humanize.IBytes(uint64(value*converter.KibibytePerSecond)) + "/s"
    case "KBs", "kBy/s":
        return humanize.IBytes(uint64(value*converter.KilobitPerSecond)) + "/s"
    case "Kbits", "kbit/s":
        return humanize.Bytes(uint64(value*converter.KilobitPerSecond/8)) + "/s"
    case "MiBs", "MiBy/s":
        return humanize.IBytes(uint64(value*converter.KilobytePerSecond)) + "/s"
    case "MiBs":
        return humanize.IBytes(uint64(value*converter.MebibitPerSecond)) + "/s"
    case "Mibits", "Mibit/s":
        return humanize.IBytes(uint64(value*converter.MebibitPerSecond/8)) + "/s"
    case "Mibits":
        return humanize.IBytes(uint64(value*converter.MebibytePerSecond)) + "/s"
    case "MBs", "MBy/s":
        return humanize.IBytes(uint64(value*converter.MegabitPerSecond)) + "/s"
    case "Mbits", "Mbit/s":
        return humanize.Bytes(uint64(value*converter.MegabitPerSecond/8)) + "/s"
    case "GiBs", "GiBy/s":
        return humanize.IBytes(uint64(value*converter.MegabytePerSecond)) + "/s"
    case "GiBs":
        return humanize.IBytes(uint64(value*converter.GibibitPerSecond)) + "/s"
    case "Gibits", "Gibit/s":
        return humanize.IBytes(uint64(value*converter.GibibitPerSecond/8)) + "/s"
    case "Gibits":
        return humanize.IBytes(uint64(value*converter.GibibytePerSecond)) + "/s"
    case "GBs", "GBy/s":
        return humanize.IBytes(uint64(value*converter.GigabitPerSecond)) + "/s"
    case "Gbits", "Gbit/s":
        return humanize.Bytes(uint64(value*converter.GigabitPerSecond/8)) + "/s"
    case "TiBs", "TiBy/s":
        return humanize.IBytes(uint64(value*converter.GigabytePerSecond)) + "/s"
    case "TiBs":
        return humanize.IBytes(uint64(value*converter.TebibitPerSecond)) + "/s"
    case "Tibits", "Tibit/s":
        return humanize.IBytes(uint64(value*converter.TebibitPerSecond/8)) + "/s"
    case "Tibits":
        return humanize.IBytes(uint64(value*converter.TebibytePerSecond)) + "/s"
    case "TBs", "TBy/s":
        return humanize.IBytes(uint64(value*converter.TerabitPerSecond)) + "/s"
    case "Tbits", "Tbit/s":
        return humanize.Bytes(uint64(value*converter.TerabitPerSecond/8)) + "/s"
    case "PiBs", "PiBy/s":
        return humanize.IBytes(uint64(value*converter.TerabytePerSecond)) + "/s"
    case "PiBs":
        return humanize.IBytes(uint64(value*converter.PebibitPerSecond)) + "/s"
    case "Pibits", "Pibit/s":
        return humanize.IBytes(uint64(value*converter.PebibitPerSecond/8)) + "/s"
    case "Pibits":
        return humanize.IBytes(uint64(value*converter.PebibytePerSecond)) + "/s"
    case "PBs", "PBy/s":
        return humanize.IBytes(uint64(value*converter.PetabitPerSecond)) + "/s"
    case "Pbits", "Pbit/s":
        return humanize.Bytes(uint64(value*converter.PetabitPerSecond/8)) + "/s"
    // Exa units
    case "EBy/s":
        return humanize.Bytes(uint64(value*converter.ExabitPerSecond)) + "/s"
    case "Ebit/s":
        return humanize.Bytes(uint64(value*converter.ExabitPerSecond/8)) + "/s"
    case "EiBy/s":
        return humanize.IBytes(uint64(value*converter.ExbibitPerSecond)) + "/s"
    case "Eibit/s":
        return humanize.IBytes(uint64(value*converter.ExbibitPerSecond/8)) + "/s"
        return humanize.IBytes(uint64(value*converter.PetabytePerSecond)) + "/s"
    }
    // When unit is not matched, return the value as it is.
    return fmt.Sprintf("%v", value)
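The data-rate formatter is the same idea with a "/s" suffix appended after scaling. A sketch inside the formatter package, with the expected string matching the binBps case kept above:

```go
// Sketch only (formatter package assumed).
func exampleRate() string {
	f := NewDataRateFormatter()
	return f.Format(1024, "binBps") // "1.0 KiB/s"
}
```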
@@ -1,140 +0,0 @@
package formatter

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDataRateFormatterComprehensive(t *testing.T) {
    dataRateFormatter := NewDataRateFormatter()

    tests := []struct {
        name     string
        value    float64
        unit     string
        expected string
    }{
        // IEC Base bytes/sec - binBps
        {name: "binBps as Bps", value: 0, unit: "binBps", expected: "0 B/s"},
        {name: "1 binBps as 1 Bps", value: 1, unit: "binBps", expected: "1 B/s"},
        {name: "binBps as Kibps", value: 1024, unit: "binBps", expected: "1.0 KiB/s"},
        {name: "binBps as Mibps", value: 1024 * 1024, unit: "binBps", expected: "1.0 MiB/s"},
        {name: "binBps as Gibps", value: 1024 * 1024 * 1024, unit: "binBps", expected: "1.0 GiB/s"},

        // SI Base bytes/sec - Bps, By/s
        {name: "Bps as Bps", value: 1, unit: "Bps", expected: "1 B/s"},
        {name: "Bps as kbps", value: 1000, unit: "Bps", expected: "1.0 kB/s"},
        {name: "Bps as Mbps", value: 1000 * 1000, unit: "Bps", expected: "1.0 MB/s"},
        {name: "Byps as kbps", value: 1000, unit: "By/s", expected: "1.0 kB/s"},

        // Kibibytes/sec - KiBs, KiBy/s
        {name: "Kibs as Bps", value: 0, unit: "KiBs", expected: "0 B/s"},
        {name: "Kibs as Kibps", value: 1, unit: "KiBs", expected: "1.0 KiB/s"},
        {name: "Kibs as Mibps", value: 1024, unit: "KiBs", expected: "1.0 MiB/s"},
        {name: "Kibs as Gibps", value: 3 * 1024 * 1024, unit: "KiBs", expected: "3.0 GiB/s"},
        {name: "KiByps as Kibps", value: 1, unit: "KiBy/s", expected: "1.0 KiB/s"},
        {name: "KiByps as Mibps", value: 1024, unit: "KiBy/s", expected: "1.0 MiB/s"},

        // Kibibits/sec - Kibits, Kibit/s
        {name: "Kibitps as Kibps", value: 1, unit: "Kibits", expected: "128 B/s"},
        {name: "Kibitps as Mibps", value: 42 * 1024, unit: "Kibits", expected: "5.3 MiB/s"},
        {name: "Kibitps as Kibps 10", value: 10, unit: "Kibit/s", expected: "1.3 KiB/s"},

        // Kilobytes/sec (SI) - KBs, kBy/s
        {name: "Kbs as Bps", value: 0.5, unit: "KBs", expected: "500 B/s"},
        {name: "Kbs as Mibps", value: 1048.6, unit: "KBs", expected: "1.0 MiB/s"},
        {name: "kByps as Bps", value: 1, unit: "kBy/s", expected: "1000 B/s"},

        // Kilobits/sec (SI) - Kbits, kbit/s
        {name: "Kbitps as Bps", value: 1, unit: "Kbits", expected: "125 B/s"},
        {name: "kbitps as Bps", value: 1, unit: "kbit/s", expected: "125 B/s"},

        // Mebibytes/sec - MiBs, MiBy/s
        {name: "Mibs as Mibps", value: 1, unit: "MiBs", expected: "1.0 MiB/s"},
        {name: "Mibs as Gibps", value: 1024, unit: "MiBs", expected: "1.0 GiB/s"},
        {name: "Mibs as Tibps", value: 1024 * 1024, unit: "MiBs", expected: "1.0 TiB/s"},
        {name: "MiByps as Mibps", value: 1, unit: "MiBy/s", expected: "1.0 MiB/s"},

        // Mebibits/sec - Mibits, Mibit/s
        {name: "Mibitps as Mibps", value: 40, unit: "Mibits", expected: "5.0 MiB/s"},
        {name: "Mibitps as Mibps per second variant", value: 10, unit: "Mibit/s", expected: "1.3 MiB/s"},

        // Megabytes/sec (SI) - MBs, MBy/s
        {name: "Mbs as Kibps", value: 1, unit: "MBs", expected: "977 KiB/s"},
        {name: "MByps as Kibps", value: 1, unit: "MBy/s", expected: "977 KiB/s"},

        // Megabits/sec (SI) - Mbits, Mbit/s
        {name: "Mbitps as Kibps", value: 1, unit: "Mbits", expected: "125 kB/s"},
        {name: "Mbitps as Kibps per second variant", value: 1, unit: "Mbit/s", expected: "125 kB/s"},

        // Gibibytes/sec - GiBs, GiBy/s
        {name: "Gibs as Gibps", value: 1, unit: "GiBs", expected: "1.0 GiB/s"},
        {name: "Gibs as Tibps", value: 1024, unit: "GiBs", expected: "1.0 TiB/s"},
        {name: "GiByps as Tibps", value: 42 * 1024, unit: "GiBy/s", expected: "42 TiB/s"},

        // Gibibits/sec - Gibits, Gibit/s
        {name: "Gibitps as Tibps", value: 42 * 1024, unit: "Gibits", expected: "5.3 TiB/s"},
        {name: "Gibitps as Tibps per second variant", value: 42 * 1024, unit: "Gibit/s", expected: "5.3 TiB/s"},

        // Gigabytes/sec (SI) - GBs, GBy/s
        {name: "Gbs as Tibps", value: 42 * 1000, unit: "GBs", expected: "38 TiB/s"},
        {name: "GByps as Tibps", value: 42 * 1000, unit: "GBy/s", expected: "38 TiB/s"},

        // Gigabits/sec (SI) - Gbits, Gbit/s
        {name: "Gbitps as Tibps", value: 42 * 1000, unit: "Gbits", expected: "5.3 TB/s"},
        {name: "Gbitps as Tibps per second variant", value: 42 * 1000, unit: "Gbit/s", expected: "5.3 TB/s"},

        // Tebibytes/sec - TiBs, TiBy/s
        {name: "Tibs as Tibps", value: 1, unit: "TiBs", expected: "1.0 TiB/s"},
        {name: "Tibs as Pibps", value: 1024, unit: "TiBs", expected: "1.0 PiB/s"},
        {name: "TiByps as Pibps", value: 42 * 1024, unit: "TiBy/s", expected: "42 PiB/s"},

        // Tebibits/sec - Tibits, Tibit/s
        {name: "Tibitps as Pibps", value: 42 * 1024, unit: "Tibits", expected: "5.3 PiB/s"},
        {name: "Tibitps as Pibps per second variant", value: 42 * 1024, unit: "Tibit/s", expected: "5.3 PiB/s"},

        // Terabytes/sec (SI) - TBs, TBy/s
        {name: "Tbs as Pibps", value: 42 * 1000, unit: "TBs", expected: "37 PiB/s"},
        {name: "TByps as Pibps", value: 42 * 1000, unit: "TBy/s", expected: "37 PiB/s"},

        // Terabits/sec (SI) - Tbits, Tbit/s
        {name: "Tbitps as Pibps", value: 42 * 1000, unit: "Tbits", expected: "5.3 PB/s"},
        {name: "Tbitps as Pibps per second variant", value: 42 * 1000, unit: "Tbit/s", expected: "5.3 PB/s"},

        // Pebibytes/sec - PiBs, PiBy/s
        {name: "Pibs as Eibps", value: 10 * 1024, unit: "PiBs", expected: "10 EiB/s"},
        {name: "PiByps as Eibps", value: 10 * 1024, unit: "PiBy/s", expected: "10 EiB/s"},

        // Pebibits/sec - Pibits, Pibit/s
        {name: "Pibitps as Eibps", value: 10 * 1024, unit: "Pibits", expected: "1.3 EiB/s"},
        {name: "Pibitps as Eibps per second variant", value: 10 * 1024, unit: "Pibit/s", expected: "1.3 EiB/s"},

        // Petabytes/sec (SI) - PBs, PBy/s
        {name: "Pbs as Pibps", value: 42, unit: "PBs", expected: "37 PiB/s"},
        {name: "PByps as Pibps", value: 42, unit: "PBy/s", expected: "37 PiB/s"},

        // Petabits/sec (SI) - Pbits, Pbit/s
        {name: "Pbitps as Pibps", value: 42, unit: "Pbits", expected: "5.3 PB/s"},
        {name: "Pbitps as Pibps per second variant", value: 42, unit: "Pbit/s", expected: "5.3 PB/s"},

        // Exabytes/sec (SI) - EBy/s
        {name: "EByps as Ebps", value: 10, unit: "EBy/s", expected: "10 EB/s"},

        // Exabits/sec (SI) - Ebit/s
        {name: "Ebitps as Ebps", value: 10, unit: "Ebit/s", expected: "1.3 EB/s"},

        // Exbibytes/sec (IEC) - EiBy/s
        {name: "EiByps as Eibps", value: 10, unit: "EiBy/s", expected: "10 EiB/s"},

        // Exbibits/sec (IEC) - Eibit/s
        {name: "Eibitps as Eibps", value: 10, unit: "Eibit/s", expected: "1.3 EiB/s"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := dataRateFormatter.Format(tt.value, tt.unit)
            assert.Equal(t, tt.expected, got)
        })
    }
}
@@ -14,164 +14,21 @@ func TestData(t *testing.T) {
|
||||
assert.Equal(t, "1.0 KiB", dataFormatter.Format(1024, "bytes"))
|
||||
assert.Equal(t, "1.0 KiB", dataFormatter.Format(1024, "By"))
|
||||
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "mbytes"))
|
||||
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "MiBy"))
|
||||
assert.Equal(t, "2.3 GiB", dataFormatter.Format(2.3*1024, "MBy"))
|
||||
assert.Equal(t, "1.0 MiB", dataFormatter.Format(1024*1024, "bytes"))
|
||||
assert.Equal(t, "1.0 MiB", dataFormatter.Format(1024*1024, "By"))
|
||||
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "mbytes"))
|
||||
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "MiBy"))
|
||||
assert.Equal(t, "69 TiB", dataFormatter.Format(69*1024*1024, "MBy"))
|
||||
assert.Equal(t, "102 KiB", dataFormatter.Format(102*1024, "bytes"))
|
||||
assert.Equal(t, "102 KiB", dataFormatter.Format(102*1024, "By"))
|
||||
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "kbytes"))
|
||||
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "KiBy"))
|
||||
assert.Equal(t, "240 MiB", dataFormatter.Format(240*1024, "kBy"))
|
||||
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "kbytes"))
|
||||
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "KiBy"))
|
||||
assert.Equal(t, "1.0 GiB", dataFormatter.Format(1024*1024, "kBy"))
|
||||
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "kbytes"))
|
||||
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "KiBy"))
|
||||
assert.Equal(t, "23 GiB", dataFormatter.Format(23*1024*1024, "kBy"))
|
||||
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "kbytes"))
|
||||
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "KiBy"))
|
||||
assert.Equal(t, "32 TiB", dataFormatter.Format(32*1024*1024*1024, "kBy"))
|
||||
assert.Equal(t, "24 MiB", dataFormatter.Format(24, "mbytes"))
|
||||
assert.Equal(t, "24 MiB", dataFormatter.Format(24, "MiBy"))
|
||||
}
|
||||
|
||||
func TestDataFormatterComprehensive(t *testing.T) {
|
||||
dataFormatter := NewDataFormatter()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
value float64
|
||||
unit string
|
||||
expected string
|
||||
}{
|
||||
// IEC Base bytes - bytes, By
|
||||
{name: "bytes: 0", value: 0, unit: "bytes", expected: "0 B"},
|
||||
{name: "bytes: 1", value: 1, unit: "bytes", expected: "1 B"},
|
||||
{name: "bytes: 512", value: 512, unit: "bytes", expected: "512 B"},
|
||||
{name: "bytes: 1023", value: 1023, unit: "bytes", expected: "1023 B"},
|
||||
{name: "bytes: 1024 = 1 KiB", value: 1024, unit: "bytes", expected: "1.0 KiB"},
|
||||
{name: "bytes: 1536", value: 1536, unit: "bytes", expected: "1.5 KiB"},
|
||||
{name: "bytes: 1024*1024 = 1 MiB", value: 1024 * 1024, unit: "bytes", expected: "1.0 MiB"},
|
||||
{name: "bytes: 1024*1024*1024 = 1 GiB", value: 1024 * 1024 * 1024, unit: "bytes", expected: "1.0 GiB"},
|
||||
{name: "By: same as bytes", value: 1024, unit: "By", expected: "1.0 KiB"},
|
||||
|
||||
// SI Base bytes - decbytes
|
||||
{name: "decbytes: 1", value: 1, unit: "decbytes", expected: "1 B"},
|
||||
{name: "decbytes: 1000 = 1 kB", value: 1000, unit: "decbytes", expected: "1.0 kB"},
|
||||
{name: "decbytes: 1000*1000 = 1 MB", value: 1000 * 1000, unit: "decbytes", expected: "1.0 MB"},
|
||||
{name: "decbytes: 1000*1000*1000 = 1 GB", value: 1000 * 1000 * 1000, unit: "decbytes", expected: "1.0 GB"},
|
||||
|
||||
// Kibibytes - kbytes, KiBy (IEC)
|
||||
{name: "kbytes: 0", value: 0, unit: "kbytes", expected: "0 B"},
|
||||
{name: "kbytes: 1 = 1 KiB", value: 1, unit: "kbytes", expected: "1.0 KiB"},
|
||||
{name: "kbytes: 512", value: 512, unit: "kbytes", expected: "512 KiB"},
|
||||
{name: "kbytes: 1024 = 1 MiB", value: 1024, unit: "kbytes", expected: "1.0 MiB"},
|
||||
{name: "kbytes: 1024*1024 = 1 GiB", value: 1024 * 1024, unit: "kbytes", expected: "1.0 GiB"},
|
||||
{name: "kbytes: 2.3*1024 = 2.3 MiB", value: 2.3 * 1024, unit: "kbytes", expected: "2.3 MiB"},
|
||||
{name: "KiBy: 1 = 1 KiB", value: 1, unit: "KiBy", expected: "1.0 KiB"},
|
||||
{name: "KiBy: 1024 = 1 MiB", value: 1024, unit: "KiBy", expected: "1.0 MiB"},
|
||||
{name: "kbytes and KiBy alias", value: 240 * 1024, unit: "KiBy", expected: "240 MiB"},
|
||||
|
||||
// SI Kilobytes - decKbytes, deckbytes, kBy
|
||||
{name: "decKbytes: 1 = 1 kB", value: 1, unit: "decKbytes", expected: "1.0 kB"},
|
||||
{name: "decKbytes: 1000 = 1 MB", value: 1000, unit: "decKbytes", expected: "1.0 MB"},
|
||||
{name: "deckbytes: 1 = 1 kB", value: 1, unit: "deckbytes", expected: "1.0 kB"},
|
||||
{name: "kBy: 1 = 1 kB", value: 1, unit: "kBy", expected: "1.0 kB"},
|
||||
{name: "kBy: 1000 = 1 MB", value: 1000, unit: "kBy", expected: "1.0 MB"},
|
||||
|
||||
		// Mebibytes - mbytes, MiBy (IEC)
		{name: "mbytes: 1 = 1 MiB", value: 1, unit: "mbytes", expected: "1.0 MiB"},
		{name: "mbytes: 24", value: 24, unit: "mbytes", expected: "24 MiB"},
		{name: "mbytes: 1024 = 1 GiB", value: 1024, unit: "mbytes", expected: "1.0 GiB"},
		{name: "mbytes: 1024*1024 = 1 TiB", value: 1024 * 1024, unit: "mbytes", expected: "1.0 TiB"},
		{name: "mbytes: 69*1024 = 69 GiB", value: 69 * 1024, unit: "mbytes", expected: "69 GiB"},
		{name: "mbytes: 69*1024*1024 = 69 TiB", value: 69 * 1024 * 1024, unit: "mbytes", expected: "69 TiB"},
		{name: "MiBy: 1 = 1 MiB", value: 1, unit: "MiBy", expected: "1.0 MiB"},
		{name: "MiBy: 1024 = 1 GiB", value: 1024, unit: "MiBy", expected: "1.0 GiB"},

		// SI Megabytes - decMbytes, decmbytes, MBy
		{name: "decMbytes: 1 = 1 MB", value: 1, unit: "decMbytes", expected: "1.0 MB"},
		{name: "decMbytes: 1000 = 1 GB", value: 1000, unit: "decMbytes", expected: "1.0 GB"},
		{name: "decmbytes: 1 = 1 MB", value: 1, unit: "decmbytes", expected: "1.0 MB"},
		{name: "MBy: 1 = 1 MB", value: 1, unit: "MBy", expected: "1.0 MB"},

		// Gibibytes - gbytes, GiBy (IEC)
		{name: "gbytes: 1 = 1 GiB", value: 1, unit: "gbytes", expected: "1.0 GiB"},
		{name: "gbytes: 1024 = 1 TiB", value: 1024, unit: "gbytes", expected: "1.0 TiB"},
		{name: "GiBy: 42*1024 = 42 TiB", value: 42 * 1024, unit: "GiBy", expected: "42 TiB"},

		// SI Gigabytes - decGbytes, decgbytes, GBy
		{name: "decGbytes: 42*1000 = 42 TB", value: 42 * 1000, unit: "decGbytes", expected: "42 TB"},
		{name: "GBy: 42*1000 = 42 TB", value: 42 * 1000, unit: "GBy", expected: "42 TB"},

		// Tebibytes - tbytes, TiBy (IEC)
		{name: "tbytes: 1 = 1 TiB", value: 1, unit: "tbytes", expected: "1.0 TiB"},
		{name: "tbytes: 1024 = 1 PiB", value: 1024, unit: "tbytes", expected: "1.0 PiB"},
		{name: "TiBy: 42*1024 = 42 PiB", value: 42 * 1024, unit: "TiBy", expected: "42 PiB"},

		// SI Terabytes - decTbytes, dectbytes, TBy
		{name: "decTbytes: 42*1000 = 42 PB", value: 42 * 1000, unit: "decTbytes", expected: "42 PB"},
		{name: "dectbytes: 42*1000 = 42 PB", value: 42 * 1000, unit: "dectbytes", expected: "42 PB"},
		{name: "TBy: 42*1000 = 42 PB", value: 42 * 1000, unit: "TBy", expected: "42 PB"},

		// Pebibytes - pbytes, PiBy (IEC)
		{name: "pbytes: 10*1024 = 10 EiB", value: 10 * 1024, unit: "pbytes", expected: "10 EiB"},
		{name: "PiBy: 10*1024 = 10 EiB", value: 10 * 1024, unit: "PiBy", expected: "10 EiB"},

		// SI Petabytes - decPbytes, decpbytes, PBy
		{name: "decPbytes: 42 = 42 PB", value: 42, unit: "decPbytes", expected: "42 PB"},
		{name: "decpbytes: 42 = 42 PB", value: 42, unit: "decpbytes", expected: "42 PB"},
		{name: "PBy: 42 = 42 PB", value: 42, unit: "PBy", expected: "42 PB"},

		// Exbibytes - EiBy (IEC)
		{name: "EiBy: 10 = 10 EiB", value: 10, unit: "EiBy", expected: "10 EiB"},

		// Exabytes - EBy (SI)
		{name: "EBy: 10 = 10 EB", value: 10, unit: "EBy", expected: "10 EB"},

		// Kibibits - Kibit (IEC): 1 Kibit = 1024 bits = 128 bytes
		{name: "Kibit: 1 = 128 B", value: 1, unit: "Kibit", expected: "128 B"},
		{name: "Kibit: 1024 = 128 KiB", value: 1024, unit: "Kibit", expected: "128 KiB"},

		// Mebibits - Mibit (IEC): 1 Mibit = 1024 Kibit = 128 KiB
		{name: "Mibit: 1 = 128 KiB", value: 1, unit: "Mibit", expected: "128 KiB"},
		{name: "Mibit: 1024 = 128 MiB", value: 1024, unit: "Mibit", expected: "128 MiB"},

		// Gibibits - Gibit (IEC): 1 Gibit = 1024 Mibit = 128 MiB
		{name: "Gibit: 1 = 128 MiB", value: 1, unit: "Gibit", expected: "128 MiB"},
		{name: "Gibit: 42*1024 = 5.3 TiB", value: 42 * 1024, unit: "Gibit", expected: "5.3 TiB"},

		// Tebibits - Tibit (IEC): 1 Tibit = 1024 Gibit = 128 GiB
		{name: "Tibit: 1 = 128 GiB", value: 1, unit: "Tibit", expected: "128 GiB"},
		{name: "Tibit: 42*1024 = 5.3 PiB", value: 42 * 1024, unit: "Tibit", expected: "5.3 PiB"},

		// Kilobits - kbit (SI): 1 kbit = 1000 bits = 125 bytes
		{name: "kbit: 1 = 125 B", value: 1, unit: "kbit", expected: "125 B"},
		{name: "kbit: 1000 = 125 kB", value: 1000, unit: "kbit", expected: "125 kB"},

		// Megabits - Mbit (SI): 1 Mbit = 1000 kbit = 125 kB
		{name: "Mbit: 1 = 125 kB", value: 1, unit: "Mbit", expected: "125 kB"},
		{name: "Mbit: 1000 = 125 MB", value: 1000, unit: "Mbit", expected: "125 MB"},

		// Gigabits - Gbit (SI): 1 Gbit = 1000 Mbit = 125 MB
		{name: "Gbit: 1 = 125 MB", value: 1, unit: "Gbit", expected: "125 MB"},
		{name: "Gbit: 42*1000 = 5.3 TB", value: 42 * 1000, unit: "Gbit", expected: "5.3 TB"},

		// Terabits - Tbit (SI): 1 Tbit = 1000 Gbit = 125 GB
		{name: "Tbit: 1 = 125 GB", value: 1, unit: "Tbit", expected: "125 GB"},
		{name: "Tbit: 42*1000 = 5.3 PB", value: 42 * 1000, unit: "Tbit", expected: "5.3 PB"},

		// Petabits - Pbit (SI): 1 Pbit = 1000 Tbit = 125 TB
		{name: "Pbit: 1 = 125 TB", value: 1, unit: "Pbit", expected: "125 TB"},
		{name: "Pbit: 42 = 5.3 PB", value: 42, unit: "Pbit", expected: "5.3 PB"},

		// Exabits - Ebit (SI): 1 Ebit = 1000 Pbit = 125 PB
		{name: "Ebit: 1 = 125 PB", value: 1, unit: "Ebit", expected: "125 PB"},
		{name: "Ebit: 10 = 1.3 EB", value: 10, unit: "Ebit", expected: "1.3 EB"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := dataFormatter.Format(tt.value, tt.unit)
			assert.Equal(t, tt.expected, got)
		})
	}
	assert.Equal(t, "24 MiB", dataFormatter.Format(24, "MBy"))
}

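The expectations above reduce to three rules: IEC units scale by 1024, SI units scale by 1000, and bit-based units first divide by 8 into bytes. A minimal, self-contained Go sketch of that arithmetic follows; it is illustrative only and does not reproduce this repo formatter's precision rules (e.g. "1.0 MiB" for a value of exactly one unit):

package main

import "fmt"

// humanize scales a byte count by repeatedly dividing by the base
// (1024 for IEC units, 1000 for SI units) until it drops below it.
func humanize(bytes float64, base float64, suffixes []string) string {
	i := 0
	for bytes >= base && i < len(suffixes)-1 {
		bytes /= base
		i++
	}
	if bytes == float64(int64(bytes)) {
		return fmt.Sprintf("%g %s", bytes, suffixes[i])
	}
	return fmt.Sprintf("%.1f %s", bytes, suffixes[i])
}

func main() {
	iec := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
	si := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}

	// "GiBy: 42*1024 = 42 TiB": 42*1024 gibibytes expressed in bytes.
	fmt.Println(humanize(42*1024*1024*1024*1024*1024, 1024, iec)) // 42 TiB

	// "Kibit: 1 = 128 B": 1 Kibit = 1024 bits = 128 bytes.
	fmt.Println(humanize(1*1024/8, 1024, iec)) // 128 B

	// "kbit: 1000 = 125 kB": 1000 kbit = 1,000,000 bits = 125,000 bytes.
	fmt.Println(humanize(1000*1000/8, 1000, si)) // 125 kB
}
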
@@ -18,11 +18,11 @@ var (

func FromUnit(u string) Formatter {
	switch u {
	case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min", "w", "wk":
	case "ns", "us", "µs", "ms", "s", "m", "h", "d", "min":
		return DurationFormatter
	case "bytes", "decbytes", "bits", "bit", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy", "EBy", "KiBy", "MiBy", "GiBy", "TiBy", "PiBy", "EiBy", "kbit", "Mbit", "Gbit", "Tbit", "Pbit", "Ebit", "Kibit", "Mibit", "Gibit", "Tibit", "Pibit":
	case "bytes", "decbytes", "bits", "decbits", "kbytes", "decKbytes", "deckbytes", "mbytes", "decMbytes", "decmbytes", "gbytes", "decGbytes", "decgbytes", "tbytes", "decTbytes", "dectbytes", "pbytes", "decPbytes", "decpbytes", "By", "kBy", "MBy", "GBy", "TBy", "PBy":
		return DataFormatter
	case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "EBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s", "Ebit/s", "KiBy/s", "MiBy/s", "GiBy/s", "TiBy/s", "PiBy/s", "EiBy/s", "Kibit/s", "Mibit/s", "Gibit/s", "Tibit/s", "Pibit/s", "Eibit/s":
	case "binBps", "Bps", "binbps", "bps", "KiBs", "Kibits", "KBs", "Kbits", "MiBs", "Mibits", "MBs", "Mbits", "GiBs", "Gibits", "GBs", "Gbits", "TiBs", "Tibits", "TBs", "Tbits", "PiBs", "Pibits", "PBs", "Pbits", "By/s", "kBy/s", "MBy/s", "GBy/s", "TBy/s", "PBy/s", "bit/s", "kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s":
		return DataRateFormatter
	case "percent", "percentunit", "%":
		return PercentFormatter

@@ -32,7 +32,7 @@ func (f *durationFormatter) Format(value float64, unit string) string {
		return toHours(value)
	case "d":
		return toDays(value)
	case "w", "wk":
	case "w":
		return toWeeks(value)
	}
	// When unit is not matched, return the value as it is.

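For context, FromUnit is the single dispatch point from a unit string to a formatter. A usage sketch, using only units that appear in both versions of the case lists above (the import path is an assumption, not confirmed by this diff):

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/formatter" // assumed import path
)

func main() {
	f := formatter.FromUnit("mbytes") // resolves to DataFormatter
	fmt.Println(f.Format(1024, "mbytes")) // "1.0 GiB" per the test table above

	d := formatter.FromUnit("ms") // resolves to DurationFormatter
	fmt.Println(d.Format(1500, "ms"))
}
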
@@ -483,6 +483,22 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
	value1 := v.Visit(values[0])
	value2 := v.Visit(values[1])

	switch value1.(type) {
	case float64:
		if _, ok := value2.(float64); !ok {
			v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected number for both operands", keys[0].Name))
			return ""
		}
	case string:
		if _, ok := value2.(string); !ok {
			v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected string for both operands", keys[0].Name))
			return ""
		}
	default:
		v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: operands must be number or string", keys[0].Name))
		return ""
	}

	var conds []string
	for _, key := range keys {
		condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs)
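The added switch rejects mixed-type operands for two-value comparisons (BETWEEN / NOT BETWEEN) before any SQL is built. With hypothetical key names: `duration_nano BETWEEN 100000 AND 200000` passes (both operands float64), `service.name BETWEEN 'a' AND 'b'` passes (both strings), but `duration_nano BETWEEN 100000 AND 'slow'` is recorded as a "value type mismatch" error and the visitor short-circuits to an empty condition.
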
@@ -855,7 +871,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
	// 1. either user meant key ( this is already handled above in fieldKeysForName )
	// 2. or user meant `attribute.key` we look up in the map for all possible field keys with name 'attribute.key'

	// Note:
	// If user only wants to search `attribute.key`, then they have to use `attribute.attribute.key`
	// If user only wants to search `key`, then they have to use `key`
	// If user wants to search both, they can use `attribute.key` and we will resolve the ambiguity

@@ -375,13 +375,6 @@ func mergeAndEnsureBackwardCompatibility(ctx context.Context, logger *slog.Logge
		config.Flagger.Config.Boolean[flagger.FeatureKafkaSpanEval.String()] = os.Getenv("KAFKA_SPAN_EVAL") == "true"
	}

	if os.Getenv("INTERPOLATION_ENABLED") != "" {
		logger.WarnContext(ctx, "[Deprecated] env INTERPOLATION_ENABLED is deprecated and scheduled for removal. Please use SIGNOZ_FLAGGER_CONFIG_BOOLEAN_INTERPOLATION__ENABLED instead.")
		if config.Flagger.Config.Boolean == nil {
			config.Flagger.Config.Boolean = make(map[string]bool)
		}
		config.Flagger.Config.Boolean[flagger.FeatureInterpolationEnabled.String()] = os.Getenv("INTERPOLATION_ENABLED") == "true"
	}
}

func (config Config) Collect(_ context.Context, _ valuer.UUID) (map[string]any, error) {

@@ -5,6 +5,7 @@ import (
	"fmt"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
@@ -73,7 +74,7 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
		cteArgs [][]any
	)

	if b.metricsStatementBuilder.CanShortCircuitDelta(query) {
	if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
		// spatial_aggregation_cte directly for certain delta queries
		if frag, args, err := b.buildTemporalAggDeltaFastPath(ctx, start, end, query, keys, variables); err != nil {
			return nil, err
@@ -91,8 +92,9 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
	}

	// spatial_aggregation_cte
	frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
	if frag != "" {
	if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
		return nil, err
	} else if frag != "" {
		cteFragments = append(cteFragments, frag)
		cteArgs = append(cteArgs, args)
	}
@@ -122,13 +124,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	for _, g := range query.GroupBy {
		col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
		if err != nil {
			return "", []any{}, err
			return "", nil, err
		}
		sb.SelectMore(col)
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}
@@ -150,7 +155,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
		Variables: variables,
	}, start, end)
	if err != nil {
		return "", []any{}, err
		return "", nil, err
	}
	}
	if filterWhere != nil {
@@ -208,8 +213,11 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
		query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}
@@ -278,7 +286,10 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	}

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))

	baseSb.From(fmt.Sprintf("%s.%s AS points", DBName, tbl))
@@ -315,25 +326,23 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(telemetrymetrics.RateWithoutNegative, start, start)
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.RateTmpl))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil

	case metrictypes.TimeAggregationIncrease:
		incExpr := fmt.Sprintf(telemetrymetrics.IncreaseWithoutNegative, start, start)
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.IncreaseTmpl))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -348,7 +357,15 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
	_ uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any) {
) (string, []any, error) {

	if query.Aggregations[0].SpaceAggregation.IsZero() {
		return "", nil, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`]",
		)
	}
	sb := sqlbuilder.NewSelectBuilder()

	sb.Select("ts")
@@ -365,5 +382,5 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}

@@ -3,6 +3,7 @@ package telemetrymeter
import (
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

@@ -63,7 +64,7 @@ func AggregationColumnForSamplesTable(
	temporality metrictypes.Temporality,
	timeAggregation metrictypes.TimeAggregation,
	tableHints *metrictypes.MetricTableHints,
) string {
) (string, error) {
	tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
	var aggregationColumn string
	switch temporality {
@@ -190,5 +191,13 @@ func AggregationColumnForSamplesTable(
		}

	}
	return aggregationColumn

	if aggregationColumn == "" {
		return "", errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
		)
	}
	return aggregationColumn, nil
}

@@ -29,13 +29,7 @@ func (c *conditionBuilder) conditionFor(
	sb *sqlbuilder.SelectBuilder,
) (string, error) {

	switch operator {
	case qbtypes.FilterOperatorContains,
		qbtypes.FilterOperatorNotContains,
		qbtypes.FilterOperatorILike,
		qbtypes.FilterOperatorNotILike,
		qbtypes.FilterOperatorLike,
		qbtypes.FilterOperatorNotLike:
	if operator.IsStringSearchOperator() {
		value = querybuilder.FormatValueForContains(value)
	}

@@ -44,6 +38,18 @@ func (c *conditionBuilder) conditionFor(
		return "", err
	}

	// TODO(srikanthccv): use the same data type collision handling when metrics schemas are updated
	switch v := value.(type) {
	case float64:
		tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
	case []any:
		if len(v) > 0 && (operator == qbtypes.FilterOperatorBetween || operator == qbtypes.FilterOperatorNotBetween) {
			if _, ok := v[0].(float64); ok {
				tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
			}
		}
	}

	switch operator {
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil

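The effect of the added type switch, shown on a hypothetical map-backed attribute column (the column name below is illustrative, not from this diff): a float64 comparison value rewrites the field to `toFloat64OrNull(attributes_string['http.status_code'])`, and a numeric BETWEEN pair gets the same wrapping. Rows whose stored value does not parse as a number become NULL and simply drop out of the comparison instead of failing the query.
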
@@ -5,67 +5,27 @@ import (
	"fmt"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/huandu/go-sqlbuilder"
	"golang.org/x/exp/slices"
)

const (
	RateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
	IncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, ((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
	RateTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))`

	RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
	IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s, ((%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
	OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
	IncreaseTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value, per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)`

	RateWithInterpolation = `
	CASE
		WHEN row_number() OVER rate_window = 1 THEN
			-- First row: try to interpolate using next value
			CASE
				WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
					-- Assume linear growth to next point
					(leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
					(leadInFrame(ts, 1) OVER rate_window - ts)
				ELSE
					0 -- No next value either, can't interpolate
			END
		WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
			-- Counter reset detected
			per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window)
		ELSE
			-- Normal case: calculate rate
			(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) /
			(ts - lagInFrame(ts, 1) OVER rate_window)
	END`
	RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1) OVER rate_window), (%s - lagInFrame(%s, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))) AS per_series_value`

	IncreaseWithInterpolation = `
	CASE
		WHEN row_number() OVER rate_window = 1 THEN
			-- First row: try to interpolate using next value
			CASE
				WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
					-- Calculate the interpolated increase for this interval
					((leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
					(leadInFrame(ts, 1) OVER rate_window - ts)) *
					(leadInFrame(ts, 1) OVER rate_window - ts)
				ELSE
					0 -- No next value either, can't interpolate
			END
		WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
			-- Counter reset detected: the increase is the current value
			per_series_value
		ELSE
			-- Normal case: calculate increase
			(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)
	END`
	IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s, (%s - lagInFrame(%s, 1) OVER rate_window))) AS per_series_value`

	OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
)

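RateTmpl and IncreaseTmpl replace the interpolation and zero-filled variants above: the first row of each window is now explicitly nan, and a negative delta is treated as a counter reset. A small self-contained Go sketch of the rate semantics (illustrative only; the real computation runs inside ClickHouse via the multiIf template):

package main

import (
	"fmt"
	"math"
)

// rate mirrors the multiIf in RateTmpl: the first row of a series has no
// previous sample and yields NaN; a negative delta is treated as a counter
// reset, so only the current value is divided by the elapsed time.
func rate(prev, cur, prevTs, curTs float64, isFirst bool) float64 {
	switch {
	case isFirst: // row_number() OVER rate_window = 1
		return math.NaN()
	case cur-prev < 0: // counter reset detected
		return cur / (curTs - prevTs)
	default:
		return (cur - prev) / (curTs - prevTs)
	}
}

func main() {
	// A counter sampled every 60s: 100, 105, 110, then a reset down to 2.
	fmt.Println(rate(0, 100, 0, 60, true))       // NaN (no previous point)
	fmt.Println(rate(100, 105, 60, 120, false))  // 0.0833... = 5/60
	fmt.Println(rate(105, 110, 120, 180, false)) // 0.0833...
	fmt.Println(rate(110, 2, 180, 240, false))   // 0.0333... = 2/60 after reset
}

The NaN first row is why the integration tests further down shift every index by one: the first data point no longer produces a rate value.
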
type MetricQueryStatementBuilder struct {
@@ -147,54 +107,6 @@ func (b *MetricQueryStatementBuilder) Build(
	return b.buildPipelineStatement(ctx, start, end, query, keys, variables)
}

// Fast‑path (no fingerprint grouping)
// canShortCircuitDelta returns true if we can use the optimized query
// for the given query
// This is used to avoid the group by fingerprint thus improving the performance
// for certain queries
// cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
//   - rate = sum(value)/step, increase = sum(value) - sum of sums is same as sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
//   - sum of sums is same as sum of all values
//
// 3. time aggregation = min and space aggregation = min
//   - min of mins is same as min of all values
//
// 4. time aggregation = max and space aggregation = max
//   - max of maxs is same as max of all values
//
// 5. special case exphist, there is no need for per series/fingerprint aggregation
// we can directly use the quantilesDDMerge function
//
// all of this is true only for delta metrics
func (b *MetricQueryStatementBuilder) CanShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
	if q.Aggregations[0].Temporality != metrictypes.Delta {
		return false
	}

	ta := q.Aggregations[0].TimeAggregation
	sa := q.Aggregations[0].SpaceAggregation

	if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) && sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
		return true
	}
	if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
		return true
	}
	if q.Aggregations[0].Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
		return true
	}
	return false
}

func (b *MetricQueryStatementBuilder) buildPipelineStatement(
	ctx context.Context,
	start, end uint64,
@@ -256,10 +168,11 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
		return nil, err
	}

	if b.CanShortCircuitDelta(query) {
	if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
		// spatial_aggregation_cte directly for certain delta queries
		frag, args := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs)
		if frag != "" {
		if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs); err != nil {
			return nil, err
		} else if frag != "" {
			cteFragments = append(cteFragments, frag)
			cteArgs = append(cteArgs, args)
		}
@@ -273,8 +186,9 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
	}

	// spatial_aggregation_cte
	frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
	if frag != "" {
	if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
		return nil, err
	} else if frag != "" {
		cteFragments = append(cteFragments, frag)
		cteArgs = append(cteArgs, args)
	}
@@ -294,7 +208,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	timeSeriesCTE string,
	timeSeriesCTEArgs []any,
) (string, []any) {
) (string, []any, error) {
	stepSec := int64(query.StepInterval.Seconds())

	sb := sqlbuilder.NewSelectBuilder()
@@ -307,11 +221,15 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol := AggregationColumnForSamplesTable(
	aggCol, err := AggregationColumnForSamplesTable(
		start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
		query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
	)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}

@@ -334,7 +252,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}

func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
@@ -437,8 +355,12 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
	}

@@ -461,7 +383,7 @@
}

func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	ctx context.Context,
	_ context.Context,
	start, end uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	timeSeriesCTE string,
@@ -479,7 +401,10 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
		baseSb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))

	tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
@@ -496,36 +421,25 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(

	innerQuery, innerArgs := baseSb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)

	// ! TODO (balanikaran) Get OrgID via function parameter instead of valuer.GenerateUUID()
	interpolationEnabled := b.flagger.BooleanOrEmpty(ctx, flagger.FeatureInterpolationEnabled, featuretypes.NewFlaggerEvaluationContext(valuer.GenerateUUID()))

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(RateWithoutNegative, start, start)
		if interpolationEnabled {
			rateExpr = RateWithInterpolation
		}
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", RateTmpl))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil

	case metrictypes.TimeAggregationIncrease:
		incExpr := fmt.Sprintf(IncreaseWithoutNegative, start, start)
		if interpolationEnabled {
			incExpr = IncreaseWithInterpolation
		}
		wrapped := sqlbuilder.NewSelectBuilder()
		wrapped.Select("ts")
		for _, g := range query.GroupBy {
			wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
		}
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
		wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", IncreaseTmpl))
		wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
		q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
		return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -534,7 +448,6 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
	}
}

// because RateInterpolation is not enabled anywhere due to some gaps in the logic wrt cache handling, it hasn't been considered for the multi temporality
func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
	_ context.Context,
	start, end uint64,
@@ -553,18 +466,32 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}

	aggForDeltaTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggForCumulativeTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	aggForDeltaTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	aggForCumulativeTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
	if err != nil {
		return "", nil, err
	}
	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
		aggForDeltaTemporality = fmt.Sprintf("%s/%d", aggForDeltaTemporality, stepSec)
	}

	switch query.Aggregations[0].TimeAggregation {
	case metrictypes.TimeAggregationRate:
		rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, aggForCumulativeTemporality, aggForCumulativeTemporality, start)
		rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality,
			aggForDeltaTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality,
		)
		sb.SelectMore(rateExpr)
	case metrictypes.TimeAggregationIncrease:
		increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, start)
		increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality,
			aggForDeltaTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
			aggForCumulativeTemporality, aggForCumulativeTemporality,
		)
		sb.SelectMore(increaseExpr)
	default:
		expr := fmt.Sprintf(OthersMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality)
@@ -592,7 +519,14 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
	_ uint64,
	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
	_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any) {
) (string, []any, error) {
	if query.Aggregations[0].SpaceAggregation.IsZero() {
		return "", nil, errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
		)
	}
	sb := sqlbuilder.NewSelectBuilder()

	sb.Select("ts")
@@ -609,7 +543,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)

	q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}

func (b *MetricQueryStatementBuilder) BuildFinalSelect(
@@ -641,9 +575,7 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
		quantile,
	))
	sb.From("__spatial_aggregation_cte")
	for _, g := range query.GroupBy {
		sb.GroupBy(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
	}
	sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
	sb.GroupBy("ts")
	if query.Having != nil && query.Having.Expression != "" {
		rewriter := querybuilder.NewHavingExpressionRewriter()
@@ -659,6 +591,8 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
			sb.Where(rewrittenExpr)
		}
	}
	sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
	sb.OrderBy("ts")

	q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil

@@ -3,6 +3,7 @@ package telemetrymetrics
import (
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
)

@@ -168,7 +169,7 @@ func AggregationColumnForSamplesTable(
	temporality metrictypes.Temporality,
	timeAggregation metrictypes.TimeAggregation,
	tableHints *metrictypes.MetricTableHints,
) string {
) (string, error) {
	tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
	var aggregationColumn string
	switch temporality {
@@ -298,5 +299,12 @@ func AggregationColumnForSamplesTable(
			}
		}
	}
	return aggregationColumn
	if aggregationColumn == "" {
		return "", errors.Newf(
			errors.TypeInvalidInput,
			errors.CodeInvalidInput,
			"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
		)
	}
	return aggregationColumn, nil
}

@@ -35,13 +35,7 @@ func (c *conditionBuilder) conditionFor(
	sb *sqlbuilder.SelectBuilder,
) (string, error) {

	switch operator {
	case qbtypes.FilterOperatorContains,
		qbtypes.FilterOperatorNotContains,
		qbtypes.FilterOperatorILike,
		qbtypes.FilterOperatorNotILike,
		qbtypes.FilterOperatorLike,
		qbtypes.FilterOperatorNotLike:
	if operator.IsStringSearchOperator() {
		value = querybuilder.FormatValueForContains(value)
	}

@@ -152,7 +152,9 @@ func (f FilterOperator) IsStringSearchOperator() bool {
		FilterOperatorILike,
		FilterOperatorNotILike,
		FilterOperatorLike,
		FilterOperatorNotLike:
		FilterOperatorNotLike,
		FilterOperatorRegexp,
		FilterOperatorNotRegexp:
		return true
	default:
		return false

@@ -3,6 +3,7 @@ package querybuildertypesv5
import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

@@ -174,3 +175,54 @@ func (q *QueryBuilderQuery[T]) Normalize() {
	}

}

// Fast‑path (no fingerprint grouping)
// canShortCircuitDelta returns true if we can use the optimized query
// for the given query
// This is used to avoid the group by fingerprint thus improving the performance
// for certain queries
// cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
//   - rate = sum(value)/step, increase = sum(value) - sum of sums is same as sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
//   - sum of sums is same as sum of all values
//
// 3. time aggregation = min and space aggregation = min
//   - min of mins is same as min of all values
//
// 4. time aggregation = max and space aggregation = max
//   - max of maxs is same as max of all values
//
// 5. special case exphist, there is no need for per series/fingerprint aggregation
// we can directly use the quantilesDDMerge function
//
// all of this is true only for delta metrics
func CanShortCircuitDelta(metricAgg MetricAggregation) bool {

	if metricAgg.Temporality != metrictypes.Delta {
		return false
	}

	ta := metricAgg.TimeAggregation
	sa := metricAgg.SpaceAggregation

	if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) &&
		sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
		return true
	}
	if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
		return true
	}
	if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
		return true
	}
	if metricAgg.Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
		return true
	}

	return false
}

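CanShortCircuitDelta is now a free function on the aggregation itself, so the meter and metrics builders share one definition. A quick usage sketch, assuming the exported names shown in this hunk:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

func main() {
	// Delta sum-of-sums: safe to skip the per-fingerprint grouping.
	agg := qbtypes.MetricAggregation{
		Temporality:      metrictypes.Delta,
		TimeAggregation:  metrictypes.TimeAggregationSum,
		SpaceAggregation: metrictypes.SpaceAggregationSum,
	}
	fmt.Println(qbtypes.CanShortCircuitDelta(agg)) // true

	// Cumulative counters need per-series diffs first, so no short circuit.
	agg.Temporality = metrictypes.Cumulative
	fmt.Println(qbtypes.CanShortCircuitDelta(agg)) // false
}
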
@@ -12,6 +12,7 @@ import (
var (
	ErrColumnNotFound      = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "field not found")
	ErrBetweenValues       = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
	ErrBetweenValuesType   = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values of the number type")
	ErrInValues            = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
	ErrUnsupportedOperator = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported operator")
)

@@ -179,14 +179,6 @@ func (q *QueryBuilderQuery[T]) validateAggregations() error {
				aggId,
			)
		}
		// Validate metric-specific aggregations
		if err := validateMetricAggregation(v); err != nil {
			aggId := fmt.Sprintf("aggregation #%d", i+1)
			if q.Name != "" {
				aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
			}
			return wrapValidationError(err, aggId, "invalid metric %s: %s")
		}
	case TraceAggregation:
		if v.Expression == "" {
			aggId := fmt.Sprintf("aggregation #%d", i+1)
@@ -803,85 +795,3 @@ func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) erro
		)
	}
}

// validateMetricAggregation validates metric-specific aggregation parameters
func validateMetricAggregation(agg MetricAggregation) error {
	// we can't decide anything here without known temporality
	if agg.Temporality == metrictypes.Unknown {
		return nil
	}

	// Validate that rate/increase are only used with appropriate temporalities
	if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
		// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
		if agg.Temporality == metrictypes.Unspecified {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
			)
		}
	}

	// Validate percentile aggregations are only used with histogram types
	if agg.SpaceAggregation.IsPercentile() {
		if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"percentile aggregation can only be used with histogram or summary metric types",
			)
		}
	}

	// Validate time aggregation values
	validTimeAggregations := []metrictypes.TimeAggregation{
		metrictypes.TimeAggregationUnspecified,
		metrictypes.TimeAggregationLatest,
		metrictypes.TimeAggregationSum,
		metrictypes.TimeAggregationAvg,
		metrictypes.TimeAggregationMin,
		metrictypes.TimeAggregationMax,
		metrictypes.TimeAggregationCount,
		metrictypes.TimeAggregationCountDistinct,
		metrictypes.TimeAggregationRate,
		metrictypes.TimeAggregationIncrease,
	}

	validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
	if !validTimeAgg {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid time aggregation: %s",
			agg.TimeAggregation.StringValue(),
		).WithAdditional(
			"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
		)
	}

	// Validate space aggregation values
	validSpaceAggregations := []metrictypes.SpaceAggregation{
		metrictypes.SpaceAggregationUnspecified,
		metrictypes.SpaceAggregationSum,
		metrictypes.SpaceAggregationAvg,
		metrictypes.SpaceAggregationMin,
		metrictypes.SpaceAggregationMax,
		metrictypes.SpaceAggregationCount,
		metrictypes.SpaceAggregationPercentile50,
		metrictypes.SpaceAggregationPercentile75,
		metrictypes.SpaceAggregationPercentile90,
		metrictypes.SpaceAggregationPercentile95,
		metrictypes.SpaceAggregationPercentile99,
	}

	validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
	if !validSpaceAgg {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid space aggregation: %s",
			agg.SpaceAggregation.StringValue(),
		).WithAdditional(
			"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
		)
	}

	return nil
}

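For reference, the hunk header (-803,85 +795,3) shows this validator being removed in the compare: it used to reject, for example, rate or increase on a gauge (unspecified temporality), and percentile space aggregations on non-histogram, non-summary types. Equivalent guards now appear to surface from the statement builders instead, via the "invalid time aggregation" and "invalid space aggregation" errors added earlier in this compare, though that mapping is an inference from the diff rather than something stated in it.
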
@@ -265,46 +265,6 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
			ruleUnit:    "",
			shouldAlert: true,
		},
		// bytes and Gibibytes,
		// rule will only fire if target is converted to bytes so that the sample value becomes lower than the target 100 GiBy
		{
			name: "bytes to Gibibytes - should alert",
			threshold: BasicRuleThreshold{
				Name:        CriticalThresholdName,
				TargetValue: &target, // 100 Gibibytes
				TargetUnit:  "GiBy",
				MatchType:   AtleastOnce,
				CompareOp:   ValueIsBelow,
			},
			series: v3.Series{
				Labels: map[string]string{"service": "test"},
				Points: []v3.Point{
					{Value: 70 * 1024 * 1024 * 1024, Timestamp: 1000}, // 70 Gibibytes
				},
			},
			ruleUnit:    "bytes",
			shouldAlert: true,
		},
		// data rate conversion - bytes per second to MiB per second
		// rule will only fire if target is converted to bytes so that the sample value becomes lower than the target 100 MiB/s
		{
			name: "bytes per second to MiB per second - should alert",
			threshold: BasicRuleThreshold{
				Name:        CriticalThresholdName,
				TargetValue: &target, // 100 MiB/s
				TargetUnit:  "MiBy/s",
				MatchType:   AtleastOnce,
				CompareOp:   ValueIsBelow,
			},
			series: v3.Series{
				Labels: map[string]string{"service": "test"},
				Points: []v3.Point{
					{Value: 30 * 1024 * 1024, Timestamp: 1000}, // 30 MiB/s
				},
			},
			ruleUnit:    "By/s",
			shouldAlert: true,
		},
	}

	for _, tt := range tests {

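The arithmetic behind the removed "bytes to Gibibytes" case, as a tiny runnable sketch (values taken directly from the test above):

package main

import "fmt"

func main() {
	// The 100 GiBy target is converted into the rule unit ("bytes")
	// before the comparison is made.
	target := 100.0 * 1024 * 1024 * 1024 // 100 GiB = 107,374,182,400 bytes
	sample := 70.0 * 1024 * 1024 * 1024  // the series point: 70 GiB
	fmt.Println(sample < target)         // ValueIsBelow -> true, the rule fires
}
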
@@ -14,10 +14,10 @@ from fixtures.alertutils (
from fixtures.logger import setup_logger
from fixtures.utils import get_testdata_file_path

# test cases for match type and compare operators have wait time of 30 seconds to verify the alert expectation.
# we've positioned the alert data to fire the alert on first eval of rule manager, the eval frequency
# for most alert rules are set to 15s so considering this delay plus some delay from alert manager's
# group_wait and group_interval, even in worst case most alerts should be triggered in about 30 seconds
# Alert test cases use a 30-second wait time to verify expected alert firing.
# Alert data is set up to trigger on the first rule manager evaluation.
# With a 15-second eval frequency for most rules, plus alertmanager's
# group_wait and group_interval delays, alerts should fire well within 30 seconds.
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
    types.AlertTestCase(
        name="test_threshold_above_at_least_once",
@@ -25,6 +25,7 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
        alert_data=[
            types.AlertData(
                type="metrics",
                # active requests dummy data
                data_path="alerts/test_scenarios/threshold_above_at_least_once/alert_data.jsonl",
            ),
        ],
@@ -115,30 +116,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last, pylint: disable=W0511
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed
    # types.AlertTestCase(
    #     name="test_threshold_above_last",
    #     rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_above_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_above_last",
        rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_above_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_below_at_least_once",
        rule_path="alerts/test_scenarios/threshold_below_at_least_once/rule.json",
@@ -189,6 +188,7 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
        alert_data=[
            types.AlertData(
                type="metrics",
                # one rate ~5 + rest 0.01 so it remains in total below 10
                data_path="alerts/test_scenarios/threshold_below_in_total/alert_data.jsonl",
            ),
        ],
@@ -227,30 +227,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_below_last",
    #     rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_below_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_below_last",
        rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_below_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_equal_to_at_least_once",
        rule_path="alerts/test_scenarios/threshold_equal_to_at_least_once/rule.json",
@@ -339,30 +337,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_equal_to_last",
    #     rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_equal_to_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_equal_to_last",
        rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_equal_to_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
    types.AlertTestCase(
        name="test_threshold_not_equal_to_at_least_once",
        rule_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/rule.json",
@@ -451,30 +447,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
            ],
        ),
    ),
    # TODO: @abhishekhugetech enable the test for matchType last,
    # after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
    # types.AlertTestCase(
    #     name="test_threshold_not_equal_to_last",
    #     rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
    #     alert_data=[
    #         types.AlertData(
    #             type="metrics",
    #             data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
    #         ),
    #     ],
    #     alert_expectation=types.AlertExpectation(
    #         should_alert=True,
    #         wait_time_seconds=30,
    #         expected_alerts=[
    #             types.FiringAlert(
    #                 labels={
    #                     "alertname": "threshold_not_equal_to_last",
    #                     "threshold.name": "critical",
    #                 }
    #             ),
    #         ],
    #     ),
    # ),
    types.AlertTestCase(
        name="test_threshold_not_equal_to_last",
        rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
        alert_data=[
            types.AlertData(
                type="metrics",
                data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
            ),
        ],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={
                        "alertname": "threshold_not_equal_to_last",
                        "threshold.name": "critical",
                    }
                ),
            ],
        ),
    ),
]

# test cases unit conversion

File diff suppressed because it is too large
@@ -54,17 +54,17 @@ def test_rate_with_steady_values_and_reset(
 
     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) >= 59
+    assert len(result_values) >= 58
     # the counter reset happened at 31st minute
     assert (
-        result_values[30]["value"] == 0.0167
+        result_values[29]["value"] == 0.0167
     ) # i.e 2/120 i.e 29th to 31st minute changes
     assert (
-        result_values[31]["value"] == 0.133
+        result_values[30]["value"] == 0.133
     ) # i.e 10/60 i.e 31st to 32nd minute changes
     count_of_steady_rate = sum(1 for v in result_values if v["value"] == 0.0833)
     assert (
-        count_of_steady_rate >= 56
+        count_of_steady_rate >= 55
     ) # 59 - (1 reset + 1 high rate + 1 at the beginning)
     # All rates should be non-negative (stale periods = 0 rate)
     for v in result_values:
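
The index shift in this hunk (30 → 29 and so on) comes from dropping the rate of the first sample, which has no predecessor to diff against. The values in the comments are plain reset-aware rate arithmetic; a minimal sketch with illustrative names, not the production implementation:

```python
# Reset-aware per-second rate over (timestamp_seconds, cumulative_value)
# samples; a sketch of the expected math, not the production query.
def rates(samples):
    out = []
    for (t0, v0), (t1, v1) in zip(samples, samples[1:]):
        # On a counter reset (value drops), the new value itself is
        # taken as the increase since the reset.
        increase = v1 - v0 if v1 >= v0 else v1
        out.append(round(increase / (t1 - t0), 4))
    return out

# e.g. a post-reset value of 2 over a 120 s window gives 2/120 == 0.0167,
# and a steady increase of 5 per minute gives 5/60 == 0.0833.
```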

@@ -72,16 +72,17 @@ def test_with_steady_values_and_reset(
 
     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) >= 59
+    assert len(result_values) >= 58
     # the counter reset happened at 31st minute
-    assert result_values[30]["value"] == expected_value_at_31st_minute
-    assert result_values[31]["value"] == expected_value_at_32nd_minute
+    # we skip the rate value for the first data point without previous value
+    assert result_values[29]["value"] == expected_value_at_31st_minute
+    assert result_values[30]["value"] == expected_value_at_32nd_minute
     assert (
-        result_values[39]["value"] == steady_value
-    ) # 39th minute is when cumulative shifts to delta
+        result_values[38]["value"] == steady_value
+    ) # 38th minute is when cumulative shifts to delta
     count_of_steady_rate = sum(1 for v in result_values if v["value"] == steady_value)
     assert (
-        count_of_steady_rate >= 56
+        count_of_steady_rate >= 55
     ) # 59 - (1 reset + 1 high rate + 1 at the beginning)
     # All rates should be non-negative (stale periods = 0 rate)
     for v in result_values:
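
This test additionally crosses a temporality switch: before it the series is cumulative (successive totals must be differenced), after it the points already carry per-interval increases. A hedged sketch of that distinction, with assumed names and a fixed 60 s interval:

```python
# Sketch of one rate point across a cumulative -> delta temporality
# switch; illustrative only, not the SigNoz query engine.
def rate_point(prev_value, value, temporality, interval_seconds=60):
    if temporality == "Delta":
        # Delta points already hold the increase for the interval.
        return value / interval_seconds
    # Cumulative points are differenced, with the same reset handling.
    increase = value - prev_value if value >= prev_value else value
    return increase / interval_seconds
```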

@@ -316,12 +317,12 @@ def test_for_service_with_switch(
 
     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) >= 60
-    assert result_values[30]["value"] == expected_value_at_30th_minute # 0.183
-    assert result_values[31]["value"] == expected_value_at_31st_minute # 0.183
-    assert result_values[38]["value"] == value_at_switch # 0.25
+    assert len(result_values) >= 59
+    assert result_values[29]["value"] == expected_value_at_30th_minute # 0.183
+    assert result_values[30]["value"] == expected_value_at_31st_minute # 0.183
+    assert result_values[37]["value"] == value_at_switch # 0.25
     assert (
-        result_values[39]["value"] == value_at_switch # 0.25
+        result_values[38]["value"] == value_at_switch # 0.25
     ) # 39th minute is when cumulative shifts to delta
     # All rates should be non-negative (stale periods = 0 rate)
     for v in result_values:

@@ -1,12 +1,12 @@
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":19,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":38,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

@@ -25,7 +25,7 @@
         "type": "clickhouse_sql",
         "spec": {
           "name": "A",
-          "query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= {{.start_timestamp_ms}} \n AND unix_milli < {{.end_timestamp_ms}} \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n avg(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
+          "query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= $start_timestamp_ms \n AND unix_milli < $end_timestamp_ms \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n sum(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
         }
       }
     ]
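
Two things change in this query: the window placeholders move from the `{{.start_timestamp_ms}}` template form to `$start_timestamp_ms`, and the spatial aggregation becomes `sum(per_series_value)` instead of `avg`. How the `$`-placeholders are bound is not shown in this diff; a hypothetical substitution step could look like:

```python
# Hypothetical placeholder substitution for the query above; the real
# harness or engine may bind these values differently.
def render_query(query: str, start_ms: int, end_ms: int) -> str:
    return (
        query.replace("$start_timestamp_ms", str(start_ms))
             .replace("$end_timestamp_ms", str(end_ms))
    )
```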

@@ -1,12 +1,12 @@
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":46,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":76,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":12,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":23,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

@@ -1,12 +1,12 @@
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":30,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":50,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":55,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":70,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":34,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

@@ -1,12 +1,12 @@
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
-{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
+{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}