diff --git a/.github/workflows/staging-deployment.yaml b/.github/workflows/staging-deployment.yaml index a2ff80354f..4918cf54d2 100644 --- a/.github/workflows/staging-deployment.yaml +++ b/.github/workflows/staging-deployment.yaml @@ -31,6 +31,7 @@ jobs: GCP_ZONE: ${{ secrets.GCP_ZONE }} GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }} CLOUDSDK_CORE_DISABLE_PROMPTS: 1 + KAFKA_SPAN_EVAL: true run: | read -r -d '' COMMAND < - SigNoz-logo +

+ + SigNoz + +
SigNoz +

-

Monitor your applications and troubleshoot problems in your deployed applications, an open-source alternative to DataDog, New Relic, etc.

-

+

All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit signoz.io for the full documentation, tutorials, and guide.

Downloads @@ -21,110 +24,99 @@ Twitter -## - -SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can: +## Features -πŸ‘‰ Visualise Metrics, Traces and Logs in a single pane of glass -πŸ‘‰ You can see metrics like p99 latency, error rates for your services, external API calls and individual end points. +### Application Performance Monitoring -πŸ‘‰ You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces. +Use SigNoz APM to monitor your applications and services. It comes with out-of-box charts for key application metrics like p99 latency, error rate, Apdex and operations per second. You can also monitor the database and external calls made from your application. Read [more](https://signoz.io/application-performance-monitoring/). -πŸ‘‰ Run aggregates on trace data to get business relevant metrics +You can [instrument](https://signoz.io/docs/instrumentation/) your application with OpenTelemetry to get started. -πŸ‘‰ Filter and query logs, build dashboards and alerts based on attributes in logs +![apm-cover](https://github.com/user-attachments/assets/fa5c0396-0854-4c8b-b972-9b62fd2a70d2) -πŸ‘‰ Record exceptions automatically in Python, Java, Ruby, and Javascript -πŸ‘‰ Easy to set alerts with DIY query builder +### Logs Management +SigNoz can be used as a centralized log management solution. We use ClickHouse (used by likes of Uber & Cloudflare) as a datastore, ⎯ an extremely fast and highly optimized storage for logs data. Instantly search through all your logs using quick filters and a powerful query builder. -### Application Metrics +You can also create charts on your logs and monitor them with customized dashboards. Read [more](https://signoz.io/log-management/). 
-![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png) +![logs-management-cover](https://github.com/user-attachments/assets/343588ee-98fb-4310-b3d2-c5bacf9c7384) ### Distributed Tracing -distributed_tracing_2 2 -distributed_tracing_1 +Distributed Tracing is essential to troubleshoot issues in microservices applications. Powered by OpenTelemetry, distributed tracing in SigNoz can help you track user requests across services to help you identify performance bottlenecks. -### Logs Management +See user requests in a detailed breakdown with the help of Flamegraphs and Gantt Charts. Click on any span to see the entire trace represented beautifully, which will help you make sense of where issues actually occurred in the flow of requests. -logs_management +Read [more](https://signoz.io/distributed-tracing/). -### Infrastructure Monitoring +![distributed-tracing-cover](https://github.com/user-attachments/assets/9bfe060a-0c40-4922-9b55-8a97e1a4076c) -infrastructure_monitoring -### Exceptions Monitoring -![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png) +### Metrics and Dashboards + +Ingest metrics from your infrastructure or applications and create customized dashboards to monitor them. Create visualization that suits your needs with a variety of panel types like pie chart, time-series, bar chart, etc. + +Create queries on your metrics data quickly with an easy-to-use metrics query builder. Add multiple queries and combine those queries with formulae to create really complex queries quickly. +Read [more](https://signoz.io/metrics-and-dashboards/). + +![metrics-n-dashboards-cover](https://github.com/user-attachments/assets/a536fd71-1d2c-4681-aa7e-516d754c47a5) ### Alerts -alerts_management +Use alerts in SigNoz to get notified when anything unusual happens in your application. 
You can set alerts on any type of telemetry signal (logs, metrics, traces), create thresholds and set up a notification channel to get notified. Advanced features like alert history and anomaly detection can help you create smarter alerts. +Alerts in SigNoz help you identify issues proactively so that you can address them before they reach your customers. -

+Read [more](https://signoz.io/alerts-management/). +![alerts-cover](https://github.com/user-attachments/assets/03873bb8-1b62-4adf-8f56-28bb7b1750ea) -## Join our Slack community +### Exceptions Monitoring -Come say Hi to us on [Slack](https://signoz.io/slack) πŸ‘‹ +Monitor exceptions automatically in Python, Java, Ruby, and Javascript. For other languages, just drop in a few lines of code and start monitoring exceptions. -

+See the detailed stack trace for all exceptions caught in your application. You can also log in custom attributes to add more context to your exceptions. For example, you can add attributes to identify users for which exceptions occurred. +Read [more](https://signoz.io/exceptions-monitoring/). -## Features: -- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack. -- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate -- Slowest endpoints in your application -- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc -- Filter traces by service name, operation, latency, error, tags/annotations. -- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal` -- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster -- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/)) -- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments -- Easy to set alerts with DIY query builder +![exceptions-cover](https://github.com/user-attachments/assets/4be37864-59f2-4e8a-8d6e-e29ad04298c5) -

+

## Why SigNoz? -Being developers, we found it annoying to rely on closed source SaaS vendors for every small feature we wanted. Closed source vendors often surprise you with huge month end bills without any transparency. +SigNoz is a single tool for all your monitoring and observability needs. Here are a few reasons why you should choose SigNoz: -We wanted to make a self-hosted & open source version of tools like DataDog, NewRelic for companies that have privacy and security concerns about having customer data going to third party services. +- Single tool for observability(logs, metrics, and traces) -Being open source also gives you complete control of your configuration, sampling, uptimes. You can also build modules over SigNoz to extend business specific capabilities +- Built on top of [OpenTelemetry](https://opentelemetry.io/), the open-source standard which frees you from any type of vendor lock-in -### Languages supported: +- Correlated logs, metrics and traces for much richer context while debugging -We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are: +- Uses ClickHouse (used by likes of Uber & Cloudflare) as datastore - an extremely fast and highly optimized storage for observability data -- Java -- Python -- Node.js -- Go -- PHP -- .NET -- Ruby -- Elixir -- Rust +- DIY Query builder, PromQL, and ClickHouse queries to fulfill all your use-cases around querying observability data +- Open-Source - you can use open-source, our [cloud service](https://signoz.io/teams/) or a mix of both based on your use case -You can find the complete list of languages here - https://opentelemetry.io/docs/ -

+## Getting Started +### Create a SigNoz Cloud Account -## Getting Started +SigNoz cloud is the easiest way to get started with SigNoz. Our cloud service is for those users who want to spend more time in getting insights for their application performance without worrying about maintenance. + +[Get started for free](https://signoz.io/teams/) -### Deploy using Docker +### Deploy using Docker (self-hosted) Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker @@ -133,20 +125,51 @@ The [troubleshooting instructions](https://signoz.io/docs/install/troubleshootin

 

-### Deploy in Kubernetes using Helm +### Deploy in Kubernetes using Helm (self-hosted) Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts

+We also offer managed services in your infra. Check our [pricing plans](https://signoz.io/pricing/) for all details. + + +## Join our Slack community + +Come say Hi to us on [Slack](https://signoz.io/slack) πŸ‘‹ + +

+ + +### Languages supported: + +SigNoz supports all major programming languages for monitoring. Any framework and language supported by OpenTelemetry is supported by SigNoz. Find instructions for instrumenting different languages below: + +- [Java](https://signoz.io/docs/instrumentation/java/) +- [Python](https://signoz.io/docs/instrumentation/python/) +- [Node.js or Javascript](https://signoz.io/docs/instrumentation/javascript/) +- [Go](https://signoz.io/docs/instrumentation/golang/) +- [PHP](https://signoz.io/docs/instrumentation/php/) +- [.NET](https://signoz.io/docs/instrumentation/dotnet/) +- [Ruby](https://signoz.io/docs/instrumentation/ruby-on-rails/) +- [Elixir](https://signoz.io/docs/instrumentation/elixir/) +- [Rust](https://signoz.io/docs/instrumentation/rust/) +- [Swift](https://signoz.io/docs/instrumentation/swift/) + +You can find our entire documentation [here](https://signoz.io/docs/introduction/). + +

+ ## Comparisons to Familiar Tools ### SigNoz vs Prometheus -Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great. +Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics, logs and traces, then current experience of stitching together Prometheus & other tools is not great. + +SigNoz is a one-stop solution for metrics and other telemetry signals. And because you will use the same standard (OpenTelemetry) to collect all telemetry signals, you can also correlate these signals to troubleshoot quickly. -Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provides - and give advanced filtering and aggregation over traces, something which Jaeger currently lack. +For example, if you see that there are issues with infrastructure metrics of your k8s cluster at a timestamp, you can jump to other signals like logs and traces to understand the issue quickly.

 

@@ -158,6 +181,7 @@ Moreover, SigNoz has few more advanced features wrt Jaeger: - Jaegar UI doesn’t show any metrics on traces or on filtered traces - Jaeger can’t get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz +- You can also go from traces to logs easily in SigNoz

 

diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index a36fa711a4..8031f65547 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ services: condition: on-failure query-service: - image: signoz/query-service:0.56.0 + image: signoz/query-service:0.57.0 command: [ "-config=/root/config/prometheus.yml", @@ -186,7 +186,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:0.56.0 + image: signoz/frontend:0.57.0 deploy: restart_policy: condition: on-failure @@ -199,7 +199,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.102.12 + image: signoz/signoz-otel-collector:0.111.5 command: [ "--config=/etc/otel-collector-config.yaml", @@ -214,7 +214,6 @@ services: - /:/hostfs:ro environment: - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}} - - DOCKER_MULTI_NODE_CLUSTER=false - LOW_CARDINAL_EXCEPTION_GROUPING=false ports: # - "1777:1777" # pprof extension @@ -238,7 +237,7 @@ services: - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.102.10 + image: signoz/signoz-schema-migrator:0.111.5 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml index a0859ef633..f10d0bb848 100644 --- a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml @@ -131,8 +131,7 @@ processors: exporters: clickhousetraces: datasource: tcp://clickhouse:9000/signoz_traces - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} - low_cardinal_exception_grouping: 
${LOW_CARDINAL_EXCEPTION_GROUPING} + low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING} clickhousemetricswrite: endpoint: tcp://clickhouse:9000/signoz_metrics resource_to_telemetry_conversion: @@ -142,7 +141,6 @@ exporters: # logging: {} clickhouselogsexporter: dsn: tcp://clickhouse:9000/signoz_logs - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} timeout: 10s use_new_schema: true extensions: diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index 0e902a9e28..ec9e0697fb 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -69,7 +69,7 @@ services: - --storage.path=/data otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -84,7 +84,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:0.102.12 + image: signoz/signoz-otel-collector:0.111.5 command: [ "--config=/etc/otel-collector-config.yaml", diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index e12fb3831c..6b9b96b331 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -34,7 +34,7 @@ x-db-depend: &db-depend depends_on: clickhouse: condition: service_healthy - otel-collector-migrator: + otel-collector-migrator-sync: condition: service_completed_successfully # clickhouse-2: # condition: service_healthy @@ -162,7 +162,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.56.0} + image: signoz/query-service:${DOCKER_TAG:-0.57.0} container_name: signoz-query-service command: [ @@ -201,7 +201,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.56.0} + image: signoz/frontend:${DOCKER_TAG:-0.57.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -212,11 +212,13 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf - otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10} - container_name: otel-migrator + otel-collector-migrator-sync: + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5} + container_name: otel-migrator-sync command: + - "sync" - "--dsn=tcp://clickhouse:9000" + - "--up=" depends_on: clickhouse: condition: service_healthy @@ -225,9 +227,25 @@ services: # clickhouse-3: # condition: service_healthy + 
otel-collector-migrator-async: + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5} + container_name: otel-migrator-async + command: + - "async" + - "--dsn=tcp://clickhouse:9000" + - "--up=" + depends_on: + clickhouse: + condition: service_healthy + otel-collector-migrator-sync: + condition: service_completed_successfully + # clickhouse-2: + # condition: service_healthy + # clickhouse-3: + # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5} container_name: signoz-otel-collector command: [ @@ -244,7 +262,6 @@ services: - /:/hostfs:ro environment: - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux - - DOCKER_MULTI_NODE_CLUSTER=false - LOW_CARDINAL_EXCEPTION_GROUPING=false ports: # - "1777:1777" # pprof extension @@ -262,7 +279,7 @@ services: depends_on: clickhouse: condition: service_healthy - otel-collector-migrator: + otel-collector-migrator-sync: condition: service_completed_successfully query-service: condition: service_healthy diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index 2bf0996da4..d8c0db25ac 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -167,7 +167,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.56.0} + image: signoz/query-service:${DOCKER_TAG:-0.57.0} container_name: signoz-query-service command: [ @@ -207,7 +207,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.56.0} + image: signoz/frontend:${DOCKER_TAG:-0.57.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -219,7 +219,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -233,7 +233,7 @@ services: otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5} container_name: signoz-otel-collector command: [ @@ -250,7 +250,6 @@ services: - /:/hostfs:ro environment: - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux - - DOCKER_MULTI_NODE_CLUSTER=false - LOW_CARDINAL_EXCEPTION_GROUPING=false ports: # - "1777:1777" # pprof extension diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index 72e4f3dcc7..8fef0af791 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -142,8 +142,7 @@ extensions: exporters: clickhousetraces: datasource: tcp://clickhouse:9000/signoz_traces - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} - low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING} + low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING} clickhousemetricswrite: endpoint: tcp://clickhouse:9000/signoz_metrics resource_to_telemetry_conversion: @@ -152,7 +151,6 @@ exporters: endpoint: 
tcp://clickhouse:9000/signoz_metrics clickhouselogsexporter: dsn: tcp://clickhouse:9000/signoz_logs - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} timeout: 10s use_new_schema: true # logging: {} diff --git a/ee/query-service/app/api/gateway.go b/ee/query-service/app/api/gateway.go index 15d274ee23..1154911a26 100644 --- a/ee/query-service/app/api/gateway.go +++ b/ee/query-service/app/api/gateway.go @@ -9,7 +9,15 @@ import ( func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) { ctx := req.Context() - if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) { + validPath := false + for _, allowedPrefix := range gateway.AllowedPrefix { + if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) { + validPath = true + break + } + } + + if !validPath { rw.WriteHeader(http.StatusNotFound) return } diff --git a/ee/query-service/app/api/queryrange.go b/ee/query-service/app/api/queryrange.go index f5ad490ed5..6047eeeb10 100644 --- a/ee/query-service/app/api/queryrange.go +++ b/ee/query-service/app/api/queryrange.go @@ -53,7 +53,11 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) { if anomalyQueryExists { // ensure all queries have metric data source, and there should be only one anomaly query for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { - if query.DataSource != v3.DataSourceMetrics { + // What is query.QueryName == query.Expression doing here? + // In the current implementation, the way to recognize if a query is a formula is by + // checking if the expression is the same as the query name. if the expression is different + // then it is a formula. otherwise, it is simple builder query. 
+ if query.DataSource != v3.DataSourceMetrics && query.QueryName == query.Expression { RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil) return } @@ -100,6 +104,13 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) { anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector), anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags), ) + default: + provider = anomaly.NewDailyProvider( + anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache), + anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector), + anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags), + ) } anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams}) if err != nil { diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index 7468be4698..cf54693ba8 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -364,6 +364,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e apiHandler.RegisterLogsRoutes(r, am) apiHandler.RegisterIntegrationRoutes(r, am) apiHandler.RegisterQueryRangeV3Routes(r, am) + apiHandler.RegisterInfraMetricsRoutes(r, am) apiHandler.RegisterQueryRangeV4Routes(r, am) apiHandler.RegisterWebSocketPaths(r, am) apiHandler.RegisterMessagingQueuesRoutes(r, am) diff --git a/ee/query-service/integrations/gateway/proxy.go b/ee/query-service/integrations/gateway/proxy.go index 8b225c4459..06cb08c33a 100644 --- a/ee/query-service/integrations/gateway/proxy.go +++ b/ee/query-service/integrations/gateway/proxy.go @@ -8,9 +8,9 @@ import ( "strings" ) -const ( - RoutePrefix string = "/api/gateway" - AllowedPrefix string = "/v1/workspaces/me" +var ( + RoutePrefix string = "/api/gateway" + AllowedPrefix []string = 
[]string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"} ) type proxy struct { diff --git a/frontend/package.json b/frontend/package.json index a9119d0e63..ad43546550 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -68,7 +68,7 @@ "css-loader": "5.0.0", "css-minimizer-webpack-plugin": "5.0.1", "dayjs": "^1.10.7", - "dompurify": "3.0.0", + "dompurify": "3.1.3", "dotenv": "8.2.0", "event-source-polyfill": "1.0.31", "eventemitter3": "5.0.1", @@ -239,6 +239,7 @@ "debug": "4.3.4", "semver": "7.5.4", "xml2js": "0.5.0", - "phin": "^3.7.1" + "phin": "^3.7.1", + "body-parser": "1.20.3" } } diff --git a/frontend/public/Images/signoz-hero-image.webp b/frontend/public/Images/signoz-hero-image.webp new file mode 100644 index 0000000000..b659b5b2e4 Binary files /dev/null and b/frontend/public/Images/signoz-hero-image.webp differ diff --git a/frontend/public/css/uPlot.min.css b/frontend/public/css/uPlot.min.css new file mode 100644 index 0000000000..f978c7ac02 --- /dev/null +++ b/frontend/public/css/uPlot.min.css @@ -0,0 +1 @@ +.uplot, .uplot *, .uplot *::before, .uplot *::after {box-sizing: border-box;}.uplot {font-family: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";line-height: 1.5;width: min-content;}.u-title {text-align: center;font-size: 18px;font-weight: bold;}.u-wrap {position: relative;user-select: none;}.u-over, .u-under {position: absolute;}.u-under {overflow: hidden;}.uplot canvas {display: block;position: relative;width: 100%;height: 100%;}.u-axis {position: absolute;}.u-legend {font-size: 14px;margin: auto;text-align: center;}.u-inline {display: block;}.u-inline * {display: inline-block;}.u-inline tr {margin-right: 16px;}.u-legend th {font-weight: 600;}.u-legend th > * {vertical-align: middle;display: inline-block;}.u-legend .u-marker {width: 1em;height: 1em;margin-right: 4px;background-clip: padding-box 
!important;}.u-inline.u-live th::after {content: ":";vertical-align: middle;}.u-inline:not(.u-live) .u-value {display: none;}.u-series > * {padding: 4px;}.u-series th {cursor: pointer;}.u-legend .u-off > * {opacity: 0.3;}.u-select {background: rgba(0,0,0,0.07);position: absolute;pointer-events: none;}.u-cursor-x, .u-cursor-y {position: absolute;left: 0;top: 0;pointer-events: none;will-change: transform;}.u-hz .u-cursor-x, .u-vt .u-cursor-y {height: 100%;border-right: 1px dashed #607D8B;}.u-hz .u-cursor-y, .u-vt .u-cursor-x {width: 100%;border-bottom: 1px dashed #607D8B;}.u-cursor-pt {position: absolute;top: 0;left: 0;border-radius: 50%;border: 0 solid;pointer-events: none;will-change: transform;/*this has to be !important since we set inline "background" shorthand */background-clip: padding-box !important;}.u-axis.u-off, .u-select.u-off, .u-cursor-x.u-off, .u-cursor-y.u-off, .u-cursor-pt.u-off {display: none;} diff --git a/frontend/public/fonts/FiraCode-VariableFont_wght.ttf b/frontend/public/fonts/FiraCode-VariableFont_wght.ttf new file mode 100644 index 0000000000..5655ed5145 Binary files /dev/null and b/frontend/public/fonts/FiraCode-VariableFont_wght.ttf differ diff --git a/frontend/public/fonts/Inter-VariableFont_opsz,wght.ttf b/frontend/public/fonts/Inter-VariableFont_opsz,wght.ttf new file mode 100644 index 0000000000..e31b51e3e9 Binary files /dev/null and b/frontend/public/fonts/Inter-VariableFont_opsz,wght.ttf differ diff --git a/frontend/public/fonts/SpaceMono-Regular.ttf b/frontend/public/fonts/SpaceMono-Regular.ttf new file mode 100644 index 0000000000..04e56b923f Binary files /dev/null and b/frontend/public/fonts/SpaceMono-Regular.ttf differ diff --git a/frontend/public/fonts/WorkSans-VariableFont_wght.ttf b/frontend/public/fonts/WorkSans-VariableFont_wght.ttf new file mode 100644 index 0000000000..9a827989bd Binary files /dev/null and b/frontend/public/fonts/WorkSans-VariableFont_wght.ttf differ diff --git a/frontend/public/locales/en-GB/alerts.json 
b/frontend/public/locales/en-GB/alerts.json index 5c5c3b851e..8dd0ccbe47 100644 --- a/frontend/public/locales/en-GB/alerts.json +++ b/frontend/public/locales/en-GB/alerts.json @@ -56,6 +56,7 @@ "option_last": "last", "option_above": "above", "option_below": "below", + "option_above_below": "above/below", "option_equal": "is equal to", "option_notequal": "not equal to", "button_query": "Query", @@ -110,6 +111,8 @@ "choose_alert_type": "Choose a type for the alert", "metric_based_alert": "Metric based Alert", "metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.", + "anomaly_based_alert": "Anomaly based Alert", + "anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.", "log_based_alert": "Log-based Alert", "log_based_alert_desc": "Send a notification when a condition occurs in the logs data.", "traces_based_alert": "Trace-based Alert", diff --git a/frontend/public/locales/en-GB/rules.json b/frontend/public/locales/en-GB/rules.json index 9ac3641c7a..63ae437d7f 100644 --- a/frontend/public/locales/en-GB/rules.json +++ b/frontend/public/locales/en-GB/rules.json @@ -43,6 +43,7 @@ "option_last": "last", "option_above": "above", "option_below": "below", + "option_above_below": "above/below", "option_equal": "is equal to", "option_notequal": "not equal to", "button_query": "Query", diff --git a/frontend/public/locales/en/alerts.json b/frontend/public/locales/en/alerts.json index 3ad8390731..6adeb7382b 100644 --- a/frontend/public/locales/en/alerts.json +++ b/frontend/public/locales/en/alerts.json @@ -13,9 +13,12 @@ "button_no": "No", "remove_label_confirm": "This action will remove all the labels. 
Do you want to proceed?", "remove_label_success": "Labels cleared", - "alert_form_step1": "Step 1 - Define the metric", - "alert_form_step2": "Step 2 - Define Alert Conditions", - "alert_form_step3": "Step 3 - Alert Configuration", + "alert_form_step1": "Choose a detection method", + "alert_form_step2": "Define the metric", + "alert_form_step3": "Define Alert Conditions", + "alert_form_step4": "Alert Configuration", + "threshold_alert_desc": "An alert is triggered whenever a metric deviates from an expected threshold.", + "anomaly_detection_alert_desc": "An alert is triggered whenever a metric deviates from an expected pattern.", "metric_query_max_limit": "Can not create query. You can create maximum of 5 queries", "confirm_save_title": "Save Changes", "confirm_save_content_part1": "Your alert built with", @@ -35,6 +38,7 @@ "button_cancelchanges": "Cancel", "button_discard": "Discard", "text_condition1": "Send a notification when", + "text_condition1_anomaly": "Send notification when the observed value for", "text_condition2": "the threshold", "text_condition3": "during the last", "option_1min": "1 min", @@ -56,6 +60,7 @@ "option_last": "last", "option_above": "above", "option_below": "below", + "option_above_below": "above/below", "option_equal": "is equal to", "option_notequal": "not equal to", "button_query": "Query", @@ -109,7 +114,9 @@ "user_tooltip_more_help": "More details on how to create alerts", "choose_alert_type": "Choose a type for the alert", "metric_based_alert": "Metric based Alert", + "anomaly_based_alert": "Anomaly based Alert", "metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.", + "anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.", "log_based_alert": "Log-based Alert", "log_based_alert_desc": "Send a notification when a condition occurs in the logs data.", "traces_based_alert": "Trace-based Alert", diff --git a/frontend/public/locales/en/rules.json 
b/frontend/public/locales/en/rules.json index 9ac3641c7a..63ae437d7f 100644 --- a/frontend/public/locales/en/rules.json +++ b/frontend/public/locales/en/rules.json @@ -43,6 +43,7 @@ "option_last": "last", "option_above": "above", "option_below": "below", + "option_above_below": "above/below", "option_equal": "is equal to", "option_notequal": "not equal to", "button_query": "Query", diff --git a/frontend/src/api/preferences/getAllOrgPreferences.ts b/frontend/src/api/preferences/getAllOrgPreferences.ts new file mode 100644 index 0000000000..12f6497c0e --- /dev/null +++ b/frontend/src/api/preferences/getAllOrgPreferences.ts @@ -0,0 +1,18 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { GetAllOrgPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences'; + +const getAllOrgPreferences = async (): Promise< + SuccessResponse | ErrorResponse +> => { + const response = await axios.get(`/org/preferences`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; +}; + +export default getAllOrgPreferences; diff --git a/frontend/src/api/preferences/getAllUserPreference.ts b/frontend/src/api/preferences/getAllUserPreference.ts new file mode 100644 index 0000000000..f7e94f76cb --- /dev/null +++ b/frontend/src/api/preferences/getAllUserPreference.ts @@ -0,0 +1,18 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { GetAllUserPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences'; + +const getAllUserPreferences = async (): Promise< + SuccessResponse | ErrorResponse +> => { + const response = await axios.get(`/user/preferences`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; +}; + +export default getAllUserPreferences; diff --git a/frontend/src/api/preferences/getOrgPreference.ts b/frontend/src/api/preferences/getOrgPreference.ts new 
file mode 100644 index 0000000000..6a016ddd3a --- /dev/null +++ b/frontend/src/api/preferences/getOrgPreference.ts @@ -0,0 +1,20 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { GetOrgPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences'; + +const getOrgPreference = async ({ + preferenceID, +}: { + preferenceID: string; +}): Promise | ErrorResponse> => { + const response = await axios.get(`/org/preferences/${preferenceID}`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; +}; + +export default getOrgPreference; diff --git a/frontend/src/api/preferences/getUserPreference.ts b/frontend/src/api/preferences/getUserPreference.ts new file mode 100644 index 0000000000..a750732a92 --- /dev/null +++ b/frontend/src/api/preferences/getUserPreference.ts @@ -0,0 +1,22 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { GetUserPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences'; + +const getUserPreference = async ({ + preferenceID, +}: { + preferenceID: string; +}): Promise< + SuccessResponse | ErrorResponse +> => { + const response = await axios.get(`/user/preferences/${preferenceID}`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; +}; + +export default getUserPreference; diff --git a/frontend/src/api/preferences/updateOrgPreference.ts b/frontend/src/api/preferences/updateOrgPreference.ts new file mode 100644 index 0000000000..76e5a68640 --- /dev/null +++ b/frontend/src/api/preferences/updateOrgPreference.ts @@ -0,0 +1,25 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { + UpdateOrgPreferenceProps, + UpdateOrgPreferenceResponseProps, +} from 'types/api/preferences/userOrgPreferences'; + +const updateOrgPreference = async ( + preferencePayload: 
UpdateOrgPreferenceProps, +): Promise< + SuccessResponse | ErrorResponse +> => { + const response = await axios.put(`/org/preferences`, { + preference_value: preferencePayload.value, + }); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data.data, + }; +}; + +export default updateOrgPreference; diff --git a/frontend/src/api/preferences/updateUserPreference.ts b/frontend/src/api/preferences/updateUserPreference.ts new file mode 100644 index 0000000000..5b6b0427d6 --- /dev/null +++ b/frontend/src/api/preferences/updateUserPreference.ts @@ -0,0 +1,25 @@ +import axios from 'api'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { + UpdateUserPreferenceProps, + UpdateUserPreferenceResponseProps, +} from 'types/api/preferences/userOrgPreferences'; + +const updateUserPreference = async ( + preferencePayload: UpdateUserPreferenceProps, +): Promise< + SuccessResponse | ErrorResponse +> => { + const response = await axios.put(`/user/preferences`, { + preference_value: preferencePayload.value, + }); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data.data, + }; +}; + +export default updateUserPreference; diff --git a/frontend/src/components/LaunchChatSupport/util.ts b/frontend/src/components/LaunchChatSupport/util.ts index 8b610b11cc..ebf40eadb1 100644 --- a/frontend/src/components/LaunchChatSupport/util.ts +++ b/frontend/src/components/LaunchChatSupport/util.ts @@ -1,46 +1,3 @@ -import { PANEL_TYPES } from 'constants/queryBuilder'; -import { AlertDef } from 'types/api/alerts/def'; -import { Dashboard, DashboardData } from 'types/api/dashboard/getAll'; - -export const chartHelpMessage = ( - selectedDashboard: Dashboard | undefined, - graphType: PANEL_TYPES, -): string => ` -Hi Team, - -I need help in creating this chart. 
Here are my dashboard details - -Name: ${selectedDashboard?.data.title || ''} -Panel type: ${graphType} -Dashboard Id: ${selectedDashboard?.uuid || ''} - -Thanks`; - -export const dashboardHelpMessage = ( - data: DashboardData | undefined, - selectedDashboard: Dashboard | undefined, -): string => ` -Hi Team, - -I need help with this dashboard. Here are my dashboard details - -Name: ${data?.title || ''} -Dashboard Id: ${selectedDashboard?.uuid || ''} - -Thanks`; - -export const dashboardListMessage = `Hi Team, - -I need help with dashboards. - -Thanks`; - -export const listAlertMessage = `Hi Team, - -I need help with managing alerts. - -Thanks`; - export const onboardingHelpMessage = ( dataSourceName: string, moduleId: string, @@ -55,35 +12,3 @@ Module: ${moduleId} Thanks `; - -export const alertHelpMessage = ( - alertDef: AlertDef, - ruleId: number, -): string => ` -Hi Team, - -I need help in configuring this alert. Here are my alert rule details - -Name: ${alertDef?.alert || ''} -Alert Type: ${alertDef?.alertType || ''} -State: ${(alertDef as any)?.state || ''} -Alert Id: ${ruleId} - -Thanks`; - -export const integrationsListMessage = `Hi Team, - -I need help with Integrations. - -Thanks`; - -export const integrationDetailMessage = ( - selectedIntegration: string, -): string => ` -Hi Team, - -I need help in configuring this integration. 
- -Integration Id: ${selectedIntegration} - -Thanks`; diff --git a/frontend/src/components/LogDetail/index.tsx b/frontend/src/components/LogDetail/index.tsx index 4748312ceb..c216694c35 100644 --- a/frontend/src/components/LogDetail/index.tsx +++ b/frontend/src/components/LogDetail/index.tsx @@ -129,6 +129,7 @@ function LogDetail({ return ( diff --git a/frontend/src/components/Logs/ListLogView/index.tsx b/frontend/src/components/Logs/ListLogView/index.tsx index ed2627552d..8d5c0118cd 100644 --- a/frontend/src/components/Logs/ListLogView/index.tsx +++ b/frontend/src/components/Logs/ListLogView/index.tsx @@ -195,21 +195,20 @@ function ListLogView({ return ( <>
- +
` width: 100% !important; @@ -41,13 +42,8 @@ export const Container = styled(Card)<{ ? `padding:0.3rem 0.6rem;` : ``} - ${({ $isActiveLog, $isDarkMode }): string => - $isActiveLog - ? `background-color: ${ - $isDarkMode ? Color.BG_SLATE_500 : Color.BG_VANILLA_300 - } !important` - : ''} - } + ${({ $isActiveLog, $isDarkMode, $logType }): string => + getActiveLogBackground($isActiveLog, $isDarkMode, $logType)} `; export const Text = styled(Typography.Text)` diff --git a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss index 2260bf5aa3..5c2720e954 100644 --- a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss +++ b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss @@ -41,10 +41,4 @@ background-color: var(--bg-sakura-500); } } - - &.isActive { - .line { - background-color: var(--bg-robin-400, #7190f9); - } - } } diff --git a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.test.tsx b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.test.tsx index 06cc9d3ec4..5ecddd5959 100644 --- a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.test.tsx +++ b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.test.tsx @@ -17,14 +17,6 @@ describe('LogStateIndicator', () => { ); }); - it('renders correctly when isActive is true', () => { - const { container } = render( - , - ); - const indicator = container.firstChild as HTMLElement; - expect(indicator.classList.contains('isActive')).toBe(true); - }); - it('renders correctly with different types', () => { const { container: containerInfo } = render( , diff --git a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.tsx b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.tsx index b9afa5b7a2..f831c6252a 100644 --- a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.tsx +++ 
b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.tsx @@ -44,22 +44,16 @@ export const LogType = { function LogStateIndicator({ type, - isActive, fontSize, }: { type: string; fontSize: FontSize; - isActive?: boolean; }): JSX.Element { return ( -
+
); } -LogStateIndicator.defaultProps = { - isActive: false, -}; - export default LogStateIndicator; diff --git a/frontend/src/components/Logs/RawLogView/index.tsx b/frontend/src/components/Logs/RawLogView/index.tsx index 2cda9c7247..c8246ad120 100644 --- a/frontend/src/components/Logs/RawLogView/index.tsx +++ b/frontend/src/components/Logs/RawLogView/index.tsx @@ -162,20 +162,15 @@ function RawLogView({ $isDarkMode={isDarkMode} $isReadOnly={isReadOnly} $isHightlightedLog={isHighlighted} - $isActiveLog={isActiveLog} + $isActiveLog={ + activeLog?.id === data.id || activeContextLog?.id === data.id || isActiveLog + } + $logType={logType} onMouseEnter={handleMouseEnter} onMouseLeave={handleMouseLeave} fontSize={fontSize} > - + ` position: relative; @@ -34,11 +35,12 @@ export const RawLogViewContainer = styled(Row)<{ : `margin: 2px 0;`} } - ${({ $isActiveLog }): string => getActiveLogBackground($isActiveLog)} + ${({ $isActiveLog, $logType }): string => + getActiveLogBackground($isActiveLog, true, $logType)} - ${({ $isReadOnly, $isActiveLog, $isDarkMode }): string => + ${({ $isReadOnly, $isActiveLog, $isDarkMode, $logType }): string => $isActiveLog - ? getActiveLogBackground($isActiveLog, $isDarkMode) + ? getActiveLogBackground($isActiveLog, $isDarkMode, $logType) : getDefaultLogBackground($isReadOnly, $isDarkMode)} ${({ $isHightlightedLog, $isDarkMode }): string => diff --git a/frontend/src/components/Logs/TableView/useTableView.tsx b/frontend/src/components/Logs/TableView/useTableView.tsx index 43b4ba2628..662686e67d 100644 --- a/frontend/src/components/Logs/TableView/useTableView.tsx +++ b/frontend/src/components/Logs/TableView/useTableView.tsx @@ -35,8 +35,6 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => { linesPerRow, fontSize, appendTo = 'center', - activeContextLog, - activeLog, isListViewPanel, } = props; @@ -90,9 +88,6 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
@@ -130,16 +125,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => { }, ...(appendTo === 'end' ? fieldColumns : []), ]; - }, [ - fields, - isListViewPanel, - appendTo, - isDarkMode, - linesPerRow, - activeLog?.id, - activeContextLog?.id, - fontSize, - ]); + }, [fields, isListViewPanel, appendTo, isDarkMode, linesPerRow, fontSize]); return { columns, dataSource: flattenLogData }; }; diff --git a/frontend/src/components/ResizeTable/DynamicColumnTable.tsx b/frontend/src/components/ResizeTable/DynamicColumnTable.tsx index 53cccbe546..d8b6838788 100644 --- a/frontend/src/components/ResizeTable/DynamicColumnTable.tsx +++ b/frontend/src/components/ResizeTable/DynamicColumnTable.tsx @@ -107,6 +107,7 @@ function DynamicColumnTable({ className="dynamicColumnTable-button filter-btn" size="middle" icon={} + data-testid="additional-filters-button" /> )} diff --git a/frontend/src/constants/alerts.ts b/frontend/src/constants/alerts.ts index 3565ded3d7..425926ea47 100644 --- a/frontend/src/constants/alerts.ts +++ b/frontend/src/constants/alerts.ts @@ -2,6 +2,7 @@ import { AlertTypes } from 'types/api/alerts/alertTypes'; import { DataSource } from 'types/common/queryBuilder'; export const ALERTS_DATA_SOURCE_MAP: Record = { + [AlertTypes.ANOMALY_BASED_ALERT]: DataSource.METRICS, [AlertTypes.METRICS_BASED_ALERT]: DataSource.METRICS, [AlertTypes.LOGS_BASED_ALERT]: DataSource.LOGS, [AlertTypes.TRACES_BASED_ALERT]: DataSource.TRACES, diff --git a/frontend/src/constants/features.ts b/frontend/src/constants/features.ts index 769522455d..9a2550ec0b 100644 --- a/frontend/src/constants/features.ts +++ b/frontend/src/constants/features.ts @@ -22,4 +22,5 @@ export enum FeatureKeys { GATEWAY = 'GATEWAY', PREMIUM_SUPPORT = 'PREMIUM_SUPPORT', QUERY_BUILDER_SEARCH_V2 = 'QUERY_BUILDER_SEARCH_V2', + ANOMALY_DETECTION = 'ANOMALY_DETECTION', } diff --git a/frontend/src/constants/query.ts b/frontend/src/constants/query.ts index 3ee0a39634..2214e9487c 100644 --- 
a/frontend/src/constants/query.ts +++ b/frontend/src/constants/query.ts @@ -36,4 +36,5 @@ export enum QueryParams { topic = 'topic', partition = 'partition', selectedTimelineQuery = 'selectedTimelineQuery', + ruleType = 'ruleType', } diff --git a/frontend/src/constants/queryFunctionOptions.ts b/frontend/src/constants/queryFunctionOptions.ts index 4a7b3b0413..0069646c40 100644 --- a/frontend/src/constants/queryFunctionOptions.ts +++ b/frontend/src/constants/queryFunctionOptions.ts @@ -67,6 +67,10 @@ export const metricQueryFunctionOptions: SelectOption[] = [ value: QueryFunctionsTypes.TIME_SHIFT, label: 'Time Shift', }, + { + value: QueryFunctionsTypes.TIME_SHIFT, + label: 'Time Shift', + }, ]; export const logsQueryFunctionOptions: SelectOption[] = [ @@ -80,10 +84,15 @@ interface QueryFunctionConfigType { showInput: boolean; inputType?: string; placeholder?: string; + disabled?: boolean; }; } export const queryFunctionsTypesConfig: QueryFunctionConfigType = { + anomaly: { + showInput: false, + disabled: true, + }, cutOffMin: { showInput: true, inputType: 'text', diff --git a/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.styles.scss b/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.styles.scss new file mode 100644 index 0000000000..32f42edf6a --- /dev/null +++ b/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.styles.scss @@ -0,0 +1,180 @@ +.anomaly-alert-evaluation-view { + display: flex; + flex-direction: row; + justify-content: space-between; + gap: 8px; + width: 100%; + height: 100%; + + .anomaly-alert-evaluation-view-chart-section { + height: 100%; + width: 100%; + + display: flex; + justify-content: center; + align-items: center; + + &.has-multi-series-data { + width: calc(100% - 240px); + } + + .anomaly-alert-evaluation-view-no-data-container { + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + gap: 8px; + } + } + + 
.anomaly-alert-evaluation-view-series-selection { + display: flex; + flex-direction: column; + gap: 8px; + width: 240px; + padding: 0px 8px; + height: 100%; + + .anomaly-alert-evaluation-view-series-list { + display: flex; + flex-direction: column; + gap: 8px; + height: 100%; + + .anomaly-alert-evaluation-view-series-list-search { + margin-bottom: 16px; + } + + .anomaly-alert-evaluation-view-series-list-title { + margin-top: 12px; + font-size: 13px !important; + font-weight: 400; + } + + .anomaly-alert-evaluation-view-series-list-items { + display: flex; + flex-direction: column; + gap: 8px; + height: 100%; + overflow-y: auto; + + .anomaly-alert-evaluation-view-series-list-item { + display: flex; + flex-direction: row; + gap: 8px; + + .anomaly-alert-evaluation-view-series-list-item-color { + width: 6px; + height: 6px; + border-radius: 50%; + + display: inline-flex; + margin-right: 8px; + vertical-align: middle; + } + + cursor: pointer; + } + + &::-webkit-scrollbar { + width: 0.1rem; + } + &::-webkit-scrollbar-corner { + background: transparent; + } + &::-webkit-scrollbar-thumb { + background: rgb(136, 136, 136); + border-radius: 0.625rem; + } + &::-webkit-scrollbar-track { + background: transparent; + } + } + } + } + + .uplot { + .u-title { + text-align: center; + font-size: 18px; + font-weight: 400; + display: flex; + height: 40px; + font-size: 13px; + align-items: center; + } + + .u-legend { + display: flex; + margin-top: 16px; + + tbody { + width: 100%; + + .u-series { + display: inline-flex; + } + } + } + } +} + +.uplot-tooltip { + background-color: rgba(0, 0, 0, 0.9); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + color: #ddd; + font-size: 13px; + line-height: 1.4; + padding: 8px 12px; + pointer-events: none; + position: absolute; + z-index: 100; + max-height: 500px; + width: 280px; + overflow-y: auto; + display: none; /* Hide tooltip by default */ + + &::-webkit-scrollbar { + width: 0.3rem; + } + &::-webkit-scrollbar-corner { + background: transparent; + } + 
&::-webkit-scrollbar-thumb { + background: rgb(136, 136, 136); + border-radius: 0.625rem; + } + &::-webkit-scrollbar-track { + background: transparent; + } +} + +.uplot-tooltip-title { + font-weight: bold; + margin-bottom: 4px; +} + +.uplot-tooltip-series { + display: flex; + gap: 4px; + padding: 4px 0px; + align-items: center; +} + +.uplot-tooltip-series-name { + margin-right: 4px; +} + +.uplot-tooltip-band { + font-style: italic; + color: #666; +} + +.uplot-tooltip-marker { + display: inline-block; + width: 6px; + height: 6px; + border-radius: 50%; + margin-right: 8px; + vertical-align: middle; +} diff --git a/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.tsx b/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.tsx new file mode 100644 index 0000000000..88eff524c6 --- /dev/null +++ b/frontend/src/container/AnomalyAlertEvaluationView/AnomalyAlertEvaluationView.tsx @@ -0,0 +1,363 @@ +import 'uplot/dist/uPlot.min.css'; +import './AnomalyAlertEvaluationView.styles.scss'; + +import { Checkbox, Typography } from 'antd'; +import Search from 'antd/es/input/Search'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import useDebouncedFn from 'hooks/useDebouncedFunction'; +import { useResizeObserver } from 'hooks/useDimensions'; +import getAxes from 'lib/uPlotLib/utils/getAxes'; +import { getUplotChartDataForAnomalyDetection } from 'lib/uPlotLib/utils/getUplotChartData'; +import { getYAxisScaleForAnomalyDetection } from 'lib/uPlotLib/utils/getYAxisScale'; +import { LineChart } from 'lucide-react'; +import { useEffect, useRef, useState } from 'react'; +import uPlot from 'uplot'; + +import tooltipPlugin from './tooltipPlugin'; + +function UplotChart({ + data, + options, + chartRef, +}: { + data: any; + options: any; + chartRef: any; +}): JSX.Element { + const plotInstance = useRef(null); + + useEffect(() => { + if (plotInstance.current) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // 
@ts-ignore + plotInstance.current.destroy(); + } + + if (data && data.length > 0) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + // eslint-disable-next-line new-cap + plotInstance.current = new uPlot(options, data, chartRef.current); + } + + return (): void => { + if (plotInstance.current) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + plotInstance.current.destroy(); + } + }; + }, [data, options, chartRef]); + + return
; +} + +function AnomalyAlertEvaluationView({ + data, + yAxisUnit, +}: { + data: any; + yAxisUnit: string; +}): JSX.Element { + const { spline } = uPlot.paths; + // eslint-disable-next-line @typescript-eslint/naming-convention + const _spline = spline ? spline() : undefined; + const chartRef = useRef(null); + const isDarkMode = useIsDarkMode(); + const [seriesData, setSeriesData] = useState({}); + const [selectedSeries, setSelectedSeries] = useState(null); + + const [filteredSeriesKeys, setFilteredSeriesKeys] = useState([]); + const [allSeries, setAllSeries] = useState([]); + + const graphRef = useRef(null); + const dimensions = useResizeObserver(graphRef); + + useEffect(() => { + const chartData = getUplotChartDataForAnomalyDetection(data, isDarkMode); + setSeriesData(chartData); + + setAllSeries(Object.keys(chartData)); + + setFilteredSeriesKeys(Object.keys(chartData)); + }, [data, isDarkMode]); + + useEffect(() => { + const seriesKeys = Object.keys(seriesData); + if (seriesKeys.length === 1) { + setSelectedSeries(seriesKeys[0]); // Automatically select if only one series + } else { + setSelectedSeries(null); // Default to "Show All" if multiple series + } + }, [seriesData]); + + const handleSeriesChange = (series: string | null): void => { + setSelectedSeries(series); + }; + + const bandsPlugin = { + hooks: { + draw: [ + (u: any): void => { + if (!selectedSeries) return; + + const { ctx } = u; + const upperBandIdx = 3; + const lowerBandIdx = 4; + + const xData = u.data[0]; + const yUpperData = u.data[upperBandIdx]; + const yLowerData = u.data[lowerBandIdx]; + + const strokeStyle = + u.series[1]?.stroke || seriesData[selectedSeries].color; + const fillStyle = + typeof strokeStyle === 'string' + ? 
strokeStyle.replace(')', ', 0.1)') + : 'rgba(255, 255, 255, 0.1)'; + + ctx.beginPath(); + const firstX = u.valToPos(xData[0], 'x', true); + const firstUpperY = u.valToPos(yUpperData[0], 'y', true); + ctx.moveTo(firstX, firstUpperY); + + for (let i = 0; i < xData.length; i++) { + const x = u.valToPos(xData[i], 'x', true); + const y = u.valToPos(yUpperData[i], 'y', true); + ctx.lineTo(x, y); + } + + for (let i = xData.length - 1; i >= 0; i--) { + const x = u.valToPos(xData[i], 'x', true); + const y = u.valToPos(yLowerData[i], 'y', true); + ctx.lineTo(x, y); + } + + ctx.closePath(); + ctx.fillStyle = fillStyle; + ctx.fill(); + }, + ], + }, + }; + + const initialData = allSeries.length + ? [ + seriesData[allSeries[0]].data[0], // Shared X-axis + ...allSeries.map((key) => seriesData[key].data[1]), // Map through Y-axis data for all series + ] + : []; + + const options = { + width: dimensions.width, + height: dimensions.height - 36, + plugins: [bandsPlugin, tooltipPlugin(isDarkMode)], + focus: { + alpha: 0.3, + }, + legend: { + show: true, + live: false, + isolate: true, + }, + cursor: { + lock: false, + focus: { + prox: 1e6, + bias: 1, + }, + points: { + size: ( + u: { series: { [x: string]: { points: { size: number } } } }, + seriesIdx: string | number, + ): number => u.series[seriesIdx].points.size * 3, + width: (u: any, seriesIdx: any, size: number): number => size / 4, + stroke: ( + u: { + series: { + [x: string]: { points: { stroke: (arg0: any, arg1: any) => any } }; + }; + }, + seriesIdx: string | number, + ): string => `${u.series[seriesIdx].points.stroke(u, seriesIdx)}90`, + fill: (): string => '#fff', + }, + }, + series: [ + { + label: 'Time', + }, + ...(selectedSeries + ? 
[ + { + label: `Main Series`, + stroke: seriesData[selectedSeries].color, + width: 2, + show: true, + paths: _spline, + spanGaps: true, + }, + { + label: `Predicted Value`, + stroke: seriesData[selectedSeries].color, + width: 1, + dash: [2, 2], + show: true, + paths: _spline, + spanGaps: true, + }, + { + label: `Upper Band`, + stroke: 'transparent', + show: true, + paths: _spline, + spanGaps: true, + points: { + show: false, + size: 1, + }, + }, + { + label: `Lower Band`, + stroke: 'transparent', + show: true, + paths: _spline, + spanGaps: true, + points: { + show: false, + size: 1, + }, + }, + ] + : allSeries.map((seriesKey) => ({ + label: seriesKey, + stroke: seriesData[seriesKey].color, + width: 2, + show: true, + paths: _spline, + spanGaps: true, + }))), + ], + scales: { + x: { + time: true, + spanGaps: true, + }, + y: { + ...getYAxisScaleForAnomalyDetection({ + seriesData, + selectedSeries, + initialData, + yAxisUnit, + }), + }, + }, + grid: { + show: true, + }, + axes: getAxes(isDarkMode, yAxisUnit), + }; + + const handleSearch = (searchText: string): void => { + if (!searchText || searchText.length === 0) { + setFilteredSeriesKeys(allSeries); + return; + } + + const filteredSeries = allSeries.filter((series) => + series.toLowerCase().includes(searchText.toLowerCase()), + ); + + setFilteredSeriesKeys(filteredSeries); + }; + + const handleSearchValueChange = useDebouncedFn((event): void => { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + const value = event?.target?.value || ''; + + handleSearch(value); + }, 300); + + return ( +
+
1 ? 'has-multi-series-data' : '' + }`} + ref={graphRef} + > + {allSeries.length > 0 ? ( + + ) : ( +
+ + + No Data +
+ )} +
+ + {allSeries.length > 1 && ( +
+ {allSeries.length > 1 && ( +
+ + +
+ {filteredSeriesKeys.length > 0 && ( + handleSeriesChange(null)} + > + Show All + + )} + + {filteredSeriesKeys.map((seriesKey) => ( +
+ handleSeriesChange(seriesKey)} + > +
+ + {seriesKey} + +
+ ))} + + {filteredSeriesKeys.length === 0 && ( + No series found + )} +
+
+ )} +
+ )} +
+ ); +} + +export default AnomalyAlertEvaluationView; diff --git a/frontend/src/container/AnomalyAlertEvaluationView/index.tsx b/frontend/src/container/AnomalyAlertEvaluationView/index.tsx new file mode 100644 index 0000000000..b99070cf6d --- /dev/null +++ b/frontend/src/container/AnomalyAlertEvaluationView/index.tsx @@ -0,0 +1,3 @@ +import AnomalyAlertEvaluationView from './AnomalyAlertEvaluationView'; + +export default AnomalyAlertEvaluationView; diff --git a/frontend/src/container/AnomalyAlertEvaluationView/tooltipPlugin.ts b/frontend/src/container/AnomalyAlertEvaluationView/tooltipPlugin.ts new file mode 100644 index 0000000000..6d32dbee35 --- /dev/null +++ b/frontend/src/container/AnomalyAlertEvaluationView/tooltipPlugin.ts @@ -0,0 +1,148 @@ +import { themeColors } from 'constants/theme'; +import { generateColor } from 'lib/uPlotLib/utils/generateColor'; + +const tooltipPlugin = ( + isDarkMode: boolean, +): { hooks: { init: (u: any) => void } } => { + let tooltip: HTMLDivElement; + const tooltipLeftOffset = 10; + const tooltipTopOffset = 10; + let isMouseOverPlot = false; + + function formatValue(value: string | number | Date): string | number | Date { + if (typeof value === 'string' && !Number.isNaN(parseFloat(value))) { + return parseFloat(value).toFixed(3); + } + if (typeof value === 'number') { + return value.toFixed(3); + } + if (value instanceof Date) { + return value.toLocaleString(); + } + if (value == null) { + return 'N/A'; + } + + return String(value); + } + + function updateTooltip(u: any, left: number, top: number): void { + const idx = u.posToIdx(left); + const xVal = u.data[0][idx]; + + if (xVal == null) { + tooltip.style.display = 'none'; + return; + } + + const xDate = new Date(xVal * 1000); + const formattedXDate = formatValue(xDate); + + let tooltipContent = `
Time: ${formattedXDate}
`; + + let mainValue; + let upperBand; + let lowerBand; + + let color = null; + + // Loop through all series (excluding the x-axis series) + for (let i = 1; i < u.series.length; i++) { + const series = u.series[i]; + + const yVal = u.data[i][idx]; + const formattedYVal = formatValue(yVal); + + color = generateColor( + series.label, + isDarkMode ? themeColors.chartcolors : themeColors.lightModeColor, + ); + + // Create the round marker for the series + const marker = ``; + + if (series.label.toLowerCase().includes('upper band')) { + upperBand = formattedYVal; + } else if (series.label.toLowerCase().includes('lower band')) { + lowerBand = formattedYVal; + } else if (series.label.toLowerCase().includes('main series')) { + mainValue = formattedYVal; + } else { + tooltipContent += ` +
+ ${marker} + ${series.label}: + ${formattedYVal} +
`; + } + } + + // Add main value, upper band, and lower band to the tooltip + if (mainValue !== undefined) { + const marker = ``; + tooltipContent += ` +
+ ${marker} + Main Series: + ${mainValue} +
`; + } + if (upperBand !== undefined) { + const marker = ``; + tooltipContent += ` +
+ ${marker} + Upper Band: + ${upperBand} +
`; + } + if (lowerBand !== undefined) { + const marker = ``; + tooltipContent += ` +
+ ${marker} + Lower Band: + ${lowerBand} +
`; + } + + tooltip.innerHTML = tooltipContent; + tooltip.style.display = 'block'; + tooltip.style.left = `${left + tooltipLeftOffset}px`; + tooltip.style.top = `${top + tooltipTopOffset}px`; + } + + function init(u: any): void { + tooltip = document.createElement('div'); + tooltip.className = 'uplot-tooltip'; + tooltip.style.display = 'none'; + u.over.appendChild(tooltip); + + // Add event listeners + u.over.addEventListener('mouseenter', () => { + isMouseOverPlot = true; + }); + + u.over.addEventListener('mouseleave', () => { + isMouseOverPlot = false; + tooltip.style.display = 'none'; + }); + + u.over.addEventListener('mousemove', (e: MouseEvent) => { + if (isMouseOverPlot) { + const rect = u.over.getBoundingClientRect(); + const left = e.clientX - rect.left; + const top = e.clientY - rect.top; + updateTooltip(u, left, top); + } + }); + } + + return { + hooks: { + init, + }, + }; +}; + +export default tooltipPlugin; diff --git a/frontend/src/container/AppLayout/index.tsx b/frontend/src/container/AppLayout/index.tsx index 9193decd6b..beb4cea61c 100644 --- a/frontend/src/container/AppLayout/index.tsx +++ b/frontend/src/container/AppLayout/index.tsx @@ -211,6 +211,13 @@ function AppLayout(props: AppLayoutProps): JSX.Element { } }, [licenseData, isFetching]); + useEffect(() => { + // after logging out hide the trial expiry banner + if (!isLoggedIn) { + setShowTrialExpiryBanner(false); + } + }, [isLoggedIn]); + const handleUpgrade = (): void => { if (role === 'ADMIN') { history.push(ROUTES.BILLING); diff --git a/frontend/src/container/CreateAlertRule/SelectAlertType/config.ts b/frontend/src/container/CreateAlertRule/SelectAlertType/config.ts index c973684e67..2986cb9f38 100644 --- a/frontend/src/container/CreateAlertRule/SelectAlertType/config.ts +++ b/frontend/src/container/CreateAlertRule/SelectAlertType/config.ts @@ -3,25 +3,41 @@ import { AlertTypes } from 'types/api/alerts/alertTypes'; import { OptionType } from './types'; -export const getOptionList = (t: 
TFunction): OptionType[] => [ - { - title: t('metric_based_alert'), - selection: AlertTypes.METRICS_BASED_ALERT, - description: t('metric_based_alert_desc'), - }, - { - title: t('log_based_alert'), - selection: AlertTypes.LOGS_BASED_ALERT, - description: t('log_based_alert_desc'), - }, - { - title: t('traces_based_alert'), - selection: AlertTypes.TRACES_BASED_ALERT, - description: t('traces_based_alert_desc'), - }, - { - title: t('exceptions_based_alert'), - selection: AlertTypes.EXCEPTIONS_BASED_ALERT, - description: t('exceptions_based_alert_desc'), - }, -]; +export const getOptionList = ( + t: TFunction, + isAnomalyDetectionEnabled: boolean, +): OptionType[] => { + const optionList: OptionType[] = [ + { + title: t('metric_based_alert'), + selection: AlertTypes.METRICS_BASED_ALERT, + description: t('metric_based_alert_desc'), + }, + { + title: t('log_based_alert'), + selection: AlertTypes.LOGS_BASED_ALERT, + description: t('log_based_alert_desc'), + }, + { + title: t('traces_based_alert'), + selection: AlertTypes.TRACES_BASED_ALERT, + description: t('traces_based_alert_desc'), + }, + { + title: t('exceptions_based_alert'), + selection: AlertTypes.EXCEPTIONS_BASED_ALERT, + description: t('exceptions_based_alert_desc'), + }, + ]; + + if (isAnomalyDetectionEnabled) { + optionList.unshift({ + title: t('anomaly_based_alert'), + selection: AlertTypes.ANOMALY_BASED_ALERT, + description: t('anomaly_based_alert_desc'), + isBeta: true, + }); + } + + return optionList; +}; diff --git a/frontend/src/container/CreateAlertRule/SelectAlertType/index.tsx b/frontend/src/container/CreateAlertRule/SelectAlertType/index.tsx index 48075649b7..70e1a1784e 100644 --- a/frontend/src/container/CreateAlertRule/SelectAlertType/index.tsx +++ b/frontend/src/container/CreateAlertRule/SelectAlertType/index.tsx @@ -1,6 +1,8 @@ -import { Row, Typography } from 'antd'; +import { Row, Tag, Typography } from 'antd'; import logEvent from 'api/common/logEvent'; import { ALERTS_DATA_SOURCE_MAP } from 
'constants/alerts'; +import { FeatureKeys } from 'constants/features'; +import useFeatureFlags from 'hooks/useFeatureFlag'; import { useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { AlertTypes } from 'types/api/alerts/alertTypes'; @@ -12,11 +14,18 @@ import { OptionType } from './types'; function SelectAlertType({ onSelect }: SelectAlertTypeProps): JSX.Element { const { t } = useTranslation(['alerts']); - const optionList = getOptionList(t); + const isAnomalyDetectionEnabled = + useFeatureFlags(FeatureKeys.ANOMALY_DETECTION)?.active || false; + + const optionList = getOptionList(t, isAnomalyDetectionEnabled); function handleRedirection(option: AlertTypes): void { let url = ''; switch (option) { + case AlertTypes.ANOMALY_BASED_ALERT: + url = + 'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples'; + break; case AlertTypes.METRICS_BASED_ALERT: url = 'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-source-selection-page#examples'; @@ -52,6 +61,13 @@ function SelectAlertType({ onSelect }: SelectAlertTypeProps): JSX.Element { + Beta + + ) : undefined + } onClick={(): void => { onSelect(option.selection); }} diff --git a/frontend/src/container/CreateAlertRule/SelectAlertType/types.ts b/frontend/src/container/CreateAlertRule/SelectAlertType/types.ts index 670f5a2708..4c087b6590 100644 --- a/frontend/src/container/CreateAlertRule/SelectAlertType/types.ts +++ b/frontend/src/container/CreateAlertRule/SelectAlertType/types.ts @@ -4,4 +4,5 @@ export interface OptionType { title: string; selection: AlertTypes; description: string; + isBeta?: boolean; } diff --git a/frontend/src/container/CreateAlertRule/defaults.ts b/frontend/src/container/CreateAlertRule/defaults.ts index f9735e7644..44dee01d31 100644 --- a/frontend/src/container/CreateAlertRule/defaults.ts +++ b/frontend/src/container/CreateAlertRule/defaults.ts @@ 
-4,12 +4,15 @@ import { initialQueryPromQLData, PANEL_TYPES, } from 'constants/queryBuilder'; +import { AlertDetectionTypes } from 'container/FormAlertRules'; import { AlertTypes } from 'types/api/alerts/alertTypes'; import { AlertDef, + defaultAlgorithm, defaultCompareOp, defaultEvalWindow, defaultMatchType, + defaultSeasonality, } from 'types/api/alerts/def'; import { EQueryType } from 'types/common/dashboard'; @@ -46,6 +49,51 @@ export const alertDefaults: AlertDef = { }, op: defaultCompareOp, matchType: defaultMatchType, + algorithm: defaultAlgorithm, + seasonality: defaultSeasonality, + }, + labels: { + severity: 'warning', + }, + annotations: defaultAnnotations, + evalWindow: defaultEvalWindow, +}; + +export const anamolyAlertDefaults: AlertDef = { + alertType: AlertTypes.METRICS_BASED_ALERT, + version: ENTITY_VERSION_V4, + ruleType: AlertDetectionTypes.ANOMALY_DETECTION_ALERT, + condition: { + compositeQuery: { + builderQueries: { + A: { + ...initialQueryBuilderFormValuesMap.metrics, + functions: [ + { + name: 'anomaly', + args: [], + namedArgs: { z_score_threshold: 3 }, + }, + ], + }, + }, + promQueries: { A: initialQueryPromQLData }, + chQueries: { + A: { + name: 'A', + query: ``, + legend: '', + disabled: false, + }, + }, + queryType: EQueryType.QUERY_BUILDER, + panelType: PANEL_TYPES.TIME_SERIES, + unit: undefined, + }, + op: defaultCompareOp, + matchType: defaultMatchType, + algorithm: defaultAlgorithm, + seasonality: defaultSeasonality, }, labels: { severity: 'warning', @@ -56,6 +104,7 @@ export const alertDefaults: AlertDef = { export const logAlertDefaults: AlertDef = { alertType: AlertTypes.LOGS_BASED_ALERT, + version: ENTITY_VERSION_V4, condition: { compositeQuery: { builderQueries: { @@ -86,6 +135,7 @@ export const logAlertDefaults: AlertDef = { export const traceAlertDefaults: AlertDef = { alertType: AlertTypes.TRACES_BASED_ALERT, + version: ENTITY_VERSION_V4, condition: { compositeQuery: { builderQueries: { @@ -116,6 +166,7 @@ export const 
traceAlertDefaults: AlertDef = { export const exceptionAlertDefaults: AlertDef = { alertType: AlertTypes.EXCEPTIONS_BASED_ALERT, + version: ENTITY_VERSION_V4, condition: { compositeQuery: { builderQueries: { @@ -145,6 +196,7 @@ export const exceptionAlertDefaults: AlertDef = { }; export const ALERTS_VALUES_MAP: Record = { + [AlertTypes.ANOMALY_BASED_ALERT]: anamolyAlertDefaults, [AlertTypes.METRICS_BASED_ALERT]: alertDefaults, [AlertTypes.LOGS_BASED_ALERT]: logAlertDefaults, [AlertTypes.TRACES_BASED_ALERT]: traceAlertDefaults, diff --git a/frontend/src/container/CreateAlertRule/index.tsx b/frontend/src/container/CreateAlertRule/index.tsx index f7e491cd70..96e605b9b0 100644 --- a/frontend/src/container/CreateAlertRule/index.tsx +++ b/frontend/src/container/CreateAlertRule/index.tsx @@ -2,7 +2,7 @@ import { Form, Row } from 'antd'; import logEvent from 'api/common/logEvent'; import { ENTITY_VERSION_V4 } from 'constants/app'; import { QueryParams } from 'constants/query'; -import FormAlertRules from 'container/FormAlertRules'; +import FormAlertRules, { AlertDetectionTypes } from 'container/FormAlertRules'; import { useGetCompositeQueryParam } from 'hooks/queryBuilder/useGetCompositeQueryParam'; import history from 'lib/history'; import { useEffect, useState } from 'react'; @@ -13,6 +13,7 @@ import { AlertDef } from 'types/api/alerts/def'; import { ALERT_TYPE_VS_SOURCE_MAPPING } from './config'; import { alertDefaults, + anamolyAlertDefaults, exceptionAlertDefaults, logAlertDefaults, traceAlertDefaults, @@ -24,8 +25,12 @@ function CreateRules(): JSX.Element { const location = useLocation(); const queryParams = new URLSearchParams(location.search); + const alertTypeFromURL = queryParams.get(QueryParams.ruleType); const version = queryParams.get('version'); - const alertTypeFromParams = queryParams.get(QueryParams.alertType); + const alertTypeFromParams = + alertTypeFromURL === AlertDetectionTypes.ANOMALY_DETECTION_ALERT + ? 
AlertTypes.ANOMALY_BASED_ALERT + : queryParams.get(QueryParams.alertType); const compositeQuery = useGetCompositeQueryParam(); function getAlertTypeFromDataSource(): AlertTypes | null { @@ -45,6 +50,7 @@ function CreateRules(): JSX.Element { const onSelectType = (typ: AlertTypes): void => { setAlertType(typ); + switch (typ) { case AlertTypes.LOGS_BASED_ALERT: setInitValues(logAlertDefaults); @@ -55,13 +61,40 @@ function CreateRules(): JSX.Element { case AlertTypes.EXCEPTIONS_BASED_ALERT: setInitValues(exceptionAlertDefaults); break; + case AlertTypes.ANOMALY_BASED_ALERT: + setInitValues({ + ...anamolyAlertDefaults, + version: version || ENTITY_VERSION_V4, + ruleType: AlertDetectionTypes.ANOMALY_DETECTION_ALERT, + }); + break; default: setInitValues({ ...alertDefaults, version: version || ENTITY_VERSION_V4, + ruleType: AlertDetectionTypes.THRESHOLD_ALERT, }); } - queryParams.set(QueryParams.alertType, typ); + + queryParams.set( + QueryParams.alertType, + typ === AlertTypes.ANOMALY_BASED_ALERT + ? AlertTypes.METRICS_BASED_ALERT + : typ, + ); + + if ( + typ === AlertTypes.ANOMALY_BASED_ALERT || + alertTypeFromURL === AlertDetectionTypes.ANOMALY_DETECTION_ALERT + ) { + queryParams.set( + QueryParams.ruleType, + AlertDetectionTypes.ANOMALY_DETECTION_ALERT, + ); + } else { + queryParams.set(QueryParams.ruleType, AlertDetectionTypes.THRESHOLD_ALERT); + } + const generatedUrl = `${location.pathname}?${queryParams.toString()}`; history.replace(generatedUrl); }; diff --git a/frontend/src/container/EditRules/index.tsx b/frontend/src/container/EditRules/index.tsx index 206c9d2d3e..b6a32615a6 100644 --- a/frontend/src/container/EditRules/index.tsx +++ b/frontend/src/container/EditRules/index.tsx @@ -7,18 +7,16 @@ function EditRules({ initialValue, ruleId }: EditRulesProps): JSX.Element { const [formInstance] = Form.useForm(); return ( -
- -
+ ); } diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss index 54e87fa458..9efc053245 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss @@ -18,7 +18,7 @@ display: inline-flex; align-items: center; gap: 12px; - padding: 10px 12px; + padding: 10px 10px; border-radius: 50px; border: 1px solid var(--bg-slate-400); background: rgba(22, 24, 29, 0.6); @@ -33,6 +33,7 @@ border: 1px solid var(--bg-slate-400); background: var(--bg-slate-500); cursor: pointer; + box-shadow: none; } .hidden { diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx index e882dd790e..8edd0444bf 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx @@ -45,7 +45,6 @@ import { PanelBottomClose, Plus, X, - XCircle, } from 'lucide-react'; import { CSSProperties, @@ -515,7 +514,11 @@ function ExplorerOptions({ return (
- {isQueryUpdated && !isExplorerOptionHidden && ( + { + // if a viewName is selected and the explorer options are not hidden then + // always show the clear option + } + {!isExplorerOptionHidden && viewName && (
} /> - - -
)} {!isExplorerOptionHidden && ( @@ -564,10 +574,7 @@ function ExplorerOptions({ }} dropdownStyle={dropdownStyle} className="views-dropdown" - allowClear={{ - clearIcon: , - }} - onClear={handleClearSelect} + allowClear={false} ref={ref} > {viewsData?.data?.data?.map((view) => { @@ -662,8 +669,8 @@ function ExplorerOptions({
)} - - Save this view} @@ -705,7 +711,6 @@ function ExplorerOptions({ />
- {isExplorerOptionHidden && ( <> - {isQueryUpdated && ( + {viewName && (
+
+ + +
{currentQuery.queryType === EQueryType.QUERY_BUILDER && renderQBChartPreview()} {currentQuery.queryType === EQueryType.PROM && renderPromAndChQueryChartPreview()} {currentQuery.queryType === EQueryType.CLICKHOUSE && renderPromAndChQueryChartPreview()} +
- - - + + + + +
+ {alertDef.alertType === AlertTypes.METRICS_BASED_ALERT && + isAnomalyDetectionEnabled && ( +
+ {t('alert_form_step1')} + + + +
+ {detectionMethod === AlertDetectionTypes.ANOMALY_DETECTION_ALERT + ? t('anomaly_detection_alert_desc') + : t('threshold_alert_desc')} +
+
+ )} {renderBasicInfo()} - - - } - disabled={ - isAlertNameMissing || - isAlertAvailableToSave || - !isChannelConfigurationValid || - queryStatus === 'error' - } - > - {isNewRule ? t('button_createrule') : t('button_savechanges')} - - - +
+ + } disabled={ isAlertNameMissing || + isAlertAvailableToSave || !isChannelConfigurationValid || queryStatus === 'error' } - type="default" - onClick={onTestRuleHandler} - > - {' '} - {t('button_testrule')} - - - {ruleId === 0 && t('button_cancelchanges')} - {ruleId > 0 && t('button_discard')} + {isNewRule ? t('button_createrule') : t('button_savechanges')} - -
- - - -
- - -
- - + {' '} + {t('button_testrule')} + + + {ruleId === 0 && t('button_cancelchanges')} + {ruleId > 0 && t('button_discard')} + + + +
); } diff --git a/frontend/src/container/FormAlertRules/styles.ts b/frontend/src/container/FormAlertRules/styles.ts index 11205c0ab4..d282a484a2 100644 --- a/frontend/src/container/FormAlertRules/styles.ts +++ b/frontend/src/container/FormAlertRules/styles.ts @@ -1,13 +1,9 @@ -import { Button, Card, Col, Form, Input, Row, Select, Typography } from 'antd'; +import { Button, Card, Col, Form, Input, Select, Typography } from 'antd'; import styled from 'styled-components'; const { TextArea } = Input; const { Item } = Form; -export const PanelContainer = styled(Row)` - flex-wrap: nowrap; -`; - export const StyledLeftContainer = styled(Col)` &&& { margin-right: 1rem; diff --git a/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx b/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx index 6d7839aa4f..28d869de66 100644 --- a/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx +++ b/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx @@ -18,6 +18,7 @@ import { QueryParams } from 'constants/query'; import { PANEL_TYPES } from 'constants/queryBuilder'; import useCreateAlerts from 'hooks/queryBuilder/useCreateAlerts'; import useComponentPermission from 'hooks/useComponentPermission'; +import useUrlQuery from 'hooks/useUrlQuery'; import history from 'lib/history'; import { RowData } from 'lib/query/createTableColumnsFromQuery'; import { isEmpty } from 'lodash-es'; @@ -72,16 +73,18 @@ function WidgetHeader({ tableProcessedDataRef, setSearchTerm, }: IWidgetHeaderProps): JSX.Element | null { + const urlQuery = useUrlQuery(); const onEditHandler = useCallback((): void => { const widgetId = widget.id; - history.push( - `${window.location.pathname}/new?widgetId=${widgetId}&graphType=${ - widget.panelTypes - }&${QueryParams.compositeQuery}=${encodeURIComponent( - JSON.stringify(widget.query), - )}`, + urlQuery.set(QueryParams.widgetId, widgetId); + urlQuery.set(QueryParams.graphType, widget.panelTypes); + urlQuery.set( + 
QueryParams.compositeQuery, + encodeURIComponent(JSON.stringify(widget.query)), ); - }, [widget.id, widget.panelTypes, widget.query]); + const generatedUrl = `${window.location.pathname}/new?${urlQuery}`; + history.push(generatedUrl); + }, [urlQuery, widget.id, widget.panelTypes, widget.query]); const onCreateAlertsHandler = useCreateAlerts(widget, 'dashboardView'); diff --git a/frontend/src/container/GridTableComponent/index.tsx b/frontend/src/container/GridTableComponent/index.tsx index dfa90b8255..63084be5f3 100644 --- a/frontend/src/container/GridTableComponent/index.tsx +++ b/frontend/src/container/GridTableComponent/index.tsx @@ -97,13 +97,19 @@ function GridTableComponent({ const newColumnData = columns.map((e) => ({ ...e, - render: (text: string): ReactNode => { - const isNumber = !Number.isNaN(Number(text)); + render: (text: string, ...rest: any): ReactNode => { + let textForThreshold = text; + if (columnUnits && columnUnits?.[e.title as string]) { + textForThreshold = rest[0][`${e.title}_without_unit`]; + } + const isNumber = !Number.isNaN(Number(textForThreshold)); + if (thresholds && isNumber) { const { hasMultipleMatches, threshold } = findMatchingThreshold( thresholds, e.title as string, - Number(text), + Number(textForThreshold), + columnUnits?.[e.title as string], ); const idx = thresholds.findIndex( diff --git a/frontend/src/container/GridTableComponent/utils.ts b/frontend/src/container/GridTableComponent/utils.ts index acd58af62d..52a4a7810b 100644 --- a/frontend/src/container/GridTableComponent/utils.ts +++ b/frontend/src/container/GridTableComponent/utils.ts @@ -1,5 +1,6 @@ /* eslint-disable sonarjs/cognitive-complexity */ import { ColumnsType, ColumnType } from 'antd/es/table'; +import { convertUnit } from 'container/NewWidget/RightContainer/dataFormatCategories'; import { ThresholdProps } from 'container/NewWidget/RightContainer/Threshold/types'; import { QUERY_TABLE_CONFIG } from 'container/QueryTable/config'; import { QueryTableProps } from 
'container/QueryTable/QueryTable.intefaces'; @@ -30,10 +31,39 @@ function evaluateCondition( } } +/** + * Evaluates whether a given value meets a specified threshold condition. + * It first converts the value to the appropriate unit if a threshold unit is provided, + * and then checks the condition using the specified operator. + * + * @param value - The value to be evaluated. + * @param thresholdValue - The threshold value to compare against. + * @param thresholdOperator - The operator used for comparison (e.g., '>', '<', '=='). + * @param thresholdUnit - The unit to which the value should be converted. + * @param columnUnit - The current unit of the value. + * @returns A boolean indicating whether the value meets the threshold condition. + */ +function evaluateThresholdWithConvertedValue( + value: number, + thresholdValue: number, + thresholdOperator?: string, + thresholdUnit?: string, + columnUnit?: string, +): boolean { + const convertedValue = convertUnit(value, columnUnit, thresholdUnit); + + if (convertedValue) { + return evaluateCondition(thresholdOperator, convertedValue, thresholdValue); + } + + return evaluateCondition(thresholdOperator, value, thresholdValue); +} + export function findMatchingThreshold( thresholds: ThresholdProps[], label: string, value: number, + columnUnit?: string, ): { threshold: ThresholdProps; hasMultipleMatches: boolean; @@ -45,10 +75,12 @@ export function findMatchingThreshold( if ( threshold.thresholdValue !== undefined && threshold.thresholdTableOptions === label && - evaluateCondition( - threshold.thresholdOperator, + evaluateThresholdWithConvertedValue( value, - threshold.thresholdValue, + threshold?.thresholdValue, + threshold.thresholdOperator, + threshold.thresholdUnit, + columnUnit, ) ) { matchingThresholds.push(threshold); diff --git a/frontend/src/container/ListAlertRules/ListAlert.tsx b/frontend/src/container/ListAlertRules/ListAlert.tsx index 424d98ddd9..8e18efdb20 100644 --- 
a/frontend/src/container/ListAlertRules/ListAlert.tsx +++ b/frontend/src/container/ListAlertRules/ListAlert.tsx @@ -5,7 +5,6 @@ import type { ColumnsType } from 'antd/es/table/interface'; import saveAlertApi from 'api/alerts/save'; import logEvent from 'api/common/logEvent'; import DropDown from 'components/DropDown/DropDown'; -import { listAlertMessage } from 'components/LaunchChatSupport/util'; import { DynamicColumnsKey, TableDataSource, @@ -397,15 +396,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element { dynamicColumns={dynamicColumns} onChange={handleChange} pagination={paginationConfig} - facingIssueBtn={{ - attributes: { - screen: 'Alert list page', - }, - eventName: 'Alert: Facing Issues in alert', - buttonText: 'Facing issues with alerts?', - message: listAlertMessage, - onHoverText: 'Click here to get help with alerts', - }} /> ); diff --git a/frontend/src/container/ListOfDashboard/DashboardList.styles.scss b/frontend/src/container/ListOfDashboard/DashboardList.styles.scss index 6a5a148180..21d4a5e20d 100644 --- a/frontend/src/container/ListOfDashboard/DashboardList.styles.scss +++ b/frontend/src/container/ListOfDashboard/DashboardList.styles.scss @@ -5,6 +5,17 @@ justify-content: center; width: 100%; + // overridding the request integration style to fix the spacing for dashboard list + .request-entity-container { + margin-bottom: 16px !important; + margin-top: 0 !important; + } + + .integrations-content { + max-width: 100% !important; + width: 100% !important; + } + .dashboards-list-view-content { width: calc(100% - 30px); max-width: 836px; diff --git a/frontend/src/container/ListOfDashboard/DashboardsList.tsx b/frontend/src/container/ListOfDashboard/DashboardsList.tsx index 9908374a1b..5f6e5337b0 100644 --- a/frontend/src/container/ListOfDashboard/DashboardsList.tsx +++ b/frontend/src/container/ListOfDashboard/DashboardsList.tsx @@ -25,8 +25,6 @@ import logEvent from 'api/common/logEvent'; import createDashboard from 
'api/dashboard/create'; import { AxiosError } from 'axios'; import cx from 'classnames'; -import LaunchChatSupport from 'components/LaunchChatSupport/LaunchChatSupport'; -import { dashboardListMessage } from 'components/LaunchChatSupport/util'; import { ENTITY_VERSION_V4 } from 'constants/app'; import ROUTES from 'constants/routes'; import { Base64Icons } from 'container/NewDashboard/DashboardSettings/General/utils'; @@ -79,6 +77,7 @@ import { isCloudUser } from 'utils/app'; import DashboardTemplatesModal from './DashboardTemplates/DashboardTemplatesModal'; import ImportJSON from './ImportJSON'; +import { RequestDashboardBtn } from './RequestDashboardBtn'; import { DeleteButton } from './TableComponents/DeleteButton'; import { DashboardDynamicColumns, @@ -693,17 +692,14 @@ function DashboardsList(): JSX.Element { Create and manage dashboards for your workspace. - + {isCloudUser() && ( +
+
+ +
+
+ )}
{isDashboardListLoading || diff --git a/frontend/src/container/ListOfDashboard/ImportJSON/index.tsx b/frontend/src/container/ListOfDashboard/ImportJSON/index.tsx index 62767e6799..6926db85be 100644 --- a/frontend/src/container/ListOfDashboard/ImportJSON/index.tsx +++ b/frontend/src/container/ListOfDashboard/ImportJSON/index.tsx @@ -82,6 +82,12 @@ function ImportJSON({ const dashboardData = JSON.parse(editorValue) as DashboardData; + // Add validation for uuid + if (dashboardData.uuid !== undefined && dashboardData.uuid.trim() === '') { + // silently remove uuid if it is empty + delete dashboardData.uuid; + } + if (dashboardData?.layout) { dashboardData.layout = getUpdatedLayout(dashboardData.layout); } else { @@ -123,11 +129,14 @@ function ImportJSON({ }); } setDashboardCreating(false); - } catch { + } catch (error) { setDashboardCreating(false); setIsFeatureAlert(false); setIsCreateDashboardError(true); + notifications.error({ + message: error instanceof Error ? error.message : t('error_loading_json'), + }); } }; diff --git a/frontend/src/container/ListOfDashboard/RequestDashboardBtn.tsx b/frontend/src/container/ListOfDashboard/RequestDashboardBtn.tsx new file mode 100644 index 0000000000..a2e09463ac --- /dev/null +++ b/frontend/src/container/ListOfDashboard/RequestDashboardBtn.tsx @@ -0,0 +1,95 @@ +import '../../pages/Integrations/Integrations.styles.scss'; + +import { LoadingOutlined } from '@ant-design/icons'; +import { Button, Input, Space, Typography } from 'antd'; +import logEvent from 'api/common/logEvent'; +import { useNotifications } from 'hooks/useNotifications'; +import { Check } from 'lucide-react'; +import { useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +export function RequestDashboardBtn(): JSX.Element { + const [ + isSubmittingRequestForDashboard, + setIsSubmittingRequestForDashboard, + ] = useState(false); + + const [requestedDashboardName, setRequestedDashboardName] = useState(''); + + const { notifications } = 
useNotifications(); + const { t } = useTranslation(['common']); + + const handleRequestDashboardSubmit = async (): Promise => { + try { + setIsSubmittingRequestForDashboard(true); + const response = await logEvent('Dashboard Requested', { + screen: 'Dashboard list page', + dashboard: requestedDashboardName, + }); + + if (response.statusCode === 200) { + notifications.success({ + message: 'Dashboard Request Submitted', + }); + + setIsSubmittingRequestForDashboard(false); + } else { + notifications.error({ + message: + response.error || + t('something_went_wrong', { + ns: 'common', + }), + }); + + setIsSubmittingRequestForDashboard(false); + } + } catch (error) { + notifications.error({ + message: t('something_went_wrong', { + ns: 'common', + }), + }); + + setIsSubmittingRequestForDashboard(false); + } + }; + + return ( +
+ + Can't find the dashboard you need? Request a new Dashboard. + + +
+ + setRequestedDashboardName(e.target.value)} + /> + + +
+
+ ); +} diff --git a/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.styles.scss b/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.styles.scss index 6b0593def3..1dba778569 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.styles.scss +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.styles.scss @@ -2,3 +2,25 @@ cursor: pointer; position: relative; } + +.table-row-backdrop { + &.INFO { + background-color: var(--bg-robin-500) 10; + } + &.WARNING, + &.WARN { + background-color: var(--bg-amber-500) 10; + } + &.ERROR { + background-color: var(--bg-cherry-500) 10; + } + &.TRACE { + background-color: var(--bg-forest-400) 10; + } + &.DEBUG { + background-color: var(--bg-aqua-500) 10; + } + &.FATAL { + background-color: var(--bg-sakura-500) 10; + } +} diff --git a/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx b/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx index fe2d2ba1d4..3cc216962e 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx @@ -1,5 +1,6 @@ import LogDetail from 'components/LogDetail'; import { VIEW_TYPES } from 'components/LogDetail/constants'; +import { getLogIndicatorType } from 'components/Logs/LogStateIndicator/utils'; import { useTableView } from 'components/Logs/TableView/useTableView'; import { LOCALSTORAGE } from 'constants/localStorage'; import { useActiveLog } from 'hooks/logs/useActiveLog'; @@ -21,6 +22,11 @@ import { TableHeaderCellStyled, TableRowStyled } from './styles'; import TableRow from './TableRow'; import { InfinityTableProps } from './types'; +interface CustomTableRowProps { + activeContextLogId: string; + activeLogId: string; +} + // eslint-disable-next-line react/function-component-definition const CustomTableRow: TableComponents['TableRow'] = ({ children, @@ -31,10 +37,17 @@ const CustomTableRow: 
TableComponents['TableRow'] = ({ const isDarkMode = useIsDarkMode(); + const logType = getLogIndicatorType(props.item); + return ( @@ -66,8 +79,6 @@ const InfinityTable = forwardRef( ...tableViewProps, onClickExpand: onSetActiveLog, onOpenLogsContext: handleSetActiveContextLog, - activeLog, - activeContextLog, }); const { draggedColumns, onDragColumns } = useDragColumns< @@ -153,7 +164,14 @@ const InfinityTable = forwardRef( // TODO: fix it in the future // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore - TableRow: CustomTableRow, + TableRow: (props): any => + CustomTableRow({ + ...props, + context: { + activeContextLogId: activeContextLog?.id, + activeLogId: activeLog?.id, + }, + } as any), }} itemContent={itemContent} fixedHeaderContent={tableHeader} diff --git a/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts b/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts index 89c9592dd4..787b4c9105 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts @@ -1,5 +1,4 @@ /* eslint-disable no-nested-ternary */ -import { Color } from '@signozhq/design-tokens'; import { themeColors } from 'constants/theme'; import { FontSize } from 'container/OptionsMenu/types'; import styled from 'styled-components'; @@ -37,13 +36,12 @@ export const TableCellStyled = styled.td` export const TableRowStyled = styled.tr<{ $isActiveLog: boolean; $isDarkMode: boolean; + $logType: string; }>` td { - ${({ $isActiveLog, $isDarkMode }): string => + ${({ $isActiveLog, $isDarkMode, $logType }): string => $isActiveLog - ? `background-color: ${ - $isDarkMode ? Color.BG_SLATE_500 : Color.BG_VANILLA_300 - } !important` + ? 
getActiveLogBackground($isActiveLog, $isDarkMode, $logType) : ''}; } diff --git a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx index d6f63165d5..151fba7609 100644 --- a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx +++ b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx @@ -12,8 +12,6 @@ import { Typography, } from 'antd'; import logEvent from 'api/common/logEvent'; -import LaunchChatSupport from 'components/LaunchChatSupport/LaunchChatSupport'; -import { dashboardHelpMessage } from 'components/LaunchChatSupport/util'; import { SOMETHING_WENT_WRONG } from 'constants/api'; import { QueryParams } from 'constants/query'; import { PANEL_GROUP_TYPES, PANEL_TYPES } from 'constants/queryBuilder'; @@ -47,7 +45,11 @@ import { useTranslation } from 'react-i18next'; import { useSelector } from 'react-redux'; import { useCopyToClipboard } from 'react-use'; import { AppState } from 'store/reducers'; -import { Dashboard, DashboardData } from 'types/api/dashboard/getAll'; +import { + Dashboard, + DashboardData, + IDashboardVariable, +} from 'types/api/dashboard/getAll'; import AppReducer from 'types/reducer/app'; import { ROLES, USER_ROLES } from 'types/roles'; import { ComponentTypes } from 'utils/permission'; @@ -63,6 +65,30 @@ interface DashboardDescriptionProps { handle: FullScreenHandle; } +function sanitizeDashboardData( + selectedData: DashboardData, +): Omit { + if (!selectedData?.variables) { + const { uuid, ...rest } = selectedData; + return rest; + } + + const updatedVariables = Object.entries(selectedData.variables).reduce( + (acc, [key, value]) => { + const { selectedValue, ...rest } = value; + acc[key] = rest; + return acc; + }, + {} as Record, + ); + + const { uuid, ...restData } = selectedData; + return { + ...restData, + variables: updatedVariables, + }; +} + // eslint-disable-next-line sonarjs/cognitive-complexity function 
DashboardDescription(props: DashboardDescriptionProps): JSX.Element { const { handle } = props; @@ -328,18 +354,6 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element { {isDashboardLocked && }
- } onClick={(): void => { - downloadObjectAsJson(selectedData, selectedData.title); + downloadObjectAsJson( + sanitizeDashboardData(selectedData), + selectedData.title, + ); setIsDashbordSettingsOpen(false); }} > @@ -417,7 +434,9 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element { type="text" icon={} onClick={(): void => { - setCopy(JSON.stringify(selectedData, null, 2)); + setCopy( + JSON.stringify(sanitizeDashboardData(selectedData), null, 2), + ); setIsDashbordSettingsOpen(false); }} > diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx index a0a444a715..398ade8259 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx @@ -257,8 +257,7 @@ function VariableItem({ if (variableData.name) { if ( value === ALL_SELECT_VALUE || - (Array.isArray(value) && value.includes(ALL_SELECT_VALUE)) || - (Array.isArray(value) && value.length === 0) + (Array.isArray(value) && value.includes(ALL_SELECT_VALUE)) ) { onValueUpdate(variableData.name, variableData.id, optionsData, true); } else { @@ -324,10 +323,6 @@ function VariableItem({ Array.isArray(selectedValueStringified) && selectedValueStringified.includes(option.toString()) ) { - if (newSelectedValue.length === 0) { - handleChange(ALL_SELECT_VALUE); - return; - } if (newSelectedValue.length === 1) { handleChange(newSelectedValue[0].toString()); return; diff --git a/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx b/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx index d98bd25e1c..e22727add1 100644 --- a/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx +++ b/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx @@ -4,7 +4,6 @@ import { Color } from 
'@signozhq/design-tokens'; import { Button, Tabs, Typography } from 'antd'; import logEvent from 'api/common/logEvent'; import PromQLIcon from 'assets/Dashboard/PromQl'; -import LaunchChatSupport from 'components/LaunchChatSupport/LaunchChatSupport'; import TextToolTip from 'components/TextToolTip'; import { PANEL_TYPES } from 'constants/queryBuilder'; import { QBShortcuts } from 'constants/shortcuts/QBShortcuts'; @@ -235,21 +234,6 @@ function QuerySection({ onChange={handleQueryCategoryChange} tabBarExtraContent={ - (isEditEnabled); const [operator, setOperator] = useState( @@ -192,6 +194,13 @@ function Threshold({ const allowDragAndDrop = panelTypeVsDragAndDrop[selectedGraph]; + const isInvalidUnitComparison = useMemo( + () => + unit !== 'none' && + convertUnit(value, unit, columnUnits?.[tableSelectedOption]) === null, + [unit, value, columnUnits, tableSelectedOption], + ); + return (
)}
+ {isInvalidUnitComparison && ( + + Threshold unit ({unit}) is not valid in comparison with the column unit ( + {columnUnits?.[tableSelectedOption] || 'none'}) + + )} {isEditMode && (
diff --git a/frontend/src/container/NewWidget/RightContainer/Threshold/types.ts b/frontend/src/container/NewWidget/RightContainer/Threshold/types.ts index 820a621d84..6f09f2136e 100644 --- a/frontend/src/container/NewWidget/RightContainer/Threshold/types.ts +++ b/frontend/src/container/NewWidget/RightContainer/Threshold/types.ts @@ -1,5 +1,6 @@ import { PANEL_TYPES } from 'constants/queryBuilder'; import { Dispatch, ReactNode, SetStateAction } from 'react'; +import { ColumnUnit } from 'types/api/dashboard/getAll'; export type ThresholdOperators = '>' | '<' | '>=' | '<=' | '='; @@ -19,6 +20,7 @@ export type ThresholdProps = { moveThreshold: (dragIndex: number, hoverIndex: number) => void; selectedGraph: PANEL_TYPES; tableOptions?: Array<{ value: string; label: string }>; + columnUnits?: ColumnUnit; }; export type ShowCaseValueProps = { @@ -36,4 +38,5 @@ export type ThresholdSelectorProps = { thresholds: ThresholdProps[]; setThresholds: Dispatch>; selectedGraph: PANEL_TYPES; + columnUnits: ColumnUnit; }; diff --git a/frontend/src/container/NewWidget/RightContainer/constants.ts b/frontend/src/container/NewWidget/RightContainer/constants.ts index 03cee96d21..1fad229ed6 100644 --- a/frontend/src/container/NewWidget/RightContainer/constants.ts +++ b/frontend/src/container/NewWidget/RightContainer/constants.ts @@ -1,8 +1,5 @@ import { DefaultOptionType } from 'antd/es/select'; import { PANEL_TYPES } from 'constants/queryBuilder'; -import { categoryToSupport } from 'container/QueryBuilder/filters/BuilderUnitsFilter/config'; - -import { getCategorySelectOptionByName } from './alertFomatCategories'; export const operatorOptions: DefaultOptionType[] = [ { value: '>', label: '>' }, @@ -11,11 +8,6 @@ export const operatorOptions: DefaultOptionType[] = [ { value: '<=', label: '<=' }, ]; -export const unitOptions = categoryToSupport.map((category) => ({ - label: category, - options: getCategorySelectOptionByName(category), -})); - export const showAsOptions: DefaultOptionType[] = 
[ { value: 'Text', label: 'Text' }, { value: 'Background', label: 'Background' }, diff --git a/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts b/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts index c1b4944d53..ea5bc2ab55 100644 --- a/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts +++ b/frontend/src/container/NewWidget/RightContainer/dataFormatCategories.ts @@ -438,3 +438,168 @@ export const dataTypeCategories: DataTypeCategories = [ export const flattenedCategories = flattenDeep( dataTypeCategories.map((category) => category.formats), ); + +type ConversionFactors = { + [key: string]: { + [key: string]: number | null; + }; +}; + +// Object containing conversion factors for various categories and formats +const conversionFactors: ConversionFactors = { + [CategoryNames.Time]: { + [TimeFormats.Hertz]: 1, + [TimeFormats.Nanoseconds]: 1e-9, + [TimeFormats.Microseconds]: 1e-6, + [TimeFormats.Milliseconds]: 1e-3, + [TimeFormats.Seconds]: 1, + [TimeFormats.Minutes]: 60, + [TimeFormats.Hours]: 3600, + [TimeFormats.Days]: 86400, + [TimeFormats.DurationMs]: 1e-3, + [TimeFormats.DurationS]: 1, + [TimeFormats.DurationHms]: null, // Requires special handling + [TimeFormats.DurationDhms]: null, // Requires special handling + [TimeFormats.Timeticks]: null, // Requires special handling + [TimeFormats.ClockMs]: 1e-3, + [TimeFormats.ClockS]: 1, + }, + [CategoryNames.Throughput]: { + [ThroughputFormats.CountsPerSec]: 1, + [ThroughputFormats.OpsPerSec]: 1, + [ThroughputFormats.RequestsPerSec]: 1, + [ThroughputFormats.ReadsPerSec]: 1, + [ThroughputFormats.WritesPerSec]: 1, + [ThroughputFormats.IOOpsPerSec]: 1, + [ThroughputFormats.CountsPerMin]: 1 / 60, + [ThroughputFormats.OpsPerMin]: 1 / 60, + [ThroughputFormats.ReadsPerMin]: 1 / 60, + [ThroughputFormats.WritesPerMin]: 1 / 60, + }, + [CategoryNames.Data]: { + [DataFormats.BytesIEC]: 1, + [DataFormats.BytesSI]: 1, + [DataFormats.BitsIEC]: 0.125, + 
[DataFormats.BitsSI]: 0.125, + [DataFormats.KibiBytes]: 1024, + [DataFormats.KiloBytes]: 1000, + [DataFormats.MebiBytes]: 1048576, + [DataFormats.MegaBytes]: 1000000, + [DataFormats.GibiBytes]: 1073741824, + [DataFormats.GigaBytes]: 1000000000, + [DataFormats.TebiBytes]: 1099511627776, + [DataFormats.TeraBytes]: 1000000000000, + [DataFormats.PebiBytes]: 1125899906842624, + [DataFormats.PetaBytes]: 1000000000000000, + }, + [CategoryNames.DataRate]: { + [DataRateFormats.PacketsPerSec]: null, // Cannot convert directly to other data rates + [DataRateFormats.BytesPerSecIEC]: 1, + [DataRateFormats.BytesPerSecSI]: 1, + [DataRateFormats.BitsPerSecIEC]: 0.125, + [DataRateFormats.BitsPerSecSI]: 0.125, + [DataRateFormats.KibiBytesPerSec]: 1024, + [DataRateFormats.KibiBitsPerSec]: 128, + [DataRateFormats.KiloBytesPerSec]: 1000, + [DataRateFormats.KiloBitsPerSec]: 125, + [DataRateFormats.MebiBytesPerSec]: 1048576, + [DataRateFormats.MebiBitsPerSec]: 131072, + [DataRateFormats.MegaBytesPerSec]: 1000000, + [DataRateFormats.MegaBitsPerSec]: 125000, + [DataRateFormats.GibiBytesPerSec]: 1073741824, + [DataRateFormats.GibiBitsPerSec]: 134217728, + [DataRateFormats.GigaBytesPerSec]: 1000000000, + [DataRateFormats.GigaBitsPerSec]: 125000000, + [DataRateFormats.TebiBytesPerSec]: 1099511627776, + [DataRateFormats.TebiBitsPerSec]: 137438953472, + [DataRateFormats.TeraBytesPerSec]: 1000000000000, + [DataRateFormats.TeraBitsPerSec]: 125000000000, + [DataRateFormats.PebiBytesPerSec]: 1125899906842624, + [DataRateFormats.PebiBitsPerSec]: 140737488355328, + [DataRateFormats.PetaBytesPerSec]: 1000000000000000, + [DataRateFormats.PetaBitsPerSec]: 125000000000000, + }, + [CategoryNames.Miscellaneous]: { + [MiscellaneousFormats.None]: null, + [MiscellaneousFormats.String]: null, + [MiscellaneousFormats.Short]: null, + [MiscellaneousFormats.Percent]: 1, + [MiscellaneousFormats.PercentUnit]: 100, + [MiscellaneousFormats.Humidity]: 1, + [MiscellaneousFormats.Decibel]: null, + 
[MiscellaneousFormats.Hexadecimal0x]: null, + [MiscellaneousFormats.Hexadecimal]: null, + [MiscellaneousFormats.ScientificNotation]: null, + [MiscellaneousFormats.LocaleFormat]: null, + [MiscellaneousFormats.Pixels]: null, + }, + [CategoryNames.Boolean]: { + [BooleanFormats.TRUE_FALSE]: null, // Not convertible + [BooleanFormats.YES_NO]: null, // Not convertible + [BooleanFormats.ON_OFF]: null, // Not convertible + }, +}; + +// Function to get the conversion factor between two units in a specific category +function getConversionFactor( + fromUnit: string, + toUnit: string, + category: CategoryNames, +): number | null { + // Retrieves the conversion factors for the specified category + const categoryFactors = conversionFactors[category]; + if (!categoryFactors) { + return null; // Returns null if the category does not exist + } + const fromFactor = categoryFactors[fromUnit]; + const toFactor = categoryFactors[toUnit]; + if ( + fromFactor === undefined || + toFactor === undefined || + fromFactor === null || + toFactor === null + ) { + return null; // Returns null if either unit does not exist or is not convertible + } + return fromFactor / toFactor; // Returns the conversion factor ratio +} + +// Function to convert a value from one unit to another +export function convertUnit( + value: number, + fromUnitId?: string, + toUnitId?: string, +): number | null { + let fromUnit: string | undefined; + let toUnit: string | undefined; + + // Finds the category that contains the specified units and extracts fromUnit and toUnit using array methods + const category = dataTypeCategories.find((category) => + category.formats.some((format) => { + if (format.id === fromUnitId) fromUnit = format.id; + if (format.id === toUnitId) toUnit = format.id; + return fromUnit && toUnit; // Break out early if both units are found + }), + ); + + if (!category || !fromUnit || !toUnit) return null; // Return null if category or units are not found + + // Gets the conversion factor for the 
specified units + const conversionFactor = getConversionFactor( + fromUnit, + toUnit, + category.name as any, + ); + if (conversionFactor === null) return null; // Return null if conversion is not possible + + return value * conversionFactor; +} + +// Function to get the category name for a given unit ID +export const getCategoryName = (unitId: string): CategoryNames | null => { + // Finds the category that contains the specified unit ID + const foundCategory = dataTypeCategories.find((category) => + category.formats.some((format) => format.id === unitId), + ); + return foundCategory ? (foundCategory.name as CategoryNames) : null; +}; diff --git a/frontend/src/container/NewWidget/RightContainer/index.tsx b/frontend/src/container/NewWidget/RightContainer/index.tsx index 43e3b5611d..55968c5aee 100644 --- a/frontend/src/container/NewWidget/RightContainer/index.tsx +++ b/frontend/src/container/NewWidget/RightContainer/index.tsx @@ -311,6 +311,7 @@ function RightContainer({ setThresholds={setThresholds} yAxisUnit={yAxisUnit} selectedGraph={selectedGraph} + columnUnits={columnUnits} /> )} diff --git a/frontend/src/container/NewWidget/utils.ts b/frontend/src/container/NewWidget/utils.ts index f8eef3157d..cb498ef932 100644 --- a/frontend/src/container/NewWidget/utils.ts +++ b/frontend/src/container/NewWidget/utils.ts @@ -1,3 +1,4 @@ +import { DefaultOptionType } from 'antd/es/select'; import { omitIdFromQuery } from 'components/ExplorerCard/utils'; import { initialQueryBuilderFormValuesMap, @@ -8,12 +9,19 @@ import { listViewInitialTraceQuery, PANEL_TYPES_INITIAL_QUERY, } from 'container/NewDashboard/ComponentsSlider/constants'; -import { cloneDeep, isEqual, set, unset } from 'lodash-es'; +import { categoryToSupport } from 'container/QueryBuilder/filters/BuilderUnitsFilter/config'; +import { cloneDeep, isEmpty, isEqual, set, unset } from 'lodash-es'; import { Widgets } from 'types/api/dashboard/getAll'; import { IBuilderQuery, Query } from 
'types/api/queryBuilder/queryBuilderData'; import { EQueryType } from 'types/common/dashboard'; import { DataSource } from 'types/common/queryBuilder'; +import { + dataTypeCategories, + getCategoryName, +} from './RightContainer/dataFormatCategories'; +import { CategoryNames } from './RightContainer/types'; + export const getIsQueryModified = ( currentQuery: Query, stagedQuery: Query | null, @@ -529,3 +537,41 @@ export const PANEL_TYPE_TO_QUERY_TYPES: Record = { EQueryType.PROM, ], }; + +/** + * Retrieves a list of category select options based on the provided category name. + * If the category is found, it maps the formats to an array of objects containing + * the label and value for each format. + */ +export const getCategorySelectOptionByName = ( + name?: CategoryNames | string, +): DefaultOptionType[] => + dataTypeCategories + .find((category) => category.name === name) + ?.formats.map((format) => ({ + label: format.name, + value: format.id, + })) || []; + +/** + * Generates unit options based on the provided column unit. + * It first retrieves the category name associated with the column unit. + * If the category is empty, it maps all supported categories to their respective + * select options. If a valid category is found, it filters the supported categories + * to return only the options for the matched category. 
+ */ +export const unitOptions = (columnUnit: string): DefaultOptionType[] => { + const category = getCategoryName(columnUnit); + if (isEmpty(category)) { + return categoryToSupport.map((category) => ({ + label: category, + options: getCategorySelectOptionByName(category), + })); + } + return categoryToSupport + .filter((supportedCategory) => supportedCategory === category) + .map((filteredCategory) => ({ + label: filteredCategory, + options: getCategorySelectOptionByName(filteredCategory), + })); +}; diff --git a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/Http/md-docs/httpJsonPayload.md b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/Http/md-docs/httpJsonPayload.md index c74221dd7a..254bb815e2 100644 --- a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/Http/md-docs/httpJsonPayload.md +++ b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/Http/md-docs/httpJsonPayload.md @@ -60,7 +60,7 @@ This is a **sample cURL request** which can be used as a template:   ```bash -curl --location 'https://ingest.{{REGION}}.signoz.cloud:443/logs/json/' \ +curl --location 'https://ingest.{{REGION}}.signoz.cloud:443/logs/json' \ --header 'Content-Type: application/json' \ --header 'signoz-access-token: {{SIGNOZ_INGESTION_KEY}}' \ --data '[ diff --git a/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx b/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx index a341d5db55..d74a6f6ea4 100644 --- a/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx +++ b/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx @@ -13,7 +13,7 @@ import { IBuilderQuery, QueryFunctionProps, } from 'types/api/queryBuilder/queryBuilderData'; -import { DataSource } from 'types/common/queryBuilder'; +import { DataSource, QueryFunctionsTypes } from 'types/common/queryBuilder'; interface FunctionProps { query: IBuilderQuery; @@ -33,7 +33,7 @@ 
export default function Function({ handleDeleteFunction, }: FunctionProps): JSX.Element { const isDarkMode = useIsDarkMode(); - const { showInput } = queryFunctionsTypesConfig[funcData.name]; + const { showInput, disabled } = queryFunctionsTypesConfig[funcData.name]; let functionValue; @@ -57,11 +57,19 @@ export default function Function({ ? logsQueryFunctionOptions : metricQueryFunctionOptions; + const disableRemoveFunction = funcData.name === QueryFunctionsTypes.ANOMALY; + + if (funcData.name === QueryFunctionsTypes.ANOMALY) { + // eslint-disable-next-line react/jsx-no-useless-fragment + return <>; + } + return ( All Integrations - {loading ? ( diff --git a/frontend/src/pages/Integrations/Integrations.styles.scss b/frontend/src/pages/Integrations/Integrations.styles.scss index fe0eaa3c23..738f5153f9 100644 --- a/frontend/src/pages/Integrations/Integrations.styles.scss +++ b/frontend/src/pages/Integrations/Integrations.styles.scss @@ -15,7 +15,6 @@ font-style: normal; line-height: 28px; /* 155.556% */ letter-spacing: -0.09px; - font-family: Inter; font-weight: 500; } @@ -25,7 +24,6 @@ font-style: normal; line-height: 20px; /* 142.857% */ letter-spacing: -0.07px; - font-family: Inter; font-weight: 400; } @@ -129,7 +127,6 @@ .heading { color: var(--bg-vanilla-100); - font-family: Inter; font-size: 14px; font-style: normal; font-weight: 500; @@ -140,7 +137,6 @@ .description { color: var(--bg-vanilla-400); - font-family: Inter; font-size: 12px; font-style: normal; font-weight: 400; @@ -163,7 +159,6 @@ background: var(--bg-ink-200); box-shadow: none; color: var(--bg-vanilla-400); - font-family: Inter; font-size: 12px; font-style: normal; font-weight: 400; diff --git a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts index b0855ce524..d215b1025e 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts +++ b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts @@ -10,7 +10,7 @@ 
import { DataSource } from 'types/common/queryBuilder'; import { v4 as uuid } from 'uuid'; export const KAFKA_SETUP_DOC_LINK = - 'https://github.com/shivanshuraj1333/kafka-opentelemetry-instrumentation/tree/master'; + 'https://signoz.io/docs/messaging-queues/kafka?utm_source=product&utm_medium=kafka-get-started'; export function convertToTitleCase(text: string): string { return text diff --git a/frontend/src/pages/SignUp/SignUp.tsx b/frontend/src/pages/SignUp/SignUp.tsx index 84329a1626..4917b0fe2d 100644 --- a/frontend/src/pages/SignUp/SignUp.tsx +++ b/frontend/src/pages/SignUp/SignUp.tsx @@ -303,7 +303,6 @@ function SignUp({ version }: SignUpProps): JSX.Element { return ( loading || !values.email || - !values.organizationName || (!precheck.sso && (!values.password || !values.confirmPassword)) || (!isDetailsDisable && !values.firstName) || confirmPasswordError || @@ -354,7 +353,6 @@ function SignUp({ version }: SignUpProps): JSX.Element { diff --git a/frontend/src/periscope/components/Tabs2/Tabs2.styles.scss b/frontend/src/periscope/components/Tabs2/Tabs2.styles.scss index 59b5156cdd..3c3293ebd9 100644 --- a/frontend/src/periscope/components/Tabs2/Tabs2.styles.scss +++ b/frontend/src/periscope/components/Tabs2/Tabs2.styles.scss @@ -11,8 +11,8 @@ gap: 10px; color: var(--text-vanilla-400); background: var(--bg-ink-400); - font-size: 14px; - line-height: 20px; + font-size: 13px; + line-height: 18px; letter-spacing: -0.07px; padding: 6px 24px; border-color: var(--bg-slate-400); diff --git a/frontend/src/periscope/components/Tabs2/Tabs2.tsx b/frontend/src/periscope/components/Tabs2/Tabs2.tsx index 051d80365e..0e2bfd7af1 100644 --- a/frontend/src/periscope/components/Tabs2/Tabs2.tsx +++ b/frontend/src/periscope/components/Tabs2/Tabs2.tsx @@ -1,7 +1,7 @@ import './Tabs2.styles.scss'; import { Color } from '@signozhq/design-tokens'; -import { Button } from 'antd'; +import { Button, Tag } from 'antd'; import { TimelineFilter } from 'container/AlertHistory/types'; import { 
Undo } from 'lucide-react'; import { useState } from 'react'; @@ -11,6 +11,7 @@ interface Tab { label: string | JSX.Element; disabled?: boolean; icon?: string | JSX.Element; + isBeta?: boolean; } interface TimelineTabsProps { @@ -63,6 +64,12 @@ function Tabs2({ style={{ minWidth: buttonMinWidth }} > {tab.label} + + {tab.isBeta && ( + + Beta + + )} ))} diff --git a/frontend/src/styles.scss b/frontend/src/styles.scss index 7e50e4e38c..130464a980 100644 --- a/frontend/src/styles.scss +++ b/frontend/src/styles.scss @@ -274,3 +274,28 @@ notifications - 2050 url('../public/fonts/GeistMonoVF.woff2') format('woff'); /* Add other formats if needed (e.g., woff2, truetype, opentype, svg) */ } + +@font-face { + font-family: 'Inter'; + src: url('../public/fonts/Inter-VariableFont_opsz,wght.ttf') format('truetype'); + font-weight: 300 700; + font-style: normal; +} +@font-face { + font-family: 'Work Sans'; + src: url('../public/fonts/WorkSans-VariableFont_wght.ttf') format('truetype'); + font-weight: 500; + font-style: normal; +} +@font-face { + font-family: 'Space Mono'; + src: url('../public/fonts/SpaceMono-Regular.ttf') format('truetype'); + font-weight: normal; + font-style: normal; +} +@font-face { + font-family: 'Fira Code'; + src: url('../public/fonts/FiraCode-VariableFont_wght.ttf') format('truetype'); + font-weight: 300 700; + font-style: normal; +} diff --git a/frontend/src/types/api/alerts/alertTypes.ts b/frontend/src/types/api/alerts/alertTypes.ts index 47e43b377f..f99891c901 100644 --- a/frontend/src/types/api/alerts/alertTypes.ts +++ b/frontend/src/types/api/alerts/alertTypes.ts @@ -1,5 +1,6 @@ // this list must exactly match with the backend export enum AlertTypes { + ANOMALY_BASED_ALERT = 'ANOMALY_BASED_ALERT', METRICS_BASED_ALERT = 'METRIC_BASED_ALERT', LOGS_BASED_ALERT = 'LOGS_BASED_ALERT', TRACES_BASED_ALERT = 'TRACES_BASED_ALERT', diff --git a/frontend/src/types/api/alerts/def.ts b/frontend/src/types/api/alerts/def.ts index 2897b6219b..40a6036411 100644 --- 
a/frontend/src/types/api/alerts/def.ts +++ b/frontend/src/types/api/alerts/def.ts @@ -12,6 +12,10 @@ export const defaultFrequency = '1m0s'; // default compare op: above export const defaultCompareOp = '1'; +export const defaultAlgorithm = 'standard'; + +export const defaultSeasonality = 'hourly'; + export interface AlertDef { id?: number; alertType?: string; @@ -40,6 +44,8 @@ export interface RuleCondition { absentFor?: number | undefined; requireMinPoints?: boolean | undefined; requiredNumPoints?: number | undefined; + algorithm?: string; + seasonality?: string; } export interface Labels { [key: string]: string; diff --git a/frontend/src/types/api/preferences/userOrgPreferences.ts b/frontend/src/types/api/preferences/userOrgPreferences.ts new file mode 100644 index 0000000000..6faa75d5f1 --- /dev/null +++ b/frontend/src/types/api/preferences/userOrgPreferences.ts @@ -0,0 +1,39 @@ +export interface GetOrgPreferenceResponseProps { + status: string; + data: Record; +} + +export interface GetUserPreferenceResponseProps { + status: string; + data: Record; +} + +export interface GetAllOrgPreferencesResponseProps { + status: string; + data: Record; +} + +export interface GetAllUserPreferencesResponseProps { + status: string; + data: Record; +} + +export interface UpdateOrgPreferenceProps { + key: string; + value: unknown; +} + +export interface UpdateUserPreferenceProps { + key: string; + value: unknown; +} + +export interface UpdateOrgPreferenceResponseProps { + status: string; + data: Record; +} + +export interface UpdateUserPreferenceResponseProps { + status: string; + data: Record; +} diff --git a/frontend/src/types/api/queryBuilder/queryBuilderData.ts b/frontend/src/types/api/queryBuilder/queryBuilderData.ts index 832bd09411..d15a41cec9 100644 --- a/frontend/src/types/api/queryBuilder/queryBuilderData.ts +++ b/frontend/src/types/api/queryBuilder/queryBuilderData.ts @@ -49,7 +49,8 @@ export type OrderByPayload = { export interface QueryFunctionProps { name: string; 
- args: string[]; + args: (string | number)[]; + namedArgs?: Record; } // Type for query builder diff --git a/frontend/src/types/api/widgets/getQuery.ts b/frontend/src/types/api/widgets/getQuery.ts index 52d2f18d80..b7387d5c98 100644 --- a/frontend/src/types/api/widgets/getQuery.ts +++ b/frontend/src/types/api/widgets/getQuery.ts @@ -8,6 +8,10 @@ export interface PayloadProps { export type ListItem = { timestamp: string; data: Omit }; export interface QueryData { + lowerBoundSeries?: [number, string][]; + upperBoundSeries?: [number, string][]; + predictedSeries?: [number, string][]; + anomalyScores?: [number, string][]; metric: { [key: string]: string; }; @@ -34,6 +38,11 @@ export interface QueryDataV3 { quantity?: number; unitPrice?: number; unit?: string; + lowerBoundSeries?: SeriesItem[] | null; + upperBoundSeries?: SeriesItem[] | null; + predictedSeries?: SeriesItem[] | null; + anomalyScores?: SeriesItem[] | null; + isAnomaly?: boolean; } export interface Props { diff --git a/frontend/src/types/common/queryBuilder.ts b/frontend/src/types/common/queryBuilder.ts index fd3b4c0530..0987347fd5 100644 --- a/frontend/src/types/common/queryBuilder.ts +++ b/frontend/src/types/common/queryBuilder.ts @@ -153,6 +153,7 @@ export enum LogsAggregatorOperator { } export enum QueryFunctionsTypes { + ANOMALY = 'anomaly', CUTOFF_MIN = 'cutOffMin', CUTOFF_MAX = 'cutOffMax', CLAMP_MIN = 'clampMin', diff --git a/frontend/src/utils/logs.ts b/frontend/src/utils/logs.ts index 10f6625f31..b230a678e3 100644 --- a/frontend/src/utils/logs.ts +++ b/frontend/src/utils/logs.ts @@ -1,5 +1,6 @@ import { orange } from '@ant-design/colors'; import { Color } from '@signozhq/design-tokens'; +import { LogType } from 'components/Logs/LogStateIndicator/LogStateIndicator'; export const getDefaultLogBackground = ( isReadOnly?: boolean, @@ -17,10 +18,28 @@ export const getDefaultLogBackground = ( export const getActiveLogBackground = ( isActiveLog = true, isDarkMode = true, + logType?: string, ): string 
=> { if (!isActiveLog) return ``; - if (isDarkMode) return `background-color: ${Color.BG_SLATE_200};`; - return `background-color: ${Color.BG_VANILLA_300}; color: ${Color.TEXT_SLATE_400}`; + if (isDarkMode) { + switch (logType) { + case LogType.INFO: + return `background-color: ${Color.BG_ROBIN_500}10 !important;`; + case LogType.WARN: + return `background-color: ${Color.BG_AMBER_500}10 !important;`; + case LogType.ERROR: + return `background-color: ${Color.BG_CHERRY_500}10 !important;`; + case LogType.TRACE: + return `background-color: ${Color.BG_FOREST_400}10 !important;`; + case LogType.DEBUG: + return `background-color: ${Color.BG_AQUA_500}10 !important;`; + case LogType.FATAL: + return `background-color: ${Color.BG_SAKURA_500}10 !important;`; + default: + return `background-color: ${Color.BG_SLATE_200} !important;`; + } + } + return `background-color: ${Color.BG_VANILLA_300}!important; color: ${Color.TEXT_SLATE_400} !important;`; }; export const getHightLightedLogBackground = ( diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 1c501a08d3..4af84c5df0 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -6091,10 +6091,10 @@ bl@^4.1.0: inherits "^2.0.4" readable-stream "^3.4.0" -body-parser@1.20.2: - version "1.20.2" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd" - integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA== +body-parser@1.20.2, body-parser@1.20.3: + version "1.20.3" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.3.tgz#1953431221c6fb5cd63c4b36d53fab0928e548c6" + integrity sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g== dependencies: bytes "3.1.2" content-type "~1.0.5" @@ -6104,7 +6104,7 @@ body-parser@1.20.2: http-errors "2.0.0" iconv-lite "0.4.24" on-finished "2.4.1" - qs "6.11.0" + qs "6.13.0" raw-body "2.5.2" type-is "~1.6.18" unpipe 
"1.0.0" @@ -6139,7 +6139,7 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.2, braces@~3.0.2: +braces@^3.0.3, braces@~3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ -6268,6 +6268,17 @@ call-bind@^1.0.0, call-bind@^1.0.2: function-bind "^1.1.1" get-intrinsic "^1.0.2" +call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" + callsites@^3.0.0: version "3.1.0" resolved "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz" @@ -7547,6 +7558,15 @@ defaults@^1.0.3: dependencies: clone "^1.0.2" +define-data-property@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + gopd "^1.0.1" + define-lazy-prop@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz" @@ -7737,15 +7757,15 @@ domhandler@^5.0.2, domhandler@^5.0.3: dependencies: domelementtype "^2.3.0" -dompurify@3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/dompurify/-/dompurify-3.0.0.tgz" - integrity sha512-0g/yr2IJn4nTbxwL785YxS7/AvvgGFJw6LLWP+BzWzB1+BYOqPUT9Hy0rXrZh5HLdHnxH72aDdzvC9SdTjsuaA== +dompurify@3.1.3: + version "3.1.3" + resolved 
"https://registry.yarnpkg.com/dompurify/-/dompurify-3.1.3.tgz#cfe3ce4232c216d923832f68f2aa18b2fb9bd223" + integrity sha512-5sOWYSNPaxz6o2MUPvtyxTTqR4D3L77pr5rUQoWgD5ROQtVIZQgJkXbo1DLlK3vj11YGw5+LnF4SYti4gZmwng== dompurify@^2.4.3: - version "2.4.7" - resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.4.7.tgz#277adeb40a2c84be2d42a8bcd45f582bfa4d0cfc" - integrity sha512-kxxKlPEDa6Nc5WJi+qRgPbOAbgTpSULL+vI3NUXsZMlkJxTqYI9wg5ZTay2sFrdZRWHPWNi+EdAhcJf81WtoMQ== + version "2.5.7" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.5.7.tgz#6e0d36b9177db5a99f18ade1f28579db5ab839d7" + integrity sha512-2q4bEI+coQM8f5ez7kt2xclg1XsecaV9ASJk/54vwlfRRNQfDqJz2pzQ8t0Ix/ToBpXlVjrRIx7pFC/o8itG2Q== domutils@^2.5.2, domutils@^2.8.0: version "2.8.0" @@ -8014,6 +8034,18 @@ es-abstract@^1.22.1: unbox-primitive "^1.0.2" which-typed-array "^1.1.10" +es-define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + es-get-iterator@^1.1.3: version "1.1.3" resolved "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz" @@ -8961,6 +8993,17 @@ get-intrinsic@^1.2.1: has-proto "^1.0.1" has-symbols "^1.0.3" +get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + 
has-proto "^1.0.1" + has-symbols "^1.0.3" + hasown "^2.0.0" + get-package-type@^0.1.0: version "0.1.0" resolved "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz" @@ -9136,6 +9179,13 @@ has-property-descriptors@^1.0.0: dependencies: get-intrinsic "^1.1.1" +has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== + dependencies: + es-define-property "^1.0.0" + has-proto@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz" @@ -12073,11 +12123,11 @@ micromark@^3.0.0: uvu "^0.5.0" micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: - version "4.0.5" - resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" microseconds@0.2.0: @@ -12507,6 +12557,11 @@ object-inspect@^1.12.2, object-inspect@^1.12.3, object-inspect@^1.9.0: resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz" integrity sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g== +object-inspect@^1.13.1: + version "1.13.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" + integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== + object-is@^1.1.5: version "1.1.5" resolved 
"https://registry.yarnpkg.com/object-is/-/object-is-1.1.5.tgz#b9deeaa5fc7f1846a0faecdceec138e5778f53ac" @@ -13551,6 +13606,13 @@ qs@6.11.0: dependencies: side-channel "^1.0.4" +qs@6.13.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== + dependencies: + side-channel "^1.0.6" + quad-indices@^2.0.1: version "2.0.1" resolved "https://registry.npmjs.org/quad-indices/-/quad-indices-2.0.1.tgz" @@ -15104,6 +15166,18 @@ set-cookie-parser@^2.4.6: resolved "https://registry.yarnpkg.com/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz#131921e50f62ff1a66a461d7d62d7b21d5d15a51" integrity sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ== +set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + set-harmonic-interval@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/set-harmonic-interval/-/set-harmonic-interval-1.0.1.tgz" @@ -15174,6 +15248,16 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" +side-channel@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" + signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: 
version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" diff --git a/go.mod b/go.mod index 04ec7d710a..e211f2692c 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,13 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.25.0 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.102.12 + github.com/SigNoz/signoz-otel-collector v0.111.5 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 github.com/auth0/go-jwt-middleware v1.0.1 github.com/cespare/xxhash v1.1.0 - github.com/coreos/go-oidc/v3 v3.10.0 + github.com/coreos/go-oidc/v3 v3.11.0 github.com/dustin/go-humanize v1.0.1 github.com/go-co-op/gocron v1.30.1 github.com/go-kit/log v0.2.1 @@ -31,17 +31,16 @@ require ( github.com/knadh/koanf v1.5.0 github.com/mailru/easyjson v0.7.7 github.com/mattn/go-sqlite3 v2.0.3+incompatible - github.com/minio/minio-go/v6 v6.0.57 github.com/oklog/oklog v0.3.2 github.com/open-telemetry/opamp-go v0.5.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.102.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.111.0 github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/common v0.59.1 + github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v2.5.0+incompatible - github.com/rs/cors v1.11.0 + github.com/rs/cors v1.11.1 github.com/russellhaering/gosaml2 v0.9.0 github.com/russellhaering/goxmldsig v1.2.0 github.com/samber/lo v1.38.1 @@ -50,45 
+49,44 @@ require ( github.com/soheilhy/cmux v0.1.5 github.com/srikanthccv/ClickHouse-go-mock v0.9.0 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/component v0.103.0 - go.opentelemetry.io/collector/confmap v0.103.0 - go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0 - go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0 - go.opentelemetry.io/collector/connector v0.103.0 - go.opentelemetry.io/collector/consumer v0.103.0 - go.opentelemetry.io/collector/exporter v0.103.0 - go.opentelemetry.io/collector/extension v0.103.0 - go.opentelemetry.io/collector/otelcol v0.103.0 - go.opentelemetry.io/collector/pdata v1.14.1 - go.opentelemetry.io/collector/processor v0.103.0 - go.opentelemetry.io/collector/receiver v0.103.0 - go.opentelemetry.io/collector/service v0.103.0 + go.opentelemetry.io/collector/component v0.111.0 + go.opentelemetry.io/collector/confmap v1.17.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 + go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer/consumertest v0.111.0 + go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/otelcol v0.111.0 + go.opentelemetry.io/collector/pdata v1.17.0 + go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/receiver v0.111.0 + go.opentelemetry.io/collector/service v0.111.0 go.opentelemetry.io/contrib/bridges/otelzap v0.0.0-20240820072021-3fab5f5f20fb - go.opentelemetry.io/contrib/config v0.8.0 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/log v0.4.0 - go.opentelemetry.io/otel/metric v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 - go.opentelemetry.io/otel/trace v1.29.0 + go.opentelemetry.io/contrib/config v0.10.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/log v0.6.0 + go.opentelemetry.io/otel/metric v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + 
go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.27.0 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 - golang.org/x/net v0.28.0 + golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.23.0 golang.org/x/text v0.18.0 - google.golang.org/grpc v1.66.0 + google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.34.2 gopkg.in/segmentio/analytics-go.v3 v3.1.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/apimachinery v0.31.0 + k8s.io/apimachinery v0.31.1 ) require ( - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -105,23 +103,26 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/ebitengine/purego v0.8.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/elastic/lunes v0.1.0 // indirect github.com/expr-lang/expr v1.16.9 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect - github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/go-jose/go-jose/v4 v4.0.2 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole 
v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect @@ -131,22 +132,19 @@ require ( github.com/hashicorp/go-version v1.7.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/klauspost/cpuid v1.2.3 // indirect + github.com/klauspost/compress v1.17.10 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leodido/go-syslog/v4 v4.1.0 // indirect + github.com/leodido/go-syslog/v4 v4.2.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 // indirect + github.com/magefile/mage v1.15.0 // indirect github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect - github.com/minio/md5-simd v1.1.0 // indirect - github.com/minio/sha256-simd v0.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - 
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -154,7 +152,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oklog/run v1.1.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect github.com/paulmach/orb v0.11.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect @@ -167,52 +165,62 @@ require ( github.com/robfig/cron/v3 v3.0.1 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/backo-go v1.0.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.24.9 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/smarty/assertions v1.15.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.13 // indirect + github.com/tklauser/numcpus v0.7.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/vjeantet/grok v1.0.1 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.103.0 // indirect - 
go.opentelemetry.io/collector/config/configtelemetry v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 // indirect - go.opentelemetry.io/collector/featuregate v1.13.0 // indirect - go.opentelemetry.io/collector/semconv v0.108.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.4.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + go.opentelemetry.io/collector v0.111.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect + go.opentelemetry.io/collector/connector v0.111.0 // indirect + go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 
// indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/extension v0.111.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect + go.opentelemetry.io/collector/featuregate v1.17.0 // indirect + go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect + go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline v0.111.0 // indirect + go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/semconv v0.111.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect + 
go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/time v0.6.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/api v0.195.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + gonum.org/v1/gonum v0.15.1 // indirect + google.golang.org/api v0.199.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - k8s.io/client-go v0.31.0 // indirect + k8s.io/client-go v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect ) diff --git a/go.sum b/go.sum index 0b935bba50..a09bb6b322 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -23,8 
+23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -62,16 +62,16 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd 
h1:Bk43AsDYe0fhkbj57eGXx8H3ZJ4zhmQXBnrW523ktj8= github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE= github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA= -github.com/SigNoz/signoz-otel-collector v0.102.12 h1:5yY0IBtNz6SHMzKzwHmKfIx99Ij8mr72nDI2Xi08pDQ= -github.com/SigNoz/signoz-otel-collector v0.102.12/go.mod h1:tcNyU+NSn7ZkzZcLa+k+dJIPOPV+CjHn3+z1SICAfdA= +github.com/SigNoz/signoz-otel-collector v0.111.5 h1:kLpJSv9U46doA+89nfUvTLcNb6WbIxiMAtNlTNL88ZE= +github.com/SigNoz/signoz-otel-collector v0.111.5/go.mod h1:/nyVFDiEz/QBfyqekB3zRwstZ/KSIB85qgV9NnzAtig= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= @@ -137,13 +137,13 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= -github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= 
+github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -155,8 +155,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ 
-166,8 +166,12 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE= +github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4= +github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -210,8 +214,8 @@ github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7F github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= -github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= +github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= 
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -228,8 +232,9 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= @@ -248,10 +253,12 @@ github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqw github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod 
h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -292,8 +299,8 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -311,7 +318,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -442,14 +448,14 @@ github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -464,10 +470,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= -github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= @@ -489,8 +493,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-syslog/v4 v4.1.0 h1:Wsl194qyWXr7V6DrGWC3xmxA9Ra6XgWO+toNt2fmCaI= -github.com/leodido/go-syslog/v4 v4.1.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= +github.com/leodido/go-syslog/v4 v4.2.0 h1:A7vpbYxsO4e2E8udaurkLlxP5LDpDbmPMsGnuhb7jVk= +github.com/leodido/go-syslog/v4 v4.2.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery 
v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -498,8 +502,10 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 h1:1KuuSOy4ZNgW0KA2oYIngXVFhQcXxhLqCVK7cBcldkk= +github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= @@ -524,12 +530,6 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v6 v6.0.57 
h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/9Tw= -github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -587,20 +587,20 @@ github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/open-telemetry/opamp-go v0.5.0 h1:2YFbb6G4qBkq3yTRdVb5Nfz9hKHW/ldUyex352e1J7g= github.com/open-telemetry/opamp-go v0.5.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.102.0 h1:7QHxeMnKzMXMw9oh5lnOHakfPpGSglxiZfbYUn6l6yc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.102.0/go.mod h1:BtKaHa1yDHfhM9qjGUHweb0HgqFGxFSM7AMzwLXVR98= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 h1:PNLVcz8kJLE9V5kGnbBh277Bvl4WwiVZ+NbFbOB80WY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0/go.mod h1:cBbjwd8m4rBVgCQksUbAVQX1EoM5IuCyNQw2mzvibEM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 h1:vJL6lDaeI3pVA7ADnWKD3HMpI80BSrZ2UnGc+qkwqoY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest 
v0.102.0/go.mod h1:xtE7tds5j8PtI/wMuGb+Em5K9rJH8hm6t28Qe4QrpoU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 h1:TvJYcU/DLRFCgHr7nT98k5D+qkZ4syKVxc8OJjv+K4c= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0/go.mod h1:WzD3Ox7tywAQHknxAFpAC1oZJGItMp5mbvgUGjvzNY8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0 h1:J8GFYxKLWG1360XRukc1tY5K9BF80MFXcO91UpCMgcQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0/go.mod h1:GNxigQNap2jyOEPdOedAKqCbh61y576ND4BKn/7i8xY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.102.0 h1:XOoV42CE0BJUsKJQ7+Fie2jusw0MBzzOc79IoQONJAk= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.102.0/go.mod h1:nCpPHY7XLM+zbJxKxP132IuV0xHCu5E6oa3ZLpmBPl4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0 h1:n1p2DedLvPEN1XEx26s1PR1PCuXTgCY4Eo+kDTq7q0s= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0/go.mod h1:PdhkBzDs6Qvcv3lxNQw0ztrGaa1foNBODrF2v09zlyA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 h1:QhEwQTGTXitMPbmyloNfLVz1r9YzZ8izJUJivI8obzs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 h1:Ld/1EUAQ6z3CirSyf4A8waHzUAZbMPrDOno+7tb0vKM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0/go.mod h1:wAOT1iGOOTPTw2ysr0DW2Wrfi0/TECVgiGByRQfFiV4= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 h1:TnAhTFTwmJzFq6vVcf57lnRzAp+rNx5tEyrMudtDGsc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0/go.mod h1:l0CUp7vTH+Wv0tF5PYaHpPn1dLiVuMRAMqbBgXFpz54= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.111.0 h1:60NMfD7WMOHKCkV+GVM8HRqWMB4EAbqEY5sF9gYUG1Y= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.111.0/go.mod h1:/qECmbWAqic6qoYp3oBmAFRpnKbJdGuk9iDdMhwHYfw= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -655,8 +655,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod 
h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -675,8 +675,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russellhaering/gosaml2 v0.9.0 h1:CNMnH42z/GirrKjdmNrSS6bAAs47F9bPdl4PfRmVOIk= github.com/russellhaering/gosaml2 v0.9.0/go.mod h1:byViER/1YPUa0Puj9ROZblpoq2jsE7h/CJmitzX0geU= github.com/russellhaering/goxmldsig v1.2.0 h1:Y6GTTc9Un5hCxSzVz4UIWQ/zuVwDvzJk80guqzwx6Vg= @@ -696,17 +696,12 @@ github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N+ github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod 
h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI= +github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -714,7 +709,6 @@ github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGB github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= @@ -723,8 +717,8 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/srikanthccv/ClickHouse-go-mock v0.9.0 h1:XKr1Tb7GL1HlifKH874QGR3R6l0e6takXasROUiZawU= @@ -749,10 +743,10 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= +github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= +github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= +github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod 
h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= @@ -787,110 +781,134 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.103.0 h1:mssWo1y31p1F/SRsSBnVUX6YocgawCqM1blpE+hkWog= -go.opentelemetry.io/collector v0.103.0/go.mod h1:mgqdTFB7QCYiOeEdJSSEktovPqy+2fw4oTKJzyeSB0U= -go.opentelemetry.io/collector/component v0.103.0 h1:j52YAsp8EmqYUotVUwhovkqFZGuxArEkk65V4TI46NE= -go.opentelemetry.io/collector/component v0.103.0/go.mod h1:jKs19tGtCO8Hr5/YM0F+PoFcl8SVe/p4Ge30R6srkbc= -go.opentelemetry.io/collector/config/configauth v0.103.0 h1:tv2Ilj0X9T8ZsDd4mB8Sl+nXQ8CG8MJVQ1Lo4mmE0Pk= -go.opentelemetry.io/collector/config/configauth v0.103.0/go.mod h1:VIo8DpFeyOOCMUVoQsBdq3t2snUiBBECP0UxW1bwz/o= -go.opentelemetry.io/collector/config/configcompression v1.10.0 h1:ClkAY1rzaxFawmC53BUf3TjTWKOGx+2xnpqOJIkg6Tk= -go.opentelemetry.io/collector/config/configcompression v1.10.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/confighttp v0.103.0 h1:tgCWMKuIorSr4+iQOv0A8Ya/8do73hiG5KHinWaz63Q= -go.opentelemetry.io/collector/config/confighttp v0.103.0/go.mod h1:xMXoLsTGTJlftu+VAL3iadEs4gkmqFrvuPPnpNi6ETo= -go.opentelemetry.io/collector/config/configopaque v1.10.0 h1:FAxj6ggLpJE/kFnR1ezYwjRdo6gHo2+CjlIsHVCFVnQ= -go.opentelemetry.io/collector/config/configopaque v1.10.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configtelemetry v0.103.0 h1:KLbhkFqdw9D31t0IhJ/rnhMRvz/s14eie0fKfm5xWns= -go.opentelemetry.io/collector/config/configtelemetry v0.103.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= 
-go.opentelemetry.io/collector/config/configtls v0.103.0 h1:nbk8sJIHoYYQbpZtUkUQceTbjC4wEjoePKJ15v8cCcU= -go.opentelemetry.io/collector/config/configtls v0.103.0/go.mod h1:046dfdfHW8wWCMhzUaWJo7guRiCoSz5QzVjCSDzymdU= -go.opentelemetry.io/collector/config/internal v0.103.0 h1:pimS3uLHfOBbConZrviGoTwu+bkTNDoQBtbeWCg8U8k= -go.opentelemetry.io/collector/config/internal v0.103.0/go.mod h1:kJRkB+PgamWqPi/GWbYWvnRzVzS1rwDUh6+VSz4C7NQ= -go.opentelemetry.io/collector/confmap v0.103.0 h1:qKKZyWzropSKfgtGv12JzADOXNgThqH1Vx6qzblBE24= -go.opentelemetry.io/collector/confmap v0.103.0/go.mod h1:TlOmqe/Km3K6WgxyhEAdCb/V1Yp6eSU76fCoiluEa88= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0 h1:zApcKLSosuu9I/4IRHTqlE1H6XNiZNAgd26YbzHwkto= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0/go.mod h1:hoel+3CPjRhPSHzCrE1E+wCyoSLHlgW7662Ntwx2ujM= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0 h1:0XHQ/ffxSUx3sMbnYSf8a4jnVYLUrxo+/XwdhXkizgs= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0/go.mod h1:NiE4Fe42Sew1TyXuU1YEd0xZBDNI+w6IRkC2OTlJUak= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0 h1:5dB2G7d6RKmWS8ptuAWvAEKGYODk2DTRm84bU9HooLQ= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0/go.mod h1:GT/GBk17lDhc27762w6PNHvKYbA+TnHvNEyQHUsjKpY= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0 h1:Hrp+nw4W9/jeJfi3GfJW6EYh7DeNkaC1wojOh4x8CbI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0/go.mod h1:kUst0pGVBlKDSlvJYDclrsApbkMv7ahRDh6/pE4LsBc= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 h1:JUDRYsMOhkIBxZqZli0BU+64zahIUgnEPZSe9wo2T0Q= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0/go.mod h1:+mUrWjpdGIdSKMeeplLO+qXFSBc287as2oIPVdKMTxc= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 h1:boTv+ZRkn1h5eUbt5sLSU5lCrItCCxCen/laRmsHLyg= 
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0/go.mod h1:0pZ7RD7SPg+yklgGPN+74Zzbps4R9x5bRPZX1D1gtGM= -go.opentelemetry.io/collector/connector v0.103.0 h1:jwmrgCT6ftz3U4o8mAqP+/yaQ5KsLMFXo2+OHXhy+tE= -go.opentelemetry.io/collector/connector v0.103.0/go.mod h1:6RDaeDMiXTKEXSy1eIaO0EiM+/91NVHdBxOc9e2++2A= -go.opentelemetry.io/collector/consumer v0.103.0 h1:L/7SA/U2ua5L4yTLChnI9I+IFGKYU5ufNQ76QKYcPYs= -go.opentelemetry.io/collector/consumer v0.103.0/go.mod h1:7jdYb9kSSOsu2R618VRX0VJ+Jt3OrDvvUsDToHTEOLI= -go.opentelemetry.io/collector/exporter v0.103.0 h1:g0nF/FAwuA7tTJf5uo1PvlQl7xFqCgvfH+FYqufBSiw= -go.opentelemetry.io/collector/exporter v0.103.0/go.mod h1:PC2OvciPEew2kaA/ZMyxRqfeOW8Wgi0CYR614PEyS/w= -go.opentelemetry.io/collector/extension v0.103.0 h1:vTsd+GElvT7qKk9Y9d6UKuuT2Ngx0mai8Q48hkKQMwM= -go.opentelemetry.io/collector/extension v0.103.0/go.mod h1:rp2l3xskNKWv0yBCyU69Pv34TnP1QVD1ijr0zSndnsM= -go.opentelemetry.io/collector/extension/auth v0.103.0 h1:i7cQl+Ewpve/DIN4rFMg1GiyUPE14LZsYWrJ1RqtP84= -go.opentelemetry.io/collector/extension/auth v0.103.0/go.mod h1:JdYBS/EkPAz2APAi8g7xTiSRlZTc7c4H82AQM9epzxw= -go.opentelemetry.io/collector/extension/zpagesextension v0.103.0 h1:jgSEQY++zOI6hFQygwuvS6ulJ/Yu4xXgUg+Ijoxx51I= -go.opentelemetry.io/collector/extension/zpagesextension v0.103.0/go.mod h1:2OUi0Hp+3zPUJmi7goJ6d1/kGgFAw3SDESRX7xQ0QHE= -go.opentelemetry.io/collector/featuregate v1.13.0 h1:rc84eCf5hesXQ8/bP6Zc15wqthbomfLBHmox5tT7AwM= -go.opentelemetry.io/collector/featuregate v1.13.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.103.0 h1:Skqnc2mxDdk3eiYioUuG7ST6ur5k83SOv7mIBt60fBw= -go.opentelemetry.io/collector/otelcol v0.103.0/go.mod h1:iJF3ghCv+nRZI6+hI7z3kGRZrgH///Fd9tNXY82X90g= -go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= -go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= 
-go.opentelemetry.io/collector/pdata/testdata v0.103.0 h1:iI6NOE0L2je/bxlWzAWHQ/yCtnGupgv42Hl9Al1q/g4= -go.opentelemetry.io/collector/pdata/testdata v0.103.0/go.mod h1:tLzRhb/h37/9wFRQVr+CxjKi5qmhSRpCAiOlhwRkeEk= -go.opentelemetry.io/collector/processor v0.103.0 h1:YZ+LRuHKtOam7SCeLkJAP6bS1d6XxeYP22OyMN3VP0s= -go.opentelemetry.io/collector/processor v0.103.0/go.mod h1:/mxyh0NpJgpZycm7iHDpM7i5PdtWvKKdCZf0cyADJfU= -go.opentelemetry.io/collector/receiver v0.103.0 h1:V3JBKkX+7e/NYpDDZVyeu2VQB1/lLFuoJFPfupdCcZs= -go.opentelemetry.io/collector/receiver v0.103.0/go.mod h1:Yybv4ynKFdMOYViWWPMmjkugR89FSQN0P37wP6mX6qM= -go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= -go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/collector/service v0.103.0 h1:e4Eri4jo+YOuEK0+/JE9SUdT/NZaJ2jz/ROJlmLn96s= -go.opentelemetry.io/collector/service v0.103.0/go.mod h1:p1mlniiC1MuPN5FANYJYgf5V5CGFP0hNqWfI8t7Aw8M= +go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo= +go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE= +go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms= +go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys= +go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ= +go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE= +go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM= +go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo= +go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM= 
+go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY= +go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw= +go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8= +go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU= +go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU= +go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc= +go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684= +go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g= +go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4= +go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00= +go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0= +go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY= +go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc= +go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q= +go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc= +go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8= +go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc= +go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM= 
+go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0 h1:FlrfejpK6J+OytGuYEElrVZGjP4D3mTQUcqe/tkIMZQ= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0/go.mod h1:7wnSpMS3KE6wBUG8OhQELPBJod5gV6SgSbJEEpBwlR0= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg= +go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro= +go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg= +go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU= +go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38= +go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI= +go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY= +go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U= +go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4= +go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU= +go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 
h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw= +go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8= +go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI= +go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk= +go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY= +go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg= +go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI= +go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw= +go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY= +go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws= +go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs= +go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc= +go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo= +go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc= +go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8= 
+go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk= +go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis= +go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w= +go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ= +go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58= +go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8= +go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s= +go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s= +go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls= +go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ= +go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g= +go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo= +go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM= +go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ= +go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s= +go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM= +go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw= 
+go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms= +go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY= go.opentelemetry.io/contrib/bridges/otelzap v0.0.0-20240820072021-3fab5f5f20fb h1:ZqncifxU0B1q64FRbhKxsJugRsrEToakmYUsgQ5tGbY= go.opentelemetry.io/contrib/bridges/otelzap v0.0.0-20240820072021-3fab5f5f20fb/go.mod h1:mzv0k5dTnSUE5/ZerXUwGiNKzcPJTakuCh6Wm1emNvU= -go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= -go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 h1:zBPZAISA9NOc5cE8zydqDiS0itvg/P/0Hn9m72a5gvM= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0/go.mod 
h1:gcj2fFjEsqpV3fXuzAA+0Ze1p2/4MJ4T7d77AmkvueQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/log v0.4.0 
h1:/vZ+3Utqh18e8TPjuc3ecg284078KWrR8BRz+PQAj3o= -go.opentelemetry.io/otel/log v0.4.0/go.mod h1:DhGnQvky7pHy82MIRV43iXh3FlKN8UUKftn0KbLOq6I= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/sdk/log v0.4.0 h1:1mMI22L82zLqf6KtkjrRy5BbagOTWdJsqMY/HSqILAA= -go.opentelemetry.io/otel/sdk/log v0.4.0/go.mod h1:AYJ9FVF0hNOgAVzUG/ybg/QttnXhUePWAupmCqtdESo= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= +go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY= +go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho= +go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY= +go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= 
+go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ= 
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= +go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8= +go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI= +go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE= +go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= +go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -908,14 +926,13 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -962,7 +979,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -993,8 +1009,8 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1077,13 +1093,11 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1152,8 +1166,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1170,8 +1184,8 @@ google.golang.org/api 
v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1210,8 +1224,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 
h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1231,8 +1245,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1259,7 +1273,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/segmentio/analytics-go.v3 v3.1.0 h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U= @@ -1287,12 +1300,12 @@ 
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= diff --git a/pkg/query-service/app/clickhouseReader/filter_suggestions.go b/pkg/query-service/app/clickhouseReader/filter_suggestions.go index fe78fc5555..4de924ddc3 100644 --- a/pkg/query-service/app/clickhouseReader/filter_suggestions.go +++ b/pkg/query-service/app/clickhouseReader/filter_suggestions.go @@ -8,7 +8,7 @@ import ( "slices" "strings" - "github.com/SigNoz/signoz-otel-collector/exporter/clickhouselogsexporter/logsv2" + 
"github.com/SigNoz/signoz-otel-collector/utils/fingerprint" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.uber.org/zap" @@ -260,7 +260,7 @@ func newRankingStrategy() attribRankingStrategy { } // Synonyms of interesting attributes should come next - resourceHierarchy := logsv2.ResourceHierarchy() + resourceHierarchy := fingerprint.ResourceHierarchy() for _, attr := range []string{ "service.name", "deployment.environment", diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 18078c0931..a6811dc2eb 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -3560,7 +3560,7 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs } func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string) (*model.DashboardVar, error) { - var result model.DashboardVar + var result = model.DashboardVar{VariableValues: make([]interface{}, 0)} rows, err := r.db.Query(ctx, query) zap.L().Info(query) @@ -3595,7 +3595,11 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string) return &result, nil } -func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) { +func (r *ClickHouseReader) GetMetricAggregateAttributes( + ctx context.Context, + req *v3.AggregateAttributeRequest, + skipDotNames bool, +) (*v3.AggregateAttributeResponse, error) { var query string var err error @@ -3622,6 +3626,10 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req if err := rows.Scan(&metricName, &typ, &isMonotonic, &temporality); err != nil { return nil, fmt.Errorf("error while scanning rows: %s", err.Error()) } + if skipDotNames && strings.Contains(metricName, ".") { + continue + } + // Non-monotonic cumulative sums are treated as gauges 
if typ == "Sum" && !isMonotonic && temporality == string(v3.Cumulative) { typ = "Gauge" diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 7078566eca..9ce110d545 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -27,6 +27,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/agentConf" "go.signoz.io/signoz/pkg/query-service/app/dashboards" "go.signoz.io/signoz/pkg/query-service/app/explorer" + "go.signoz.io/signoz/pkg/query-service/app/inframetrics" "go.signoz.io/signoz/pkg/query-service/app/integrations" "go.signoz.io/signoz/pkg/query-service/app/logs" logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" @@ -110,6 +111,13 @@ type APIHandler struct { Upgrader *websocket.Upgrader UseLogsNewSchema bool + + hostsRepo *inframetrics.HostsRepo + processesRepo *inframetrics.ProcessesRepo + podsRepo *inframetrics.PodsRepo + nodesRepo *inframetrics.NodesRepo + namespacesRepo *inframetrics.NamespacesRepo + clustersRepo *inframetrics.ClustersRepo } type APIHandlerOpts struct { @@ -179,6 +187,13 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { querier := querier.NewQuerier(querierOpts) querierv2 := querierV2.NewQuerier(querierOptsV2) + hostsRepo := inframetrics.NewHostsRepo(opts.Reader, querierv2) + processesRepo := inframetrics.NewProcessesRepo(opts.Reader, querierv2) + podsRepo := inframetrics.NewPodsRepo(opts.Reader, querierv2) + nodesRepo := inframetrics.NewNodesRepo(opts.Reader, querierv2) + namespacesRepo := inframetrics.NewNamespacesRepo(opts.Reader, querierv2) + clustersRepo := inframetrics.NewClustersRepo(opts.Reader, querierv2) + aH := &APIHandler{ reader: opts.Reader, appDao: opts.AppDao, @@ -196,6 +211,12 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { querier: querier, querierV2: querierv2, UseLogsNewSchema: opts.UseLogsNewSchema, + hostsRepo: hostsRepo, + processesRepo: processesRepo, + podsRepo: podsRepo, + nodesRepo: nodesRepo, + 
namespacesRepo: namespacesRepo, + clustersRepo: clustersRepo, } logsQueryBuilder := logsv3.PrepareLogsQuery @@ -344,6 +365,38 @@ func (aH *APIHandler) RegisterQueryRangeV3Routes(router *mux.Router, am *AuthMid subRouter.HandleFunc("/logs/livetail", am.ViewAccess(aH.liveTailLogs)).Methods(http.MethodGet) } +func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMiddleware) { + hostsSubRouter := router.PathPrefix("/api/v1/hosts").Subrouter() + hostsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getHostAttributeKeys)).Methods(http.MethodGet) + hostsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getHostAttributeValues)).Methods(http.MethodGet) + hostsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getHostList)).Methods(http.MethodPost) + + processesSubRouter := router.PathPrefix("/api/v1/processes").Subrouter() + processesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getProcessAttributeKeys)).Methods(http.MethodGet) + processesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getProcessAttributeValues)).Methods(http.MethodGet) + processesSubRouter.HandleFunc("/list", am.ViewAccess(aH.getProcessList)).Methods(http.MethodPost) + + podsSubRouter := router.PathPrefix("/api/v1/pods").Subrouter() + podsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getPodAttributeKeys)).Methods(http.MethodGet) + podsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPodAttributeValues)).Methods(http.MethodGet) + podsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPodList)).Methods(http.MethodPost) + + nodesSubRouter := router.PathPrefix("/api/v1/nodes").Subrouter() + nodesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNodeAttributeKeys)).Methods(http.MethodGet) + nodesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNodeAttributeValues)).Methods(http.MethodGet) + nodesSubRouter.HandleFunc("/list", am.ViewAccess(aH.getNodeList)).Methods(http.MethodPost) + + namespacesSubRouter := 
router.PathPrefix("/api/v1/namespaces").Subrouter() + namespacesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNamespaceAttributeKeys)).Methods(http.MethodGet) + namespacesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNamespaceAttributeValues)).Methods(http.MethodGet) + namespacesSubRouter.HandleFunc("/list", am.ViewAccess(aH.getNamespaceList)).Methods(http.MethodPost) + + clustersSubRouter := router.PathPrefix("/api/v1/clusters").Subrouter() + clustersSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getClusterAttributeKeys)).Methods(http.MethodGet) + clustersSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getClusterAttributeValues)).Methods(http.MethodGet) + clustersSubRouter.HandleFunc("/list", am.ViewAccess(aH.getClusterList)).Methods(http.MethodPost) +} + func (aH *APIHandler) RegisterWebSocketPaths(router *mux.Router, am *AuthMiddleware) { subRouter := router.PathPrefix("/ws").Subrouter() subRouter.HandleFunc("/query_progress", am.ViewAccess(aH.GetQueryProgressUpdates)).Methods(http.MethodGet) @@ -390,11 +443,11 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) { router.HandleFunc("/api/v1/rules/{id}/history/top_contributors", am.ViewAccess(aH.getRuleStateHistoryTopContributors)).Methods(http.MethodPost) router.HandleFunc("/api/v1/rules/{id}/history/overall_status", am.ViewAccess(aH.getOverallStateTransitions)).Methods(http.MethodPost) - router.HandleFunc("/api/v1/downtime_schedules", am.OpenAccess(aH.listDowntimeSchedules)).Methods(http.MethodGet) - router.HandleFunc("/api/v1/downtime_schedules/{id}", am.OpenAccess(aH.getDowntimeSchedule)).Methods(http.MethodGet) - router.HandleFunc("/api/v1/downtime_schedules", am.OpenAccess(aH.createDowntimeSchedule)).Methods(http.MethodPost) - router.HandleFunc("/api/v1/downtime_schedules/{id}", am.OpenAccess(aH.editDowntimeSchedule)).Methods(http.MethodPut) - router.HandleFunc("/api/v1/downtime_schedules/{id}", 
am.OpenAccess(aH.deleteDowntimeSchedule)).Methods(http.MethodDelete) + router.HandleFunc("/api/v1/downtime_schedules", am.ViewAccess(aH.listDowntimeSchedules)).Methods(http.MethodGet) + router.HandleFunc("/api/v1/downtime_schedules/{id}", am.ViewAccess(aH.getDowntimeSchedule)).Methods(http.MethodGet) + router.HandleFunc("/api/v1/downtime_schedules", am.EditAccess(aH.createDowntimeSchedule)).Methods(http.MethodPost) + router.HandleFunc("/api/v1/downtime_schedules/{id}", am.EditAccess(aH.editDowntimeSchedule)).Methods(http.MethodPut) + router.HandleFunc("/api/v1/downtime_schedules/{id}", am.EditAccess(aH.deleteDowntimeSchedule)).Methods(http.MethodDelete) router.HandleFunc("/api/v1/dashboards", am.ViewAccess(aH.getDashboards)).Methods(http.MethodGet) router.HandleFunc("/api/v1/dashboards", am.EditAccess(aH.createDashboards)).Methods(http.MethodPost) @@ -465,6 +518,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) { // === Authentication APIs === router.HandleFunc("/api/v1/invite", am.AdminAccess(aH.inviteUser)).Methods(http.MethodPost) + router.HandleFunc("/api/v1/invite/bulk", am.AdminAccess(aH.inviteUsers)).Methods(http.MethodPost) router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(aH.getInvite)).Methods(http.MethodGet) router.HandleFunc("/api/v1/invite/{email}", am.AdminAccess(aH.revokeInvite)).Methods(http.MethodDelete) router.HandleFunc("/api/v1/invite", am.AdminAccess(aH.listPendingInvites)).Methods(http.MethodGet) @@ -1955,6 +2009,32 @@ func (aH *APIHandler) inviteUser(w http.ResponseWriter, r *http.Request) { aH.WriteJSON(w, r, resp) } +func (aH *APIHandler) inviteUsers(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + req, err := parseInviteUsersRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) + return + } + + response, err := auth.InviteUsers(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + 
return + } + // Check the response status and set the appropriate HTTP status code + if response.Status == "failure" { + w.WriteHeader(http.StatusBadRequest) // 400 Bad Request for failure + } else if response.Status == "partial_success" { + w.WriteHeader(http.StatusPartialContent) // 206 Partial Content + } else { + w.WriteHeader(http.StatusOK) // 200 OK for success + } + + aH.WriteJSON(w, r, response) +} + // getInvite returns the invite object details for the given invite token. We do not need to // protect this API because invite token itself is meant to be private. func (aH *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) { @@ -2468,22 +2548,35 @@ func (aH *APIHandler) RegisterMessagingQueuesRoutes(router *mux.Router, am *Auth // SubRouter for kafka kafkaRouter := router.PathPrefix("/api/v1/messaging-queues/kafka").Subrouter() + onboardingRouter := kafkaRouter.PathPrefix("/onboarding").Subrouter() + onboardingRouter.HandleFunc("/producers", am.ViewAccess(aH.onboardProducers)).Methods(http.MethodPost) + onboardingRouter.HandleFunc("/consumers", am.ViewAccess(aH.onboardConsumers)).Methods(http.MethodPost) + onboardingRouter.HandleFunc("/kafka", am.ViewAccess(aH.onboardKafka)).Methods(http.MethodPost) + + partitionLatency := kafkaRouter.PathPrefix("/partition-latency").Subrouter() + partitionLatency.HandleFunc("/overview", am.ViewAccess(aH.getPartitionOverviewLatencyData)).Methods(http.MethodPost) + partitionLatency.HandleFunc("/consumer", am.ViewAccess(aH.getConsumerPartitionLatencyData)).Methods(http.MethodPost) + consumerLagRouter := kafkaRouter.PathPrefix("/consumer-lag").Subrouter() consumerLagRouter.HandleFunc("/producer-details", am.ViewAccess(aH.getProducerData)).Methods(http.MethodPost) consumerLagRouter.HandleFunc("/consumer-details", am.ViewAccess(aH.getConsumerData)).Methods(http.MethodPost) consumerLagRouter.HandleFunc("/network-latency", am.ViewAccess(aH.getNetworkData)).Methods(http.MethodPost) - onboardingRouter := 
kafkaRouter.PathPrefix("/onboarding").Subrouter() - onboardingRouter.HandleFunc("/producers", am.ViewAccess(aH.onboardProducers)).Methods(http.MethodPost) - onboardingRouter.HandleFunc("/consumers", am.ViewAccess(aH.onboardConsumers)).Methods(http.MethodPost) - onboardingRouter.HandleFunc("/kafka", am.ViewAccess(aH.onboardKafka)).Methods(http.MethodPost) + topicThroughput := kafkaRouter.PathPrefix("/topic-throughput").Subrouter() + topicThroughput.HandleFunc("/producer", am.ViewAccess(aH.getProducerThroughputOverview)).Methods(http.MethodPost) + topicThroughput.HandleFunc("/producer-details", am.ViewAccess(aH.getProducerThroughputDetails)).Methods(http.MethodPost) + topicThroughput.HandleFunc("/consumer", am.ViewAccess(aH.getConsumerThroughputOverview)).Methods(http.MethodPost) + topicThroughput.HandleFunc("/consumer-details", am.ViewAccess(aH.getConsumerThroughputDetails)).Methods(http.MethodPost) + + spanEvaluation := kafkaRouter.PathPrefix("/span").Subrouter() + spanEvaluation.HandleFunc("/evaluation", am.ViewAccess(aH.getProducerConsumerEval)).Methods(http.MethodPost) // for other messaging queues, add SubRouters here } // not using md5 hashing as the plain string would work -func uniqueIdentifier(clientID, serviceInstanceID, serviceName, separator string) string { - return clientID + separator + serviceInstanceID + separator + serviceName +func uniqueIdentifier(params []string, separator string) string { + return strings.Join(params, separator) } func (aH *APIHandler) onboardProducers( @@ -2826,7 +2919,7 @@ func (aH *APIHandler) getNetworkData( return } - queryRangeParams, err := mq.BuildQRParamsNetwork(messagingQueue, "throughput", attributeCache) + queryRangeParams, err := mq.BuildQRParamsWithCache(messagingQueue, "throughput", attributeCache) if err != nil { zap.L().Error(err.Error()) RespondError(w, apiErr, nil) @@ -2853,7 +2946,8 @@ func (aH *APIHandler) getNetworkData( clientID, clientIDOk := series.Labels["client_id"] serviceInstanceID, 
serviceInstanceIDOk := series.Labels["service_instance_id"] serviceName, serviceNameOk := series.Labels["service_name"] - hashKey := uniqueIdentifier(clientID, serviceInstanceID, serviceName, "#") + params := []string{clientID, serviceInstanceID, serviceName} + hashKey := uniqueIdentifier(params, "#") _, ok := attributeCache.Hash[hashKey] if clientIDOk && serviceInstanceIDOk && serviceNameOk && !ok { attributeCache.Hash[hashKey] = struct{}{} @@ -2864,7 +2958,7 @@ func (aH *APIHandler) getNetworkData( } } - queryRangeParams, err = mq.BuildQRParamsNetwork(messagingQueue, "fetch-latency", attributeCache) + queryRangeParams, err = mq.BuildQRParamsWithCache(messagingQueue, "fetch-latency", attributeCache) if err != nil { zap.L().Error(err.Error()) RespondError(w, apiErr, nil) @@ -2890,7 +2984,8 @@ func (aH *APIHandler) getNetworkData( clientID, clientIDOk := series.Labels["client_id"] serviceInstanceID, serviceInstanceIDOk := series.Labels["service_instance_id"] serviceName, serviceNameOk := series.Labels["service_name"] - hashKey := uniqueIdentifier(clientID, serviceInstanceID, serviceName, "#") + params := []string{clientID, serviceInstanceID, serviceName} + hashKey := uniqueIdentifier(params, "#") _, ok := attributeCache.Hash[hashKey] if clientIDOk && serviceInstanceIDOk && serviceNameOk && ok { latencySeries = append(latencySeries, series) @@ -2992,6 +3087,362 @@ func (aH *APIHandler) getConsumerData( aH.Respond(w, resp) } +// s1 +func (aH *APIHandler) getPartitionOverviewLatencyData( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "producer-topic-throughput") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + 
zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + result = postprocess.TransformToTableForClickHouseQueries(result) + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + +// s1 +func (aH *APIHandler) getConsumerPartitionLatencyData( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "consumer_partition_latency") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + result = postprocess.TransformToTableForClickHouseQueries(result) + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + +// s3 p overview +// fetch traces +// cache attributes +// fetch byte rate metrics +func (aH *APIHandler) getProducerThroughputOverview( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + attributeCache := &mq.Clients{ + Hash: make(map[string]struct{}), + } + 
+ queryRangeParams, err := mq.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview", attributeCache) + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + + for _, res := range result { + for _, series := range res.Series { + serviceName, serviceNameOk := series.Labels["service_name"] + topicName, topicNameOk := series.Labels["topic"] + params := []string{serviceName, topicName} + hashKey := uniqueIdentifier(params, "#") + _, ok := attributeCache.Hash[hashKey] + if topicNameOk && serviceNameOk && !ok { + attributeCache.Hash[hashKey] = struct{}{} + attributeCache.TopicName = append(attributeCache.TopicName, topicName) + attributeCache.ServiceName = append(attributeCache.ServiceName, serviceName) + } + } + } + + queryRangeParams, err = mq.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview-latency", attributeCache) + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQueriesByNameFetchLatency) + return + } + + latencyColumn := &v3.Result{QueryName: "latency"} + var latencySeries []*v3.Series + for _, res := range resultFetchLatency { + for _, series := 
range res.Series { + topic, topicOk := series.Labels["topic"] + serviceName, serviceNameOk := series.Labels["service_name"] + params := []string{topic, serviceName} + hashKey := uniqueIdentifier(params, "#") + _, ok := attributeCache.Hash[hashKey] + if topicOk && serviceNameOk && ok { + latencySeries = append(latencySeries, series) + } + } + } + + latencyColumn.Series = latencySeries + result = append(result, latencyColumn) + + resultFetchLatency = postprocess.TransformToTableForBuilderQueries(result, queryRangeParams) + + resp := v3.QueryRangeResponse{ + Result: resultFetchLatency, + } + aH.Respond(w, resp) +} + +// s3 p details +func (aH *APIHandler) getProducerThroughputDetails( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "producer-throughput-details") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + result = postprocess.TransformToTableForClickHouseQueries(result) + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + +// s3 c overview +func (aH *APIHandler) getConsumerThroughputOverview( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := 
mq.BuildQueryRangeParams(messagingQueue, "consumer-throughput-overview") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + result = postprocess.TransformToTableForClickHouseQueries(result) + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + +// s3 c details +func (aH *APIHandler) getConsumerThroughputDetails( + w http.ResponseWriter, r *http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "consumer-throughput-details") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + result = postprocess.TransformToTableForClickHouseQueries(result) + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + +// s4 +// needs logic to parse duration +// needs logic to get the percentage +// show 10 traces +func (aH *APIHandler) getProducerConsumerEval( + w http.ResponseWriter, r 
*http.Request, +) { + messagingQueue, apiErr := ParseMessagingQueueBody(r) + + if apiErr != nil { + zap.L().Error(apiErr.Err.Error()) + RespondError(w, apiErr, nil) + return + } + + queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "producer-consumer-eval") + if err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + if err := validateQueryRangeParamsV3(queryRangeParams); err != nil { + zap.L().Error(err.Error()) + RespondError(w, apiErr, nil) + return + } + + var result []*v3.Result + var errQuriesByName map[string]error + + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErrObj, errQuriesByName) + return + } + + resp := v3.QueryRangeResponse{ + Result: result, + } + aH.Respond(w, resp) +} + // ParseMessagingQueueBody parse for messaging queue params func ParseMessagingQueueBody(r *http.Request) (*mq.MessagingQueue, *model.ApiError) { messagingQueue := new(mq.MessagingQueue) @@ -3783,7 +4234,7 @@ func (aH *APIHandler) autocompleteAggregateAttributes(w http.ResponseWriter, r * switch req.DataSource { case v3.DataSourceMetrics: - response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), req) + response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), req, true) case v3.DataSourceLogs: response, err = aH.reader.GetLogAggregateAttributes(r.Context(), req) case v3.DataSourceTraces: diff --git a/pkg/query-service/app/infra.go b/pkg/query-service/app/infra.go new file mode 100644 index 0000000000..73d10bdddb --- /dev/null +++ b/pkg/query-service/app/infra.go @@ -0,0 +1,336 @@ +package app + +import ( + "encoding/json" + "net/http" + + "go.signoz.io/signoz/pkg/query-service/model" +) + +func (aH *APIHandler) getHostAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + + if err != nil { + 
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // get attribute keys + keys, err := aH.hostsRepo.GetHostAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // write response + aH.Respond(w, keys) +} + +func (aH *APIHandler) getHostAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + // parse request + req, err := parseFilterAttributeValueRequest(r) + + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // get attribute values + values, err := aH.hostsRepo.GetHostAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // write response + aH.Respond(w, values) +} + +func (aH *APIHandler) getHostList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.HostListRequest{} + + // parse request + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // get host list + hostList, err := aH.hostsRepo.GetHostList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + // write response + aH.Respond(w, hostList) +} + +func (aH *APIHandler) getProcessAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.processesRepo.GetProcessAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getProcessAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err 
:= parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.processesRepo.GetProcessAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getProcessList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.ProcessListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + hostList, err := aH.processesRepo.GetProcessList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, hostList) +} + +func (aH *APIHandler) getPodAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.podsRepo.GetPodAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getPodAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.podsRepo.GetPodAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getPodList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.PodListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, 
&model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + podList, err := aH.podsRepo.GetPodList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, podList) +} + +func (aH *APIHandler) getNodeAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.nodesRepo.GetNodeAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getNodeAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.nodesRepo.GetNodeAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getNodeList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.NodeListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + nodeList, err := aH.nodesRepo.GetNodeList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, nodeList) +} + +func (aH *APIHandler) getNamespaceAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := 
aH.namespacesRepo.GetNamespaceAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getNamespaceAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.namespacesRepo.GetNamespaceAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getNamespaceList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.NamespaceListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + namespaceList, err := aH.namespacesRepo.GetNamespaceList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, namespaceList) +} + +func (aH *APIHandler) getClusterAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.clustersRepo.GetClusterAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getClusterAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.clustersRepo.GetClusterAttributeValues(ctx, *req) + 
if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.ClusterListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + clusterList, err := aH.clustersRepo.GetClusterList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, clusterList) +} diff --git a/pkg/query-service/app/inframetrics/clusters.go b/pkg/query-service/app/inframetrics/clusters.go new file mode 100644 index 0000000000..70ef5ca95a --- /dev/null +++ b/pkg/query-service/app/inframetrics/clusters.go @@ -0,0 +1,342 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForClusters = "k8s_node_cpu_utilization" + + clusterAttrsToEnrich = []string{"k8s_cluster_name"} + + k8sClusterUIDAttrKey = "k8s_cluster_uid" + + queryNamesForClusters = map[string][]string{ + "cpu": {"A"}, + "cpu_allocatable": {"B"}, + "memory": {"C"}, + "memory_allocatable": {"D"}, + } + clusterQueryNames = []string{"A", "B", "C", "D"} +) + +type ClustersRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewClustersRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *ClustersRepo { + return &ClustersRepo{reader: reader, querierV2: querierV2} +} + +func (n *ClustersRepo) GetClusterAttributeKeys(ctx context.Context, req 
v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForClusters + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := n.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + return attributeKeysResponse, nil +} + +func (n *ClustersRepo) GetClusterAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForClusters + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := n.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (p *ClustersRepo) getMetadataAttributes(ctx context.Context, req model.ClusterListRequest) (map[string]map[string]string, error) { + clusterAttrs := map[string]map[string]string{} + + for _, key := range clusterAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForClusters, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + 
stringData[key] = *strPtr + } + } + + clusterUID := stringData[k8sClusterUIDAttrKey] + if _, ok := clusterAttrs[clusterUID]; !ok { + clusterAttrs[clusterUID] = map[string]string{} + } + + for _, key := range req.GroupBy { + clusterAttrs[clusterUID][key.Key] = stringData[key.Key] + } + } + + return clusterAttrs, nil +} + +func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, req model.ClusterListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopClusters(req) + + queryNames := queryNamesForClusters[req.OrderBy.ColumnName] + topClusterGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topClusterGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topClusterGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topClusterGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + max := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopClusterGroupsSeries := formattedResponse[0].Series[req.Offset:int(max)] + + topClusterGroups := []map[string]string{} + for _, series := range paginatedTopClusterGroupsSeries { + topClusterGroups = append(topClusterGroups, series.Labels) + } + allClusterGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allClusterGroups = append(allClusterGroups, series.Labels) + } + + return topClusterGroups, allClusterGroups, nil +} + +func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterListRequest) (model.ClusterListResponse, error) { + resp := model.ClusterListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sClusterUIDAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := 
int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := NodesTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + query.GroupBy = req.GroupBy + } + + clusterAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topClusterGroups, allClusterGroups, err := p.getTopClusterGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topClusterGroup := range topClusterGroups { + for k, v := range topClusterGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.ClusterListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.ClusterListRecord{ + CPUUsage: -1, + CPUAllocatable: -1, + MemoryUsage: -1, + MemoryAllocatable: -1, + } + + if 
clusterUID, ok := row.Data[k8sClusterUIDAttrKey].(string); ok { + record.ClusterUID = clusterUID + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + + if cpuAllocatable, ok := row.Data["B"].(float64); ok { + record.CPUAllocatable = cpuAllocatable + } + + if mem, ok := row.Data["C"].(float64); ok { + record.MemoryUsage = mem + } + + if memoryAllocatable, ok := row.Data["D"].(float64); ok { + record.MemoryAllocatable = memoryAllocatable + } + + record.Meta = map[string]string{} + if _, ok := clusterAttrs[record.ClusterUID]; ok { + record.Meta = clusterAttrs[record.ClusterUID] + } + + for k, v := range row.Data { + if slices.Contains(clusterQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allClusterGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/common.go b/pkg/query-service/app/inframetrics/common.go new file mode 100644 index 0000000000..6f83a6d46c --- /dev/null +++ b/pkg/query-service/app/inframetrics/common.go @@ -0,0 +1,81 @@ +package inframetrics + +import ( + "strings" + "time" + + "go.signoz.io/signoz/pkg/query-service/constants" + "go.signoz.io/signoz/pkg/query-service/model" +) + +// getParamsForTopItems returns the step, time series table name and samples table name +// for the top items query. what are we doing here? 
+// we want to identify the top hosts/pods/nodes quickly, so we use pre-aggregated data +// for samples and time series tables to speed up the query +// the speed of the query depends on the number of values in group by clause, the higher +// the step interval, the faster the query will be as number of rows to group by is reduced +// here we are using the averaged value of the time series data to get the top items +func getParamsForTopItems(start, end int64) (int64, string, string) { + var step int64 + var timeSeriesTableName string + var samplesTableName string + + if end-start < time.Hour.Milliseconds() { + // 5 minute aggregation for any query less than 1 hour + step = 5 * 60 + timeSeriesTableName = constants.SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME + samplesTableName = constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME + } else if end-start < time.Hour.Milliseconds()*6 { + // 15 minute aggregation for any query less than 6 hours + step = 15 * 60 + timeSeriesTableName = constants.SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME + samplesTableName = constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME + } else if end-start < time.Hour.Milliseconds()*24 { + // 1 hour aggregation for any query less than 1 day + step = 60 * 60 + timeSeriesTableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME + samplesTableName = constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME + } else if end-start < time.Hour.Milliseconds()*7 { + // 6 hours aggregation for any query less than 1 week + step = 6 * 60 * 60 + timeSeriesTableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME + samplesTableName = constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME + } else { + // 12 hours aggregation for any query greater than 1 week + step = 12 * 60 * 60 + timeSeriesTableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME + samplesTableName = constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME + } + return step, timeSeriesTableName, samplesTableName +} + +func getParamsForTopHosts(req model.HostListRequest) (int64, 
string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopPods(req model.PodListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopNodes(req model.NodeListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopNamespaces(req model.NamespaceListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopClusters(req model.ClusterListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +// TODO(srikanthccv): remove this +// What is happening here? +// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint +// goes to same shard. +// However, in this case, we are interested in the attributes values across all the shards. +// So, we replace the local time series table with the distributed time series table. +// See `PrepareTimeseriesFilterQuery` for more details. 
+func localQueryToDistributedQuery(query string) string { + return strings.Replace(query, ".time_series_v4", ".distributed_time_series_v4", 1) +} diff --git a/pkg/query-service/app/inframetrics/hosts.go b/pkg/query-service/app/inframetrics/hosts.go new file mode 100644 index 0000000000..b41cf1ecc1 --- /dev/null +++ b/pkg/query-service/app/inframetrics/hosts.go @@ -0,0 +1,485 @@ +package inframetrics + +import ( + "context" + "sort" + "strings" + "time" + + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +type HostsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +var ( + // we don't have a way to get the resource attributes from the current time series table + // but we only want to suggest resource attributes for system metrics, + // this is a list of attributes that we skip from all labels as they are data point attributes + // TODO(srikanthccv): remove this once we have a way to get resource attributes + + pointAttrsToIgnore = []string{ + "state", + "cpu", + "device", + "direction", + "mode", + "mountpoint", + "type", + "os_type", + "process_cgroup", + "process_command", + "process_command_line", + "process_executable_name", + "process_executable_path", + "process_owner", + "process_parent_pid", + "process_pid", + } + + queryNamesForTopHosts = map[string][]string{ + "cpu": {"A", "B", "F1"}, + "memory": {"C", "D", "F2"}, + "wait": {"E", "F", "F3"}, + "load15": {"G"}, + } + + // TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric + metricToUseForHostAttributes = "system_cpu_load_average_15m" + hostNameAttrKey = "host_name" + // TODO(srikanthccv): remove k8s hacky logic from hosts repo after charts users are migrated + k8sNodeNameAttrKey = "k8s_node_name" + 
agentNameToIgnore = "k8s-infra-otel-agent" +) + +func NewHostsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *HostsRepo { + return &HostsRepo{reader: reader, querierV2: querierV2} +} + +func (h *HostsRepo) GetHostAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForHostAttributes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := h.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. + filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (h *HostsRepo) GetHostAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForHostAttributes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := h.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + if req.FilterAttributeKey != hostNameAttrKey { + return attributeValuesResponse, nil + } + hostNames := []string{} + + for _, attributeValue := range attributeValuesResponse.StringAttributeValues { + if strings.Contains(attributeValue, agentNameToIgnore) { + continue + } + hostNames = append(hostNames, attributeValue) + } + + req.FilterAttributeKey = k8sNodeNameAttrKey + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = 
metricToUseForHostAttributes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err = h.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + for _, attributeValue := range attributeValuesResponse.StringAttributeValues { + if strings.Contains(attributeValue, agentNameToIgnore) { + continue + } + hostNames = append(hostNames, attributeValue) + } + + return &v3.FilterAttributeValueResponse{StringAttributeValues: hostNames}, nil +} + +func (h *HostsRepo) getActiveHosts(ctx context.Context, + req model.HostListRequest, hostNameAttrKey string) (map[string]bool, error) { + activeStatus := map[string]bool{} + step := common.MinAllowedStepInterval(req.Start, req.End) + + hasHostName := false + for _, key := range req.GroupBy { + if key.Key == hostNameAttrKey { + hasHostName = true + } + } + + if !hasHostName { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: hostNameAttrKey}) + } + + params := v3.QueryRangeParamsV3{ + Start: time.Now().Add(-time.Minute * 10).UTC().UnixMilli(), + End: time.Now().UTC().UnixMilli(), + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: step, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForHostAttributes, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: req.Filters, + GroupBy: req.GroupBy, + Expression: "A", + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationAvg, + Disabled: false, + }, + }, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeGraph, + }, + } + + queryResponse, _, err := h.querierV2.QueryRange(ctx, ¶ms) + if err != nil { + return nil, err + } + + for _, result := range queryResponse { + for _, series := range result.Series { + name := series.Labels[hostNameAttrKey] + activeStatus[name] = true + } + } + + return activeStatus, nil +} + +// getTopHosts returns 
the top hosts for the given order by column name +func (h *HostsRepo) getTopHosts(ctx context.Context, req model.HostListRequest, q *v3.QueryRangeParamsV3, hostNameAttrKey string) ([]string, []string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopHosts(req) + + queryNames := queryNamesForTopHosts[req.OrderBy.ColumnName] + topHostsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topHostsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := h.querierV2.QueryRange(ctx, topHostsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topHostsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + paginatedTopHostsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit] + + topHosts := []string{} + for _, series := range paginatedTopHostsSeries { + topHosts = append(topHosts, series.Labels[hostNameAttrKey]) + } + allHosts := []string{} + for _, series := range formattedResponse[0].Series { + allHosts = append(allHosts, series.Labels[hostNameAttrKey]) + } + + return topHosts, allHosts, nil +} + +func (h *HostsRepo) getHostsForQuery(ctx context.Context, + req model.HostListRequest, q *v3.QueryRangeParamsV3, hostNameAttrKey string) ([]model.HostListRecord, []string, error) { + + step := common.MinAllowedStepInterval(req.Start, req.End) + + query := q.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + topHosts, allHosts, err := h.getTopHosts(ctx, req, q, hostNameAttrKey) + if err != nil { + return nil, nil, err + } + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + // check if the filter has host_name and is either IN or EQUAL operator + // if so, we don't need to add the topHosts filter again + hasHostNameInOrEqual 
:= false + + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, item := range req.Filters.Items { + if item.Key.Key == hostNameAttrKey && (item.Operator == v3.FilterOperatorIn || item.Operator == v3.FilterOperatorEqual) { + hasHostNameInOrEqual = true + } + } + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + // what is happening here? + // if the filter has host_name and we are querying for k8s host metrics, + // we need to replace the host_name with k8s_node_name + if hostNameAttrKey == k8sNodeNameAttrKey { + for idx, item := range query.Filters.Items { + if item.Key.Key == hostNameAttrKey { + query.Filters.Items[idx].Key.Key = k8sNodeNameAttrKey + } + } + } + } + if !hasHostNameInOrEqual { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{ + Key: hostNameAttrKey, + }, + Value: topHosts, + Operator: v3.FilterOperatorIn, + }) + } + } + + activeHosts, err := h.getActiveHosts(ctx, req, hostNameAttrKey) + if err != nil { + return nil, nil, err + } + + queryResponse, _, err := h.querierV2.QueryRange(ctx, query) + if err != nil { + return nil, nil, err + } + + type hostTSInfo struct { + cpuTimeSeries *v3.Series + memoryTimeSeries *v3.Series + waitTimeSeries *v3.Series + load15TimeSeries *v3.Series + } + hostTSInfoMap := map[string]*hostTSInfo{} + + for _, result := range queryResponse { + for _, series := range result.Series { + hostName := series.Labels[hostNameAttrKey] + if _, ok := hostTSInfoMap[hostName]; !ok { + hostTSInfoMap[hostName] = &hostTSInfo{} + } + if result.QueryName == "G" { + loadSeries := *series + hostTSInfoMap[hostName].load15TimeSeries = &loadSeries + } + } + } + + query.FormatForWeb = false + query.CompositeQuery.PanelType = v3.PanelTypeGraph + + formulaResult, 
err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return nil, nil, err + } + + for _, result := range formulaResult { + for _, series := range result.Series { + hostName := series.Labels[hostNameAttrKey] + if _, ok := hostTSInfoMap[hostName]; !ok { + hostTSInfoMap[hostName] = &hostTSInfo{} + } + if result.QueryName == "F1" { + hostTSInfoMap[hostName].cpuTimeSeries = series + } else if result.QueryName == "F2" { + hostTSInfoMap[hostName].memoryTimeSeries = series + } else if result.QueryName == "F3" { + hostTSInfoMap[hostName].waitTimeSeries = series + } + } + } + + query.FormatForWeb = true + query.CompositeQuery.PanelType = v3.PanelTypeTable + formattedResponse, _ := postprocess.PostProcessResult(queryResponse, query) + + records := []model.HostListRecord{} + + // there should be only one result in the response + hostsInfo := formattedResponse[0] + // each row represents a host + for _, row := range hostsInfo.Table.Rows { + record := model.HostListRecord{ + CPU: -1, + Memory: -1, + Wait: -1, + Load15: -1, + } + + hostName, ok := row.Data[hostNameAttrKey].(string) + if ok { + record.HostName = hostName + } + + osType, ok := row.Data["os_type"].(string) + if ok { + record.OS = osType + } + + cpu, ok := row.Data["F1"].(float64) + if ok { + record.CPU = cpu + } + memory, ok := row.Data["F2"].(float64) + if ok { + record.Memory = memory + } + wait, ok := row.Data["F3"].(float64) + if ok { + record.Wait = wait + } + load15, ok := row.Data["G"].(float64) + if ok { + record.Load15 = load15 + } + record.Active = activeHosts[record.HostName] + if hostTSInfoMap[record.HostName] != nil { + record.CPUTimeSeries = hostTSInfoMap[record.HostName].cpuTimeSeries + record.MemoryTimeSeries = hostTSInfoMap[record.HostName].memoryTimeSeries + record.WaitTimeSeries = hostTSInfoMap[record.HostName].waitTimeSeries + record.Load15TimeSeries = hostTSInfoMap[record.HostName].load15TimeSeries + } + records = append(records, record) + } + + return records, 
allHosts, nil +} + +func dedupRecords(records []model.HostListRecord) []model.HostListRecord { + seen := map[string]bool{} + deduped := []model.HostListRecord{} + for _, record := range records { + if !seen[record.HostName] { + seen[record.HostName] = true + deduped = append(deduped, record) + } + } + return deduped +} + +func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest) (model.HostListResponse, error) { + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + resp := model.HostListResponse{ + Type: "list", + } + + vmRecords, vmAllHosts, err := h.getHostsForQuery(ctx, req, &NonK8STableListQuery, hostNameAttrKey) + if err != nil { + return resp, err + } + k8sRecords, k8sAllHosts, err := h.getHostsForQuery(ctx, req, &K8STableListQuery, k8sNodeNameAttrKey) + if err != nil { + return resp, err + } + + uniqueHosts := map[string]bool{} + for _, host := range vmAllHosts { + uniqueHosts[host] = true + } + for _, host := range k8sAllHosts { + uniqueHosts[host] = true + } + + records := append(vmRecords, k8sRecords...) + + // since we added the fix for incorrect host name, it is possible that both host_name and k8s_node_name + // are present in the response. we need to dedup the results. 
+ records = dedupRecords(records) + + resp.Total = len(uniqueHosts) + + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/namespaces.go b/pkg/query-service/app/inframetrics/namespaces.go new file mode 100644 index 0000000000..3e336672c5 --- /dev/null +++ b/pkg/query-service/app/inframetrics/namespaces.go @@ -0,0 +1,329 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForNamespaces = "k8s_pod_cpu_utilization" + + namespaceAttrsToEnrich = []string{ + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForNamespaces = map[string][]string{ + "cpu": {"A"}, + "memory": {"D"}, + } + namespaceQueryNames = []string{"A", "D"} + + attributesKeysForNamespaces = []v3.AttributeKey{ + {Key: "k8s_namespace_name"}, + {Key: "k8s_cluster_name"}, + } + + k8sNamespaceNameAttrKey = "k8s_namespace_name" +) + +type NamespacesRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewNamespacesRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *NamespacesRepo { + return &NamespacesRepo{reader: reader, querierV2: querierV2} +} + +func (p *NamespacesRepo) GetNamespaceAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + return &v3.FilterAttributeKeyResponse{AttributeKeys: attributesKeysForNamespaces}, nil +} + +func (p *NamespacesRepo) GetNamespaceAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNamespaces + 
if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + return attributeValuesResponse, nil +} + +func (p *NamespacesRepo) getMetadataAttributes(ctx context.Context, req model.NamespaceListRequest) (map[string]map[string]string, error) { + namespaceAttrs := map[string]map[string]string{} + + for _, key := range namespaceAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForNamespaces, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + namespaceName := stringData[k8sNamespaceNameAttrKey] + if _, ok := namespaceAttrs[namespaceName]; !ok { + namespaceAttrs[namespaceName] = map[string]string{} + } + + for _, key := range req.GroupBy { + namespaceAttrs[namespaceName][key.Key] = stringData[key.Key] + } + } + + return namespaceAttrs, nil +} + +func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.NamespaceListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := 
getParamsForTopNamespaces(req) + + queryNames := queryNamesForNamespaces[req.OrderBy.ColumnName] + topNamespaceGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + topNamespaceGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topNamespaceGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topNamespaceGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + paginatedTopNamespaceGroupsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit] + + topNamespaceGroups := []map[string]string{} + for _, series := range paginatedTopNamespaceGroupsSeries { + 
topNamespaceGroups = append(topNamespaceGroups, series.Labels) + } + allNamespaceGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allNamespaceGroups = append(allNamespaceGroups, series.Labels) + } + + return topNamespaceGroups, allNamespaceGroups, nil +} + +func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.NamespaceListRequest) (model.NamespaceListResponse, error) { + resp := model.NamespaceListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sNamespaceNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := PodsTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, q := range query.CompositeQuery.BuilderQueries { + + if !slices.Contains(namespaceQueryNames, q.QueryName) { + delete(query.CompositeQuery.BuilderQueries, q.QueryName) + } + + q.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if q.Filters == nil { + q.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + q.Filters.Items = append(q.Filters.Items, req.Filters.Items...) 
+ } + q.GroupBy = req.GroupBy + } + + namespaceAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topNamespaceGroups, allNamespaceGroups, err := p.getTopNamespaceGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topNamespaceGroup := range topNamespaceGroups { + for k, v := range topNamespaceGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.NamespaceListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.NamespaceListRecord{ + CPUUsage: -1, + MemoryUsage: -1, + } + + if name, ok := row.Data[k8sNamespaceNameAttrKey].(string); ok { + record.NamespaceName = name + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + + if memory, ok := row.Data["D"].(float64); ok { + record.MemoryUsage = memory + } + + record.Meta = map[string]string{} + if _, ok := namespaceAttrs[record.NamespaceName]; ok { + record.Meta = namespaceAttrs[record.NamespaceName] + } + + for k, v := range row.Data { + if slices.Contains(namespaceQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = 
labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allNamespaceGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/nodes.go b/pkg/query-service/app/inframetrics/nodes.go new file mode 100644 index 0000000000..796d6acb73 --- /dev/null +++ b/pkg/query-service/app/inframetrics/nodes.go @@ -0,0 +1,349 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForNodes = "k8s_node_cpu_utilization" + + nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid"} + + k8sNodeUIDAttrKey = "k8s_node_uid" + + queryNamesForNodes = map[string][]string{ + "cpu": {"A"}, + "cpu_allocatable": {"B"}, + "memory": {"C"}, + "memory_allocatable": {"D"}, + } + nodeQueryNames = []string{"A", "B", "C", "D"} + + metricNamesForNodes = map[string]string{ + "cpu": "k8s_node_cpu_utilization", + "cpu_allocatable": "k8s_node_allocatable_cpu", + "memory": "k8s_node_memory_usage", + "memory_allocatable": "k8s_node_allocatable_memory", + } +) + +type NodesRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewNodesRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *NodesRepo { + return &NodesRepo{reader: reader, querierV2: querierV2} +} + +func (n *NodesRepo) GetNodeAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNodes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := n.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { 
+ return nil, err + } + + return attributeKeysResponse, nil +} + +func (n *NodesRepo) GetNodeAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNodes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := n.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeListRequest) (map[string]map[string]string, error) { + nodeAttrs := map[string]map[string]string{} + + for _, key := range nodeAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForNodes, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + nodeUID := stringData[k8sNodeUIDAttrKey] + if _, ok := nodeAttrs[nodeUID]; !ok { + nodeAttrs[nodeUID] = map[string]string{} + } + + for _, key := range req.GroupBy { + nodeAttrs[nodeUID][key.Key] = stringData[key.Key] + } + } + + return nodeAttrs, nil +} + +func (p *NodesRepo) 
getTopNodeGroups(ctx context.Context, req model.NodeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopNodes(req) + + queryNames := queryNamesForNodes[req.OrderBy.ColumnName] + topNodeGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topNodeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topNodeGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topNodeGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + max := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopNodeGroupsSeries := formattedResponse[0].Series[req.Offset:int(max)] + + topNodeGroups := []map[string]string{} + for _, series := range paginatedTopNodeGroupsSeries { + topNodeGroups = append(topNodeGroups, series.Labels) + } + allNodeGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allNodeGroups = append(allNodeGroups, series.Labels) + } + + return topNodeGroups, allNodeGroups, nil +} + +func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest) (model.NodeListResponse, error) { + resp := model.NodeListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sNodeUIDAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 
60)) + + query := NodesTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + query.GroupBy = req.GroupBy + } + + nodeAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topNodeGroups, allNodeGroups, err := p.getTopNodeGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topNodeGroup := range topNodeGroups { + for k, v := range topNodeGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.NodeListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.NodeListRecord{ + NodeCPUUsage: -1, + NodeCPUAllocatable: -1, + NodeMemoryUsage: -1, + NodeMemoryAllocatable: -1, + } + + if nodeUID, ok := row.Data[k8sNodeUIDAttrKey].(string); ok { + record.NodeUID = nodeUID + } + + if 
cpu, ok := row.Data["A"].(float64); ok { + record.NodeCPUUsage = cpu + } + + if cpuAllocatable, ok := row.Data["B"].(float64); ok { + record.NodeCPUAllocatable = cpuAllocatable + } + + if mem, ok := row.Data["C"].(float64); ok { + record.NodeMemoryUsage = mem + } + + if memory, ok := row.Data["D"].(float64); ok { + record.NodeMemoryAllocatable = memory + } + + record.Meta = map[string]string{} + if _, ok := nodeAttrs[record.NodeUID]; ok { + record.Meta = nodeAttrs[record.NodeUID] + } + + for k, v := range row.Data { + if slices.Contains(nodeQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allNodeGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/nodes_query.go b/pkg/query-service/app/inframetrics/nodes_query.go new file mode 100644 index 0000000000..36503e4bd9 --- /dev/null +++ b/pkg/query-service/app/inframetrics/nodes_query.go @@ -0,0 +1,118 @@ +package inframetrics + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +var NodesTableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + // node cpu utilization + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["cpu"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node cpu allocatable + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + 
AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["cpu_allocatable"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "B", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node memory utilization + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["memory"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node memory allocatable + "D": { + QueryName: "D", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["memory_allocatable"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "D", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git 
a/pkg/query-service/app/inframetrics/pods.go b/pkg/query-service/app/inframetrics/pods.go new file mode 100644 index 0000000000..2bf101f746 --- /dev/null +++ b/pkg/query-service/app/inframetrics/pods.go @@ -0,0 +1,387 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForPods = "k8s_pod_cpu_utilization" + + podAttrsToEnrich = []string{ + "k8s_pod_uid", + "k8s_pod_name", + "k8s_namespace_name", + "k8s_node_name", + "k8s_deployment_name", + "k8s_statefulset_name", + "k8s_daemonset_name", + "k8s_job_name", + "k8s_cronjob_name", + } + + k8sPodUIDAttrKey = "k8s_pod_uid" + + queryNamesForPods = map[string][]string{ + "cpu": {"A"}, + "cpu_request": {"B", "A"}, + "cpu_limit": {"C", "A"}, + "memory": {"D"}, + "memory_request": {"E", "D"}, + "memory_limit": {"F", "D"}, + "restarts": {"G", "A"}, + } + podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G"} + + metricNamesForPods = map[string]string{ + "cpu": "k8s_pod_cpu_utilization", + "cpu_request": "k8s_pod_cpu_request_utilization", + "cpu_limit": "k8s_pod_cpu_limit_utilization", + "memory": "k8s_pod_memory_usage", + "memory_request": "k8s_pod_memory_request_utilization", + "memory_limit": "k8s_pod_memory_limit_utilization", + "restarts": "k8s_container_restarts", + } +) + +type PodsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewPodsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *PodsRepo { + return &PodsRepo{reader: reader, querierV2: querierV2} +} + +func (p *PodsRepo) GetPodAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, 
error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForPods + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := p.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. + filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (p *PodsRepo) GetPodAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForPods + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + return attributeValuesResponse, nil +} + +func (p *PodsRepo) getMetadataAttributes(ctx context.Context, req model.PodListRequest) (map[string]map[string]string, error) { + podAttrs := map[string]map[string]string{} + + for _, key := range podAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForPods, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err 
+ } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + podName := stringData[k8sPodUIDAttrKey] + if _, ok := podAttrs[podName]; !ok { + podAttrs[podName] = map[string]string{} + } + + for _, key := range req.GroupBy { + podAttrs[podName][key.Key] = stringData[key.Key] + } + } + + return podAttrs, nil +} + +func (p *PodsRepo) getTopPodGroups(ctx context.Context, req model.PodListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopPods(req) + + queryNames := queryNamesForPods[req.OrderBy.ColumnName] + topPodGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topPodGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topPodGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topPodGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + paginatedTopPodGroupsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit] + + topPodGroups := []map[string]string{} + for _, series := range paginatedTopPodGroupsSeries { + topPodGroups = append(topPodGroups, series.Labels) + } + allPodGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allPodGroups = append(allPodGroups, series.Labels) + } + + return topPodGroups, allPodGroups, nil +} + +func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (model.PodListResponse, error) { + resp := model.PodListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sPodUIDAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := PodsTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + 
query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + query.GroupBy = req.GroupBy + } + + podAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topPodGroups, allPodGroups, err := p.getTopPodGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topPodGroup := range topPodGroups { + for k, v := range topPodGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.PodListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.PodListRecord{ + PodCPU: -1, + PodCPURequest: -1, + PodCPULimit: -1, + PodMemory: -1, + PodMemoryRequest: -1, + PodMemoryLimit: -1, + RestartCount: -1, + } + + if podUID, ok := row.Data[k8sPodUIDAttrKey].(string); ok { + record.PodUID = podUID + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.PodCPU = cpu + } + if 
cpuRequest, ok := row.Data["B"].(float64); ok { + record.PodCPURequest = cpuRequest + } + + if cpuLimit, ok := row.Data["C"].(float64); ok { + record.PodCPULimit = cpuLimit + } + + if memory, ok := row.Data["D"].(float64); ok { + record.PodMemory = memory + } + + if memoryRequest, ok := row.Data["E"].(float64); ok { + record.PodMemoryRequest = memoryRequest + } + + if memoryLimit, ok := row.Data["F"].(float64); ok { + record.PodMemoryLimit = memoryLimit + } + + if restarts, ok := row.Data["G"].(float64); ok { + record.RestartCount = int(restarts) + } + + record.Meta = map[string]string{} + if _, ok := podAttrs[record.PodUID]; ok { + record.Meta = podAttrs[record.PodUID] + } + + for k, v := range row.Data { + if slices.Contains(podQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allPodGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/pods_query.go b/pkg/query-service/app/inframetrics/pods_query.go new file mode 100644 index 0000000000..108f8b1476 --- /dev/null +++ b/pkg/query-service/app/inframetrics/pods_query.go @@ -0,0 +1,196 @@ +package inframetrics + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +var PodsTableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + // pod cpu utilization + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["cpu"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: 
v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // pod cpu request utilization + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["cpu_request"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "B", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // pod cpu limit utilization + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["cpu_limit"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // pod memory utilization + "D": { + QueryName: "D", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["memory"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "D", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, 
+ Disabled: false, + }, + // pod memory request utilization + "E": { + QueryName: "E", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["memory_request"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "E", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // pod memory limit utilization + "F": { + QueryName: "F", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["memory_limit"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "F", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + "G": { + QueryName: "G", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForPods["restarts"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sPodUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "G", + ReduceTo: v3.ReduceToOperatorSum, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationMax, + Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}}, + Disabled: 
false, + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git a/pkg/query-service/app/inframetrics/processes.go b/pkg/query-service/app/inframetrics/processes.go new file mode 100644 index 0000000000..5ca3c74c21 --- /dev/null +++ b/pkg/query-service/app/inframetrics/processes.go @@ -0,0 +1,334 @@ +package inframetrics + +import ( + "context" + "fmt" + "math" + "strings" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +type ProcessesRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewProcessesRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *ProcessesRepo { + return &ProcessesRepo{reader: reader, querierV2: querierV2} +} + +func (p *ProcessesRepo) GetProcessAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = "process_memory_usage" + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := p.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. 
+ filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (p *ProcessesRepo) GetProcessAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = "process_memory_usage" + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + return attributeValuesResponse, nil +} + +func getGroupKeyForProcesses(record model.ProcessListRecord, groupBy []v3.AttributeKey) string { + groupKey := "" + for _, key := range groupBy { + groupKey += fmt.Sprintf("%s=%s,", key.Key, record.Meta[key.Key]) + } + return groupKey +} + +func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context, + req model.ProcessListRequest) (map[string]map[string]string, error) { + processAttrs := map[string]map[string]string{} + + keysToAdd := []string{"process_pid", "process_executable_name", "process_command", "process_command_line"} + for _, key := range keysToAdd { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + AggregateAttribute: v3.AttributeKey{ + Key: "process_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): remove this + // What is happening here? 
+ // The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint + // goes to same shard. + // However, in this case, we are interested in the attributes values across all the shards. + // So, we replace the local time series table with the distributed time series table. + // See `PrepareTimeseriesFilterQuery` for more details. + query = strings.Replace(query, ".time_series_v4", ".distributed_time_series_v4", 1) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + pid := stringData["process_pid"] + if _, ok := processAttrs[pid]; !ok { + processAttrs[pid] = map[string]string{} + } + + for _, key := range req.GroupBy { + processAttrs[pid][key.Key] = stringData[key.Key] + } + } + + return processAttrs, nil +} + +func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessListRequest) (model.ProcessListResponse, error) { + if req.Limit == 0 { + req.Limit = 10 + } + + resp := model.ProcessListResponse{ + Type: "list", + } + + step := common.MinAllowedStepInterval(req.Start, req.End) + + query := ProcessesTableListQuery.Clone() + if req.OrderBy != nil { + for _, q := range query.CompositeQuery.BuilderQueries { + q.OrderBy = []v3.OrderBy{*req.OrderBy} + } + } + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + } + + processAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + type processTSInfo struct { + CpuTimeSeries *v3.Series `json:"cpu_time_series"` + MemoryTimeSeries *v3.Series `json:"memory_time_series"` + } + processTSInfoMap := map[string]*processTSInfo{} + + for _, result := range queryResponse { + for _, series := range result.Series { + pid := series.Labels["process_pid"] + if _, ok := processTSInfoMap[pid]; !ok { + processTSInfoMap[pid] = &processTSInfo{} + } + } + } + + query.FormatForWeb = false + query.CompositeQuery.PanelType = v3.PanelTypeGraph + + formulaResult, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + for _, result := range formulaResult { + for _, series := range result.Series { + pid := series.Labels["process_pid"] + if _, ok := processTSInfoMap[pid]; !ok { + processTSInfoMap[pid] = &processTSInfo{} + } + loadSeries := *series + if result.QueryName == "F1" { + processTSInfoMap[pid].CpuTimeSeries = &loadSeries + } else if result.QueryName == "C" { + processTSInfoMap[pid].MemoryTimeSeries = &loadSeries + } + } + } + + query.FormatForWeb = true + query.CompositeQuery.PanelType = v3.PanelTypeTable + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + if len(formattedResponse) == 0 { + return resp, nil + } + + records := []model.ProcessListRecord{} + + // there should be only one result in the response + processInfo := formattedResponse[0] + + for _, row := range processInfo.Table.Rows { + record := model.ProcessListRecord{ + ProcessCPU: -1, + ProcessMemory: -1, + } + + pid, ok := row.Data["process_pid"].(string) + if ok { + record.ProcessID = pid + } + + processCPU, ok := row.Data["F1"].(float64) + if ok { + record.ProcessCPU = processCPU + } + + processMemory, ok := 
row.Data["C"].(float64) + if ok { + record.ProcessMemory = processMemory + } + record.Meta = processAttrs[record.ProcessID] + if processTSInfoMap[record.ProcessID] != nil { + record.ProcessCPUTimeSeries = processTSInfoMap[record.ProcessID].CpuTimeSeries + record.ProcessMemoryTimeSeries = processTSInfoMap[record.ProcessID].MemoryTimeSeries + } + record.ProcessName = record.Meta["process_executable_name"] + record.ProcessCMD = record.Meta["process_command"] + record.ProcessCMDLine = record.Meta["process_command_line"] + records = append(records, record) + } + + resp.Total = len(records) + + if req.Offset > 0 { + records = records[req.Offset:] + } + if req.Limit > 0 && len(records) > req.Limit { + records = records[:req.Limit] + } + resp.Records = records + + if len(req.GroupBy) > 0 { + groups := []model.ProcessListGroup{} + + groupMap := make(map[string][]model.ProcessListRecord) + for _, record := range records { + groupKey := getGroupKeyForProcesses(record, req.GroupBy) + if _, ok := groupMap[groupKey]; !ok { + groupMap[groupKey] = []model.ProcessListRecord{record} + } else { + groupMap[groupKey] = append(groupMap[groupKey], record) + } + } + + for _, records := range groupMap { + var avgCPU, avgMemory float64 + var validCPU, validMemory int + for _, record := range records { + if !math.IsNaN(record.ProcessCPU) { + avgCPU += record.ProcessCPU + validCPU++ + } + if !math.IsNaN(record.ProcessMemory) { + avgMemory += record.ProcessMemory + validMemory++ + } + } + avgCPU /= float64(validCPU) + avgMemory /= float64(validMemory) + + // take any record and make it as the group meta + firstRecord := records[0] + var groupValues []string + for _, key := range req.GroupBy { + groupValues = append(groupValues, firstRecord.Meta[key.Key]) + } + processNames := []string{} + for _, record := range records { + processNames = append(processNames, record.ProcessName) + } + + groups = append(groups, model.ProcessListGroup{ + GroupValues: groupValues, + GroupCPUAvg: avgCPU, + 
GroupMemoryAvg: avgMemory, + ProcessNames: processNames, + }) + } + resp.Groups = groups + resp.Type = "grouped_list" + } + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/table_list_query.go b/pkg/query-service/app/inframetrics/table_list_query.go new file mode 100644 index 0000000000..3ea748a354 --- /dev/null +++ b/pkg/query-service/app/inframetrics/table_list_query.go @@ -0,0 +1,403 @@ +package inframetrics + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +var NonK8STableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorNotEqual, + Value: "idle", + }, + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: 
[]v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "B", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F1": { + QueryName: "F1", + Expression: "A/B", + Legend: "CPU Usage (%)", + }, + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorIn, + Value: []string{"used", "cached"}, + }, + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "D": { + QueryName: "D", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + 
DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "D", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F2": { + QueryName: "F2", + Expression: "C/D", + Legend: "Memory Usage (%)", + }, + "E": { + QueryName: "E", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "wait", + }, + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "E", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F": { + QueryName: "F", + 
DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "F", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F3": { + QueryName: "F3", + Expression: "E/F", + Legend: "CPU Wait Time (%)", + }, + "G": { + QueryName: "G", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_load_average_15m", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorNotContains, + Value: "k8s-infra-otel-agent", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "G", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + Legend: "CPU Load Average (15m)", + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: 
v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} + +var ProcessesTableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "process_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "process_pid", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F1": { + QueryName: "F1", + Expression: "A", + Legend: "Process CPU Usage (%)", + }, + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "process_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "process_pid", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git a/pkg/query-service/app/inframetrics/table_list_query_k8s.go b/pkg/query-service/app/inframetrics/table_list_query_k8s.go new file mode 100644 index 0000000000..68cc9b92d6 --- /dev/null +++ b/pkg/query-service/app/inframetrics/table_list_query_k8s.go @@ -0,0 +1,269 @@ +package inframetrics + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +var 
K8STableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorNotEqual, + Value: "idle", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "B", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F1": { + QueryName: "F1", + Expression: "A/B", + Legend: "CPU Usage (%)", + }, + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + 
Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorIn, + Value: []string{"used", "cached"}, + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "D": { + QueryName: "D", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "D", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F2": { + QueryName: "F2", + Expression: "C/D", + Legend: "Memory Usage (%)", + }, + "E": { + QueryName: "E", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "wait", + }, + }, + }, + 
GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "E", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F": { + QueryName: "F", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_time", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "F", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: true, + }, + "F3": { + QueryName: "F3", + Expression: "E/F", + Legend: "CPU Wait Time (%)", + }, + "G": { + QueryName: "G", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_load_average_15m", + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "k8s_node_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "os_type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "G", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Legend: "CPU Load Average (15m)", + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: 
v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/consumerLag.md b/pkg/query-service/app/integrations/messagingQueues/kafka/consumerLag.md index d338a9acb9..0b09b293ce 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/consumerLag.md +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/consumerLag.md @@ -28,6 +28,7 @@ Response in query range `table` format "resultType": "", "result": [ { + "table": { "columns": [ { @@ -519,3 +520,461 @@ Response in query range `table` format ] } ``` +### Partition Latency + +```json +/api/v1/messaging-queues/kafka/partition-latency/overview +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000 +} +``` + +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "topic", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + "isValueColumn": false + }, + { + "name": "partition_latency", + "queryName": "partition_latency", + "isValueColumn": true + } + ], + "rows": [ + { + "data": { + "p99": "2", + "partition_latency": 1.18, + "topic": "topic1" + } + }, + { + "data": { + "p99": "2", + "partition_latency": 0.15, + "topic": "topic2" + } + }, + { + "data": { + "p99": "2", + "partition_latency": 0.26, + "topic": "topic3" + } + } + ] + } + } + ] + } +} +``` +--------- + +```json +/api/v1/messaging-queues/kafka/partition-latency/consumer +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000, + "variables": { + "partition": "2", + "topic": "topic1" + } +} + +``` +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "consumer_group", + "queryName": "", + "isValueColumn": false + }, + { + "name": "service_name", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + 
"isValueColumn": false + }, + { + "name": "error_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "throughput", + "queryName": "", + "isValueColumn": false + } + ], + "rows": [ + { + "data": { + "consumer_group": "cg1", + "error_rate": "0", + "p99": "0.11994228000000004", + "service_name": "consumer-svc", + "throughput": "1.18116" + } + } + ] + } + } + ] + } +} +``` +--------- +### Topic throughput + +```json +/api/v1/messaging-queues/kafka/topic-throughput/producer +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000 +} +``` +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "topic", + "queryName": "", + "isValueColumn": false + }, + { + "name": "serviceName", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + "isValueColumn": false + }, + { + "name": "error_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "throughput", + "queryName": "", + "isValueColumn": false + } + ], + "rows": [ + { + "data": { + "error_rate": "0", + "p99": "8.662880220000002", + "serviceName": "producer-svc1", + "throughput": "0.41642666666666667", + "topic": "topic1" + } + }, + { + "data": { + "error_rate": "0", + "p99": "9.786847500000016", + "serviceName": "producer-svc2", + "throughput": "0.76473", + "topic": "topic1" + } + }, + { + "data": { + "error_rate": "0", + "p99": "14.432925500000021", + "serviceName": "producer-svc3", + "throughput": "0.08976", + "topic": "topic2" + } + }, + { + "data": { + "error_rate": "0", + "p99": "14.32833297000002", + "serviceName": "producer-svc2", + "throughput": "0.06449333333333333", + "topic": "topic2" + } + }, + { + "data": { + "error_rate": "0", + "p99": "13.416533810000036", + "serviceName": "producer-svc4", + "throughput": "0.14766", + "topic": "topic3" + } + }, + { + "data": { + "error_rate": "0", + "p99": "13.366232000000034", + "serviceName": "producer-svc3", + 
"throughput": "0.11166666666666666", + "topic": "topic3" + } + } + ] + } + } + ] + } +} +``` +--------- +### Topic throughput + +```json +/api/v1/messaging-queues/kafka/topic-throughput/producer-details +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000, + "variables": { + "partition": "2", + "topic": "topic1", + "service_name": "producer-svc2" + } +} +``` +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "partition", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + "isValueColumn": false + }, + { + "name": "error_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "throughput", + "queryName": "", + "isValueColumn": false + } + ], + "rows": [ + { + "data": { + "error_rate": "0", + "p99": "9.165558780000026", + "partition": "2", + "throughput": "0.76473" + } + } + ] + } + } + ] + } +} +``` +--------- +### Topic throughput + +```json +/api/v1/messaging-queues/kafka/topic-throughput/consumer +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000 +} +``` +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "topic", + "queryName": "", + "isValueColumn": false + }, + { + "name": "service_name", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + "isValueColumn": false + }, + { + "name": "error_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "ingestion_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "byte_rate", + "queryName": "", + "isValueColumn": false + } + ], + "rows": [ + { + "data": { + "byte_rate": "17.7174", + "error_rate": "0", + "ingestion_rate": "1.18116", + "p99": "0.12260112000000009", + "service_name": "consumer-svc", + "topic": "topic1" + } + }, + { + "data": { + "byte_rate": "2.1594533333333334", + "error_rate": "0", 
+ "ingestion_rate": "0.15424666666666667", + "p99": "7.4079657800000005", + "service_name": "consumer-svc2", + "topic": "topic2" + } + }, + { + "data": { + "byte_rate": "3.66446", + "error_rate": "0", + "ingestion_rate": "0.25933", + "p99": "6.135769970000011", + "service_name": "consumer-svc3", + "topic": "topic3" + } + } + ] + } + } + ] + } +} +``` +--------- +### Topic throughput + +```json +/api/v1/messaging-queues/kafka/topic-throughput/consumer-details +``` +```json +{ + "start": 1728287046000000000, + "end": 1728587046000000000, + "variables": { + "topic": "topic1", + "service_name": "consumer-svc" + } +} +``` +```json +{ + "status": "success", + "data": { + "resultType": "", + "result": [ + { + "table": { + "columns": [ + { + "name": "partition", + "queryName": "", + "isValueColumn": false + }, + { + "name": "p99", + "queryName": "", + "isValueColumn": false + }, + { + "name": "error_rate", + "queryName": "", + "isValueColumn": false + }, + { + "name": "throughput", + "queryName": "", + "isValueColumn": false + } + ], + "rows": [ + { + "data": { + "error_rate": "0", + "p99": "0.11789381000000003", + "partition": "2", + "throughput": "1.18116" + } + } + ] + } + } + ] + } +} +``` diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/model.go b/pkg/query-service/app/integrations/messagingQueues/kafka/model.go index 803912c17f..de5d83487b 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/model.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/model.go @@ -5,6 +5,7 @@ const KafkaQueue = "kafka" type MessagingQueue struct { Start int64 `json:"start"` End int64 `json:"end"` + EvalTime int64 `json:"eval_time,omitempty"` Variables map[string]string `json:"variables,omitempty"` } @@ -13,6 +14,7 @@ type Clients struct { ClientID []string ServiceInstanceID []string ServiceName []string + TopicName []string } type OnboardingResponse struct { diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go 
b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go index a4d3191fe1..eeb1167f23 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go @@ -12,7 +12,7 @@ WITH consumer_query AS ( serviceName, quantile(0.99)(durationNano) / 1000000 AS p99, COUNT(*) AS total_requests, - SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count, + sumIf(1, statusCode = 2) AS error_count, avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size FROM signoz_traces.distributed_signoz_index_v2 WHERE @@ -30,7 +30,7 @@ SELECT serviceName AS service_name, p99, COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, - COALESCE(total_requests / %d, 0) AS throughput, -- Convert nanoseconds to seconds + COALESCE(total_requests / %d, 0) AS throughput, COALESCE(avg_msg_size, 0) AS avg_msg_size FROM consumer_query @@ -40,6 +40,257 @@ ORDER BY return query } +// S1 landing +func generatePartitionLatencySQL(start, end int64, queueType string) string { + timeRange := (end - start) / 1000000000 + query := fmt.Sprintf(` +WITH partition_query AS ( + SELECT + quantile(0.99)(durationNano) / 1000000 AS p99, + count(*) AS total_requests, + stringTagMap['messaging.destination.name'] AS topic, + stringTagMap['messaging.destination.partition.id'] AS partition + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 4 + AND msgSystem = '%s' + GROUP BY topic, partition +) + +SELECT + topic, + partition, + p99, + COALESCE(total_requests / %d, 0) AS throughput +FROM + partition_query +ORDER BY + topic; +`, start, end, queueType, timeRange) + return query +} + +// S1 consumer +func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string { + timeRange := (end - start) / 1000000000 + query := fmt.Sprintf(` +WITH 
consumer_pl AS ( + SELECT + stringTagMap['messaging.kafka.consumer.group'] AS consumer_group, + serviceName, + quantile(0.99)(durationNano) / 1000000 AS p99, + COUNT(*) AS total_requests, + sumIf(1, statusCode = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 5 + AND msgSystem = '%s' + AND stringTagMap['messaging.destination.name'] = '%s' + AND stringTagMap['messaging.destination.partition.id'] = '%s' + GROUP BY consumer_group, serviceName +) + +SELECT + consumer_group, + serviceName AS service_name, + p99, + COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, + COALESCE(total_requests / %d, 0) AS throughput +FROM + consumer_pl +ORDER BY + consumer_group; +`, start, end, queueType, topic, partition, timeRange) + return query +} + +// S3, producer overview +func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string { + timeRange := (end - start) / 1000000000 + // t, svc, rps, byte*, p99, err + query := fmt.Sprintf(` +WITH producer_latency AS ( + SELECT + serviceName, + quantile(0.99)(durationNano) / 1000000 AS p99, + stringTagMap['messaging.destination.name'] AS topic, + COUNT(*) AS total_requests, + sumIf(1, statusCode = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 4 + AND msgSystem = '%s' + GROUP BY topic, serviceName +) + +SELECT + topic, + serviceName AS service_name, + p99, + COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, + COALESCE(total_requests / %d, 0) AS throughput +FROM + producer_latency +`, start, end, queueType, timeRange) + return query +} + +// S3, producer topic/service overview +func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string { + timeRange := (end - start) / 1000000000 + query := fmt.Sprintf(` +WITH consumer_latency AS ( + SELECT + quantile(0.99)(durationNano) / 1000000 
AS p99, + stringTagMap['messaging.destination.partition.id'] AS partition, + COUNT(*) AS total_requests, + sumIf(1, statusCode = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 4 + AND serviceName = '%s' + AND msgSystem = '%s' + AND stringTagMap['messaging.destination.name'] = '%s' + GROUP BY partition +) + +SELECT + partition, + p99, + COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, + COALESCE(total_requests / %d, 0) AS throughput +FROM + consumer_latency +`, start, end, service, queueType, topic, timeRange) + return query +} + +// S3 consumer overview +func generateConsumerLatencySQL(start, end int64, queueType string) string { + timeRange := (end - start) / 1000000000 + query := fmt.Sprintf(` +WITH consumer_latency AS ( + SELECT + serviceName, + stringTagMap['messaging.destination.name'] AS topic, + quantile(0.99)(durationNano) / 1000000 AS p99, + COUNT(*) AS total_requests, + sumIf(1, statusCode = 2) AS error_count, + SUM(numberTagMap['messaging.message.body.size']) AS total_bytes + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 5 + AND msgSystem = '%s' + GROUP BY topic, serviceName +) + +SELECT + topic, + serviceName AS service_name, + p99, + COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, + COALESCE(total_requests / %d, 0) AS ingestion_rate, + COALESCE(total_bytes / %d, 0) AS byte_rate +FROM + consumer_latency +ORDER BY + topic; +`, start, end, queueType, timeRange, timeRange) + return query +} + +// S3 consumer topic/service +func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string { + timeRange := (end - start) / 1000000000 + query := fmt.Sprintf(` +WITH consumer_latency AS ( + SELECT + quantile(0.99)(durationNano) / 1000000 AS p99, + stringTagMap['messaging.destination.partition.id'] AS partition, + COUNT(*) AS total_requests, + 
sumIf(1, statusCode = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v2 + WHERE + timestamp >= '%d' + AND timestamp <= '%d' + AND kind = 5 + AND serviceName = '%s' + AND msgSystem = '%s' + AND stringTagMap['messaging.destination.name'] = '%s' + GROUP BY partition +) + +SELECT + partition, + p99, + COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, + COALESCE(total_requests / %d, 0) AS throughput +FROM + consumer_latency +`, start, end, service, queueType, topic, timeRange) + return query +} + +// s4 +func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTime int64) string { + query := fmt.Sprintf(` +WITH trace_data AS ( + SELECT + p.serviceName AS producer_service, + c.serviceName AS consumer_service, + p.traceID, + p.timestamp AS producer_timestamp, + c.timestamp AS consumer_timestamp, + p.durationNano AS durationNano, + (toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference + FROM + signoz_traces.distributed_signoz_index_v2 p + INNER JOIN + signoz_traces.distributed_signoz_index_v2 c + ON p.traceID = c.traceID + AND c.parentSpanID = p.spanID + WHERE + p.kind = 4 + AND c.kind = 5 + AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d' + AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d' + AND c.msgSystem = '%s' + AND p.msgSystem = '%s' +) + +SELECT + producer_service, + consumer_service, + COUNT(*) AS total_spans, + SUM(time_difference > '%d') AS breached_spans, + ((breached_spans) * 100.0) / total_spans AS breach_percentage, + arraySlice( + arrayMap(x -> x.1, + arraySort( + x -> -x.2, + groupArrayIf((traceID, time_difference), time_difference > '%d') + ) + ), + 1, 10 + ) AS top_traceIDs +FROM trace_data +GROUP BY + producer_service, + consumer_service +`, start, end, start, end, queueType, queueType, evalTime, evalTime) + return query +} + func generateProducerSQL(start, end int64, topic, partition, queueType string) string { timeRange 
:= (end - start) / 1000000000 query := fmt.Sprintf(` @@ -48,7 +299,7 @@ WITH producer_query AS ( serviceName, quantile(0.99)(durationNano) / 1000000 AS p99, count(*) AS total_count, - SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count + sumIf(1, statusCode = 2) AS error_count FROM signoz_traces.distributed_signoz_index_v2 WHERE timestamp >= '%d' @@ -64,12 +315,11 @@ SELECT serviceName AS service_name, p99, COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage, - COALESCE(total_count / %d, 0) AS throughput -- Convert nanoseconds to seconds + COALESCE(total_count / %d, 0) AS throughput FROM producer_query ORDER BY serviceName; - `, start, end, queueType, topic, partition, timeRange) return query } diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go b/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go index 4dca2a2cda..a7d934d5a4 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go @@ -2,9 +2,8 @@ package kafka import ( "fmt" - "strings" - "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) @@ -12,6 +11,9 @@ var defaultStepInterval int64 = 60 func BuildQueryRangeParams(messagingQueue *MessagingQueue, queryContext string) (*v3.QueryRangeParamsV3, error) { + if constants.KafkaSpanEval == "false" && queryContext == "producer-consumer-eval" { + return nil, fmt.Errorf("span evaluation feature is disabled and is experimental") + } // ToDo: propagate this through APIs when there are different handlers queueType := KafkaQueue @@ -37,14 +39,6 @@ func BuildQueryRangeParams(messagingQueue *MessagingQueue, queryContext string) return queryRangeParams, nil } -func PrepareClikhouseQueries(messagingQueue *MessagingQueue, queryContext string) (*v3.ClickHouseQuery, error) { - queueType := KafkaQueue - 
- chq, err := BuildClickHouseQuery(messagingQueue, queueType, queryContext) - - return chq, err -} - func buildClickHouseQueryNetwork(messagingQueue *MessagingQueue, queueType string) (*v3.ClickHouseQuery, error) { start := messagingQueue.Start end := messagingQueue.End @@ -65,12 +59,60 @@ func buildClickHouseQueryNetwork(messagingQueue *MessagingQueue, queueType strin }, nil } -func formatstring(str []string) string { - joined := strings.Join(str, ", ") - if len(joined) <= 2 { - return "" +func buildBuilderQueriesProducerBytes(unixMilliStart, unixMilliEnd int64, attributeCache *Clients) (map[string]*v3.BuilderQuery, error) { + bq := make(map[string]*v3.BuilderQuery) + queryName := fmt.Sprintf("latency") + + chq := &v3.BuilderQuery{ + QueryName: queryName, + StepInterval: common.MinAllowedStepInterval(unixMilliStart, unixMilliEnd), + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "kafka_producer_byte_rate", + }, + AggregateOperator: v3.AggregateOperatorAvg, + Temporality: v3.Unspecified, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationAvg, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorIn, + Value: attributeCache.ServiceName, + }, + { + Key: v3.AttributeKey{ + Key: "topic", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorIn, + Value: attributeCache.TopicName, + }, + }, + }, + Expression: queryName, + ReduceTo: v3.ReduceToOperatorAvg, + GroupBy: []v3.AttributeKey{{ + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + { + Key: "topic", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, } - return joined[1 : len(joined)-1] + bq[queryName] = chq + return bq, nil } func 
buildBuilderQueriesNetwork(unixMilliStart, unixMilliEnd int64, attributeCache *Clients) (map[string]*v3.BuilderQuery, error) { @@ -143,7 +185,7 @@ func buildBuilderQueriesNetwork(unixMilliStart, unixMilliEnd int64, attributeCac return bq, nil } -func BuildQRParamsNetwork(messagingQueue *MessagingQueue, queryContext string, attributeCache *Clients) (*v3.QueryRangeParamsV3, error) { +func BuildQRParamsWithCache(messagingQueue *MessagingQueue, queryContext string, attributeCache *Clients) (*v3.QueryRangeParamsV3, error) { queueType := KafkaQueue @@ -151,6 +193,7 @@ func BuildQRParamsNetwork(messagingQueue *MessagingQueue, queryContext string, a unixMilliEnd := messagingQueue.End / 1000000 var cq *v3.CompositeQuery + var err error if queryContext == "throughput" { chq, err := buildClickHouseQueryNetwork(messagingQueue, queueType) @@ -171,6 +214,24 @@ func BuildQRParamsNetwork(messagingQueue *MessagingQueue, queryContext string, a BuilderQueries: bhq, PanelType: v3.PanelTypeTable, } + } else if queryContext == "producer-throughput-overview" { + start := messagingQueue.Start + end := messagingQueue.End + query := generateProducerPartitionThroughputSQL(start, end, queueType) + + cq, err = buildCompositeQuery(&v3.ClickHouseQuery{ + Query: query, + }, queryContext) + } else if queryContext == "producer-throughput-overview-latency" { + bhq, err := buildBuilderQueriesProducerBytes(unixMilliStart, unixMilliEnd, attributeCache) + if err != nil { + return nil, err + } + cq = &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + BuilderQueries: bhq, + PanelType: v3.PanelTypeTable, + } } queryRangeParams := &v3.QueryRangeParamsV3{ @@ -182,7 +243,7 @@ func BuildQRParamsNetwork(messagingQueue *MessagingQueue, queryContext string, a FormatForWeb: true, } - return queryRangeParams, nil + return queryRangeParams, err } func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, queryContext string) (*v3.ClickHouseQuery, error) { @@ -190,16 +251,21 @@ func 
BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer end := messagingQueue.End var topic, partition string - - if queryContext == "producer" || queryContext == "consumer" { + if queryContext == "producer" || + queryContext == "consumer" || + queryContext == "consumer_partition_latency" || + queryContext == "producer-throughput-details" || + queryContext == "consumer-throughput-details" { var ok bool topic, ok = messagingQueue.Variables["topic"] if !ok { return nil, fmt.Errorf("invalid type for Topic") } - partition, ok = messagingQueue.Variables["partition"] - if !ok { - return nil, fmt.Errorf("invalid type for Partition") + if queryContext != "consumer-throughput-details" { + partition, ok = messagingQueue.Variables["partition"] + if !ok { + return nil, fmt.Errorf("invalid type for Partition") + } } } @@ -212,6 +278,26 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer return nil, fmt.Errorf("invalid type for consumer group") } query = generateConsumerSQL(start, end, topic, partition, consumerGroup, queueType) + } else if queryContext == "producer-topic-throughput" { + query = generatePartitionLatencySQL(start, end, queueType) + } else if queryContext == "consumer_partition_latency" { + query = generateConsumerPartitionLatencySQL(start, end, topic, partition, queueType) + } else if queryContext == "producer-throughput-details" { + svcName, ok := messagingQueue.Variables["service_name"] + if !ok { + return nil, fmt.Errorf("invalid type for service") + } + query = generateProducerTopicLatencySQL(start, end, topic, svcName, queueType) + } else if queryContext == "consumer-throughput-overview" { + query = generateConsumerLatencySQL(start, end, queueType) + } else if queryContext == "consumer-throughput-details" { + svcName, ok := messagingQueue.Variables["service_name"] + if !ok { + return nil, fmt.Errorf("invalid type for service") + } + query = generateConsumerServiceLatencySQL(start, end, topic, svcName, 
queueType) + } else if queryContext == "producer-consumer-eval" { + query = generateProducerConsumerEvalSQL(start, end, queueType, messagingQueue.EvalTime) + } else if queryContext == "onboard_producers" { + query = onboardProducersSQL(start, end, queueType) + } else if queryContext == "onboard_consumers" { @@ -219,13 +305,21 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer } else if queryContext == "onboard_kafka" { query = onboardKafkaSQL(start, end) } - return &v3.ClickHouseQuery{ Query: query, }, nil } func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) { + + if queryContext == "producer-consumer-eval" { + return &v3.CompositeQuery{ + QueryType: v3.QueryTypeClickHouseSQL, + ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq}, + PanelType: v3.PanelTypeList, + }, nil + } + return &v3.CompositeQuery{ QueryType: v3.QueryTypeClickHouseSQL, ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq}, diff --git a/pkg/query-service/app/logparsingpipeline/collector_config.go b/pkg/query-service/app/logparsingpipeline/collector_config.go index 49f697fbd3..516ddf5bac 100644 --- a/pkg/query-service/app/logparsingpipeline/collector_config.go +++ b/pkg/query-service/app/logparsingpipeline/collector_config.go @@ -19,24 +19,28 @@ var lockLogsPipelineSpec sync.RWMutex // check if the processors already exist // if yes then update the processor. // if something doesn't exists then remove it.
-func buildLogParsingProcessors(agentConf, parsingProcessors map[string]interface{}) error { +func updateProcessorConfigsInCollectorConf( + collectorConf map[string]interface{}, + signozPipelineProcessors map[string]interface{}, +) error { agentProcessors := map[string]interface{}{} - if agentConf["processors"] != nil { - agentProcessors = (agentConf["processors"]).(map[string]interface{}) + if collectorConf["processors"] != nil { + agentProcessors = (collectorConf["processors"]).(map[string]interface{}) } exists := map[string]struct{}{} - for key, params := range parsingProcessors { + for key, params := range signozPipelineProcessors { agentProcessors[key] = params exists[key] = struct{}{} } - // remove the old unwanted processors + // remove the old unwanted pipeline processors for k := range agentProcessors { - if _, ok := exists[k]; !ok && strings.HasPrefix(k, constants.LogsPPLPfx) { + _, isInDesiredPipelineProcs := exists[k] + if hasSignozPipelineProcessorPrefix(k) && !isInDesiredPipelineProcs { delete(agentProcessors, k) } } - agentConf["processors"] = agentProcessors + collectorConf["processors"] = agentProcessors return nil } @@ -65,21 +69,24 @@ func getOtelPipelineFromConfig(config map[string]interface{}) (*otelPipeline, er return &p, nil } -func buildLogsProcessors(current []string, logsParserPipeline []string) ([]string, error) { +func buildCollectorPipelineProcessorsList( + currentCollectorProcessors []string, + signozPipelineProcessorNames []string, +) ([]string, error) { lockLogsPipelineSpec.Lock() defer lockLogsPipelineSpec.Unlock() exists := map[string]struct{}{} - for _, v := range logsParserPipeline { + for _, v := range signozPipelineProcessorNames { exists[v] = struct{}{} } // removed the old processors which are not used var pipeline []string - for _, v := range current { - k := v - if _, ok := exists[k]; ok || !strings.HasPrefix(k, constants.LogsPPLPfx) { - pipeline = append(pipeline, v) + for _, procName := range currentCollectorProcessors { 
+ _, isInDesiredPipelineProcs := exists[procName] + if isInDesiredPipelineProcs || !hasSignozPipelineProcessorPrefix(procName) { + pipeline = append(pipeline, procName) } } @@ -96,7 +103,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin existingVsSpec := map[int]int{} // go through plan and map its elements to current positions in effective config - for i, m := range logsParserPipeline { + for i, m := range signozPipelineProcessorNames { if loc, ok := existing[m]; ok { specVsExistingMap[i] = loc existingVsSpec[loc] = i @@ -106,11 +113,11 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin lastMatched := 0 newPipeline := []string{} - for i := 0; i < len(logsParserPipeline); i++ { - m := logsParserPipeline[i] + for i := 0; i < len(signozPipelineProcessorNames); i++ { + m := signozPipelineProcessorNames[i] if loc, ok := specVsExistingMap[i]; ok { for j := lastMatched; j < loc; j++ { - if strings.HasPrefix(pipeline[j], constants.LogsPPLPfx) { + if hasSignozPipelineProcessorPrefix(pipeline[j]) { delete(specVsExistingMap, existingVsSpec[j]) } else { newPipeline = append(newPipeline, pipeline[j]) @@ -159,23 +166,24 @@ func GenerateCollectorConfigWithPipelines( config []byte, pipelines []Pipeline, ) ([]byte, *coreModel.ApiError) { - var c map[string]interface{} - err := yaml.Unmarshal([]byte(config), &c) + var collectorConf map[string]interface{} + err := yaml.Unmarshal([]byte(config), &collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } - processors, procNames, err := PreparePipelineProcessor(pipelines) + signozPipelineProcessors, signozPipelineProcNames, err := PreparePipelineProcessor(pipelines) if err != nil { return nil, coreModel.BadRequest(errors.Wrap( err, "could not prepare otel collector processors for log pipelines", )) } - // Escape any `$`s as `$$` in config generated for pipelines, to ensure any occurrences + // Escape any `$`s as `$$$` in config generated for pipelines, to 
ensure any occurrences // like $data do not end up being treated as env vars when loading collector config. - for _, procName := range procNames { - procConf := processors[procName] + // otel-collector-contrib versions 0.111 and above require using $$$ as escaped dollar (and not $$) + for _, procName := range signozPipelineProcNames { + procConf := signozPipelineProcessors[procName] serializedProcConf, err := yaml.Marshal(procConf) if err != nil { return nil, coreModel.InternalError(fmt.Errorf( @@ -183,7 +191,7 @@ func GenerateCollectorConfigWithPipelines( )) } escapedSerializedConf := strings.ReplaceAll( - string(serializedProcConf), "$", "$$", + string(serializedProcConf), "$", "$$$", ) var escapedConf map[string]interface{} @@ -194,14 +202,14 @@ func GenerateCollectorConfigWithPipelines( )) } - processors[procName] = escapedConf + signozPipelineProcessors[procName] = escapedConf } // Add processors to unmarshaled collector config `c` - buildLogParsingProcessors(c, processors) + updateProcessorConfigsInCollectorConf(collectorConf, signozPipelineProcessors) // build the new processor list in service.pipelines.logs - p, err := getOtelPipelineFromConfig(c) + p, err := getOtelPipelineFromConfig(collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } @@ -211,16 +219,20 @@ func GenerateCollectorConfigWithPipelines( )) } - updatedProcessorList, _ := buildLogsProcessors(p.Pipelines.Logs.Processors, procNames) + updatedProcessorList, _ := buildCollectorPipelineProcessorsList(p.Pipelines.Logs.Processors, signozPipelineProcNames) p.Pipelines.Logs.Processors = updatedProcessorList // add the new processor to the data ( no checks required as the keys will exists) - c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs + collectorConf["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs - updatedConf, err := yaml.Marshal(c) + updatedConf, err := 
yaml.Marshal(collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } return updatedConf, nil } + +func hasSignozPipelineProcessorPrefix(procName string) bool { + return strings.HasPrefix(procName, constants.LogsPPLPfx) || strings.HasPrefix(procName, constants.OldLogsPPLPfx) +} diff --git a/pkg/query-service/app/logparsingpipeline/collector_config_test.go b/pkg/query-service/app/logparsingpipeline/collector_config_test.go index f5ba7a352b..1ddf092e9f 100644 --- a/pkg/query-service/app/logparsingpipeline/collector_config_test.go +++ b/pkg/query-service/app/logparsingpipeline/collector_config_test.go @@ -1,12 +1,14 @@ package logparsingpipeline import ( + "context" "fmt" "testing" . "github.com/smartystreets/goconvey/convey" "github.com/stretchr/testify/require" "go.signoz.io/signoz/pkg/query-service/constants" + "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "gopkg.in/yaml.v3" ) @@ -94,7 +96,7 @@ var buildProcessorTestData = []struct { func TestBuildLogParsingProcessors(t *testing.T) { for _, test := range buildProcessorTestData { Convey(test.Name, t, func() { - err := buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) + err := updateProcessorConfigsInCollectorConf(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) So(test.agentConf, ShouldResemble, test.outputConf) }) @@ -200,7 +202,7 @@ var BuildLogsPipelineTestData = []struct { func TestBuildLogsPipeline(t *testing.T) { for _, test := range BuildLogsPipelineTestData { Convey(test.Name, t, func() { - v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) + v, err := buildCollectorPipelineProcessorsList(test.currentPipeline, test.logsPipeline) So(err, ShouldBeNil) fmt.Println(test.Name, "\n", test.currentPipeline, "\n", v, "\n", test.expectedPipeline) So(v, ShouldResemble, test.expectedPipeline) @@ -293,3 +295,136 @@ func TestPipelineAliasCollisionsDontResultInDuplicateCollectorProcessors(t *test ) } + +func 
TestPipelineRouterWorksEvenIfFirstOpIsDisabled(t *testing.T) { + require := require.New(t) + + testPipelines := []Pipeline{ + { + OrderId: 1, + Name: "pipeline1", + Alias: "pipeline1", + Enabled: true, + Filter: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: "=", + Value: "GET", + }, + }, + }, + Config: []PipelineOperator{ + { + OrderId: 1, + ID: "add", + Type: "add", + Field: "attributes.test", + Value: "val", + Enabled: false, + Name: "test add", + }, + { + OrderId: 2, + ID: "add2", + Type: "add", + Field: "attributes.test2", + Value: "val2", + Enabled: true, + Name: "test add 2", + }, + }, + }, + } + + result, collectorWarnAndErrorLogs, err := SimulatePipelinesProcessing( + context.Background(), + testPipelines, + []model.SignozLog{ + makeTestSignozLog( + "test log body", + map[string]any{ + "method": "GET", + }, + ), + }, + ) + + require.Nil(err) + require.Equal(0, len(collectorWarnAndErrorLogs)) + require.Equal(1, len(result)) + + require.Equal( + map[string]string{ + "method": "GET", + "test2": "val2", + }, result[0].Attributes_string, + ) +} + +func TestPipeCharInAliasDoesntBreakCollectorConfig(t *testing.T) { + require := require.New(t) + + testPipelines := []Pipeline{ + { + OrderId: 1, + Name: "test | pipeline", + Alias: "test|pipeline", + Enabled: true, + Filter: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: "=", + Value: "GET", + }, + }, + }, + Config: []PipelineOperator{ + { + OrderId: 1, + ID: "add", + Type: "add", + Field: "attributes.test", + Value: "val", + Enabled: true, + Name: "test add", + }, + }, + }, + } + + result, collectorWarnAndErrorLogs, err := SimulatePipelinesProcessing( + context.Background(), + testPipelines, + []model.SignozLog{ + 
makeTestSignozLog( + "test log body", + map[string]any{ + "method": "GET", + }, + ), + }, + ) + + require.Nil(err) + require.Equal(0, len(collectorWarnAndErrorLogs)) + require.Equal(1, len(result)) + + require.Equal( + map[string]string{ + "method": "GET", + "test": "val", + }, result[0].Attributes_string, + ) +} diff --git a/pkg/query-service/app/logparsingpipeline/pipelineBuilder.go b/pkg/query-service/app/logparsingpipeline/pipelineBuilder.go index 5dd6406a79..07a0ea6346 100644 --- a/pkg/query-service/app/logparsingpipeline/pipelineBuilder.go +++ b/pkg/query-service/app/logparsingpipeline/pipelineBuilder.go @@ -2,6 +2,7 @@ package logparsingpipeline import ( "fmt" + "regexp" "slices" "strings" @@ -17,8 +18,13 @@ const ( NOOP = "noop" ) +// To ensure names used in generated collector config are never judged invalid, +// only alphabets, digits and `-` are used when translating pipeline identifiers +var badCharsForCollectorConfName = regexp.MustCompile("[^a-zA-Z0-9-]") + func CollectorConfProcessorName(p Pipeline) string { - return constants.LogsPPLPfx + p.Alias + normalizedAlias := badCharsForCollectorConfName.ReplaceAllString(p.Alias, "-") + return constants.LogsPPLPfx + normalizedAlias } func PreparePipelineProcessor(pipelines []Pipeline) (map[string]interface{}, []string, error) { @@ -49,7 +55,7 @@ func PreparePipelineProcessor(pipelines []Pipeline) (map[string]interface{}, []s Type: "router", Routes: &[]Route{ { - Output: v.Config[0].ID, + Output: operators[0].ID, Expr: filterExpr, }, }, diff --git a/pkg/query-service/app/logparsingpipeline/preview.go b/pkg/query-service/app/logparsingpipeline/preview.go index b37295eb96..548c1ee2f5 100644 --- a/pkg/query-service/app/logparsingpipeline/preview.go +++ b/pkg/query-service/app/logparsingpipeline/preview.go @@ -7,7 +7,7 @@ import ( "time" _ "github.com/SigNoz/signoz-otel-collector/pkg/parser/grok" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor" + 
"github.com/SigNoz/signoz-otel-collector/processor/signozlogspipelineprocessor" "github.com/pkg/errors" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" @@ -42,7 +42,7 @@ func SimulatePipelinesProcessing( simulatorInputPLogs := SignozLogsToPLogs(logs) processorFactories, err := processor.MakeFactoryMap( - logstransformprocessor.NewFactory(), + signozlogspipelineprocessor.NewFactory(), ) if err != nil { return nil, nil, model.InternalError(errors.Wrap( diff --git a/pkg/query-service/app/logs/v3/query_builder.go b/pkg/query-service/app/logs/v3/query_builder.go index 05bd799712..40f019ac91 100644 --- a/pkg/query-service/app/logs/v3/query_builder.go +++ b/pkg/query-service/app/logs/v3/query_builder.go @@ -496,7 +496,7 @@ func IsOrderByTs(orderBy []v3.OrderBy) bool { // PrepareLogsQuery prepares the query for logs // start and end are in epoch millisecond // step is in seconds -func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) { +func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) { // adjust the start and end time to the step interval // NOTE: Disabling this as it's creating confusion between charts and actual data diff --git a/pkg/query-service/app/logs/v3/query_builder_test.go b/pkg/query-service/app/logs/v3/query_builder_test.go index 0eb0c202e5..868afd0015 100644 --- a/pkg/query-service/app/logs/v3/query_builder_test.go +++ b/pkg/query-service/app/logs/v3/query_builder_test.go @@ -1201,7 +1201,7 @@ var testPrepLogsQueryData = []struct { TableName string AggregateOperator v3.AggregateOperator ExpectedQuery string - Options v3.LogQBOptions + Options v3.QBOptions }{ { Name: "Test TS with limit- first", @@ -1223,7 +1223,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT `method` from (SELECT 
attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value DESC) LIMIT 10", - Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- first - with order by value", @@ -1246,7 +1246,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value ASC) LIMIT 10", - Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- first - with order by attribute", @@ -1269,7 +1269,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 
1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by `method` ASC) LIMIT 10", - Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- second", @@ -1291,7 +1291,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by value DESC", - Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, + Options: v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, }, { Name: "Test TS with limit- second - with order by", @@ -1314,7 +1314,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND 
has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by `method` ASC", - Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, + Options: v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, }, // Live tail { @@ -1334,7 +1334,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND ", - Options: v3.LogQBOptions{IsLivetailQuery: true}, + Options: v3.QBOptions{IsLivetailQuery: true}, }, { Name: "Live Tail Query with contains", @@ -1353,7 +1353,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where 
attributes_string_value[indexOf(attributes_string_key, 'method')] ILIKE '%GET%' AND ", - Options: v3.LogQBOptions{IsLivetailQuery: true}, + Options: v3.QBOptions{IsLivetailQuery: true}, }, { Name: "Live Tail Query W/O filter", @@ -1369,7 +1369,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where ", - Options: v3.LogQBOptions{IsLivetailQuery: true}, + Options: v3.QBOptions{IsLivetailQuery: true}, }, { Name: "Table query w/o limit", @@ -1385,7 +1385,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC", - Options: v3.LogQBOptions{}, + Options: v3.QBOptions{}, }, { Name: "Table query with limit", @@ -1402,7 +1402,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC LIMIT 10", - Options: v3.LogQBOptions{}, + Options: v3.QBOptions{}, }, { Name: "Ignore offset if order by is timestamp in list queries", @@ -1488,7 +1488,7 @@ var testPrepLogsQueryLimitOffsetData = []struct { TableName string AggregateOperator v3.AggregateOperator 
ExpectedQuery string - Options v3.LogQBOptions + Options v3.QBOptions }{ { Name: "Test limit less than pageSize - order by ts", diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index 49e585e64b..e38fb94934 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -5,6 +5,7 @@ import ( "strings" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + "go.signoz.io/signoz/pkg/query-service/app/resource" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils" @@ -33,6 +34,7 @@ const ( BODY = "body" DISTRIBUTED_LOGS_V2 = "distributed_logs_v2" DISTRIBUTED_LOGS_V2_RESOURCE = "distributed_logs_v2_resource" + DB_NAME = "signoz_logs" NANOSECOND = 1000000000 ) @@ -372,7 +374,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build } // build the where clause for resource table - resourceSubQuery, err := buildResourceSubQuery(bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false) + resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false) if err != nil { return "", err } @@ -463,7 +465,7 @@ func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) { } // no values for bucket start and end - resourceSubQuery, err := buildResourceSubQuery(0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true) + resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, 0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true) if err != nil { return "", err } @@ -491,7 +493,7 @@ func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) { } // PrepareLogsQuery prepares the query for logs -func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType 
v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) { +func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) { // adjust the start and end time to the step interval // NOTE: Disabling this as it's creating confusion between charts and actual data diff --git a/pkg/query-service/app/logs/v4/query_builder_test.go b/pkg/query-service/app/logs/v4/query_builder_test.go index 9c2b1fd2e5..64ba5c5004 100644 --- a/pkg/query-service/app/logs/v4/query_builder_test.go +++ b/pkg/query-service/app/logs/v4/query_builder_test.go @@ -806,7 +806,7 @@ func TestPrepareLogsQuery(t *testing.T) { queryType v3.QueryType panelType v3.PanelType mq *v3.BuilderQuery - options v3.LogQBOptions + options v3.QBOptions } tests := []struct { name string @@ -875,7 +875,7 @@ func TestPrepareLogsQuery(t *testing.T) { Limit: 10, GroupBy: []v3.AttributeKey{{Key: "user", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, }, - options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + options: v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, want: "SELECT `user` from (SELECT attributes_string['user'] as `user`, toFloat64(count(distinct(attributes_string['name']))) as value from signoz_logs.distributed_logs_v2 " + "where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['method'] = 'GET' " + @@ -904,7 +904,7 @@ func TestPrepareLogsQuery(t *testing.T) { GroupBy: []v3.AttributeKey{{Key: "user", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, Limit: 2, }, - options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, + options: v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, }, want: "SELECT 
toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string['user'] as `user`, toFloat64(count(distinct(attributes_string['name']))) as value " + "from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND " + @@ -929,7 +929,7 @@ func TestPrepareLogsQuery(t *testing.T) { }, }, }, - options: v3.LogQBOptions{IsLivetailQuery: true}, + options: v3.QBOptions{IsLivetailQuery: true}, }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string " + "from signoz_logs.distributed_logs_v2 where attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method') AND ", @@ -952,7 +952,7 @@ func TestPrepareLogsQuery(t *testing.T) { }, }, }, - options: v3.LogQBOptions{IsLivetailQuery: true}, + options: v3.QBOptions{IsLivetailQuery: true}, }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + "signoz_logs.distributed_logs_v2 where attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method') AND " + @@ -972,7 +972,7 @@ func TestPrepareLogsQuery(t *testing.T) { Expression: "A", Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, }, - options: v3.LogQBOptions{IsLivetailQuery: true}, + options: v3.QBOptions{IsLivetailQuery: true}, }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string " + "from signoz_logs.distributed_logs_v2 where ", diff --git a/pkg/query-service/app/metrics/v4/cumulative/timeseries.go b/pkg/query-service/app/metrics/v4/cumulative/timeseries.go index 9845096223..cf5020b51d 100644 --- 
a/pkg/query-service/app/metrics/v4/cumulative/timeseries.go +++ b/pkg/query-service/app/metrics/v4/cumulative/timeseries.go @@ -114,12 +114,14 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery) samplesTableFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + tableName := helpers.WhichSamplesTableToUse(start, end, mq) + // Select the aggregate value for interval queryTmpl := "SELECT fingerprint, %s" + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " %s as per_series_value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + tableName + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -130,37 +132,30 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery) selectLabelsAny := helpers.SelectLabelsAny(mq.GroupBy) selectLabels := helpers.SelectLabels(mq.GroupBy) + op := helpers.AggregationColumnForSamplesTable(start, end, mq) + switch mq.TimeAggregation { case v3.TimeAggregationAvg: - op := "avg(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationSum: - op := "sum(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationMin: - op := "min(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationMax: - op := "max(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationCount: - op := "count(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationCountDistinct: - op := "count(distinct(value))" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case 
v3.TimeAggregationAnyLast: - op := "anyLast(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationRate: - op := "max(value)" innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) rateQueryTmpl := "SELECT %s ts, " + rateWithoutNegative + " as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)" subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery) case v3.TimeAggregationIncrease: - op := "max(value)" innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) rateQueryTmpl := "SELECT %s ts, " + increaseWithoutNegative + diff --git a/pkg/query-service/app/metrics/v4/delta/timeseries.go b/pkg/query-service/app/metrics/v4/delta/timeseries.go index 365b09c56d..0f6cbf82fb 100644 --- a/pkg/query-service/app/metrics/v4/delta/timeseries.go +++ b/pkg/query-service/app/metrics/v4/delta/timeseries.go @@ -26,12 +26,14 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery) samplesTableFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + tableName := helpers.WhichSamplesTableToUse(start, end, mq) + // Select the aggregate value for interval queryTmpl := "SELECT fingerprint, %s" + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " %s as per_series_value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
+ tableName + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -41,33 +43,27 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery) selectLabelsAny := helpers.SelectLabelsAny(mq.GroupBy) + op := helpers.AggregationColumnForSamplesTable(start, end, mq) + switch mq.TimeAggregation { case v3.TimeAggregationAvg: - op := "avg(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationSum: - op := "sum(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationMin: - op := "min(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationMax: - op := "max(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationCount: - op := "count(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationCountDistinct: - op := "count(distinct(value))" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationAnyLast: - op := "anyLast(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationRate: - op := fmt.Sprintf("sum(value)/%d", step) + op := fmt.Sprintf("%s/%d", op, step) subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationIncrease: - op := "sum(value)" subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) } return subQuery, nil @@ -89,10 +85,8 @@ func prepareQueryOptimized(start, end, step int64, mq *v3.BuilderQuery) (string, samplesTableFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) - var tableName string = constants.SIGNOZ_SAMPLES_V4_TABLENAME - if mq.AggregateAttribute.Type == 
v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) { - tableName = "distributed_exp_hist" - } + tableName := helpers.WhichSamplesTableToUse(start, end, mq) + // Select the aggregate value for interval queryTmpl := "SELECT %s" + @@ -108,16 +102,16 @@ func prepareQueryOptimized(start, end, step int64, mq *v3.BuilderQuery) (string, switch mq.SpaceAggregation { case v3.SpaceAggregationSum: - op := "sum(value)" + op := helpers.AggregationColumnForSamplesTable(start, end, mq) if mq.TimeAggregation == v3.TimeAggregationRate { - op = "sum(value)/" + fmt.Sprintf("%d", step) + op = fmt.Sprintf("%s/%d", op, step) } query = fmt.Sprintf(queryTmpl, selectLabels, step, op, timeSeriesSubQuery, groupBy, orderBy) case v3.SpaceAggregationMin: - op := "min(value)" + op := helpers.AggregationColumnForSamplesTable(start, end, mq) query = fmt.Sprintf(queryTmpl, selectLabels, step, op, timeSeriesSubQuery, groupBy, orderBy) case v3.SpaceAggregationMax: - op := "max(value)" + op := helpers.AggregationColumnForSamplesTable(start, end, mq) query = fmt.Sprintf(queryTmpl, selectLabels, step, op, timeSeriesSubQuery, groupBy, orderBy) case v3.SpaceAggregationPercentile50, v3.SpaceAggregationPercentile75, diff --git a/pkg/query-service/app/metrics/v4/helpers/sub_query.go b/pkg/query-service/app/metrics/v4/helpers/sub_query.go index e1edc5a964..468f693646 100644 --- a/pkg/query-service/app/metrics/v4/helpers/sub_query.go +++ b/pkg/query-service/app/metrics/v4/helpers/sub_query.go @@ -13,31 +13,248 @@ import ( var ( sixHoursInMilliseconds = time.Hour.Milliseconds() * 6 oneDayInMilliseconds = time.Hour.Milliseconds() * 24 + oneWeekInMilliseconds = oneDayInMilliseconds * 7 ) -// start and end are in milliseconds -func which(start, end int64) (int64, int64, string) { +func whichTSTableToUse(start, end int64, mq *v3.BuilderQuery) (int64, int64, string) { + + // if we have a hint for the table, we need to use it + // the hint will be used to override the default table selection logic + if 
mq.MetricTableHints != nil { + if mq.MetricTableHints.TimeSeriesTableName != "" { + switch mq.MetricTableHints.TimeSeriesTableName { + case constants.SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME: + // adjust the start time to nearest 1 hour + start = start - (start % (time.Hour.Milliseconds() * 1)) + case constants.SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME: + // adjust the start time to nearest 6 hours + start = start - (start % (time.Hour.Milliseconds() * 6)) + case constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME: + // adjust the start time to nearest 1 day + start = start - (start % (time.Hour.Milliseconds() * 24)) + case constants.SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME: + // adjust the start time to nearest 1 week + start = start - (start % (time.Hour.Milliseconds() * 24 * 7)) + } + return start, end, mq.MetricTableHints.TimeSeriesTableName + } + } + // If time range is less than 6 hours, we need to use the `time_series_v4` table // else if time range is less than 1 day and greater than 6 hours, we need to use the `time_series_v4_6hrs` table - // else we need to use the `time_series_v4_1day` table + // else if time range is less than 1 week and greater than 1 day, we need to use the `time_series_v4_1day` table + // else we need to use the `time_series_v4_1week` table var tableName string - if end-start <= sixHoursInMilliseconds { + if end-start < sixHoursInMilliseconds { // adjust the start time to nearest 1 hour start = start - (start % (time.Hour.Milliseconds() * 1)) tableName = constants.SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME - } else if end-start <= oneDayInMilliseconds { + } else if end-start < oneDayInMilliseconds { // adjust the start time to nearest 6 hours start = start - (start % (time.Hour.Milliseconds() * 6)) tableName = constants.SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME - } else { + } else if end-start < oneWeekInMilliseconds { // adjust the start time to nearest 1 day start = start - (start % (time.Hour.Milliseconds() * 24)) tableName = 
constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME + } else { + if constants.UseMetricsPreAggregation() { + // adjust the start time to nearest 1 week + start = start - (start % (time.Hour.Milliseconds() * 24 * 7)) + tableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME + } else { + // continue to use the 1 day table + start = start - (start % (time.Hour.Milliseconds() * 24)) + tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME + } } return start, end, tableName } +// start and end are in milliseconds +// we have three tables for samples +// 1. distributed_samples_v4 +// 2. distributed_samples_v4_agg_5m - for queries with time range above or equal to 1 day and less than 1 week +// 3. distributed_samples_v4_agg_30m - for queries with time range above or equal to 1 week +// if the `timeAggregation` is `count_distinct` we can't use the aggregated tables because they don't support it +func WhichSamplesTableToUse(start, end int64, mq *v3.BuilderQuery) string { + + // if we have a hint for the table, we need to use it + // the hint will be used to override the default table selection logic + if mq.MetricTableHints != nil { + if mq.MetricTableHints.SamplesTableName != "" { + return mq.MetricTableHints.SamplesTableName + } + } + + // we don't have any aggregated table for sketches (yet) + if mq.AggregateAttribute.Type == v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) { + return constants.SIGNOZ_EXP_HISTOGRAM_TABLENAME + } + + // continue to use the old table if pre-aggregation is disabled + if !constants.UseMetricsPreAggregation() { + return constants.SIGNOZ_SAMPLES_V4_TABLENAME + } + + // if the time aggregation is count_distinct, we need to use the distributed_samples_v4 table + // because the aggregated tables don't support count_distinct + if mq.TimeAggregation == v3.TimeAggregationCountDistinct { + return constants.SIGNOZ_SAMPLES_V4_TABLENAME + } + + if end-start < oneDayInMilliseconds { + // if we are dealing with delta metrics and 
interval is greater than 5 minutes, we can use the 5m aggregated table + // why would interval be greater than 5 minutes? + // we allow people to configure the step interval so we can make use of this + if mq.Temporality == v3.Delta && mq.TimeAggregation == v3.TimeAggregationIncrease && mq.StepInterval >= 300 && mq.StepInterval < 1800 { + return constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME + } else if mq.Temporality == v3.Delta && mq.TimeAggregation == v3.TimeAggregationIncrease && mq.StepInterval >= 1800 { + // if we are dealing with delta metrics and interval is greater than 30 minutes, we can use the 30m aggregated table + return constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME + } + return constants.SIGNOZ_SAMPLES_V4_TABLENAME + } else if end-start < oneWeekInMilliseconds { + return constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME + } else { + return constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME + } +} + +func AggregationColumnForSamplesTable(start, end int64, mq *v3.BuilderQuery) string { + tableName := WhichSamplesTableToUse(start, end, mq) + var aggregationColumn string + switch mq.Temporality { + case v3.Delta: + // for delta metrics, we only support `RATE`/`INCREASE` both of which are sum + // although it doesn't make sense to use anyLast, avg, min, max, count on delta metrics, + // we are keeping it here to make sure that query will not be invalid + switch tableName { + case constants.SIGNOZ_SAMPLES_V4_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(value)" + case v3.TimeAggregationSum: + aggregationColumn = "sum(value)" + case v3.TimeAggregationAvg: + aggregationColumn = "avg(value)" + case v3.TimeAggregationMin: + aggregationColumn = "min(value)" + case v3.TimeAggregationMax: + aggregationColumn = "max(value)" + case v3.TimeAggregationCount: + aggregationColumn = "count(value)" + case v3.TimeAggregationCountDistinct: + aggregationColumn = "countDistinct(value)" + case v3.TimeAggregationRate, 
v3.TimeAggregationIncrease: // only these two options give meaningful results + aggregationColumn = "sum(value)" + } + case constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME, constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(last)" + case v3.TimeAggregationSum: + aggregationColumn = "sum(sum)" + case v3.TimeAggregationAvg: + aggregationColumn = "sum(sum) / sum(count)" + case v3.TimeAggregationMin: + aggregationColumn = "min(min)" + case v3.TimeAggregationMax: + aggregationColumn = "max(max)" + case v3.TimeAggregationCount: + aggregationColumn = "sum(count)" + // count_distinct is not supported in aggregated tables + case v3.TimeAggregationRate, v3.TimeAggregationIncrease: // only these two options give meaningful results + aggregationColumn = "sum(sum)" + } + } + case v3.Cumulative: + // for cumulative metrics, we only support `RATE`/`INCREASE`. The max value in window is + // used to calculate the sum which is then divided by the window size to get the rate + switch tableName { + case constants.SIGNOZ_SAMPLES_V4_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(value)" + case v3.TimeAggregationSum: + aggregationColumn = "sum(value)" + case v3.TimeAggregationAvg: + aggregationColumn = "avg(value)" + case v3.TimeAggregationMin: + aggregationColumn = "min(value)" + case v3.TimeAggregationMax: + aggregationColumn = "max(value)" + case v3.TimeAggregationCount: + aggregationColumn = "count(value)" + case v3.TimeAggregationCountDistinct: + aggregationColumn = "countDistinct(value)" + case v3.TimeAggregationRate, v3.TimeAggregationIncrease: // only these two options give meaningful results + aggregationColumn = "max(value)" + } + case constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME, constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(last)" + 
case v3.TimeAggregationSum: + aggregationColumn = "sum(sum)" + case v3.TimeAggregationAvg: + aggregationColumn = "sum(sum) / sum(count)" + case v3.TimeAggregationMin: + aggregationColumn = "min(min)" + case v3.TimeAggregationMax: + aggregationColumn = "max(max)" + case v3.TimeAggregationCount: + aggregationColumn = "sum(count)" + // count_distinct is not supported in aggregated tables + case v3.TimeAggregationRate, v3.TimeAggregationIncrease: // only these two options give meaningful results + aggregationColumn = "max(max)" + } + } + case v3.Unspecified: + switch tableName { + case constants.SIGNOZ_SAMPLES_V4_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(value)" + case v3.TimeAggregationSum: + aggregationColumn = "sum(value)" + case v3.TimeAggregationAvg: + aggregationColumn = "avg(value)" + case v3.TimeAggregationMin: + aggregationColumn = "min(value)" + case v3.TimeAggregationMax: + aggregationColumn = "max(value)" + case v3.TimeAggregationCount: + aggregationColumn = "count(value)" + case v3.TimeAggregationCountDistinct: + aggregationColumn = "countDistinct(value)" + case v3.TimeAggregationRate, v3.TimeAggregationIncrease: // ideally, this should never happen + aggregationColumn = "sum(value)" + } + case constants.SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME, constants.SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME: + switch mq.TimeAggregation { + case v3.TimeAggregationAnyLast: + aggregationColumn = "anyLast(last)" + case v3.TimeAggregationSum: + aggregationColumn = "sum(sum)" + case v3.TimeAggregationAvg: + aggregationColumn = "sum(sum) / sum(count)" + case v3.TimeAggregationMin: + aggregationColumn = "min(min)" + case v3.TimeAggregationMax: + aggregationColumn = "max(max)" + case v3.TimeAggregationCount: + aggregationColumn = "sum(count)" + // count_distinct is not supported in aggregated tables + case v3.TimeAggregationRate, v3.TimeAggregationIncrease: // ideally, this should never happen + aggregationColumn = 
"sum(sum)" + } + } + } + return aggregationColumn +} + // PrepareTimeseriesFilterQuery builds the sub-query to be used for filtering timeseries based on the search criteria func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string, error) { var conditions []string @@ -47,7 +264,7 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string conditions = append(conditions, fmt.Sprintf("metric_name = %s", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key))) conditions = append(conditions, fmt.Sprintf("temporality = '%s'", mq.Temporality)) - start, end, tableName := which(start, end) + start, end, tableName := whichTSTableToUse(start, end, mq) conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end)) @@ -127,7 +344,7 @@ func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (stri conditions = append(conditions, fmt.Sprintf("metric_name = %s", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key))) conditions = append(conditions, fmt.Sprintf("temporality = '%s'", mq.Temporality)) - start, end, tableName := which(start, end) + start, end, tableName := whichTSTableToUse(start, end, mq) conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end)) diff --git a/pkg/query-service/app/metrics/v4/query_builder_pre_agg_test.go b/pkg/query-service/app/metrics/v4/query_builder_pre_agg_test.go new file mode 100644 index 0000000000..121324a8da --- /dev/null +++ b/pkg/query-service/app/metrics/v4/query_builder_pre_agg_test.go @@ -0,0 +1,402 @@ +package v4 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func TestPrepareMetricQueryCumulativeRatePreAgg(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "true") + testCases := []struct { + name string + builderQuery *v3.BuilderQuery + 
expectedQueryContains string + }{ + { + name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_calls_total", + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorContains, + Value: "frontend", + }, + }, + }, + GroupBy: []v3.AttributeKey{{ + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }}, + Expression: "A", + Disabled: false, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + }, + expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_calls_total' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 
1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + }, + { + name: "test time aggregation = rate, space aggregation = sum, temporality = cumulative, multiple group by", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_calls_total", + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + { + Key: "endpoint", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + Expression: "A", + Disabled: false, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + }, + expectedQueryContains: "SELECT service_name, endpoint, ts, sum(per_series_value) as value FROM (SELECT service_name, endpoint, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(endpoint) as endpoint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'endpoint') as endpoint, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_calls_total' AND 
temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, endpoint, ts ORDER BY service_name ASC, endpoint ASC, ts ASC", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM + query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) + assert.Nil(t, err) + assert.Contains(t, query, testCase.expectedQueryContains) + }) + } +} + +func TestPrepareMetricQueryDeltaRatePreAgg(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "true") + testCases := []struct { + name string + builderQuery *v3.BuilderQuery + expectedQueryContains string + }{ + { + name: "test time aggregation = rate, space aggregation = sum, temporality = delta, no group by", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_calls_total", + }, + Temporality: v3.Delta, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + Expression: "A", + Disabled: false, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + }, + expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_calls_total' AND temporality = 'Delta' AND 
unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY ts ORDER BY ts ASC", + }, + { + name: "test time aggregation = rate, space aggregation = sum, temporality = delta, group by service_name", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_calls_total", + }, + Temporality: v3.Delta, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{{ + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }}, + Expression: "A", + Disabled: false, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationSum, + }, + expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_calls_total' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM + query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) + assert.Nil(t, err) + assert.Contains(t, query, 
testCase.expectedQueryContains) + }) + } +} + +func TestPrepreMetricQueryCumulativeQuantilePreAgg(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "true") + testCases := []struct { + name string + builderQuery *v3.BuilderQuery + expectedQueryContains string + }{ + { + name: "test temporality = cumulative, quantile = 0.99", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_latency_bucket", + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorContains, + Value: "frontend", + }, + }, + }, + GroupBy: []v3.AttributeKey{{ + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }}, + Expression: "A", + Disabled: false, + SpaceAggregation: v3.SpaceAggregationPercentile99, + }, + expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, 
fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + }, + { + name: "test temporality = cumulative, quantile = 0.99 without group by", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_latency_bucket", + }, + Temporality: v3.Cumulative, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorContains, + Value: "frontend", + }, + }, + }, + Expression: "A", + Disabled: false, + SpaceAggregation: v3.SpaceAggregationPercentile99, + }, + expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 
1000)), INTERVAL 60 SECOND) as ts, max(max) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM + query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) + assert.Nil(t, err) + assert.Contains(t, query, testCase.expectedQueryContains) + }) + } +} + +func TestPrepreMetricQueryDeltaQuantilePreAgg(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "true") + testCases := []struct { + name string + builderQuery *v3.BuilderQuery + expectedQueryContains string + }{ + { + name: "test temporality = delta, quantile = 0.99 group by service_name", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_latency_bucket", + }, + Temporality: v3.Delta, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: 
v3.FilterOperatorContains, + Value: "frontend", + }, + }, + }, + GroupBy: []v3.AttributeKey{{ + Key: "service_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }}, + Expression: "A", + Disabled: false, + SpaceAggregation: v3.SpaceAggregationPercentile99, + }, + expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + }, + { + name: "test temporality = delta, quantile = 0.99 no group by", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz_latency_bucket", + }, + Temporality: v3.Delta, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service_name", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorContains, + Value: "frontend", + }, + }, + }, + Expression: "A", + Disabled: false, + SpaceAggregation: v3.SpaceAggregationPercentile99, + }, + expectedQueryContains: "SELECT ts, 
histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM + query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) + assert.Nil(t, err) + assert.Contains(t, query, testCase.expectedQueryContains) + }) + } +} + +func TestPrepareMetricQueryGaugePreAgg(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "true") + testCases := []struct { + name string + builderQuery *v3.BuilderQuery + expectedQueryContains string + }{ + { + name: "test gauge query with no group by", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_usage", + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + Expression: "A", + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM 
(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC", + }, + { + name: "test gauge query with group by host_name", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: "system_cpu_usage", + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }}, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Expression: "A", + Disabled: false, + }, + expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 
1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM + query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) + assert.Nil(t, err) + assert.Contains(t, query, testCase.expectedQueryContains) + }) + } +} diff --git a/pkg/query-service/app/metrics/v4/query_builder_test.go b/pkg/query-service/app/metrics/v4/query_builder_test.go index 08d0f087ff..2fc83a9e1f 100644 --- a/pkg/query-service/app/metrics/v4/query_builder_test.go +++ b/pkg/query-service/app/metrics/v4/query_builder_test.go @@ -154,6 +154,7 @@ func TestPrepareTimeseriesFilterQuery(t *testing.T) { } func TestPrepareMetricQueryCumulativeRate(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "false") testCases := []struct { name string builderQuery *v3.BuilderQuery @@ -242,6 +243,7 @@ func TestPrepareMetricQueryCumulativeRate(t *testing.T) { } func TestPrepareMetricQueryDeltaRate(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "false") testCases := []struct { name string builderQuery *v3.BuilderQuery @@ -266,7 +268,7 @@ func TestPrepareMetricQueryDeltaRate(t *testing.T) { TimeAggregation: v3.TimeAggregationRate, SpaceAggregation: v3.SpaceAggregationSum, }, - expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_calls_total' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 
'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY ts ORDER BY ts ASC", + expectedQueryContains: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_calls_total' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY ts ORDER BY ts ASC", }, { name: "test time aggregation = rate, space aggregation = sum, temporality = delta, group by service_name", @@ -292,12 +294,14 @@ func TestPrepareMetricQueryDeltaRate(t *testing.T) { TimeAggregation: v3.TimeAggregationRate, SpaceAggregation: v3.SpaceAggregationSum, }, - expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_calls_total' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 
'signoz_calls_total' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_calls_total' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) assert.Nil(t, err) assert.Contains(t, query, testCase.expectedQueryContains) @@ -306,6 +310,7 @@ func TestPrepareMetricQueryDeltaRate(t *testing.T) { } func TestPrepreMetricQueryCumulativeQuantile(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "false") testCases := []struct { name string builderQuery *v3.BuilderQuery @@ -344,7 +349,7 @@ func TestPrepreMetricQueryCumulativeQuantile(t *testing.T) { Disabled: false, SpaceAggregation: v3.SpaceAggregationPercentile99, }, - expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 
'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, ts, sum(per_series_value) as value FROM (SELECT service_name, le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING 
fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, { name: "test temporality = cumulative, quantile = 0.99 without group by", @@ -374,12 +379,14 @@ func TestPrepreMetricQueryCumulativeQuantile(t *testing.T) { Disabled: false, SpaceAggregation: v3.SpaceAggregationPercentile99, }, - expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE 
isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", + expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, ts, sum(per_series_value) as value FROM (SELECT le, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(le) as le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) assert.Nil(t, err) assert.Contains(t, query, testCase.expectedQueryContains) @@ -388,6 +395,7 @@ func 
TestPrepreMetricQueryCumulativeQuantile(t *testing.T) { } func TestPrepreMetricQueryDeltaQuantile(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "false") testCases := []struct { name string builderQuery *v3.BuilderQuery @@ -426,7 +434,7 @@ func TestPrepreMetricQueryDeltaQuantile(t *testing.T) { Disabled: false, SpaceAggregation: v3.SpaceAggregationPercentile99, }, - expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", + expectedQueryContains: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 
1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY service_name, le, ts ORDER BY service_name ASC, le ASC, ts ASC) GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, { name: "test temporality = delta, quantile = 0.99 no group by", @@ -456,12 +464,14 @@ func TestPrepreMetricQueryDeltaQuantile(t *testing.T) { Disabled: false, SpaceAggregation: v3.SpaceAggregationPercentile99, }, - expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", + expectedQueryContains: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND 
like(JSONExtractString(labels, 'service_name'), '%frontend%')) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY le, ts ORDER BY le ASC, ts ASC) GROUP BY ts ORDER BY ts ASC", }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) assert.Nil(t, err) assert.Contains(t, query, testCase.expectedQueryContains) @@ -470,6 +480,7 @@ func TestPrepreMetricQueryDeltaQuantile(t *testing.T) { } func TestPrepareMetricQueryGauge(t *testing.T) { + t.Setenv("USE_METRICS_PRE_AGGREGATION", "false") testCases := []struct { name string builderQuery *v3.BuilderQuery @@ -494,7 +505,7 @@ func TestPrepareMetricQueryGauge(t *testing.T) { SpaceAggregation: v3.SpaceAggregationSum, Disabled: false, }, - expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC", + expectedQueryContains: "SELECT ts, sum(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM 
signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC", }, { name: "test gauge query with group by host_name", @@ -520,12 +531,14 @@ func TestPrepareMetricQueryGauge(t *testing.T) { Expression: "A", Disabled: false, }, - expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC", + expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system_cpu_usage' AND temporality = 
'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC", }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { + // 1650991982000 - April 26, 2022 10:23:02 PM + // 1651078382000 - April 27, 2022 10:23:02 PM query, err := PrepareMetricQuery(1650991982000, 1651078382000, v3.QueryTypeBuilder, v3.PanelTypeGraph, testCase.builderQuery, metricsV3.Options{}) assert.Nil(t, err) assert.Contains(t, query, testCase.expectedQueryContains) diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 9ae242c19f..fcf6944234 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -22,6 +22,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/constants" baseconstants "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" @@ -724,6 +725,42 @@ func parseInviteRequest(r *http.Request) (*model.InviteRequest, error) { return &req, nil } +func isValidRole(role string) bool { + switch role { + case constants.AdminGroup, constants.EditorGroup, constants.ViewerGroup: + return true + } + return false +} + +func parseInviteUsersRequest(r *http.Request) (*model.BulkInviteRequest, error) { + var req model.BulkInviteRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, err + } + + // Validate that the request contains users + if len(req.Users) == 0 { + return nil, fmt.Errorf("no users provided for invitation") + } + + // Trim spaces and 
validate each user + for i := range req.Users { + req.Users[i].Email = strings.TrimSpace(req.Users[i].Email) + if req.Users[i].Email == "" { + return nil, fmt.Errorf("email is required for each user") + } + if req.Users[i].FrontendBaseUrl == "" { + return nil, fmt.Errorf("frontendBaseUrl is required for each user") + } + if !isValidRole(req.Users[i].Role) { + return nil, fmt.Errorf("invalid role for user: %s", req.Users[i].Email) + } + } + + return &req, nil +} + func parseSetApdexScoreRequest(r *http.Request) (*model.ApdexSettings, error) { var req model.ApdexSettings if err := json.NewDecoder(r.Body).Decode(&req); err != nil { @@ -1094,25 +1131,18 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE query.StepInterval = minStep } - // Remove the time shift function from the list of functions and set the shift by value - var timeShiftBy int64 - if len(query.Functions) > 0 { - for idx := range query.Functions { - function := &query.Functions[idx] - if function.Name == v3.FunctionNameTimeShift { - // move the function to the beginning of the list - // so any other function can use the shifted time - var fns []v3.Function - fns = append(fns, *function) - fns = append(fns, query.Functions[:idx]...) - fns = append(fns, query.Functions[idx+1:]...) 
- query.Functions = fns - timeShiftBy = int64(function.Args[0].(float64)) - break - } + if query.DataSource == v3.DataSourceMetrics && baseconstants.UseMetricsPreAggregation() { + // if the time range is greater than 1 day, and less than 1 week set the step interval to be multiple of 5 minutes + // if the time range is greater than 1 week, set the step interval to be multiple of 30 mins + start, end := queryRangeParams.Start, queryRangeParams.End + if end-start >= 24*time.Hour.Milliseconds() && end-start < 7*24*time.Hour.Milliseconds() { + query.StepInterval = int64(math.Round(float64(query.StepInterval)/300)) * 300 + } else if end-start >= 7*24*time.Hour.Milliseconds() { + query.StepInterval = int64(math.Round(float64(query.StepInterval)/1800)) * 1800 } } - query.ShiftBy = timeShiftBy + + query.SetShiftByFromFunc() if query.Filters == nil || len(query.Filters.Items) == 0 { continue diff --git a/pkg/query-service/app/parser_test.go b/pkg/query-service/app/parser_test.go index 9d58a190f7..ed290d1355 100644 --- a/pkg/query-service/app/parser_test.go +++ b/pkg/query-service/app/parser_test.go @@ -297,6 +297,8 @@ func TestParseQueryRangeParamsCompositeQuery(t *testing.T) { compositeQuery v3.CompositeQuery expectErr bool errMsg string + hasShiftBy bool + shiftBy int64 }{ { desc: "no query in request", @@ -496,6 +498,56 @@ func TestParseQueryRangeParamsCompositeQuery(t *testing.T) { expectErr: true, errMsg: "builder query A is invalid: group by is invalid", }, + { + desc: "builder query with shift by", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: "logs", + AggregateOperator: "sum", + AggregateAttribute: v3.AttributeKey{Key: "attribute"}, + GroupBy: []v3.AttributeKey{{Key: "group_key"}}, + Expression: "A", + Functions: []v3.Function{ + { + Name: v3.FunctionNameTimeShift, + Args: []interface{}{float64(10)}, + }, + }, + }, + }, + 
}, + hasShiftBy: true, + shiftBy: 10, + }, + { + desc: "builder query with shift by as string", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: "logs", + AggregateOperator: "sum", + AggregateAttribute: v3.AttributeKey{Key: "attribute"}, + GroupBy: []v3.AttributeKey{{Key: "group_key"}}, + Expression: "A", + Functions: []v3.Function{ + { + Name: v3.FunctionNameTimeShift, + Args: []interface{}{"3600"}, + }, + }, + }, + }, + }, + hasShiftBy: true, + shiftBy: 3600, + }, } for _, tc := range reqCases { @@ -514,13 +566,16 @@ func TestParseQueryRangeParamsCompositeQuery(t *testing.T) { require.NoError(t, err) req := httptest.NewRequest(http.MethodPost, "/api/v3/query_range", body) - _, apiErr := ParseQueryRangeParams(req) + params, apiErr := ParseQueryRangeParams(req) if tc.expectErr { require.Error(t, apiErr) require.Contains(t, apiErr.Error(), tc.errMsg) } else { require.Nil(t, apiErr) } + if tc.hasShiftBy { + require.Equal(t, tc.shiftBy, params.CompositeQuery.BuilderQueries["A"].ShiftBy) + } }) } } @@ -1416,12 +1471,6 @@ func TestParseQueryRangeParamsStepIntervalAdjustment(t *testing.T) { end: time.Now().UnixMilli(), step: 1, // gets updated }, - { - desc: "1 week and 1 minute step", - start: time.Now().Add(-7 * 24 * time.Hour).UnixMilli(), - end: time.Now().UnixMilli(), - step: 60, // gets updated - }, { desc: "1 day and 1 hour step", start: time.Now().Add(-24 * time.Hour).UnixMilli(), @@ -1446,12 +1495,6 @@ func TestParseQueryRangeParamsStepIntervalAdjustment(t *testing.T) { end: time.Now().UnixMilli(), step: 300, // no update }, - { - desc: "1 week and 10 minutes step", - start: time.Now().Add(-7 * 24 * time.Hour).UnixMilli(), - end: time.Now().UnixMilli(), - step: 600, // get updated - }, { desc: "1 week and 45 minutes step", start: time.Now().Add(-7 * 24 * time.Hour).UnixMilli(), diff --git 
a/pkg/query-service/app/preferences/map.go b/pkg/query-service/app/preferences/map.go index 219fb6c595..d7cf76aec8 100644 --- a/pkg/query-service/app/preferences/map.go +++ b/pkg/query-service/app/preferences/map.go @@ -1,37 +1,14 @@ package preferences var preferenceMap = map[string]Preference{ - "DASHBOARDS_LIST_VIEW": { - Key: "DASHBOARDS_LIST_VIEW", - Name: "Dashboards List View", - Description: "", - ValueType: "string", - DefaultValue: "grid", - AllowedValues: []interface{}{"grid", "list"}, - IsDiscreteValues: true, - AllowedScopes: []string{"user", "org"}, - }, - "LOGS_TOOLBAR_COLLAPSED": { - Key: "LOGS_TOOLBAR_COLLAPSED", - Name: "Logs toolbar", - Description: "", + "ORG_ONBOARDING": { + Key: "ORG_ONBOARDING", + Name: "Organisation Onboarding", + Description: "Organisation Onboarding", ValueType: "boolean", DefaultValue: false, AllowedValues: []interface{}{true, false}, IsDiscreteValues: true, - AllowedScopes: []string{"user", "org"}, - }, - "MAX_DEPTH_ALLOWED": { - Key: "MAX_DEPTH_ALLOWED", - Name: "Max Depth Allowed", - Description: "", - ValueType: "integer", - DefaultValue: 10, - IsDiscreteValues: false, - Range: Range{ - Min: 0, - Max: 100, - }, - AllowedScopes: []string{"user", "org"}, + AllowedScopes: []string{"org"}, }, } diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go index 00b287ce8e..798eb8f0b7 100644 --- a/pkg/query-service/app/querier/helper.go +++ b/pkg/query-service/app/querier/helper.go @@ -45,7 +45,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -56,7 +56,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - 
v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -71,7 +71,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - v3.LogQBOptions{PreferRPM: preferRPM}, + v3.QBOptions{PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -167,7 +167,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} @@ -178,7 +178,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} @@ -191,7 +191,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{PreferRPM: preferRPM}, + v3.QBOptions{PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go index bb41bc8c36..09d6cc2309 100644 --- a/pkg/query-service/app/querier/v2/helper.go +++ b/pkg/query-service/app/querier/v2/helper.go @@ -44,7 +44,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + 
v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -55,7 +55,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -70,7 +70,7 @@ func prepareLogsQuery(_ context.Context, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - v3.LogQBOptions{PreferRPM: preferRPM}, + v3.QBOptions{PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -167,7 +167,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} @@ -178,7 +178,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} @@ -191,7 +191,7 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - tracesV3.Options{PreferRPM: preferRPM}, + v3.QBOptions{PreferRPM: preferRPM}, ) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index f8316d6f6c..311d213656 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ 
b/pkg/query-service/app/querier/v2/querier.go @@ -159,6 +159,8 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa cacheKeys := q.keyGenerator.GenerateKeys(params) + now := time.Now() + ch := make(chan channelResult, len(params.CompositeQuery.BuilderQueries)) var wg sync.WaitGroup @@ -171,6 +173,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa wg.Wait() close(ch) + zap.L().Info("time taken to run builder queries", zap.Duration("multiQueryDuration", time.Since(now)), zap.Int("num_queries", len(params.CompositeQuery.BuilderQueries))) results := make([]*v3.Result, 0) errQueriesByName := make(map[string]error) @@ -378,7 +381,15 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan } } - queries, err := q.builder.PrepareQueries(params) + queries := make(map[string]string) + var err error + if params.CompositeQuery.QueryType == v3.QueryTypeBuilder { + queries, err = q.builder.PrepareQueries(params) + } else if params.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL { + for name, chQuery := range params.CompositeQuery.ClickHouseQueries { + queries[name] = chQuery.Query + } + } if err != nil { return nil, nil, err @@ -449,7 +460,11 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) case v3.QueryTypePromQL: results, errQueriesByName, err = q.runPromQueries(ctx, params) case v3.QueryTypeClickHouseSQL: - results, errQueriesByName, err = q.runClickHouseQueries(ctx, params) + if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { + results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) + } else { + results, errQueriesByName, err = q.runClickHouseQueries(ctx, params) + } default: err = fmt.Errorf("invalid query type") } diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go index 879c2d5153..de8db2057a 100644 
--- a/pkg/query-service/app/queryBuilder/query_builder.go +++ b/pkg/query-service/app/queryBuilder/query_builder.go @@ -6,7 +6,6 @@ import ( "github.com/SigNoz/govaluate" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" - tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/interfaces" @@ -42,8 +41,8 @@ var SupportedFunctions = []string{ var EvalFuncs = map[string]govaluate.ExpressionFunction{} -type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options tracesV3.Options) (string, error) -type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) +type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.QBOptions) (string, error) +type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.QBOptions) (string, error) type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error) type QueryBuilder struct { @@ -161,7 +160,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str } for queryName, query := range compositeQuery.BuilderQueries { if query.Expression == queryName { - queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{IsLivetailQuery: true}) + queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{IsLivetailQuery: true}) if err != nil { return "", err } @@ -195,12 +194,12 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) 
(map[strin // for ts query with group by and limit form two queries if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 { limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query, - tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, - query, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } @@ -208,7 +207,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin queries[queryName] = query } else { queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, - query, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) + query, v3.QBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) if err != nil { return nil, err } @@ -217,18 +216,18 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin case v3.DataSourceLogs: // for ts query with limit replace it as it is already formed if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 { - limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { 
return nil, err } - placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } query := fmt.Sprintf(placeholderQuery, limitQuery) queries[queryName] = query } else { - queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) + queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) if err != nil { return nil, err } diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index 52af7af780..f7538f0efb 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -228,8 +228,8 @@ func TestDeltaQueryBuilder(t *testing.T) { { name: "TestQueryWithName - Request rate", query: &v3.QueryRangeParamsV3{ - Start: 1650991982000, - End: 1651078382000, + Start: 1650991982000, // 2022-04-25 10:53:02 + End: 1651078382000, // 2022-04-26 10:53:02 CompositeQuery: &v3.CompositeQuery{ QueryType: v3.QueryTypeBuilder, PanelType: v3.PanelTypeGraph, @@ -261,13 +261,13 @@ func TestDeltaQueryBuilder(t *testing.T) { }, }, queryToTest: "A", - expected: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM 
signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts", + expected: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts", }, { name: "TestQueryWithExpression - Error rate", query: &v3.QueryRangeParamsV3{ - Start: 1650991982000, - End: 1651078382000, + Start: 1650991982000, // 2022-04-25 10:53:02 + End: 1651078382000, // 2022-04-26 10:53:02 CompositeQuery: &v3.CompositeQuery{ QueryType: v3.QueryTypeBuilder, PanelType: v3.PanelTypeGraph, @@ -331,7 +331,7 @@ func TestDeltaQueryBuilder(t *testing.T) { }, }, queryToTest: "C", - expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs 
WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`", + expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND 
JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`", }, { name: "TestQuery - Quantile", @@ -359,7 +359,7 @@ func TestDeltaQueryBuilder(t *testing.T) { }, }, queryToTest: "A", - expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts 
ORDER BY service_name ASC, ts", + expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", }, } diff --git a/pkg/query-service/app/logs/v4/resource_query_builder.go b/pkg/query-service/app/resource/resource_query_builder.go similarity index 95% rename from pkg/query-service/app/logs/v4/resource_query_builder.go rename to pkg/query-service/app/resource/resource_query_builder.go index 3f7f2682cb..bbf9310386 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder.go +++ b/pkg/query-service/app/resource/resource_query_builder.go @@ -1,4 +1,4 @@ -package v4 +package resource import ( "fmt" @@ -211,7 +211,7 @@ func buildResourceFiltersFromAggregateAttribute(aggregateAttribute v3.AttributeK return "" } -func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool) (string, error) { +func BuildResourceSubQuery(dbName, tableName string, bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool) (string, error) { // BUILD THE WHERE CLAUSE var conditions []string @@ -242,10 +242,10 
@@ func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, group // BUILD THE FINAL QUERY var query string if isLiveTail { - query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE ", DISTRIBUTED_LOGS_V2_RESOURCE) + query = fmt.Sprintf("SELECT fingerprint FROM %s.%s WHERE ", dbName, tableName) query = "(" + query + conditionStr } else { - query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd) + query = fmt.Sprintf("SELECT fingerprint FROM %s.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", dbName, tableName, bucketStart, bucketEnd) query = "(" + query + conditionStr + ")" } diff --git a/pkg/query-service/app/logs/v4/resource_query_builder_test.go b/pkg/query-service/app/resource/resource_query_builder_test.go similarity index 98% rename from pkg/query-service/app/logs/v4/resource_query_builder_test.go rename to pkg/query-service/app/resource/resource_query_builder_test.go index 7bea0bc291..f390ff9c6e 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder_test.go +++ b/pkg/query-service/app/resource/resource_query_builder_test.go @@ -1,4 +1,4 @@ -package v4 +package resource import ( "reflect" @@ -497,7 +497,7 @@ func Test_buildResourceSubQuery(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := buildResourceSubQuery(tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false) + got, err := BuildResourceSubQuery("signoz_logs", "distributed_logs_v2_resource", tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false) if (err != nil) != tt.wantErr { t.Errorf("buildResourceSubQuery() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 
f16597aa31..dc6ac21e15 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -308,6 +308,7 @@ func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) { api.RegisterLogsRoutes(r, am) api.RegisterIntegrationRoutes(r, am) api.RegisterQueryRangeV3Routes(r, am) + api.RegisterInfraMetricsRoutes(r, am) api.RegisterWebSocketPaths(r, am) api.RegisterQueryRangeV4Routes(r, am) api.RegisterMessagingQueuesRoutes(r, am) diff --git a/pkg/query-service/app/traces/v3/query_builder.go b/pkg/query-service/app/traces/v3/query_builder.go index c66b95ea56..ce9c54573c 100644 --- a/pkg/query-service/app/traces/v3/query_builder.go +++ b/pkg/query-service/app/traces/v3/query_builder.go @@ -10,11 +10,6 @@ import ( "go.signoz.io/signoz/pkg/query-service/utils" ) -type Options struct { - GraphLimitQtype string - PreferRPM bool -} - var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{ v3.AggregateOperatorP05: 0.05, v3.AggregateOperatorP10: 0.10, @@ -238,7 +233,7 @@ func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) { return "", nil } -func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, panelType v3.PanelType, options Options) (string, error) { +func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, panelType v3.PanelType, options v3.QBOptions) (string, error) { filterSubQuery, err := buildTracesFilterQuery(mq.Filters) if err != nil { @@ -358,12 +353,13 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan case v3.AggregateOperatorNoOp: var query string if panelType == v3.PanelTypeTrace { - withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, spanIndexTableTimeFilter, filterSubQuery) + withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, 
constants.SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME, spanIndexTableTimeFilter, filterSubQuery) withSubQuery = addLimitToQuery(withSubQuery, mq.Limit) if mq.Offset != 0 { withSubQuery = addOffsetToQuery(withSubQuery, mq.Offset) } - query = withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + // query = withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, spanIndexTableTimeFilter) } else if panelType == v3.PanelTypeList { if len(mq.SelectColumns) == 0 { return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType) @@ -503,7 +499,7 @@ func addOffsetToQuery(query string, offset uint64) string { // PrepareTracesQuery returns the query string for traces // start and end are in epoch millisecond // step is in seconds -func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) { +func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) { // adjust the start and end time to the step interval start = start - (start % (mq.StepInterval * 1000)) end = end - (end % (mq.StepInterval * 1000)) diff --git a/pkg/query-service/app/traces/v3/query_builder_test.go b/pkg/query-service/app/traces/v3/query_builder_test.go index a1e7635a77..23b73a134d 100644 --- a/pkg/query-service/app/traces/v3/query_builder_test.go +++ b/pkg/query-service/app/traces/v3/query_builder_test.go @@ -490,7 
+490,7 @@ var testBuildTracesQueryData = []struct { AggregateOperator v3.AggregateOperator ExpectedQuery string PanelType v3.PanelType - Options Options + Options v3.QBOptions }{ { Name: "Test aggregate count on fixed column of float64 type", @@ -524,7 +524,7 @@ var testBuildTracesQueryData = []struct { " signoz_traces.distributed_signoz_index_v2 where (timestamp >= '1680066360726210000' AND timestamp <=" + " '1680066458000000000') group by ts order by value DESC", PanelType: v3.PanelTypeGraph, - Options: Options{GraphLimitQtype: "", PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: "", PreferRPM: true}, }, { Name: "Test aggregate count on fixed column of float64 type with filter", @@ -867,7 +867,7 @@ var testBuildTracesQueryData = []struct { "where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000')" + " AND has(stringTagMap, 'method') group by `method`,ts order by `method` ASC", PanelType: v3.PanelTypeGraph, - Options: Options{GraphLimitQtype: "", + Options: v3.QBOptions{GraphLimitQtype: "", PreferRPM: false, }, }, @@ -892,7 +892,7 @@ var testBuildTracesQueryData = []struct { "AND has(stringTagMap, 'method') group by `method`,ts " + "order by `method` ASC", PanelType: v3.PanelTypeGraph, - Options: Options{GraphLimitQtype: "", PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: "", PreferRPM: true}, }, { Name: "Test aggregate RateSum without fixed column", @@ -916,7 +916,7 @@ var testBuildTracesQueryData = []struct { "AND has(stringTagMap, 'method') group by `method`,ts " + "order by `method` ASC", PanelType: v3.PanelTypeGraph, - Options: Options{GraphLimitQtype: "", PreferRPM: true}, + Options: v3.QBOptions{GraphLimitQtype: "", PreferRPM: true}, }, { Name: "Test aggregate with having clause", @@ -1162,13 +1162,14 @@ var testBuildTracesQueryData = []struct { }, }, }, - ExpectedQuery: "WITH subQuery AS (SELECT distinct on (traceID) traceID, durationNano, serviceName," + - " name FROM signoz_traces.distributed_signoz_index_v2 
WHERE parentSpanID = '' AND (timestamp >= '1680066360726210000' AND " + - "timestamp <= '1680066458000000000') AND stringTagMap['method'] = 'GET' ORDER BY durationNano DESC LIMIT 100)" + - " SELECT subQuery.serviceName, subQuery.name, count() AS span_count, subQuery.durationNano, traceID" + - " FROM signoz_traces.distributed_signoz_index_v2 GLOBAL INNER JOIN subQuery ON distributed_signoz_index_v2.traceID" + - " = subQuery.traceID GROUP BY traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName " + - "ORDER BY subQuery.durationNano desc;", + ExpectedQuery: "SELECT subQuery.serviceName, subQuery.name, count() AS span_count, subQuery.durationNano, subQuery.traceID" + + " AS traceID FROM signoz_traces.distributed_signoz_index_v2 INNER JOIN" + + " ( SELECT * FROM (SELECT traceID, durationNano, serviceName, name " + + "FROM signoz_traces.signoz_index_v2 WHERE parentSpanID = '' AND (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "AND stringTagMap['method'] = 'GET' ORDER BY durationNano DESC LIMIT 1 BY traceID LIMIT 100)" + + " AS inner_subquery ) AS subQuery " + + "ON signoz_traces.distributed_signoz_index_v2.traceID = subQuery.traceID WHERE (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "GROUP BY subQuery.traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY subQuery.durationNano desc LIMIT 1 BY subQuery.traceID;", PanelType: v3.PanelTypeTrace, }, } @@ -1206,7 +1207,7 @@ var testPrepTracesQueryData = []struct { BuilderQuery *v3.BuilderQuery ExpectedQuery string Keys map[string]v3.AttributeKey - Options Options + Options v3.QBOptions }{ { Name: "Test TS with limit- first", @@ -1231,7 +1232,7 @@ var testPrepTracesQueryData = []struct { " where (timestamp >= '1680066360000000000' AND timestamp <= '1680066420000000000') AND" + " stringTagMap['method'] = 'GET' AND has(stringTagMap, 'method') group by `method` order by value DESC) LIMIT 10", Keys: 
map[string]v3.AttributeKey{"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.FirstQueryGraphLimit, }, }, @@ -1260,7 +1261,7 @@ var testPrepTracesQueryData = []struct { " AND timestamp <= '1680066420000000000') AND stringTagMap['method'] = 'GET' AND" + " has(stringTagMap, 'method') group by `method` order by value ASC) LIMIT 10", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.FirstQueryGraphLimit, }, }, @@ -1286,7 +1287,7 @@ var testPrepTracesQueryData = []struct { " AND timestamp <= '1680066420000000000') " + "group by `serviceName` order by `serviceName` ASC) LIMIT 10", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.FirstQueryGraphLimit, }, }, @@ -1316,7 +1317,7 @@ var testPrepTracesQueryData = []struct { " AND timestamp <= '1680066420000000000') AND has(stringTagMap, 'http.method') " + "group by `serviceName`,`http.method` order by `serviceName` ASC,value ASC) LIMIT 10", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.FirstQueryGraphLimit, }, }, @@ -1344,7 +1345,7 @@ var testPrepTracesQueryData = []struct { " AND timestamp <= '1680066420000000000') AND stringTagMap['method'] = 'GET' AND" + " has(stringTagMap, 'method') AND (`method`) GLOBAL IN (%s) group by `method`,ts order by value DESC", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.SecondQueryGraphLimit, }, }, @@ -1372,7 +1373,7 @@ var testPrepTracesQueryData = []struct { " as value from signoz_traces.distributed_signoz_index_v2 where (timestamp >= '1680066360000000000'" + " AND timestamp <= '1680066420000000000') AND stringTagMap['method'] = 'GET' AND" + " has(stringTagMap, 'method') AND (`method`) GLOBAL IN (%s) group by `method`,ts 
order by `method` ASC", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.SecondQueryGraphLimit, }, }, @@ -1407,7 +1408,7 @@ var testPrepTracesQueryData = []struct { "AND (`method`,`name`) GLOBAL IN (%s) group by `method`,`name`,ts " + "order by `method` ASC,`name` ASC", Keys: map[string]v3.AttributeKey{}, - Options: Options{ + Options: v3.QBOptions{ GraphLimitQtype: constants.SecondQueryGraphLimit, }, }, diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go index 16eea6a5f3..658c1ff4c4 100644 --- a/pkg/query-service/auth/auth.go +++ b/pkg/query-service/auth/auth.go @@ -61,6 +61,16 @@ func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteRespons return nil, errors.New("User already exists with the same email") } + // Check if an invite already exists + invite, apiErr := dao.DB().GetInviteFromEmail(ctx, req.Email) + if apiErr != nil { + return nil, errors.Wrap(apiErr.Err, "Failed to check existing invite") + } + + if invite != nil { + return nil, errors.New("An invite already exists for this email") + } + if err := validateInviteRequest(req); err != nil { return nil, errors.Wrap(err, "invalid invite request") } @@ -79,6 +89,113 @@ func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteRespons if apiErr != nil { return nil, errors.Wrap(err, "failed to query admin user from the DB") } + + inv := &model.InvitationObject{ + Name: req.Name, + Email: req.Email, + Token: token, + CreatedAt: time.Now().Unix(), + Role: req.Role, + OrgId: au.OrgId, + } + + if err := dao.DB().CreateInviteEntry(ctx, inv); err != nil { + return nil, errors.Wrap(err.Err, "failed to write to DB") + } + + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_USER_INVITATION_SENT, map[string]interface{}{ + "invited user email": req.Email, + }, au.Email, true, false) + + // send email if SMTP is enabled + if os.Getenv("SMTP_ENABLED") == "true" && req.FrontendBaseUrl != "" { + 
inviteEmail(req, au, token) + } + + return &model.InviteResponse{Email: inv.Email, InviteToken: inv.Token}, nil +} + +func InviteUsers(ctx context.Context, req *model.BulkInviteRequest) (*model.BulkInviteResponse, error) { + response := &model.BulkInviteResponse{ + Status: "success", + Summary: model.InviteSummary{TotalInvites: len(req.Users)}, + SuccessfulInvites: []model.SuccessfulInvite{}, + FailedInvites: []model.FailedInvite{}, + } + + jwtAdmin, ok := ExtractJwtFromContext(ctx) + if !ok { + return nil, errors.New("failed to extract admin jwt token") + } + + adminUser, err := validateUser(jwtAdmin) + if err != nil { + return nil, errors.Wrap(err, "failed to validate admin jwt token") + } + + au, apiErr := dao.DB().GetUser(ctx, adminUser.Id) + if apiErr != nil { + return nil, errors.Wrap(apiErr.Err, "failed to query admin user from the DB") + } + + for _, inviteReq := range req.Users { + inviteResp, err := inviteUser(ctx, &inviteReq, au) + if err != nil { + response.FailedInvites = append(response.FailedInvites, model.FailedInvite{ + Email: inviteReq.Email, + Error: err.Error(), + }) + response.Summary.FailedInvites++ + } else { + response.SuccessfulInvites = append(response.SuccessfulInvites, model.SuccessfulInvite{ + Email: inviteResp.Email, + InviteLink: fmt.Sprintf("%s/signup?token=%s", inviteReq.FrontendBaseUrl, inviteResp.InviteToken), + Status: "sent", + }) + response.Summary.SuccessfulInvites++ + } + } + + // Update the status based on the results + if response.Summary.FailedInvites == response.Summary.TotalInvites { + response.Status = "failure" + } else if response.Summary.FailedInvites > 0 { + response.Status = "partial_success" + } + + return response, nil +} + +// Helper function to handle individual invites +func inviteUser(ctx context.Context, req *model.InviteRequest, au *model.UserPayload) (*model.InviteResponse, error) { + token, err := utils.RandomHex(opaqueTokenSize) + if err != nil { + return nil, errors.Wrap(err, "failed to generate invite 
token") + } + + user, apiErr := dao.DB().GetUserByEmail(ctx, req.Email) + if apiErr != nil { + return nil, errors.Wrap(apiErr.Err, "Failed to check already existing user") + } + + if user != nil { + return nil, errors.New("User already exists with the same email") + } + + // Check if an invite already exists + invite, apiErr := dao.DB().GetInviteFromEmail(ctx, req.Email) + if apiErr != nil { + return nil, errors.Wrap(apiErr.Err, "Failed to check existing invite") + } + + if invite != nil { + return nil, errors.New("An invite already exists for this email") + } + + if err := validateInviteRequest(req); err != nil { + return nil, errors.Wrap(err, "invalid invite request") + } + inv := &model.InvitationObject{ Name: req.Name, Email: req.Email, diff --git a/pkg/query-service/cache/redis/redis.go b/pkg/query-service/cache/redis/redis.go index 6338eca6f3..737fa66d06 100644 --- a/pkg/query-service/cache/redis/redis.go +++ b/pkg/query-service/cache/redis/redis.go @@ -2,6 +2,7 @@ package redis import ( "context" + "errors" "fmt" "time" @@ -47,7 +48,7 @@ func (c *cache) Store(cacheKey string, data []byte, ttl time.Duration) error { func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.RetrieveStatus, error) { data, err := c.client.Get(context.Background(), cacheKey).Bytes() if err != nil { - if err == redis.Nil { + if errors.Is(err, redis.Nil) { return nil, status.RetrieveStatusKeyMiss, nil } return nil, status.RetrieveStatusError, err @@ -65,16 +66,13 @@ func (c *cache) SetTTL(cacheKey string, ttl time.Duration) { // Remove removes the cache entry func (c *cache) Remove(cacheKey string) { - err := c.client.Del(context.Background(), cacheKey).Err() - if err != nil { - zap.L().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err)) - } + c.BulkRemove([]string{cacheKey}) } // BulkRemove removes the cache entries func (c *cache) BulkRemove(cacheKeys []string) { - for _, cacheKey := range cacheKeys { - c.Remove(cacheKey) + if 
err := c.client.Del(context.Background(), cacheKeys...).Err(); err != nil { + zap.L().Error("error deleting cache keys", zap.Strings("cacheKeys", cacheKeys), zap.Error(err)) } } diff --git a/pkg/query-service/cache/redis/redis_test.go b/pkg/query-service/cache/redis/redis_test.go index 41d96836c8..baed3416a3 100644 --- a/pkg/query-service/cache/redis/redis_test.go +++ b/pkg/query-service/cache/redis/redis_test.go @@ -82,8 +82,7 @@ func TestBulkRemove(t *testing.T) { mock.ExpectSet("key2", []byte("value2"), 10*time.Second).RedisNil() c.Store("key2", []byte("value2"), 10*time.Second) - mock.ExpectDel("key").RedisNil() - mock.ExpectDel("key2").RedisNil() + mock.ExpectDel("key", "key2").RedisNil() c.BulkRemove([]string{"key", "key2"}) if err := mock.ExpectationsWereMet(); err != nil { diff --git a/pkg/query-service/collectorsimulator/collectorsimulator.go b/pkg/query-service/collectorsimulator/collectorsimulator.go index cf9e6cbafd..daae85a797 100644 --- a/pkg/query-service/collectorsimulator/collectorsimulator.go +++ b/pkg/query-service/collectorsimulator/collectorsimulator.go @@ -12,9 +12,7 @@ import ( "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/converter/expandconverter" "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" @@ -143,11 +141,21 @@ func NewCollectorSimulator( // Build and start collector service. 
collectorErrChan := make(chan error) svcSettings := service.Settings{ - Receivers: receiver.NewBuilder(collectorCfg.Receivers, factories.Receivers), - Processors: processor.NewBuilder(collectorCfg.Processors, factories.Processors), - Exporters: exporter.NewBuilder(collectorCfg.Exporters, factories.Exporters), - Connectors: connector.NewBuilder(collectorCfg.Connectors, factories.Connectors), - Extensions: extension.NewBuilder(collectorCfg.Extensions, factories.Extensions), + ReceiversConfigs: collectorCfg.Receivers, + ReceiversFactories: factories.Receivers, + + ProcessorsConfigs: collectorCfg.Processors, + ProcessorsFactories: factories.Processors, + + ExportersConfigs: collectorCfg.Exporters, + ExportersFactories: factories.Exporters, + + ConnectorsConfigs: collectorCfg.Connectors, + ConnectorsFactories: factories.Connectors, + + ExtensionsConfigs: collectorCfg.Extensions, + ExtensionsFactories: factories.Extensions, + AsyncErrorChannel: collectorErrChan, } diff --git a/pkg/query-service/collectorsimulator/inmemoryexporter/exporter_test.go b/pkg/query-service/collectorsimulator/inmemoryexporter/exporter_test.go index a2d60439b4..2e8466614e 100644 --- a/pkg/query-service/collectorsimulator/inmemoryexporter/exporter_test.go +++ b/pkg/query-service/collectorsimulator/inmemoryexporter/exporter_test.go @@ -59,6 +59,6 @@ func makeTestExporter(exporterId string) (exporter.Logs, error) { confmap.NewFromStringMap(map[string]any{"id": exporterId}).Unmarshal(&cfg) return factory.CreateLogsExporter( - context.Background(), exporter.CreateSettings{}, cfg, + context.Background(), exporter.Settings{}, cfg, ) } diff --git a/pkg/query-service/collectorsimulator/inmemoryexporter/factory.go b/pkg/query-service/collectorsimulator/inmemoryexporter/factory.go index 6bcc3a1226..b86d9bc7dc 100644 --- a/pkg/query-service/collectorsimulator/inmemoryexporter/factory.go +++ b/pkg/query-service/collectorsimulator/inmemoryexporter/factory.go @@ -16,7 +16,7 @@ func createDefaultConfig() 
component.Config { } func createLogsExporter( - _ context.Context, _ exporter.CreateSettings, config component.Config, + _ context.Context, _ exporter.Settings, config component.Config, ) (exporter.Logs, error) { if err := component.ValidateConfig(config); err != nil { return nil, errors.Wrap(err, "invalid inmemory exporter config") diff --git a/pkg/query-service/collectorsimulator/inmemoryexporter/factory_test.go b/pkg/query-service/collectorsimulator/inmemoryexporter/factory_test.go index 1a9481169a..641fee11bd 100644 --- a/pkg/query-service/collectorsimulator/inmemoryexporter/factory_test.go +++ b/pkg/query-service/collectorsimulator/inmemoryexporter/factory_test.go @@ -21,7 +21,7 @@ func TestCreateLogsExporter(t *testing.T) { cfg := factory.CreateDefaultConfig() te, err := factory.CreateLogsExporter( - context.Background(), exporter.CreateSettings{}, cfg, + context.Background(), exporter.Settings{}, cfg, ) assert.NoError(t, err) assert.NotNil(t, te) diff --git a/pkg/query-service/collectorsimulator/inmemoryreceiver/factory.go b/pkg/query-service/collectorsimulator/inmemoryreceiver/factory.go index 584fbb28fb..ed90e06cd8 100644 --- a/pkg/query-service/collectorsimulator/inmemoryreceiver/factory.go +++ b/pkg/query-service/collectorsimulator/inmemoryreceiver/factory.go @@ -18,7 +18,7 @@ func createDefaultConfig() component.Config { func createLogsReceiver( _ context.Context, - _ receiver.CreateSettings, + _ receiver.Settings, config component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { diff --git a/pkg/query-service/collectorsimulator/inmemoryreceiver/factory_test.go b/pkg/query-service/collectorsimulator/inmemoryreceiver/factory_test.go index 7bdcd80bee..6c79622e92 100644 --- a/pkg/query-service/collectorsimulator/inmemoryreceiver/factory_test.go +++ b/pkg/query-service/collectorsimulator/inmemoryreceiver/factory_test.go @@ -22,7 +22,7 @@ func TestCreateLogsReceiver(t *testing.T) { cfg := factory.CreateDefaultConfig() te, err := 
factory.CreateLogsReceiver( - context.Background(), receiver.CreateSettings{}, cfg, consumertest.NewNop(), + context.Background(), receiver.Settings{}, cfg, consumertest.NewNop(), ) assert.NoError(t, err) assert.NotNil(t, te) diff --git a/pkg/query-service/collectorsimulator/inmemoryreceiver/receiver_test.go b/pkg/query-service/collectorsimulator/inmemoryreceiver/receiver_test.go index 9205147156..a3fa1b81ca 100644 --- a/pkg/query-service/collectorsimulator/inmemoryreceiver/receiver_test.go +++ b/pkg/query-service/collectorsimulator/inmemoryreceiver/receiver_test.go @@ -61,6 +61,6 @@ func makeTestLogReceiver(receiverId string) (receiver.Logs, error) { confmap.NewFromStringMap(map[string]any{"id": receiverId}).Unmarshal(&cfg) return factory.CreateLogsReceiver( - context.Background(), receiver.CreateSettings{}, cfg, consumertest.NewNop(), + context.Background(), receiver.Settings{}, cfg, consumertest.NewNop(), ) } diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 78ee31e1a1..05855b1ddd 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -80,6 +80,13 @@ var TimestampSortFeature = GetOrDefaultEnv("TIMESTAMP_SORT_FEATURE", "true") var PreferRPMFeature = GetOrDefaultEnv("PREFER_RPM_FEATURE", "false") +// TODO(srikanthccv): remove after backfilling is done +func UseMetricsPreAggregation() bool { + return GetOrDefaultEnv("USE_METRICS_PRE_AGGREGATION", "true") == "true" +} + +var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false") + func IsDurationSortFeatureEnabled() bool { isDurationSortFeatureEnabledStr := DurationSortFeature isDurationSortFeatureEnabledBool, err := strconv.ParseBool(isDurationSortFeatureEnabledStr) @@ -220,14 +227,19 @@ var GroupByColMap = map[string]struct{}{ } const ( - SIGNOZ_METRIC_DBNAME = "signoz_metrics" - SIGNOZ_SAMPLES_V4_TABLENAME = "distributed_samples_v4" - SIGNOZ_TRACE_DBNAME = "signoz_traces" - SIGNOZ_SPAN_INDEX_TABLENAME = 
"distributed_signoz_index_v2" - SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4" - SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs" - SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day" - SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME = "distributed_time_series_v4_1day" + SIGNOZ_METRIC_DBNAME = "signoz_metrics" + SIGNOZ_SAMPLES_V4_TABLENAME = "distributed_samples_v4" + SIGNOZ_SAMPLES_V4_AGG_5M_TABLENAME = "distributed_samples_v4_agg_5m" + SIGNOZ_SAMPLES_V4_AGG_30M_TABLENAME = "distributed_samples_v4_agg_30m" + SIGNOZ_EXP_HISTOGRAM_TABLENAME = "distributed_exp_hist" + SIGNOZ_TRACE_DBNAME = "signoz_traces" + SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2" + SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2" + SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4" + SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs" + SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day" + SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME = "time_series_v4_1week" + SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME = "distributed_time_series_v4_1day" ) var TimeoutExcludedRoutes = map[string]bool{ @@ -322,8 +334,12 @@ const ( "attributes_number, " + "attributes_bool, " + "resources_string " - TracesExplorerViewSQLSelectWithSubQuery = "WITH subQuery AS (SELECT distinct on (traceID) traceID, durationNano, " + - "serviceName, name FROM %s.%s WHERE parentSpanID = '' AND %s %s ORDER BY durationNano DESC " + TracesExplorerViewSQLSelectWithSubQuery = "(SELECT traceID, durationNano, " + + "serviceName, name FROM %s.%s WHERE parentSpanID = '' AND %s %s ORDER BY durationNano DESC LIMIT 1 BY traceID " + TracesExplorerViewSQLSelectBeforeSubQuery = "SELECT subQuery.serviceName, subQuery.name, count() AS " + + "span_count, subQuery.durationNano, subQuery.traceID AS traceID FROM %s.%s INNER JOIN ( SELECT * FROM " + TracesExplorerViewSQLSelectAfterSubQuery = "AS inner_subquery ) AS subQuery ON %s.%s.traceID = subQuery.traceID WHERE %s " + + "GROUP BY 
subQuery.traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY subQuery.durationNano desc LIMIT 1 BY subQuery.traceID;" TracesExplorerViewSQLSelectQuery = "SELECT subQuery.serviceName, subQuery.name, count() AS " + "span_count, subQuery.durationNano, traceID FROM %s.%s GLOBAL INNER JOIN subQuery ON %s.traceID = subQuery.traceID GROUP " + "BY traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY subQuery.durationNano desc;" @@ -341,7 +357,9 @@ var ReservedColumnTargetAliases = map[string]struct{}{ } // logsPPLPfx is a short constant for logsPipelinePrefix -const LogsPPLPfx = "logstransform/pipeline_" +// TODO(Raj): Remove old prefix after new processor based pipelines have been rolled out +const LogsPPLPfx = "signozlogspipeline/pipeline_" +const OldLogsPPLPfx = "logstransform/pipeline_" const IntegrationPipelineIdPrefix = "integration" diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index 8e651e17ea..baad5f0a22 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -52,7 +52,7 @@ type Reader interface { SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error) - GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) + GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest, skipDotNames bool) (*v3.AggregateAttributeResponse, error) GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) diff --git a/pkg/query-service/model/auth.go b/pkg/query-service/model/auth.go index 69525def12..c9f5991472 
100644 --- a/pkg/query-service/model/auth.go +++ b/pkg/query-service/model/auth.go @@ -27,6 +27,34 @@ type InvitationResponseObject struct { Organization string `json:"organization" db:"organization"` } +type BulkInviteRequest struct { + Users []InviteRequest `json:"users"` +} + +type BulkInviteResponse struct { + Status string `json:"status"` + Summary InviteSummary `json:"summary"` + SuccessfulInvites []SuccessfulInvite `json:"successful_invites"` + FailedInvites []FailedInvite `json:"failed_invites"` +} + +type InviteSummary struct { + TotalInvites int `json:"total_invites"` + SuccessfulInvites int `json:"successful_invites"` + FailedInvites int `json:"failed_invites"` +} + +type SuccessfulInvite struct { + Email string `json:"email"` + InviteLink string `json:"invite_link"` + Status string `json:"status"` +} + +type FailedInvite struct { + Email string `json:"email"` + Error string `json:"error"` +} + type LoginRequest struct { Email string `json:"email"` Password string `json:"password"` diff --git a/pkg/query-service/model/infra.go b/pkg/query-service/model/infra.go new file mode 100644 index 0000000000..6832113b0c --- /dev/null +++ b/pkg/query-service/model/infra.go @@ -0,0 +1,201 @@ +package model + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +type ( + ResponseType string +) + +const ( + ResponseTypeList ResponseType = "list" + ResponseTypeGroupedList ResponseType = "grouped_list" +) + +type HostListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type HostListRecord struct { + HostName string `json:"hostName"` + Active bool `json:"active"` + OS string `json:"os"` + CPU float64 `json:"cpu"` + CPUTimeSeries *v3.Series `json:"cpuTimeSeries"` + Memory float64 `json:"memory"` + MemoryTimeSeries 
*v3.Series `json:"memoryTimeSeries"` + Wait float64 `json:"wait"` + WaitTimeSeries *v3.Series `json:"waitTimeSeries"` + Load15 float64 `json:"load15"` + Load15TimeSeries *v3.Series `json:"load15TimeSeries"` + Meta map[string]string `json:"-"` +} + +type HostListGroup struct { + GroupValues []string `json:"groupValues"` + Active int `json:"active"` + Inactive int `json:"inactive"` + GroupCPUAvg float64 `json:"groupCPUAvg"` + GroupMemoryAvg float64 `json:"groupMemoryAvg"` + GroupWaitAvg float64 `json:"groupWaitAvg"` + GroupLoad15Avg float64 `json:"groupLoad15Avg"` + HostNames []string `json:"hostNames"` +} + +type HostListResponse struct { + Type string `json:"type"` + Records []HostListRecord `json:"records"` + Groups []HostListGroup `json:"groups"` + Total int `json:"total"` +} + +type ProcessListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type ProcessListResponse struct { + Type string `json:"type"` + Records []ProcessListRecord `json:"records"` + Groups []ProcessListGroup `json:"groups"` + Total int `json:"total"` +} + +type ProcessListRecord struct { + ProcessName string `json:"processName"` + ProcessID string `json:"processID"` + ProcessCMD string `json:"processCMD"` + ProcessCMDLine string `json:"processCMDLine"` + ProcessCPU float64 `json:"processCPU"` + ProcessCPUTimeSeries *v3.Series `json:"processCPUTimeSeries"` + ProcessMemory float64 `json:"processMemory"` + ProcessMemoryTimeSeries *v3.Series `json:"processMemoryTimeSeries"` + Meta map[string]string `json:"-"` +} + +type ProcessListGroup struct { + GroupValues []string `json:"groupValues"` + GroupCPUAvg float64 `json:"groupCPUAvg"` + GroupMemoryAvg float64 `json:"groupMemoryAvg"` + ProcessNames []string `json:"processNames"` +} + +type PodListRequest 
struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type PodListResponse struct { + Type ResponseType `json:"type"` + Records []PodListRecord `json:"records"` + Total int `json:"total"` +} + +type PodListRecord struct { + PodUID string `json:"podUID,omitempty"` + PodCPU float64 `json:"podCPU"` + PodCPURequest float64 `json:"podCPURequest"` + PodCPULimit float64 `json:"podCPULimit"` + PodMemory float64 `json:"podMemory"` + PodMemoryRequest float64 `json:"podMemoryRequest"` + PodMemoryLimit float64 `json:"podMemoryLimit"` + RestartCount int `json:"restartCount"` + Meta map[string]string `json:"meta"` + CountByPhase PodCountByPhase `json:"countByPhase"` +} + +type PodCountByPhase struct { + Pending int `json:"pending"` + Running int `json:"running"` + Succeeded int `json:"succeeded"` + Failed int `json:"failed"` + Unknown int `json:"unknown"` +} + +type NodeListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type NodeListResponse struct { + Type ResponseType `json:"type"` + Records []NodeListRecord `json:"records"` + Total int `json:"total"` +} + +type NodeListRecord struct { + NodeUID string `json:"nodeUID,omitempty"` + NodeCPUUsage float64 `json:"nodeCPUUsage"` + NodeCPUAllocatable float64 `json:"nodeCPUAllocatable"` + NodeMemoryUsage float64 `json:"nodeMemoryUsage"` + NodeMemoryAllocatable float64 `json:"nodeMemoryAllocatable"` + Meta map[string]string `json:"meta"` +} + +type NamespaceListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // 
epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type NamespaceListResponse struct { + Type ResponseType `json:"type"` + Records []NamespaceListRecord `json:"records"` + Total int `json:"total"` +} + +type NamespaceListRecord struct { + NamespaceName string `json:"namespaceName"` + CPUUsage float64 `json:"cpuUsage"` + MemoryUsage float64 `json:"memoryUsage"` + Meta map[string]string `json:"meta"` +} + +type ClusterListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type ClusterListResponse struct { + Type ResponseType `json:"type"` + Records []ClusterListRecord `json:"records"` + Total int `json:"total"` +} + +type ClusterListRecord struct { + ClusterUID string `json:"clusterUID"` + CPUUsage float64 `json:"cpuUsage"` + CPUAllocatable float64 `json:"cpuAllocatable"` + MemoryUsage float64 `json:"memoryUsage"` + MemoryAllocatable float64 `json:"memoryAllocatable"` + Meta map[string]string `json:"meta"` +} diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index a58575cd08..024a4c3dbb 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -11,6 +11,7 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" + "go.uber.org/zap" ) type DataSource string @@ -762,6 +763,11 @@ type Function struct { NamedArgs map[string]interface{} `json:"namedArgs,omitempty"` } +type MetricTableHints struct { + TimeSeriesTableName string + SamplesTableName string +} + type BuilderQuery struct { QueryName string `json:"queryName"` StepInterval int64 `json:"stepInterval"` @@ -787,6 +793,39 @@ type BuilderQuery struct { ShiftBy 
int64 IsAnomaly bool QueriesUsedInFormula []string + MetricTableHints *MetricTableHints `json:"-"` +} + +func (b *BuilderQuery) SetShiftByFromFunc() { + // Remove the time shift function from the list of functions and set the shift by value + var timeShiftBy int64 + if len(b.Functions) > 0 { + for idx := range b.Functions { + function := &b.Functions[idx] + if function.Name == FunctionNameTimeShift { + // move the function to the beginning of the list + // so any other function can use the shifted time + var fns []Function + fns = append(fns, *function) + fns = append(fns, b.Functions[:idx]...) + fns = append(fns, b.Functions[idx+1:]...) + b.Functions = fns + if len(function.Args) > 0 { + if shift, ok := function.Args[0].(float64); ok { + timeShiftBy = int64(shift) + } else if shift, ok := function.Args[0].(string); ok { + shiftBy, err := strconv.ParseFloat(shift, 64) + if err != nil { + zap.L().Error("failed to parse time shift by", zap.String("shift", shift), zap.Error(err)) + } + timeShiftBy = int64(shiftBy) + } + } + break + } + } + } + b.ShiftBy = timeShiftBy } func (b *BuilderQuery) Clone() *BuilderQuery { @@ -1075,9 +1114,16 @@ func (f *FilterItem) CacheKey() string { return fmt.Sprintf("key:%s,op:%s,value:%v", f.Key.CacheKey(), f.Operator, f.Value) } +type Direction string + +const ( + DirectionAsc Direction = "asc" + DirectionDesc Direction = "desc" +) + type OrderBy struct { ColumnName string `json:"columnName"` - Order string `json:"order"` + Order Direction `json:"order"` Key string `json:"-"` DataType AttributeKeyDataType `json:"-"` Type AttributeKeyType `json:"-"` @@ -1308,7 +1354,7 @@ type URLShareableOptions struct { SelectColumns []AttributeKey `json:"selectColumns"` } -type LogQBOptions struct { +type QBOptions struct { GraphLimitQtype string IsLivetailQuery bool PreferRPM bool diff --git a/pkg/query-service/postprocess/process.go b/pkg/query-service/postprocess/process.go index 1f9ace33eb..5523bb1176 100644 --- 
a/pkg/query-service/postprocess/process.go +++ b/pkg/query-service/postprocess/process.go @@ -28,12 +28,12 @@ func PostProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam // The function is named applyMetricLimit because it only applies to metrics data source // In traces and logs, the limit is achieved using subqueries ApplyMetricLimit(result, queryRangeParams) + // We apply the functions here it's easier to add new functions + ApplyFunctions(result, queryRangeParams) // Each series in the result produces N number of points, where N is (end - start) / step // For the panel type table, we need to show one point for each series in the row // We do that by applying a reduce function to each series applyReduceTo(result, queryRangeParams) - // We apply the functions here it's easier to add new functions - ApplyFunctions(result, queryRangeParams) // expressions are executed at query serivce so the value of time.now in the invdividual // queries will be different so for table panel we are making it same. 
diff --git a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go index 2460f90485..e2b17bb0a3 100644 --- a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go +++ b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go @@ -18,42 +18,42 @@ var testCases = []struct { Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "="}, }}, - Expr: `attributes.key == "checkbody"`, + Expr: `attributes["key"] == "checkbody"`, }, { Name: "not equal", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "!="}, }}, - Expr: `attributes.key != "checkbody"`, + Expr: `attributes["key"] != "checkbody"`, }, { Name: "less than", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<"}, }}, - Expr: "attributes.key < 10", + Expr: `attributes["key"] != nil && attributes["key"] < 10`, }, { Name: "greater than", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">"}, }}, - Expr: "attributes.key > 10", + Expr: `attributes["key"] != nil && attributes["key"] > 10`, }, { Name: "less than equal", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<="}, }}, - Expr: "attributes.key <= 10", + Expr: `attributes["key"] != nil && attributes["key"] <= 10`, }, { Name: "greater than equal", Query: &v3.FilterSet{Operator: 
"AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">="}, }}, - Expr: "attributes.key >= 10", + Expr: `attributes["key"] != nil && attributes["key"] >= 10`, }, // case sensitive { @@ -61,42 +61,42 @@ var testCases = []struct { Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "contains"}, }}, - Expr: `body contains "checkbody"`, + Expr: `body != nil && lower(body) contains lower("checkbody")`, }, { Name: "body ncontains", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "ncontains"}, }}, - Expr: `body not contains "checkbody"`, + Expr: `body != nil && lower(body) not contains lower("checkbody")`, }, { Name: "body regex", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "regex"}, }}, - Expr: `body matches "[0-1]+regex$"`, + Expr: `body != nil && body matches "[0-1]+regex$"`, }, { Name: "body not regex", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"}, }}, - Expr: `body not matches "[0-1]+regex$"`, + Expr: `body != nil && body not matches "[0-1]+regex$"`, }, { Name: "regex with escape characters", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: 
v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: `^Executing \[\S+@\S+:[0-9]+\] \S+".*`, Operator: "regex"}, }}, - Expr: `body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`, + Expr: `body != nil && body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`, }, { Name: "invalid regex", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"}, }}, - Expr: `body not matches "[0-9]++"`, + Expr: `body != nil && lower(body) not matches "[0-9]++"`, ExpectError: true, }, { @@ -104,14 +104,14 @@ var testCases = []struct { Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{1, 2, 3, 4}, Operator: "in"}, }}, - Expr: "attributes.key in [1,2,3,4]", + Expr: `attributes["key"] != nil && attributes["key"] in [1,2,3,4]`, }, { Name: "not in", Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"1", "2"}, Operator: "nin"}, }}, - Expr: "attributes.key not in ['1','2']", + Expr: `attributes["key"] != nil && attributes["key"] not in ['1','2']`, }, { Name: "exists", @@ -134,7 +134,7 @@ var testCases = []struct { {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"}, {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"}, }}, - Expr: `attributes.key <= 10 and body not matches "[0-1]+regex$" and "key" not in attributes`, + Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body != nil && body not matches "[0-1]+regex$" and "key" not in 
attributes`, }, { Name: "incorrect multi filter", @@ -143,7 +143,7 @@ var testCases = []struct { {Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"}, {Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"}, }}, - Expr: `attributes.key <= 10 and body not matches "[0-9]++" and "key" not in attributes`, + Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body not matches "[0-9]++" and "key" not in attributes`, ExpectError: true, }, } diff --git a/pkg/query-service/querycache/query_range_cache.go b/pkg/query-service/querycache/query_range_cache.go index 3b3e3be93c..b2bde35eb5 100644 --- a/pkg/query-service/querycache/query_range_cache.go +++ b/pkg/query-service/querycache/query_range_cache.go @@ -142,9 +142,18 @@ func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.Series) []*v3. } seriesesByLabels[h].Points = append(seriesesByLabels[h].Points, series.Points...) 
} + + hashes := make([]uint64, 0, len(seriesesByLabels)) + for h := range seriesesByLabels { + hashes = append(hashes, h) + } + sort.Slice(hashes, func(i, j int) bool { + return hashes[i] < hashes[j] + }) + // Sort the points in each series by timestamp - for idx := range seriesesByLabels { - series := seriesesByLabels[idx] + for _, h := range hashes { + series := seriesesByLabels[h] series.SortPoints() series.RemoveDuplicatePoints() mergedSeries = append(mergedSeries, series) diff --git a/pkg/query-service/rules/threshold_rule.go b/pkg/query-service/rules/threshold_rule.go index 72b00e2412..3971597ec2 100644 --- a/pkg/query-service/rules/threshold_rule.go +++ b/pkg/query-service/rules/threshold_rule.go @@ -152,6 +152,18 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep { q.StepInterval = minStep } + + q.SetShiftByFromFunc() + + if q.DataSource == v3.DataSourceMetrics && constants.UseMetricsPreAggregation() { + // if the time range is greater than 1 day, and less than 1 week set the step interval to be multiple of 5 minutes + // if the time range is greater than 1 week, set the step interval to be multiple of 30 mins + if end-start >= 24*time.Hour.Milliseconds() && end-start < 7*24*time.Hour.Milliseconds() { + q.StepInterval = int64(math.Round(float64(q.StepInterval)/300)) * 300 + } else if end-start >= 7*24*time.Hour.Milliseconds() { + q.StepInterval = int64(math.Round(float64(q.StepInterval)/1800)) * 1800 + } + } } } diff --git a/pkg/query-service/rules/threshold_rule_test.go b/pkg/query-service/rules/threshold_rule_test.go index d3d84f06a7..e75c82b1a0 100644 --- a/pkg/query-service/rules/threshold_rule_test.go +++ b/pkg/query-service/rules/threshold_rule_test.go @@ -1602,3 +1602,66 @@ func TestThresholdRuleLogsLink(t *testing.T) { } } } + +func TestThresholdRuleShiftBy(t *testing.T) { + target := float64(10) + postableRule := PostableRule{ + 
AlertName: "Logs link test", + AlertType: AlertTypeLogs, + RuleType: RuleTypeThreshold, + EvalWindow: Duration(5 * time.Minute), + Frequency: Duration(1 * time.Minute), + RuleCondition: &RuleCondition{ + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: 60, + AggregateAttribute: v3.AttributeKey{ + Key: "component", + }, + AggregateOperator: v3.AggregateOperatorCountDistinct, + DataSource: v3.DataSourceLogs, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "k8s.container.name", IsColumn: false, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}, + Value: "testcontainer", + Operator: v3.FilterOperatorEqual, + }, + }, + }, + Functions: []v3.Function{ + { + Name: v3.FunctionNameTimeShift, + Args: []interface{}{float64(10)}, + }, + }, + }, + }, + }, + Target: &target, + CompareOp: ValueAboveOrEq, + }, + } + + rule, err := NewThresholdRule("69", &postableRule, nil, nil, true) + if err != nil { + assert.NoError(t, err) + } + rule.TemporalityMap = map[string]map[v3.Temporality]bool{ + "signoz_calls_total": { + v3.Delta: true, + }, + } + + params, err := rule.prepareQueryRange(time.Now()) + if err != nil { + assert.NoError(t, err) + } + + assert.Equal(t, int64(10), params.CompositeQuery.BuilderQueries["A"].ShiftBy) +} diff --git a/pkg/query-service/tests/auth_test.go b/pkg/query-service/tests/auth_test.go deleted file mode 100644 index 7c7d5277b6..0000000000 --- a/pkg/query-service/tests/auth_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package tests - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - "go.signoz.io/signoz/pkg/query-service/auth" - "go.signoz.io/signoz/pkg/query-service/model" -) - -func invite(t *testing.T, email string) *model.InviteResponse { - q := endpoint + 
fmt.Sprintf("/api/v1/invite?email=%s", email) - resp, err := client.Get(q) - require.NoError(t, err) - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - var inviteResp model.InviteResponse - err = json.Unmarshal(b, &inviteResp) - require.NoError(t, err) - - return &inviteResp -} - -func register(email, password, token string) (string, error) { - q := endpoint + "/api/v1/register" - - req := auth.RegisterRequest{ - Email: email, - Password: password, - InviteToken: token, - } - - b, err := json.Marshal(req) - if err != nil { - return "", err - } - resp, err := client.Post(q, "application/json", bytes.NewBuffer(b)) - if err != nil { - return "", err - } - - defer resp.Body.Close() - b, err = io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(b), nil -} - -func login(email, password, refreshToken string) (*model.LoginResponse, error) { - q := endpoint + "/api/v1/login" - - req := model.LoginRequest{ - Email: email, - Password: password, - RefreshToken: refreshToken, - } - - b, err := json.Marshal(req) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal") - } - resp, err := client.Post(q, "application/json", bytes.NewBuffer(b)) - if err != nil { - return nil, errors.Wrap(err, "failed to post") - } - - defer resp.Body.Close() - b, err = io.ReadAll(resp.Body) - if err != nil { - return nil, errors.Wrap(err, "failed to read body") - } - - loginResp := &model.LoginResponse{} - err = json.Unmarshal(b, loginResp) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal") - } - - return loginResp, nil -} - -func TestAuthInviteAPI(t *testing.T) { - t.Skip() - email := "abc@signoz.io" - resp := invite(t, email) - require.Equal(t, email, resp.Email) - require.NotNil(t, resp.InviteToken) -} - -func TestAuthRegisterAPI(t *testing.T) { - email := "alice@signoz.io" - resp, err := register(email, "Password@123", "") - require.NoError(t, err) - require.Contains(t, resp, "user registered 
successfully") - -} - -func TestAuthLoginAPI(t *testing.T) { - t.Skip() - email := "abc-login@signoz.io" - password := "Password@123" - inv := invite(t, email) - - resp, err := register(email, password, inv.InviteToken) - require.NoError(t, err) - require.Contains(t, resp, "user registered successfully") - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - loginResp2, err := login("", "", loginResp.RefreshJwt) - require.NoError(t, err) - - require.NotNil(t, loginResp2.AccessJwt) -} diff --git a/pkg/query-service/tests/cold_storage_test.go b/pkg/query-service/tests/cold_storage_test.go deleted file mode 100644 index 87db1b6a93..0000000000 --- a/pkg/query-service/tests/cold_storage_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package tests - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.signoz.io/signoz/pkg/query-service/model" -) - -const ( - endpoint = "http://localhost:8180" -) - -var ( - client http.Client -) - -func setTTL(table, coldStorage, toColdTTL, deleteTTL string, jwtToken string) ([]byte, error) { - params := fmt.Sprintf("type=%s&duration=%s", table, deleteTTL) - if len(toColdTTL) > 0 { - params += fmt.Sprintf("&coldStorage=%s&toColdDuration=%s", coldStorage, toColdTTL) - } - var bearer = "Bearer " + jwtToken - req, err := http.NewRequest("POST", endpoint+"/api/v1/settings/ttl?"+params, nil) - if err != nil { - return nil, err - } - req.Header.Add("Authorization", bearer) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - return b, err - } - - return b, nil -} - -func TestListDisks(t *testing.T) { - t.Skip() - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - var bearer = "Bearer " + loginResp.AccessJwt - req, err := http.NewRequest("POST", endpoint+"/api/v1/disks", 
nil) - req.Header.Add("Authorization", bearer) - - resp, err := client.Do(req) - require.NoError(t, err) - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, `[{"name":"default","type":"local"}, {"name":"s3","type":"s3"}]`, string(b)) -} - -func TestSetTTL(t *testing.T) { - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - testCases := []struct { - caseNo int - coldStorage string - table string - coldTTL string - deleteTTL string - expected string - }{ - { - 1, "s3", "traces", "100h", "60h", - "Delete TTL should be greater than cold storage move TTL.", - }, - { - 2, "s3", "traces", "100", "60s", - "Not a valid toCold TTL duration 100", - }, - { - 3, "s3", "traces", "100s", "100", - "Not a valid TTL duration 100", - }, - { - 4, "s3", "metrics", "1h", "2h", - "move ttl has been successfully set up", - }, - { - 5, "s3", "traces", "10s", "6h", - "move ttl has been successfully set up", - }, - } - - for _, tc := range testCases { - r, err := setTTL(tc.table, tc.coldStorage, tc.coldTTL, tc.deleteTTL, loginResp.AccessJwt) - require.NoErrorf(t, err, "Failed case: %d", tc.caseNo) - require.Containsf(t, string(r), tc.expected, "Failed case: %d", tc.caseNo) - } - - time.Sleep(20 * time.Second) - doneCh := make(chan struct{}) - defer close(doneCh) - - count := 0 - for range minioClient.ListObjects(bucketName, "", false, doneCh) { - count++ - } - - require.True(t, count > 0, "No objects are present in Minio") - fmt.Printf("=== Found %d objects in Minio\n", count) -} - -func getTTL(t *testing.T, table string, jwtToken string) *model.GetTTLResponseItem { - url := endpoint + fmt.Sprintf("/api/v1/settings/ttl?type=%s", table) - if len(table) == 0 { - url = endpoint + "/api/v1/settings/ttl" - } - - var bearer = "Bearer " + jwtToken - req, err := http.NewRequest("GET", url, nil) - require.NoError(t, err) - req.Header.Add("Authorization", bearer) 
- resp, err := client.Do(req) - - require.NoError(t, err) - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - res := &model.GetTTLResponseItem{} - require.NoError(t, json.Unmarshal(b, res)) - return res -} - -func TestGetTTL(t *testing.T) { - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - resp := getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - } - require.Equal(t, "success", resp.Status) - - r, err := setTTL("traces", "s3", "1h", "2h", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "successfully set up") - - resp = getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - resp = getTTL(t, "traces", loginResp.AccessJwt) - require.Equal(t, 1, resp.ExpectedTracesMoveTime) - require.Equal(t, 2, resp.ExpectedTracesTime) - } - resp = getTTL(t, "traces", loginResp.AccessJwt) - require.Equal(t, "success", resp.Status) - require.Equal(t, 1, resp.TracesMoveTime) - require.Equal(t, 2, resp.TracesTime) - - resp = getTTL(t, "metrics", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - } - require.Equal(t, "success", resp.Status) - - r, err = setTTL("traces", "s3", "10h", "20h", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "successfully set up") - - resp = getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - resp = getTTL(t, "traces", loginResp.AccessJwt) - } - require.Equal(t, "success", resp.Status) - require.Equal(t, 10, resp.TracesMoveTime) - require.Equal(t, 20, resp.TracesTime) - - resp = getTTL(t, "metrics", loginResp.AccessJwt) - for resp.Status != "success" && resp.Status != "failed" { - time.Sleep(time.Second) - resp = getTTL(t, "metrics", loginResp.AccessJwt) - } - require.Equal(t, "success", 
resp.Status) - require.Equal(t, 1, resp.MetricsMoveTime) - require.Equal(t, 2, resp.MetricsTime) - - r, err = setTTL("metrics", "s3", "0s", "0s", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "Not a valid TTL duration 0s") - - r, err = setTTL("traces", "s3", "0s", "0s", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "Not a valid TTL duration 0s") -} - -func TestMain(m *testing.M) { - if err := startCluster(); err != nil { - fmt.Println(err) - } - defer stopCluster() - - m.Run() -} diff --git a/pkg/query-service/tests/docker.go b/pkg/query-service/tests/docker.go deleted file mode 100644 index c65a627512..0000000000 --- a/pkg/query-service/tests/docker.go +++ /dev/null @@ -1,117 +0,0 @@ -package tests - -import ( - "context" - "fmt" - "net/http" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "log" - - minio "github.com/minio/minio-go/v6" -) - -const ( - prefix = "signoz_test" - minioEndpoint = "localhost:9100" - accessKey = "ash" - secretKey = "password" - bucketName = "test" -) - -var ( - minioClient *minio.Client - composeFile string -) - -func init() { - goArch := runtime.GOARCH - if goArch == "arm64" { - composeFile = "./test-deploy/docker-compose.arm.yaml" - } else if goArch == "amd64" { - composeFile = "./test-deploy/docker-compose.yaml" - } else { - log.Fatalf("Unsupported architecture: %s", goArch) - } -} - -func getCmd(args ...string) *exec.Cmd { - cmd := exec.CommandContext(context.Background(), args[0], args[1:]...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Env = os.Environ() - return cmd -} - -func startMinio() error { - log.Printf("Starting minio") - cmd := getCmd("docker", "run", "-d", "-p", "9100:9000", "-p", "9101:9001", - "--name", "signoz-minio-test", "-e", "MINIO_ROOT_USER=ash", - "-e", "MINIO_ROOT_PASSWORD=password", - "quay.io/minio/minio", "server", "/data", "--console-address", ":9001") - - if err := cmd.Run(); err != nil { - return err - } - - var err error - minioClient, err = minio.New(minioEndpoint, accessKey, secretKey, false) - if err != nil { - return err - } - if err = minioClient.MakeBucket(bucketName, ""); err != nil { - return err - } - return nil -} - -func startCluster() error { - if err := os.MkdirAll("./test-deploy/data/minio/test", 0777); err != nil { - return err - } - - if err := startMinio(); err != nil { - return err - } - - cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix, - "up", "--force-recreate", "--build", "--remove-orphans", "--detach") - - log.Printf("Starting signoz cluster...\n") - if err := cmd.Run(); err != nil { - log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err) - return err - } - - client := http.Client{} - for i := 0; i < 10; i++ { - if _, err := client.Get("http://localhost:8180/api/v1/health"); err != nil { - time.Sleep(2 * time.Second) - } else { - log.Printf("CLUSTER UP\n") - return nil - } - } - return fmt.Errorf("query-service is not healthy") -} - -func stopCluster() { - cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix, "down", "-v") - if err := cmd.Run(); err != nil { - log.Printf("Error while stopping the cluster. Error: %v\n", err) - } - if err := os.RemoveAll("./test-deploy/data"); err != nil { - log.Printf("Error while cleaning temporary dir. 
Error: %v\n", err) - } - - cmd = getCmd("docker", "container", "rm", "-f", "signoz-minio-test") - if err := cmd.Run(); err != nil { - log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err) - } - - log.Printf("CLUSTER DOWN: %s\n", prefix) -} diff --git a/pkg/query-service/tests/test-deploy/alertmanager.yml b/pkg/query-service/tests/test-deploy/alertmanager.yml deleted file mode 100644 index d69357f9dd..0000000000 --- a/pkg/query-service/tests/test-deploy/alertmanager.yml +++ /dev/null @@ -1,35 +0,0 @@ -global: - resolve_timeout: 1m - slack_api_url: 'https://hooks.slack.com/services/xxx' - -route: - receiver: 'slack-notifications' - -receivers: -- name: 'slack-notifications' - slack_configs: - - channel: '#alerts' - send_resolved: true - icon_url: https://avatars3.githubusercontent.com/u/3380462 - title: |- - [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }} - {{- if gt (len .CommonLabels) (len .GroupLabels) -}} - {{" "}}( - {{- with .CommonLabels.Remove .GroupLabels.Names }} - {{- range $index, $label := .SortedPairs -}} - {{ if $index }}, {{ end }} - {{- $label.Name }}="{{ $label.Value -}}" - {{- end }} - {{- end -}} - ) - {{- end }} - text: >- - {{ range .Alerts -}} - *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }} - - *Description:* {{ .Annotations.description }} - - *Details:* - {{ range .Labels.SortedPairs }} β€’ *{{ .Name }}:* `{{ .Value }}` - {{ end }} - {{ end }} \ No newline at end of file diff --git a/pkg/query-service/tests/test-deploy/alerts.yml b/pkg/query-service/tests/test-deploy/alerts.yml deleted file mode 100644 index 810a20750c..0000000000 --- a/pkg/query-service/tests/test-deploy/alerts.yml +++ /dev/null @@ -1,11 +0,0 @@ -groups: -- name: ExampleCPULoadGroup - rules: - - alert: HighCpuLoad - expr: system_cpu_load_average_1m > 0.1 - for: 0m - labels: - severity: warning - 
annotations: - summary: High CPU load - description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" diff --git a/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml b/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml deleted file mode 100644 index 0e3ddcdde0..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml +++ /dev/null @@ -1,75 +0,0 @@ - - - - - - zookeeper-1 - 2181 - - - - - - - - - - - - - - - - clickhouse - 9000 - - - - - - - - \ No newline at end of file diff --git a/pkg/query-service/tests/test-deploy/clickhouse-config.xml b/pkg/query-service/tests/test-deploy/clickhouse-config.xml deleted file mode 100644 index 4e8dc00b30..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-config.xml +++ /dev/null @@ -1,1139 +0,0 @@ - - - - - - information - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - - 1000M - 10 - - - - - - - - - - - - - - - - - - 8123 - - - 9000 - - - 9004 - - - 9005 - - - - - - - - - - - - 9009 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 4096 - - - 3 - - - - - false - - - /path/to/ssl_cert_file - /path/to/ssl_key_file - - - false - - - /path/to/ssl_ca_cert_file - - - none - - - 0 - - - -1 - -1 - - - false - - - - - - - - - - - none - true - true - sslv2,sslv3 - true - - - - true - true - sslv2,sslv3 - true - - - - RejectCertificateHandler - - - - - - - - - 100 - - - 0 - - - - 10000 - - - - - - 0.9 - - - 4194304 - - - 0 - - - - - - 8589934592 - - - 5368709120 - - - - 1000 - - - 134217728 - - - 10000 - - - /var/lib/clickhouse/ - - - /var/lib/clickhouse/tmp/ - - - - ` - - - - - - /var/lib/clickhouse/user_files/ - - - - - - - - - - - - - users.xml - - - - /var/lib/clickhouse/access/ - - - - - - - default - - - - - - - - - - - - default - - - - - - - - - true - - - false - - ' | sed -e 's|.*>\(.*\)<.*|\1|') - wget 
https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb - apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb - clickhouse-jdbc-bridge & - - * [CentOS/RHEL] - export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge - export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '' | sed -e 's|.*>\(.*\)<.*|\1|') - wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm - yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm - clickhouse-jdbc-bridge & - - Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information. - ]]> - - - - - - - - - - - - - - - 01 - example01-01-1 - - - - - - 3600 - - - - 3600 - - - 60 - - - - - - - - - - - - - system - query_log
- - toYYYYMM(event_date) - - - - - - 7500 -
- - - - system - trace_log
- - toYYYYMM(event_date) - 7500 -
- - - - system - query_thread_log
- toYYYYMM(event_date) - 7500 -
- - - - system - query_views_log
- toYYYYMM(event_date) - 7500 -
- - - - system - part_log
- toYYYYMM(event_date) - 7500 -
- - - - - - system - metric_log
- 7500 - 1000 -
- - - - system - asynchronous_metric_log
- - 7000 -
- - - - - - engine MergeTree - partition by toYYYYMM(finish_date) - order by (finish_date, finish_time_us, trace_id) - - system - opentelemetry_span_log
- 7500 -
- - - - - system - crash_log
- - - 1000 -
- - - - - - - system - processors_profile_log
- - toYYYYMM(event_date) - 7500 -
- - - - - - - - - *_dictionary.xml - - - *_function.xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /clickhouse/task_queue/ddl - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - click_cost - any - - 0 - 3600 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - - - /var/lib/clickhouse/format_schemas/ - - - - - hide encrypt/decrypt arguments - ((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\) - - \1(???) - - - - - - - - - - false - - false - - - https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277 - - - - - - - - - - - 268435456 - true - -
diff --git a/pkg/query-service/tests/test-deploy/clickhouse-storage.xml b/pkg/query-service/tests/test-deploy/clickhouse-storage.xml deleted file mode 100644 index f444bf43b4..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-storage.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - 10485760 - - - s3 - http://172.17.0.1:9100/test// - ash - password - - - - - - - default - - - s3 - 0 - - - - - - diff --git a/pkg/query-service/tests/test-deploy/clickhouse-users.xml b/pkg/query-service/tests/test-deploy/clickhouse-users.xml deleted file mode 100644 index f18562071d..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-users.xml +++ /dev/null @@ -1,123 +0,0 @@ - - - - - - - - - - 10000000000 - - - random - - - - - 1 - - - - - - - - - - - - - ::/0 - - - - default - - - default - - - - - - - - - - - - - - 3600 - - - 0 - 0 - 0 - 0 - 0 - - - - diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml deleted file mode 100644 index 562f19d83a..0000000000 --- a/pkg/query-service/tests/test-deploy/docker-compose.yaml +++ /dev/null @@ -1,283 +0,0 @@ -version: "2.4" - -x-clickhouse-defaults: &clickhouse-defaults - restart: on-failure - image: clickhouse/clickhouse-server:24.1.2-alpine - tty: true - depends_on: - - zookeeper-1 - # - zookeeper-2 - # - zookeeper-3 - logging: - options: - max-size: 50m - max-file: "3" - healthcheck: - # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" - test: - [ - "CMD", - "wget", - "--spider", - "-q", - "0.0.0.0:8123/ping" - ] - interval: 30s - timeout: 5s - retries: 3 - ulimits: - nproc: 65535 - nofile: - soft: 262144 - hard: 262144 - -x-db-depend: &db-depend - depends_on: - clickhouse: - condition: service_healthy - otel-collector-migrator: - condition: service_completed_successfully - # clickhouse-2: - # condition: service_healthy - # clickhouse-3: - # condition: service_healthy - -services: - 
zookeeper-1: - image: bitnami/zookeeper:3.7.1 - container_name: signoz-zookeeper-1 - user: root - ports: - - "2181:2181" - - "2888:2888" - - "3888:3888" - volumes: - - ./data/zookeeper-1:/bitnami/zookeeper - environment: - - ZOO_SERVER_ID=1 - - ZOO_SERVERS=0.0.0.0:2888:3888 - # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888 - - ALLOW_ANONYMOUS_LOGIN=yes - - ZOO_AUTOPURGE_INTERVAL=1 - - # zookeeper-2: - # image: bitnami/zookeeper:3.7.0 - # container_name: signoz-zookeeper-2 - # user: root - # ports: - # - "2182:2181" - # - "2889:2888" - # - "3889:3888" - # volumes: - # - ./data/zookeeper-2:/bitnami/zookeeper - # environment: - # - ZOO_SERVER_ID=2 - # - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888 - # - ALLOW_ANONYMOUS_LOGIN=yes - # - ZOO_AUTOPURGE_INTERVAL=1 - - # zookeeper-3: - # image: bitnami/zookeeper:3.7.0 - # container_name: signoz-zookeeper-3 - # user: root - # ports: - # - "2183:2181" - # - "2890:2888" - # - "3890:3888" - # volumes: - # - ./data/zookeeper-3:/bitnami/zookeeper - # environment: - # - ZOO_SERVER_ID=3 - # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888 - # - ALLOW_ANONYMOUS_LOGIN=yes - # - ZOO_AUTOPURGE_INTERVAL=1 - - clickhouse: - <<: *clickhouse-defaults - container_name: signoz-clickhouse - hostname: clickhouse - ports: - - "9000:9000" - - "8123:8123" - - "9181:9181" - volumes: - - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - - ./data/clickhouse/:/var/lib/clickhouse/ - - # clickhouse-2: - # <<: *clickhouse-defaults - # container_name: signoz-clickhouse-2 - # hostname: clickhouse-2 - # ports: - # - "9001:9000" - # - "8124:8123" - # - "9182:9181" - # volumes: - # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - # - 
./clickhouse-users.xml:/etc/clickhouse-server/users.xml - # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - # - ./data/clickhouse-2/:/var/lib/clickhouse/ - - # clickhouse-3: - # <<: *clickhouse-defaults - # container_name: signoz-clickhouse-3 - # hostname: clickhouse-3 - # ports: - # - "9002:9000" - # - "8125:8123" - # - "9183:9181" - # volumes: - # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - # - ./data/clickhouse-3/:/var/lib/clickhouse/ - - alertmanager: - image: signoz/alertmanager:0.23.7 - container_name: signoz-alertmanager - volumes: - - ./data/alertmanager:/data - depends_on: - query-service: - condition: service_healthy - restart: on-failure - command: - - --queryService.url=http://query-service:8085 - - --storage.path=/data - - # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` - - query-service: - image: signoz/query-service:latest - container_name: signoz-query-service - command: - [ - "-config=/root/config/prometheus.yml", - "--use-logs-new-schema=true" - ] - # ports: - # - "6060:6060" # pprof port - # - "8080:8080" # query-service port - volumes: - - ./prometheus.yml:/root/config/prometheus.yml - - ../dashboards:/root/config/dashboards - - ./data/signoz/:/var/lib/signoz/ - environment: - - ClickHouseUrl=tcp://clickhouse:9000 - - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - - DASHBOARDS_PATH=/root/config/dashboards - - STORAGE=clickhouse - - GODEBUG=netdns=go - - TELEMETRY_ENABLED=true - - DEPLOYMENT_TYPE=docker-standalone-amd - restart: on-failure - healthcheck: - test: - [ - "CMD", - "wget", - "--spider", - "-q", - "localhost:8080/api/v1/health" - ] - interval: 30s - timeout: 5s - retries: 3 - <<: *db-depend - - otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10} - container_name: otel-migrator - command: - - "--dsn=tcp://clickhouse:9000" - depends_on: - clickhouse: - condition: service_healthy - # clickhouse-2: - # condition: service_healthy - # clickhouse-3: - # condition: service_healthy - - otel-collector: - image: signoz/signoz-otel-collector:0.102.12 - container_name: signoz-otel-collector - command: - [ - "--config=/etc/otel-collector-config.yaml", - "--manager-config=/etc/manager-config.yaml", - "--copy-path=/var/tmp/collector-config.yaml", - "--feature-gates=-pkg.translator.prometheus.NormalizeName" - ] - user: root # required for reading docker container logs - volumes: - - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml - - /var/lib/docker/containers:/var/lib/docker/containers:ro - - /:/hostfs:ro - environment: - - 
OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux - - DOCKER_MULTI_NODE_CLUSTER=false - - LOW_CARDINAL_EXCEPTION_GROUPING=false - ports: - # - "1777:1777" # pprof extension - - "4317:4317" # OTLP gRPC receiver - - "4318:4318" # OTLP HTTP receiver - # - "8888:8888" # OtelCollector internal metrics - # - "8889:8889" # signoz spanmetrics exposed by the agent - # - "9411:9411" # Zipkin port - # - "13133:13133" # health check extension - # - "14250:14250" # Jaeger gRPC - # - "14268:14268" # Jaeger thrift HTTP - # - "55678:55678" # OpenCensus receiver - # - "55679:55679" # zPages extension - restart: on-failure - depends_on: - clickhouse: - condition: service_healthy - otel-collector-migrator: - condition: service_completed_successfully - query-service: - condition: service_healthy - - logspout: - image: "gliderlabs/logspout:v3.2.14" - container_name: signoz-logspout - volumes: - - /etc/hostname:/etc/host_hostname:ro - - /var/run/docker.sock:/var/run/docker.sock - command: syslog+tcp://otel-collector:2255 - depends_on: - - otel-collector - restart: on-failure - - hotrod: - image: jaegertracing/example-hotrod:1.30 - container_name: hotrod - logging: - options: - max-size: 50m - max-file: "3" - command: [ "all" ] - environment: - - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces - - load-hotrod: - image: "signoz/locust:1.2.3" - container_name: load-hotrod - hostname: load-hotrod - environment: - ATTACKED_HOST: http://hotrod:8080 - LOCUST_MODE: standalone - NO_PROXY: standalone - TASK_DELAY_FROM: 5 - TASK_DELAY_TO: 30 - QUIET_MODE: "${QUIET_MODE:-false}" - LOCUST_OPTS: "--headless -u 10 -r 1" - volumes: - - ../common/locust-scripts:/locust diff --git a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml b/pkg/query-service/tests/test-deploy/otel-collector-config.yaml deleted file mode 100644 index 52f9e2be94..0000000000 --- a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml +++ /dev/null @@ -1,148 +0,0 @@ -receivers: - 
tcplog/docker: - listen_address: "0.0.0.0:2255" - operators: - - type: regex_parser - regex: '^<([0-9]+)>[0-9]+ (?P[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P\S+) (?P\S+) [0-9]+ - -( (?P.*))?' - timestamp: - parse_from: attributes.timestamp - layout: '%Y-%m-%dT%H:%M:%S.%LZ' - - type: move - from: attributes["body"] - to: body - - type: remove - field: attributes.timestamp - # please remove names from below if you want to collect logs from them - - type: filter - id: signoz_logs_filter - expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"' - opencensus: - endpoint: 0.0.0.0:55678 - otlp/spanmetrics: - protocols: - grpc: - endpoint: localhost:12345 - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:4317 - http: - endpoint: 0.0.0.0:4318 - jaeger: - protocols: - grpc: - endpoint: 0.0.0.0:14250 - thrift_http: - endpoint: 0.0.0.0:14268 - # thrift_compact: - # endpoint: 0.0.0.0:6831 - # thrift_binary: - # endpoint: 0.0.0.0:6832 - hostmetrics: - collection_interval: 30s - root_path: /hostfs - scrapers: - cpu: {} - load: {} - memory: {} - disk: {} - filesystem: {} - network: {} - prometheus: - config: - global: - scrape_interval: 60s - scrape_configs: - # otel-collector internal metrics - - job_name: otel-collector - static_configs: - - targets: - - otel-collector:8888 - -processors: - batch: - send_batch_size: 10000 - send_batch_max_size: 11000 - timeout: 10s - signozspanmetrics/cumulative: - metrics_exporter: clickhousemetricswrite - latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] - dimensions_cache_size: 100000 - dimensions: - - name: service.namespace - default: default - - name: deployment.environment - default: default - # memory_limiter: - # # 80% of maximum memory up to 2G - # limit_mib: 1500 - # # 25% of limit up to 2G - # 
spike_limit_mib: 512 - # check_interval: 5s - # - # # 50% of the maximum memory - # limit_percentage: 50 - # # 20% of max memory usage spike expected - # spike_limit_percentage: 20 - # queued_retry: - # num_workers: 4 - # queue_size: 100 - # retry_on_failure: true - resourcedetection: - detectors: [env, system] - timeout: 2s - -extensions: - health_check: - endpoint: 0.0.0.0:13133 - zpages: - endpoint: 0.0.0.0:55679 - pprof: - endpoint: 0.0.0.0:1777 - -exporters: - clickhousetraces: - datasource: tcp://clickhouse:9000/signoz_traces - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} - low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING} - clickhousemetricswrite: - endpoint: tcp://clickhouse:9000/signoz_metrics - resource_to_telemetry_conversion: - enabled: true - prometheus: - endpoint: 0.0.0.0:8889 - clickhouselogsexporter: - dsn: tcp://clickhouse:9000/signoz_logs - docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} - timeout: 10s - use_new_schema: true - # logging: {} - -service: - telemetry: - metrics: - address: 0.0.0.0:8888 - extensions: - - health_check - - zpages - - pprof - pipelines: - traces: - receivers: [jaeger, otlp] - processors: [signozspanmetrics/cumulative, batch] - exporters: [clickhousetraces] - metrics: - receivers: [otlp] - processors: [batch] - exporters: [clickhousemetricswrite] - metrics/generic: - receivers: [hostmetrics, prometheus] - processors: [resourcedetection, batch] - exporters: [clickhousemetricswrite] - metrics/spanmetrics: - receivers: [otlp/spanmetrics] - exporters: [prometheus] - logs: - receivers: [otlp, tcplog/docker] - processors: [batch] - exporters: [clickhouselogsexporter] diff --git a/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml b/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml deleted file mode 100644 index e408b55ef6..0000000000 --- a/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml +++ /dev/null @@ -1 +0,0 @@ -server_endpoint: 
ws://query-service:4320/v1/opamp diff --git a/pkg/query-service/tests/test-deploy/prometheus.yml b/pkg/query-service/tests/test-deploy/prometheus.yml deleted file mode 100644 index d7c52893c5..0000000000 --- a/pkg/query-service/tests/test-deploy/prometheus.yml +++ /dev/null @@ -1,25 +0,0 @@ -# my global config -global: - scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute. - evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. - # scrape_timeout is set to the global default (10s). - -# Alertmanager configuration -alerting: - alertmanagers: - - static_configs: - - targets: - - alertmanager:9093 - -# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. -rule_files: - # - "first_rules.yml" - # - "second_rules.yml" - - 'alerts.yml' - -# A scrape configuration containing exactly one endpoint to scrape: -# Here it's Prometheus itself. -scrape_configs: [] - -remote_read: - - url: tcp://clickhouse:9000/signoz_metrics diff --git a/pkg/query-service/utils/format_test.go b/pkg/query-service/utils/format_test.go index 3a2a7f1265..e51d510a55 100644 --- a/pkg/query-service/utils/format_test.go +++ b/pkg/query-service/utils/format_test.go @@ -419,28 +419,28 @@ var testGetClickhouseColumnName = []struct { typeName: string(v3.AttributeKeyTypeTag), dataType: string(v3.AttributeKeyDataTypeInt64), field: "tag1", - want: "attribute_int64_tag1", + want: "`attribute_int64_tag1`", }, { name: "resource", typeName: string(v3.AttributeKeyTypeResource), dataType: string(v3.AttributeKeyDataTypeInt64), field: "tag1", - want: "resource_int64_tag1", + want: "`resource_int64_tag1`", }, { name: "attribute old parser", typeName: constants.Attributes, dataType: string(v3.AttributeKeyDataTypeInt64), field: "tag1", - want: "attribute_int64_tag1", + want: "`attribute_int64_tag1`", }, { name: "resource old parser", typeName: constants.Resources, dataType: 
string(v3.AttributeKeyDataTypeInt64), field: "tag1", - want: "resource_int64_tag1", + want: "`resource_int64_tag1`", }, }