From 4a6ae397b86ae1e151b764ccc37d90c0e2066989 Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Fri, 31 May 2024 22:35:13 +0530 Subject: [PATCH 1/7] [monitoring] Adding influxDB 2.x version support #274 Fixes #274 --- .github/workflows/ci.yml | 2 +- Dockerfile | 17 +- docker-compose.yml | 16 + openwisp_monitoring/db/backends/__init__.py | 12 +- openwisp_monitoring/db/backends/base.py | 42 ++ .../db/backends/influxdb2/client.py | 78 ++++ .../db/backends/influxdb2/queries.py | 277 +++++++++++ .../db/backends/influxdb2/tests.py | 433 ++++++++++++++++++ .../monitoring/tests/__init__.py | 66 ++- .../monitoring/tests/test_configuration.py | 7 +- requirements.txt | 1 + setup.py | 11 +- tests/openwisp2/settings.py | 17 +- 13 files changed, 961 insertions(+), 18 deletions(-) create mode 100644 openwisp_monitoring/db/backends/base.py create mode 100644 openwisp_monitoring/db/backends/influxdb2/client.py create mode 100644 openwisp_monitoring/db/backends/influxdb2/queries.py create mode 100644 openwisp_monitoring/db/backends/influxdb2/tests.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 604df8656..07a3be73b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,7 +62,7 @@ jobs: pip install -U pip wheel setuptools - name: Install npm dependencies - run: sudo npm install -g install jshint stylelint + run: sudo npm install -g jshint stylelint - name: Start InfluxDB container run: docker-compose up -d influxdb diff --git a/Dockerfile b/Dockerfile index 436180c3e..53db803dc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,25 +1,38 @@ FROM python:3.9.19-slim-bullseye +# Install system dependencies RUN apt update && \ apt install --yes zlib1g-dev libjpeg-dev gdal-bin libproj-dev \ libgeos-dev libspatialite-dev libsqlite3-mod-spatialite \ sqlite3 libsqlite3-dev openssl libssl-dev fping && \ rm -rf /var/lib/apt/lists/* /root/.cache/pip/* /tmp/* +# Upgrade pip and install Python dependencies RUN pip install -U pip setuptools wheel +# Copy and install project dependencies COPY requirements-test.txt requirements.txt /opt/openwisp/ RUN pip install -r /opt/openwisp/requirements.txt && \ pip install -r /opt/openwisp/requirements-test.txt && \ rm -rf /var/lib/apt/lists/* /root/.cache/pip/* /tmp/* +# Copy project files and install the project ADD . 
/opt/openwisp RUN pip install -U /opt/openwisp && \ rm -rf /var/lib/apt/lists/* /root/.cache/pip/* /tmp/* + +# Set working directory WORKDIR /opt/openwisp/tests/ + +# Set environment variables ENV NAME=openwisp-monitoring \ PYTHONBUFFERED=1 \ - INFLUXDB_HOST=influxdb \ + INFLUXDB1_HOST=influxdb \ + INFLUXDB2_HOST=influxdb2 \ REDIS_HOST=redis -CMD ["sh", "docker-entrypoint.sh"] + +# Expose the application port EXPOSE 8000 + +# Command to run the application +CMD ["sh", "docker-entrypoint.sh"] diff --git a/docker-compose.yml b/docker-compose.yml index a84213ddd..f98e72c59 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,6 +10,7 @@ services: - "8000:8000" depends_on: - influxdb + - influxdb2 - redis influxdb: @@ -28,6 +29,20 @@ services: INFLUXDB_USER: openwisp INFLUXDB_USER_PASSWORD: openwisp + influxdb2: + image: influxdb:2.0-alpine + volumes: + - influxdb2-data:/var/lib/influxdb2 + ports: + - "8087:8086" + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: openwisp + DOCKER_INFLUXDB_INIT_PASSWORD: openwisp + DOCKER_INFLUXDB_INIT_ORG: openwisp + DOCKER_INFLUXDB_INIT_BUCKET: openwisp2 + DOCKER_INFLUXDB_INIT_RETENTION: 1w + redis: image: redis:5.0-alpine ports: @@ -36,3 +51,4 @@ services: volumes: influxdb-data: {} + influxdb2-data: {} diff --git a/openwisp_monitoring/db/backends/__init__.py b/openwisp_monitoring/db/backends/__init__.py index 715b1113c..bac780e72 100644 --- a/openwisp_monitoring/db/backends/__init__.py +++ b/openwisp_monitoring/db/backends/__init__.py @@ -30,6 +30,16 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): """ try: assert 'BACKEND' in TIMESERIES_DB, 'BACKEND' + if 'BACKEND' in TIMESERIES_DB and '2' in TIMESERIES_DB['BACKEND']: + # InfluxDB 2.x specific checks + assert 'TOKEN' in TIMESERIES_DB, 'TOKEN' + assert 'ORG' in TIMESERIES_DB, 'ORG' + assert 'BUCKET' in TIMESERIES_DB, 'BUCKET' + else: + # InfluxDB 1.x specific checks + assert 'USER' in TIMESERIES_DB, 'USER' + assert 'PASSWORD' in TIMESERIES_DB, 'PASSWORD' + assert 'NAME' in TIMESERIES_DB, 'NAME' assert 'USER' in TIMESERIES_DB, 'USER' assert 'PASSWORD' in TIMESERIES_DB, 'PASSWORD' assert 'NAME' in TIMESERIES_DB, 'NAME' @@ -48,7 +58,7 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): except ImportError as e: # The database backend wasn't found. Display a helpful error message # listing all built-in database backends. 
- builtin_backends = ['influxdb'] + builtin_backends = ['influxdb', 'influxdb2'] if backend_name not in [ f'openwisp_monitoring.db.backends.{b}' for b in builtin_backends ]: diff --git a/openwisp_monitoring/db/backends/base.py b/openwisp_monitoring/db/backends/base.py new file mode 100644 index 000000000..da23e5282 --- /dev/null +++ b/openwisp_monitoring/db/backends/base.py @@ -0,0 +1,42 @@ +import logging + +from django.utils.functional import cached_property + +from openwisp_monitoring.utils import retry + +logger = logging.getLogger(__name__) + + +class BaseDatabaseClient: + def __init__(self, db_name=None): + self._db = None + self.db_name = db_name + + @cached_property + def db(self): + raise NotImplementedError("Subclasses must implement `db` method") + + @retry + def create_database(self): + raise NotImplementedError("Subclasses must implement `create_database` method") + + @retry + def drop_database(self): + raise NotImplementedError("Subclasses must implement `drop_database` method") + + @retry + def query(self, query): + raise NotImplementedError("Subclasses must implement `query` method") + + def write(self, name, values, **kwargs): + raise NotImplementedError("Subclasses must implement `write` method") + + def get_list_retention_policies(self, name=None): + raise NotImplementedError( + "Subclasses must implement `get_list_retention_policies` method" + ) + + def create_or_alter_retention_policy(self, name, duration): + raise NotImplementedError( + "Subclasses must implement `create_or_alter_retention_policy` method" + ) diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py new file mode 100644 index 000000000..72e142534 --- /dev/null +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -0,0 +1,78 @@ +import logging + +from django.utils.functional import cached_property +from influxdb_client import InfluxDBClient, Point +from influxdb_client.client.exceptions import InfluxDBError +from influxdb_client.client.write_api import SYNCHRONOUS + +from openwisp_monitoring.utils import retry + +from ...exceptions import TimeseriesWriteException +from .. 
import TIMESERIES_DB +from ..base import BaseDatabaseClient + +logger = logging.getLogger(__name__) + + +class DatabaseClient(BaseDatabaseClient): + backend_name = 'influxdb2' + + def __init__(self, db_name=None): + super().__init__(db_name) + self.client_error = InfluxDBError + + @cached_property + def db(self): + return InfluxDBClient( + url=f"http://{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}", + token=TIMESERIES_DB['TOKEN'], + org=TIMESERIES_DB['ORG'], + bucket=self.db_name, + ) + + @retry + def create_database(self): + self.write_api = self.db.write_api(write_options=SYNCHRONOUS) + self.query_api = self.db.query_api() + logger.debug('Initialized APIs for InfluxDB 2.0') + + @retry + def drop_database(self): + pass # Implement as needed for InfluxDB 2.0 + + @retry + def query(self, query): + return self.query_api.query(query) + + def write(self, name, values, **kwargs): + point = Point(name).time(self._get_timestamp(kwargs.get('timestamp'))) + tags = kwargs.get('tags', {}) + for tag, value in tags.items(): + point.tag(tag, value) + for field, value in values.items(): + point.field(field, value) + try: + self.write_api.write(bucket=self.db_name, record=point) + except InfluxDBError as e: + raise TimeseriesWriteException(str(e)) + + @retry + def get_list_retention_policies(self, name=None): + bucket = self.db.buckets_api().find_bucket_by_name(name) + if bucket: + return bucket.retention_rules + return [] + + @retry + def create_or_alter_retention_policy(self, name, duration): + bucket = self.db.buckets_api().find_bucket_by_name(name) + retention_rules = [{"type": "expire", "everySeconds": duration}] + if bucket: + bucket.retention_rules = retention_rules + self.db.buckets_api().update_bucket(bucket=bucket) + else: + self.db.buckets_api().create_bucket( + bucket_name=name, + retention_rules=retention_rules, + org=TIMESERIES_DB["ORG"], + ) diff --git a/openwisp_monitoring/db/backends/influxdb2/queries.py b/openwisp_monitoring/db/backends/influxdb2/queries.py new file mode 100644 index 000000000..216ad1cf8 --- /dev/null +++ b/openwisp_monitoring/db/backends/influxdb2/queries.py @@ -0,0 +1,277 @@ +chart_query = { + 'uptime': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with uptime: r._value * 100 }))' + ) + }, + 'packet_loss': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "loss" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean()' + ) + }, + 'rtt': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "rtt_avg" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> yield(name: "RTT_average") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "rtt_max" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> yield(name: "RTT_max") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "rtt_min" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> yield(name: "RTT_min")' + ) + }, + 
'wifi_clients': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}" and ' + 'r["ifname"] == "{ifname}") ' + '|> distinct() ' + '|> count()' + ) + }, + 'general_wifi_clients': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}"' + '{organization_id}{location_id}{floorplan_id}) ' + '|> distinct() ' + '|> count()' + ) + }, + 'traffic': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "tx_bytes" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}" and ' + 'r["ifname"] == "{ifname}") ' + '|> sum() ' + '|> map(fn: (r) => ({ r with upload: r._value / 1000000000 })) ' + '|> yield(name: "upload") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "rx_bytes" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}" and ' + 'r["ifname"] == "{ifname}") ' + '|> sum() ' + '|> map(fn: (r) => ({ r with download: r._value / 1000000000 })) ' + '|> yield(name: "download")' + ) + }, + 'general_traffic': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "tx_bytes"{organization_id}' + '{location_id}{floorplan_id}{ifname}) ' + '|> sum() ' + '|> map(fn: (r) => ({ r with upload: r._value / 1000000000 })) ' + '|> yield(name: "upload") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "rx_bytes"{organization_id}' + '{location_id}{floorplan_id}{ifname}) ' + '|> sum() ' + '|> map(fn: (r) => ({ r with download: r._value / 1000000000 })) ' + '|> yield(name: "download")' + ) + }, + 'memory': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "percent_used" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with memory_usage: r._value }))' + ) + }, + 'cpu': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "cpu_usage" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with CPU_load: r._value }))' + ) + }, + 'disk': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "used_disk" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with disk_usage: r._value }))' + ) + }, + 'signal_strength': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "signal_strength" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with signal_strength: round(r._value) })) ' + '|> yield(name: "signal_strength") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "signal_power" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> 
map(fn: (r) => ({ r with signal_power: round(r._value) })) ' + '|> yield(name: "signal_power")' + ) + }, + 'signal_quality': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "signal_quality" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with signal_quality: round(r._value) })) ' + '|> yield(name: "signal_quality") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "snr" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with signal_to_noise_ratio: round(r._value) })) ' + '|> yield(name: "signal_to_noise_ratio")' + ) + }, + 'access_tech': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}{end_date}) ' + '|> filter(fn: (r) => r["_measurement"] == "access_tech" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mode() ' + '|> map(fn: (r) => ({ r with access_tech: r._value }))' + ) + }, + 'bandwidth': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "sent_bps_tcp" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with TCP: r._value / 1000000000 })) ' + '|> yield(name: "TCP") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "sent_bps_udp" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with UDP: r._value / 1000000000 })) ' + '|> yield(name: "UDP")' + ) + }, + 'transfer': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "sent_bytes_tcp" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> sum() ' + '|> map(fn: (r) => ({ r with TCP: r._value / 1000000000 })) ' + '|> yield(name: "TCP") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "sent_bytes_udp" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> sum() ' + '|> map(fn: (r) => ({ r with UDP: r._value / 1000000000 })) ' + '|> yield(name: "UDP")' + ) + }, + 'retransmits': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "retransmits" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with retransmits: r._value }))' + ) + }, + 'jitter': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "jitter" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with jitter: r._value }))' + ) + }, + 'datagram': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "lost_packets" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with lost_datagram: r._value })) ' + '|> yield(name: "lost_datagram") ' + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == 
"total_packets" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with total_datagram: r._value })) ' + '|> yield(name: "total_datagram")' + ) + }, + 'datagram_loss': { + 'influxdb2': ( + 'from(bucket: "{key}") ' + '|> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "lost_percent" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean() ' + '|> map(fn: (r) => ({ r with datagram_loss: r._value }))' + ) + }, +} + +default_chart_query = [ + 'from(bucket: "{key}") |> range(start: {time}{end_date}) ', + ( + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + ), +] + +device_data_query = ( + 'from(bucket: "{0}") |> range(start: 0) ' + '|> filter(fn: (r) => r["_measurement"] == "{1}" and r["pk"] == "{2}") ' + '|> sort(columns: ["_time"], desc: true) ' + '|> limit(n: 1)' +) diff --git a/openwisp_monitoring/db/backends/influxdb2/tests.py b/openwisp_monitoring/db/backends/influxdb2/tests.py new file mode 100644 index 000000000..eb3cd5124 --- /dev/null +++ b/openwisp_monitoring/db/backends/influxdb2/tests.py @@ -0,0 +1,433 @@ +from datetime import datetime, timedelta +from unittest.mock import patch + +from celery.exceptions import Retry +from django.core.exceptions import ValidationError +from django.test import TestCase, tag +from django.utils.timezone import now +from freezegun import freeze_time +from influxdb_client import InfluxDBClient +from influxdb_client.client.exceptions import InfluxDBError +from pytz import timezone as tz +from swapper import load_model + +from openwisp_monitoring.device.settings import ( + DEFAULT_RETENTION_POLICY, + SHORT_RETENTION_POLICY, +) +from openwisp_monitoring.device.utils import ( + DEFAULT_RP, + SHORT_RP, + manage_default_retention_policy, + manage_short_retention_policy, +) +from openwisp_monitoring.monitoring.tests import TestMonitoringMixin +from openwisp_monitoring.settings import MONITORING_TIMESERIES_RETRY_OPTIONS +from openwisp_utils.tests import capture_stderr + +from ...exceptions import TimeseriesWriteException +from .. 
import timeseries_db + +Chart = load_model('monitoring', 'Chart') +Notification = load_model('openwisp_notifications', 'Notification') + + +@tag('timeseries_client') +class TestDatabaseClient(TestMonitoringMixin, TestCase): + def test_forbidden_queries(self): + queries = [ + 'DROP DATABASE openwisp2', + 'DROP MEASUREMENT test_metric', + 'CREATE DATABASE test', + 'DELETE MEASUREMENT test_metric', + 'ALTER RETENTION POLICY policy', + 'SELECT * INTO metric2 FROM test_metric', + ] + for q in queries: + try: + timeseries_db.validate_query(q) + except ValidationError as e: + self.assertIn('configuration', e.message_dict) + else: + self.fail('ValidationError not raised') + + def test_get_custom_query(self): + c = self._create_chart(test_data=None) + custom_q = c._default_query.replace('{field_name}', '{fields}') + q = c.get_query(query=custom_q, fields=['SUM(*)']) + self.assertIn('|> sum()', q) + + def test_is_aggregate_bug(self): + m = self._create_object_metric(name='summary_avg') + c = Chart(metric=m, configuration='dummy') + self.assertFalse(timeseries_db._is_aggregate(c.query)) + + def test_is_aggregate_fields_function(self): + m = self._create_object_metric(name='is_aggregate_func') + c = Chart(metric=m, configuration='uptime') + self.assertTrue(timeseries_db._is_aggregate(c.query)) + + def test_get_query_fields_function(self): + c = self._create_chart(test_data=None, configuration='histogram') + q = c.get_query(fields=['ssh', 'http2', 'apple-music']) + expected = ( + '|> sum(column: "ssh") ' + '|> sum(column: "http2") ' + '|> sum(column: "apple-music")' + ) + self.assertIn(expected, q) + + def test_default_query(self): + c = self._create_chart(test_data=False) + q = ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + ) + self.assertEqual(c.query, q) + + def test_write(self): + timeseries_db.write('test_write', dict(value=2), database=self.TEST_DB) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "test_write")' + ) + measurement = list(result)[0] + self.assertEqual(measurement['_value'], 2) + + def test_general_write(self): + m = self._create_general_metric(name='Sync test') + m.write(1) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "sync_test")' + ) + measurement = list(result)[0] + self.assertEqual(measurement['_value'], 1) + + def test_object_write(self): + om = self._create_object_metric() + om.write(3) + content_type = '.'.join(om.content_type.natural_key()) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "test_metric" and r["object_id"] == "{om.object_id}" ' + f'and r["content_type"] == "{content_type}")' + ) + measurement = list(result)[0] + self.assertEqual(measurement['_value'], 3) + + def test_general_same_key_different_fields(self): + down = self._create_general_metric( + name='traffic (download)', key='traffic', field_name='download' + ) + down.write(200) + up = self._create_general_metric( + name='traffic (upload)', key='traffic', field_name='upload' + ) + up.write(100) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "traffic")' + ) + measurements = list(result) + download_measurement = 
next( + m for m in measurements if m['_field'] == 'download' + ) + upload_measurement = next(m for m in measurements if m['_field'] == 'upload') + self.assertEqual(download_measurement['_value'], 200) + self.assertEqual(upload_measurement['_value'], 100) + + def test_object_same_key_different_fields(self): + user = self._create_user() + user_down = self._create_object_metric( + name='traffic (download)', + key='traffic', + field_name='download', + content_object=user, + ) + user_down.write(200) + user_up = self._create_object_metric( + name='traffic (upload)', + key='traffic', + field_name='upload', + content_object=user, + ) + user_up.write(100) + content_type = '.'.join(user_down.content_type.natural_key()) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "traffic" and ' + f'r["object_id"] == "{user_down.object_id}" and r["content_type"] == "{content_type}")' + ) + measurements = list(result) + download_measurement = next( + m for m in measurements if m['_field'] == 'download' + ) + upload_measurement = next(m for m in measurements if m['_field'] == 'upload') + self.assertEqual(download_measurement['_value'], 200) + self.assertEqual(upload_measurement['_value'], 100) + + def test_delete_metric_data(self): + m = self._create_general_metric(name='test_metric') + m.write(100) + self.assertEqual(m.read()[0]['value'], 100) + timeseries_db.delete_metric_data(key=m.key) + self.assertEqual(m.read(), []) + om = self._create_object_metric(name='dummy') + om.write(50) + m.write(100) + self.assertEqual(m.read()[0]['value'], 100) + self.assertEqual(om.read()[0]['value'], 50) + timeseries_db.delete_metric_data() + self.assertEqual(m.read(), []) + self.assertEqual(om.read(), []) + + def test_get_query_1d(self): + c = self._create_chart(test_data=None, configuration='uptime') + q = c.get_query(time='1d') + last24 = now() - timedelta(days=1) + self.assertIn(str(last24)[0:14], q) + self.assertIn('|> aggregateWindow(every: 10m, fn: mean)', q) + + def test_get_query_30d(self): + c = self._create_chart(test_data=None, configuration='uptime') + q = c.get_query(time='30d') + last30d = now() - timedelta(days=30) + self.assertIn(str(last30d)[0:10], q) + self.assertIn('|> aggregateWindow(every: 24h, fn: mean)', q) + + def test_group_by_tags(self): + self.assertEqual( + timeseries_db._group_by( + 'from(bucket: "measurement") |> range(start: -1d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 1d, fn: count)', + time='30d', + chart_type='stackedbar+lines', + group_map={'30d': '30d'}, + strip=False, + ), + 'from(bucket: "measurement") |> range(start: -30d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 30d, fn: count)', + ) + self.assertEqual( + timeseries_db._group_by( + 'from(bucket: "measurement") |> range(start: -1d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 1d, fn: count)', + time='30d', + chart_type='stackedbar+lines', + group_map={'30d': '30d'}, + strip=True, + ), + 'from(bucket: "measurement") |> range(start: -30d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item")', + ) + self.assertEqual( + timeseries_db._group_by( + 'from(bucket: "measurement") |> range(start: -1d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 1d, fn: count) |> group(columns: ["tag"])', + time='30d', + chart_type='stackedbar+lines', + group_map={'30d': '30d'}, + strip=False, + ), + 
'from(bucket: "measurement") |> range(start: -30d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 30d, fn: count) |> group(columns: ["tag"])', + ) + self.assertEqual( + timeseries_db._group_by( + 'from(bucket: "measurement") |> range(start: -1d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'aggregateWindow(every: 1d, fn: count) |> group(columns: ["tag"])', + time='30d', + chart_type='stackedbar+lines', + group_map={'30d': '30d'}, + strip=True, + ), + 'from(bucket: "measurement") |> range(start: -30d) |> ' + 'filter(fn: (r) => r["_measurement"] == "item") |> ' + 'group(columns: ["tag"])', + ) + + def test_retention_policy(self): + manage_short_retention_policy() + manage_default_retention_policy() + rp = timeseries_db.get_list_retention_policies() + self.assertEqual(len(rp), 2) + self.assertEqual(rp[0].name, DEFAULT_RP) + self.assertEqual(rp[0].every_seconds, DEFAULT_RETENTION_POLICY) + self.assertEqual(rp[1].name, SHORT_RP) + self.assertEqual(rp[1].every_seconds, SHORT_RETENTION_POLICY) + + def test_query_set(self): + c = self._create_chart(configuration='histogram') + expected = ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> aggregateWindow(every: {time}, fn: sum) ' + ) + self.assertEqual(c.query, expected) + self.assertEqual( + ''.join(timeseries_db.queries.default_chart_query[0:2]), c._default_query + ) + c.metric.object_id = None + self.assertEqual(timeseries_db.queries.default_chart_query[0], c._default_query) + + def test_read_order(self): + m = self._create_general_metric(name='dummy') + m.write(30) + m.write(40, time=now() - timedelta(days=2)) + with self.subTest('Test ascending read order'): + metric_data = m.read(limit=2, order='time') + self.assertEqual(metric_data[0]['value'], 40) + self.assertEqual(metric_data[1]['value'], 30) + with self.subTest('Test descending read order'): + metric_data = m.read(limit=2, order='-time') + self.assertEqual(metric_data[0]['value'], 30) + self.assertEqual(metric_data[1]['value'], 40) + with self.subTest('Test invalid read order'): + with self.assertRaises(timeseries_db.client_error) as e: + metric_data = m.read(limit=2, order='invalid') + self.assertIn('Invalid order "invalid" passed.', str(e)) + + def test_read_with_rp(self): + self._create_admin() + manage_short_retention_policy() + with self.subTest( + 'Test metric write on short retention_policy immediate alert' + ): + m = self._create_general_metric(name='dummy') + self._create_alert_settings( + metric=m, custom_operator='<', custom_threshold=1, custom_tolerance=0 + ) + m.write(0, retention_policy=SHORT_RP) + self.assertEqual(m.read(retention_policy=SHORT_RP)[0][m.field_name], 0) + m.refresh_from_db() + self.assertEqual(m.is_healthy, False) + self.assertEqual(m.is_healthy_tolerant, False) + self.assertEqual(Notification.objects.count(), 1) + with self.subTest( + 'Test metric write on short retention_policy with deferred alert' + ): + m2 = self._create_general_metric(name='dummy2') + self._create_alert_settings( + metric=m2, custom_operator='<', custom_threshold=1, custom_tolerance=1 + ) + m.write(0, retention_policy=SHORT_RP, time=now() - timedelta(minutes=2)) + self.assertEqual(m.read(retention_policy=SHORT_RP)[0][m.field_name], 0) + m.refresh_from_db() + self.assertEqual(m.is_healthy, False) + self.assertEqual(m.is_healthy_tolerant, False) + 
self.assertEqual(Notification.objects.count(), 1) + + def test_metric_write_microseconds_precision(self): + m = self._create_object_metric( + name='wlan0', key='wlan0', configuration='clients' + ) + m.write('00:14:5c:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235142)) + m.write('00:23:4a:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235152)) + self.assertEqual(len(m.read()), 2) + + @patch.object( + InfluxDBClient, 'write_api', side_effect=InfluxDBError('Server error') + ) + @capture_stderr() + def test_write_retry(self, mock_write): + with self.assertRaises(TimeseriesWriteException): + timeseries_db.write('test_write', {'value': 1}) + m = self._create_general_metric(name='Test metric') + with self.assertRaises(Retry): + m.write(1) + + @patch.object( + InfluxDBClient, + 'write_api', + side_effect=InfluxDBError( + content='{"error":"partial write: points beyond retention policy dropped=1"}', + code=400, + ), + ) + @capture_stderr() + def test_write_skip_retry_for_retention_policy(self, mock_write): + try: + timeseries_db.write('test_write', {'value': 1}) + except TimeseriesWriteException: + self.fail( + 'TimeseriesWriteException should not be raised when data ' + 'points crosses retention policy' + ) + m = self._create_general_metric(name='Test metric') + try: + m.write(1) + except Retry: + self.fail( + 'Writing metric should not be retried when data ' + 'points crosses retention policy' + ) + + @patch.object( + InfluxDBClient, 'write_api', side_effect=InfluxDBError('Server error') + ) + @capture_stderr() + def test_timeseries_write_params(self, mock_write): + with freeze_time('Jan 14th, 2020') as frozen_datetime: + m = self._create_general_metric(name='Test metric') + with self.assertRaises(Retry) as e: + m.write(1) + frozen_datetime.tick(delta=timedelta(minutes=10)) + self.assertEqual( + now(), datetime(2020, 1, 14, tzinfo=tz('UTC')) + timedelta(minutes=10) + ) + task_signature = e.exception.sig + with patch.object(timeseries_db, 'write') as mock_write: + self._retry_task(task_signature) + mock_write.assert_called_with( + 'test_metric', + {'value': 1}, + database=None, + retention_policy=None, + tags={}, + timestamp=datetime(2020, 1, 14, tzinfo=tz('UTC')).isoformat(), + current=False, + ) + + def _retry_task(self, task_signature): + task_kwargs = task_signature.kwargs + task_signature.type.run(**task_kwargs) + + @patch.object( + InfluxDBClient, 'query_api', side_effect=InfluxDBError('Server error') + ) + def test_retry_mechanism(self, mock_query): + max_retries = MONITORING_TIMESERIES_RETRY_OPTIONS.get('max_retries') + with patch('logging.Logger.info') as mocked_logger: + try: + self.test_get_query_fields_function() + except Exception: + pass + self.assertEqual(mocked_logger.call_count, max_retries) + mocked_logger.assert_called_with( + 'Error while executing method "query":\nServer error\n' + f'Attempt {max_retries} out of {max_retries}.\n' + ) + + +class TestDatabaseClientUdp(TestMonitoringMixin, TestCase): + def test_exceed_udp_packet_limit(self): + # InfluxDB 2.x does not use UDP for writing data, but this test is kept + # for backward compatibility reference + timeseries_db.write( + 'test_udp_write', dict(value='O' * 66000), database=self.TEST_DB + ) + result = timeseries_db.query( + f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' + f'filter(fn: (r) => r["_measurement"] == "test_udp_write")' + ) + measurement = list(result) + self.assertEqual(len(measurement), 1) diff --git a/openwisp_monitoring/monitoring/tests/__init__.py 
b/openwisp_monitoring/monitoring/tests/__init__.py index 8f50774e1..eb3e3243c 100644 --- a/openwisp_monitoring/monitoring/tests/__init__.py +++ b/openwisp_monitoring/monitoring/tests/__init__.py @@ -91,7 +91,13 @@ "SELECT {fields|SUM|/ 1} FROM {key} " "WHERE time >= '{time}' AND content_type = " "'{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> sum()' + ), }, }, 'dummy': { @@ -108,7 +114,7 @@ 'description': 'Bugged chart for testing purposes.', 'unit': 'bugs', 'order': 999, - 'query': {'influxdb': "BAD"}, + 'query': {'influxdb': "BAD", 'influxdb2': "BAD"}, }, 'default': { 'type': 'line', @@ -120,7 +126,12 @@ 'influxdb': ( "SELECT {field_name} FROM {key} WHERE time >= '{time}' AND " "content_type = '{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + ), }, }, 'multiple_test': { @@ -133,26 +144,43 @@ 'influxdb': ( "SELECT {field_name}, value2 FROM {key} WHERE time >= '{time}' AND " "content_type = '{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" or ' + 'r["_measurement"] == "value2" and ' + 'r["content_type"] == "{content_type}" and ' + 'r["object_id"] == "{object_id}")' + ), }, }, 'group_by_tag': { 'type': 'stackedbars', 'title': 'Group by tag', - 'description': 'Query is groupped by tag along with time', + 'description': 'Query is grouped by tag along with time', 'unit': 'n.', 'order': 999, 'query': { 'influxdb': ( "SELECT CUMULATIVE_SUM(SUM({field_name})) FROM {key} WHERE time >= '{time}'" " GROUP BY time(1d), metric_num" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}") ' + '|> group(columns: ["metric_num"]) |> sum() |> cumulativeSum() |> window(every: 1d)' + ), }, 'summary_query': { 'influxdb': ( "SELECT SUM({field_name}) FROM {key} WHERE time >= '{time}'" " GROUP BY time(30d), metric_num" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}") ' + '|> group(columns: ["metric_num"]) |> sum() |> window(every: 30d)' + ), }, }, 'mean_test': { @@ -165,7 +193,13 @@ 'influxdb': ( "SELECT MEAN({field_name}) AS {field_name} FROM {key} WHERE time >= '{time}' AND " "content_type = '{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean()' + ), }, }, 'sum_test': { @@ -178,7 +212,13 @@ 'influxdb': ( "SELECT SUM({field_name}) AS {field_name} FROM {key} WHERE time >= '{time}' AND " "content_type = '{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> sum()' + ), }, }, 'top_fields_mean': { @@ -192,7 +232,13 @@ "SELECT 
{fields|MEAN} FROM {key} " "WHERE time >= '{time}' AND content_type = " "'{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' + '|> mean()' + ), }, }, } diff --git a/openwisp_monitoring/monitoring/tests/test_configuration.py b/openwisp_monitoring/monitoring/tests/test_configuration.py index 4ee11b669..bd276870c 100644 --- a/openwisp_monitoring/monitoring/tests/test_configuration.py +++ b/openwisp_monitoring/monitoring/tests/test_configuration.py @@ -34,7 +34,12 @@ def _get_new_metric(self): "SELECT {fields|SUM|/ 1} FROM {key} " "WHERE time >= '{time}' AND content_type = " "'{content_type}' AND object_id = '{object_id}'" - ) + ), + 'influxdb2': ( + 'from(bucket: "{key}") |> range(start: {time}) ' + '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' + 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + ), }, } }, diff --git a/requirements.txt b/requirements.txt index 90feaac61..cc87d6f08 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ django-nested-admin~=4.0.2 netaddr~=0.8.0 python-dateutil>=2.7.0,<3.0.0 openwisp-utils[rest] @ https://github.com/openwisp/openwisp-utils/tarball/master +influxdb-client~=1.21.0 diff --git a/setup.py b/setup.py index 43ca4bb97..3b935de4b 100755 --- a/setup.py +++ b/setup.py @@ -55,6 +55,10 @@ def get_install_requires(): include_package_data=True, zip_safe=False, install_requires=get_install_requires(), + extras_require={ + 'influxdb': ['influxdb>=5.2,<5.3'], + 'influxdb2': ['influxdb-client>=1.17.0,<2.0.0'], + }, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', @@ -64,7 +68,10 @@ def get_install_requires(): 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Operating System :: OS Independent', 'Framework :: Django', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', ], ) diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py index c7772d5cd..3d3dbf309 100644 --- a/tests/openwisp2/settings.py +++ b/tests/openwisp2/settings.py @@ -21,7 +21,7 @@ } } -TIMESERIES_DATABASE = { +INFLUXDB_1x_DATABASE = { 'BACKEND': 'openwisp_monitoring.db.backends.influxdb', 'USER': 'openwisp', 'PASSWORD': 'openwisp', @@ -31,6 +31,21 @@ # UDP writes are disabled by default 'OPTIONS': {'udp_writes': False, 'udp_port': 8089}, } + +INFLUXDB_2x_DATABASE = { + 'BACKEND': 'openwisp_monitoring.db.backends.influxdb2', + 'TOKEN': 'your-influxdb-2.0-token', + 'ORG': 'your-org', + 'BUCKET': 'your-bucket', + 'HOST': os.getenv('INFLUXDB2_HOST', 'localhost'), + 'PORT': '8087', +} + +if os.environ.get('USE_INFLUXDB2', False): + TIMESERIES_DATABASE = INFLUXDB_2x_DATABASE +else: + TIMESERIES_DATABASE = INFLUXDB_1x_DATABASE + if TESTING: if os.environ.get('TIMESERIES_UDP', False): TIMESERIES_DATABASE['OPTIONS'] = {'udp_writes': True, 'udp_port': 8091} From d02db9c2671462410274ae7252b55676996c3512 Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Sat, 1 Jun 2024 13:17:52 +0530 Subject: [PATCH 2/7] [fix] Workflow and test updated #274 Fixes #274 --- docker-compose.yml | 20 ++++--------------- 
.../db/backends/influxdb2/tests.py | 2 +- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index f98e72c59..4df296e03 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,9 +8,12 @@ services: dockerfile: Dockerfile ports: - "8000:8000" + - "8089:8089/udp" + - "8090:8090/udp" + - "8091:8091/udp" + - "8092:8092/udp" depends_on: - influxdb - - influxdb2 - redis influxdb: @@ -29,20 +32,6 @@ services: INFLUXDB_USER: openwisp INFLUXDB_USER_PASSWORD: openwisp - influxdb2: - image: influxdb:2.0-alpine - volumes: - - influxdb2-data:/var/lib/influxdb2 - ports: - - "8087:8086" - environment: - DOCKER_INFLUXDB_INIT_MODE: setup - DOCKER_INFLUXDB_INIT_USERNAME: openwisp - DOCKER_INFLUXDB_INIT_PASSWORD: openwisp - DOCKER_INFLUXDB_INIT_ORG: openwisp - DOCKER_INFLUXDB_INIT_BUCKET: openwisp2 - DOCKER_INFLUXDB_INIT_RETENTION: 1w - redis: image: redis:5.0-alpine ports: @@ -51,4 +40,3 @@ services: volumes: influxdb-data: {} - influxdb2-data: {} diff --git a/openwisp_monitoring/db/backends/influxdb2/tests.py b/openwisp_monitoring/db/backends/influxdb2/tests.py index eb3cd5124..9bf8aca98 100644 --- a/openwisp_monitoring/db/backends/influxdb2/tests.py +++ b/openwisp_monitoring/db/backends/influxdb2/tests.py @@ -420,7 +420,7 @@ def test_retry_mechanism(self, mock_query): class TestDatabaseClientUdp(TestMonitoringMixin, TestCase): def test_exceed_udp_packet_limit(self): - # InfluxDB 2.x does not use UDP for writing data, but this test is kept + # InfluxDB 2.x does not use UDP for writing data, but this is kept # for backward compatibility reference timeseries_db.write( 'test_udp_write', dict(value='O' * 66000), database=self.TEST_DB From 0aa3c6d010370dc33f7140830c66f7b89fa07659 Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Mon, 3 Jun 2024 00:26:54 +0530 Subject: [PATCH 3/7] [monitoring] Updated files and corrected failing qa checks #274 Fixes #274 --- Dockerfile | 12 +- README.rst | 163 +++++-- docker-compose.yml | 21 +- openwisp_monitoring/db/backends/__init__.py | 16 +- openwisp_monitoring/db/backends/base.py | 42 -- .../db/backends/influxdb2/__init__.py | 0 .../db/backends/influxdb2/client.py | 257 +++++++--- .../db/backends/influxdb2/queries.py | 459 +++++++++--------- .../db/backends/influxdb2/tests.py | 433 ----------------- openwisp_monitoring/db/exceptions.py | 4 + tests/__init__.py | 0 tests/docker-entrypoint.sh | 0 tests/openwisp2/settings.py | 13 +- 13 files changed, 579 insertions(+), 841 deletions(-) delete mode 100644 openwisp_monitoring/db/backends/base.py create mode 100644 openwisp_monitoring/db/backends/influxdb2/__init__.py create mode 100644 tests/__init__.py mode change 100644 => 100755 tests/docker-entrypoint.sh diff --git a/Dockerfile b/Dockerfile index 53db803dc..8cf5a1a1e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,23 +16,15 @@ RUN pip install -r /opt/openwisp/requirements.txt && \ pip install -r /opt/openwisp/requirements-test.txt && \ rm -rf /var/lib/apt/lists/* /root/.cache/pip/* /tmp/* -# Copy project files and install the project ADD . 
/opt/openwisp RUN pip install -U /opt/openwisp && \ rm -rf /var/lib/apt/lists/* /root/.cache/pip/* /tmp/* - -# Set working directory WORKDIR /opt/openwisp/tests/ - # Set environment variables ENV NAME=openwisp-monitoring \ PYTHONBUFFERED=1 \ - INFLUXDB1_HOST=influxdb \ - INFLUXDB2_HOST=influxdb2 \ + INFLUXDB_HOST=influxdb \ REDIS_HOST=redis - -# Expose the application port +CMD ["sh", "docker-entrypoint.sh"] EXPOSE 8000 - # Command to run the application -CMD ["sh", "docker-entrypoint.sh"] diff --git a/README.rst b/README.rst index 223c4b980..9e332053c 100644 --- a/README.rst +++ b/README.rst @@ -326,20 +326,23 @@ Follow the setup instructions of `openwisp-controller # Make sure you change them in production # You can select one of the backends located in openwisp_monitoring.db.backends - TIMESERIES_DATABASE = { + INFLUXDB_1x_DATABASE = { 'BACKEND': 'openwisp_monitoring.db.backends.influxdb', 'USER': 'openwisp', 'PASSWORD': 'openwisp', 'NAME': 'openwisp2', - 'HOST': 'localhost', + 'HOST': 'influxdb', 'PORT': '8086', - 'OPTIONS': { - # Specify additional options to be used while initializing - # database connection. - # Note: These options may differ based on the backend used. - 'udp_writes': True, - 'udp_port': 8089, - } + 'OPTIONS': {'udp_writes': False, 'udp_port': 8089}, + } + + INFLUXDB_2x_DATABASE = { + 'BACKEND': 'openwisp_monitoring.db.backends.influxdb2', + 'TOKEN': 'my-super-secret-auth-token', + 'ORG': 'openwisp', + 'BUCKET': 'openwisp2', + 'HOST': 'influxdb2', + 'PORT': '9999', } ``urls.py``: @@ -1413,56 +1416,109 @@ Settings | **default**: | see below | +--------------+-----------+ +Timeseries Database Configuration +--------------------------------- + +The ``TIMESERIES_DATABASE`` setting allows configuring the timeseries +database backend used by OpenWISP Monitoring. The configuration supports +both InfluxDB 1.x and 2.x versions. + +Configuration for InfluxDB 1.x +------------------------------ + .. code-block:: python - TIMESERIES_DATABASE = { + INFLUXDB_1x_DATABASE = { 'BACKEND': 'openwisp_monitoring.db.backends.influxdb', 'USER': 'openwisp', 'PASSWORD': 'openwisp', 'NAME': 'openwisp2', - 'HOST': 'localhost', + 'HOST': 'influxdb', 'PORT': '8086', - 'OPTIONS': { - 'udp_writes': False, - 'udp_port': 8089, - } + 'OPTIONS': {'udp_writes': False, 'udp_port': 8089}, } -The following table describes all keys available in ``TIMESERIES_DATABASE`` -setting: - -+---------------+--------------------------------------------------------------------------------------+ -| **Key** | ``Description`` | -+---------------+--------------------------------------------------------------------------------------+ -| ``BACKEND`` | The timeseries database backend to use. 
You can select one of the backends | -| | located in ``openwisp_monitoring.db.backends`` | -+---------------+--------------------------------------------------------------------------------------+ -| ``USER`` | User for logging into the timeseries database | -+---------------+--------------------------------------------------------------------------------------+ -| ``PASSWORD`` | Password of the timeseries database user | -+---------------+--------------------------------------------------------------------------------------+ -| ``NAME`` | Name of the timeseries database | -+---------------+--------------------------------------------------------------------------------------+ -| ``HOST`` | IP address/hostname of machine where the timeseries database is running | -+---------------+--------------------------------------------------------------------------------------+ -| ``PORT`` | Port for connecting to the timeseries database | -+---------------+--------------------------------------------------------------------------------------+ -| ``OPTIONS`` | These settings depends on the timeseries backend: | -| | | -| | +-----------------+----------------------------------------------------------------+ | -| | | ``udp_writes`` | Whether to use UDP for writing data to the timeseries database | | -| | +-----------------+----------------------------------------------------------------+ | -| | | ``udp_port`` | Timeseries database port for writing data using UDP | | -| | +-----------------+----------------------------------------------------------------+ | -+---------------+--------------------------------------------------------------------------------------+ - -**Note:** UDP packets can have a maximum size of 64KB. When using UDP for writing timeseries -data, if the size of the data exceeds 64KB, TCP mode will be used instead. - -**Note:** If you want to use the ``openwisp_monitoring.db.backends.influxdb`` backend -with UDP writes enabled, then you need to enable two different ports for UDP -(each for different retention policy) in your InfluxDB configuration. The UDP configuration -section of your InfluxDB should look similar to the following: +Configuration for InfluxDB 2.x +------------------------------ + +.. code-block:: python + + INFLUXDB_2x_DATABASE = { + 'BACKEND': 'openwisp_monitoring.db.backends.influxdb2', + 'TOKEN': 'my-super-secret-auth-token', + 'ORG': 'openwisp', + 'BUCKET': 'openwisp2', + 'HOST': 'influxdb2', + 'PORT': '9999', + } + +Dynamic Configuration Based on Environment +------------------------------------------ + +You can dynamically switch between InfluxDB 1.x and 2.x configurations +using environment variables: + +.. code-block:: python + + import os + + if os.environ.get('USE_INFLUXDB2', 'False') == 'True': + TIMESERIES_DATABASE = INFLUXDB_2x_DATABASE + else: + TIMESERIES_DATABASE = INFLUXDB_1x_DATABASE + + if TESTING: + if os.environ.get('TIMESERIES_UDP', False): + TIMESERIES_DATABASE['OPTIONS'] = {'udp_writes': True, 'udp_port': 8091} + +Explanation of Settings +----------------------- + ++---------------+---------------------------------------------------------------+ +| **Key** | **Description** | ++-------------------------------------------------------------------------------+ +| ``BACKEND`` | The timeseries database backend to use. 
You can select one | +| | of the backends located in ``openwisp_monitoring.db.backends``| ++---------------+---------------------------------------------------------------+ +| ``USER`` | User for logging into the timeseries database (only for | +| | InfluxDB 1.x) | ++---------------+---------------------------------------------------------------+ +| ``PASSWORD`` | Password of the timeseries database user (only for InfluxDB | +| | 1.x) | ++---------------+---------------------------------------------------------------+ +| ``NAME`` | Name of the timeseries database (only for InfluxDB 1.x) | ++---------------+---------------------------------------------------------------+ +| ``TOKEN`` | Authentication token for InfluxDB 2.x | ++---------------+---------------------------------------------------------------+ +| ``ORG`` | Organization name for InfluxDB 2.x | ++---------------+---------------------------------------------------------------+ +| ``BUCKET`` | Bucket name for InfluxDB 2.x | ++---------------+---------------------------------------------------------------+ +| ``HOST`` | IP address/hostname of machine where the timeseries | +| | database is running | ++---------------+---------------------------------------------------------------+ +| ``PORT`` | Port for connecting to the timeseries database | ++---------------+---------------------------------------------------------------+ +| ``OPTIONS`` | Additional options for the timeseries backend | +| | | +| | +-----------------+-----------------------------------------+ | +| | | ``udp_writes`` | Whether to use UDP for writing data | | +| | | | to the timeseries database | | +| | +-----------------+-----------------------------------------+ | +| | | ``udp_port`` | Timeseries database port for writing | | +| | | | data using UDP | | +| | +-----------------+-----------------------------------------+ | ++---------------+---------------------------------------------------------------+ + +UDP Configuration for InfluxDB 1.x +---------------------------------- + +If you want to use the ``openwisp_monitoring.db.backends.influxdb`` backend +with UDP writes enabled, you need to enable two different ports for UDP +(each for a different retention policy) in your InfluxDB configuration. + +Here is an example of the UDP configuration section in your InfluxDB +configuration file: .. code-block:: text @@ -1479,6 +1535,13 @@ section of your InfluxDB should look similar to the following: database = "openwisp2" retention-policy = 'short' +**Note:** UDP packets can have a maximum size of 64KB. When using UDP for +writing timeseries data, if the size of the data exceeds 64KB, TCP mode +will be used instead. + +Deploying with Ansible +---------------------- + If you are using `ansible-openwisp2 `_ for deploying OpenWISP, you can set the ``influxdb_udp_mode`` ansible variable to ``true`` in your playbook, this will make the ansible role automatically configure the InfluxDB UDP listeners. 
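The snippet below is a minimal sketch, not part of this patch, showing how the ``TIMESERIES_DATABASE`` values documented above for the ``influxdb2`` backend map onto the ``influxdb-client`` Python API that the new ``DatabaseClient`` wraps. The URL is assembled from ``HOST`` and ``PORT`` the same way the backend does it internally; the token, organization and bucket are the placeholder values from the example settings above, not real credentials.

.. code-block:: python

    from influxdb_client import InfluxDBClient, Point
    from influxdb_client.client.write_api import SYNCHRONOUS

    # placeholder values taken from the example TIMESERIES_DATABASE above
    TIMESERIES_DATABASE = {
        'BACKEND': 'openwisp_monitoring.db.backends.influxdb2',
        'TOKEN': 'my-super-secret-auth-token',
        'ORG': 'openwisp',
        'BUCKET': 'openwisp2',
        'HOST': 'influxdb2',
        'PORT': '9999',
    }
    bucket = TIMESERIES_DATABASE['BUCKET']

    # build the low-level client the same way the influxdb2 backend does
    client = InfluxDBClient(
        url=f"http://{TIMESERIES_DATABASE['HOST']}:{TIMESERIES_DATABASE['PORT']}",
        token=TIMESERIES_DATABASE['TOKEN'],
        org=TIMESERIES_DATABASE['ORG'],
    )
    write_api = client.write_api(write_options=SYNCHRONOUS)
    query_api = client.query_api()

    # write a single point, then read it back with a Flux query
    point = Point('ping').tag('object_id', 'device-1').field('uptime', 1.0)
    write_api.write(bucket=bucket, record=point)
    tables = query_api.query(
        f'from(bucket: "{bucket}") |> range(start: -1h) '
        '|> filter(fn: (r) => r._measurement == "ping")'
    )
    for table in tables:
        for record in table.records:
            print(record.get_field(), record.get_value())

Unlike the 1.x backend, there is no UDP write path here: all writes go over HTTP through the synchronous write API, which is why the ``udp_writes``/``udp_port`` options only apply to ``openwisp_monitoring.db.backends.influxdb``.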
diff --git a/docker-compose.yml b/docker-compose.yml index 4df296e03..3a8a3f39f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,10 +8,6 @@ services: dockerfile: Dockerfile ports: - "8000:8000" - - "8089:8089/udp" - - "8090:8090/udp" - - "8091:8091/udp" - - "8092:8092/udp" depends_on: - influxdb - redis @@ -32,6 +28,22 @@ services: INFLUXDB_USER: openwisp INFLUXDB_USER_PASSWORD: openwisp + influxdb2: + image: influxdb:2.0 + container_name: influxdb2 + ports: + # Map the 9086 port on host machine to 8086 in container + - "9086:8086" + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: myuser + DOCKER_INFLUXDB_INIT_PASSWORD: mypassword + DOCKER_INFLUXDB_INIT_ORG: myorg + DOCKER_INFLUXDB_INIT_BUCKET: mybucket + DOCKER_INFLUXDB_INIT_RETENTION: 1w + volumes: + - influxdb-storage:/var/lib/influxdb2 + redis: image: redis:5.0-alpine ports: @@ -40,3 +52,4 @@ services: volumes: influxdb-data: {} + influxdb-storage: diff --git a/openwisp_monitoring/db/backends/__init__.py b/openwisp_monitoring/db/backends/__init__.py index bac780e72..e2399bf6f 100644 --- a/openwisp_monitoring/db/backends/__init__.py +++ b/openwisp_monitoring/db/backends/__init__.py @@ -40,9 +40,6 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): assert 'USER' in TIMESERIES_DB, 'USER' assert 'PASSWORD' in TIMESERIES_DB, 'PASSWORD' assert 'NAME' in TIMESERIES_DB, 'NAME' - assert 'USER' in TIMESERIES_DB, 'USER' - assert 'PASSWORD' in TIMESERIES_DB, 'PASSWORD' - assert 'NAME' in TIMESERIES_DB, 'NAME' assert 'HOST' in TIMESERIES_DB, 'HOST' assert 'PORT' in TIMESERIES_DB, 'PORT' if module: @@ -67,7 +64,18 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): "Try using 'openwisp_monitoring.db.backends.XXX', where XXX is one of:\n" f"{builtin_backends}" ) from e + else: + raise e + +if '2' in TIMESERIES_DB['BACKEND']: + timeseries_db = load_backend_module(module='client').DatabaseClient( + bucket=TIMESERIES_DB['BUCKET'], + org=TIMESERIES_DB['ORG'], + token=TIMESERIES_DB['TOKEN'], + url=f"http://{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}", + ) +else: + timeseries_db = load_backend_module(module='client').DatabaseClient() -timeseries_db = load_backend_module(module='client').DatabaseClient() timeseries_db.queries = load_backend_module(module='queries') diff --git a/openwisp_monitoring/db/backends/base.py b/openwisp_monitoring/db/backends/base.py deleted file mode 100644 index da23e5282..000000000 --- a/openwisp_monitoring/db/backends/base.py +++ /dev/null @@ -1,42 +0,0 @@ -import logging - -from django.utils.functional import cached_property - -from openwisp_monitoring.utils import retry - -logger = logging.getLogger(__name__) - - -class BaseDatabaseClient: - def __init__(self, db_name=None): - self._db = None - self.db_name = db_name - - @cached_property - def db(self): - raise NotImplementedError("Subclasses must implement `db` method") - - @retry - def create_database(self): - raise NotImplementedError("Subclasses must implement `create_database` method") - - @retry - def drop_database(self): - raise NotImplementedError("Subclasses must implement `drop_database` method") - - @retry - def query(self, query): - raise NotImplementedError("Subclasses must implement `query` method") - - def write(self, name, values, **kwargs): - raise NotImplementedError("Subclasses must implement `write` method") - - def get_list_retention_policies(self, name=None): - raise NotImplementedError( - "Subclasses must implement `get_list_retention_policies` 
method" - ) - - def create_or_alter_retention_policy(self, name, duration): - raise NotImplementedError( - "Subclasses must implement `create_or_alter_retention_policy` method" - ) diff --git a/openwisp_monitoring/db/backends/influxdb2/__init__.py b/openwisp_monitoring/db/backends/influxdb2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py index 72e142534..ab21775f7 100644 --- a/openwisp_monitoring/db/backends/influxdb2/client.py +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -1,78 +1,221 @@ import logging +import re +from datetime import datetime -from django.utils.functional import cached_property +from django.conf import settings +from django.core.exceptions import ValidationError +from django.utils.translation import gettext_lazy as _ from influxdb_client import InfluxDBClient, Point -from influxdb_client.client.exceptions import InfluxDBError from influxdb_client.client.write_api import SYNCHRONOUS -from openwisp_monitoring.utils import retry - from ...exceptions import TimeseriesWriteException -from .. import TIMESERIES_DB -from ..base import BaseDatabaseClient logger = logging.getLogger(__name__) -class DatabaseClient(BaseDatabaseClient): - backend_name = 'influxdb2' - - def __init__(self, db_name=None): - super().__init__(db_name) - self.client_error = InfluxDBError +class DatabaseClient(object): + _AGGREGATE = [ + 'COUNT', + 'DISTINCT', + 'INTEGRAL', + 'MEAN', + 'MEDIAN', + 'MODE', + 'SPREAD', + 'STDDEV', + 'SUM', + 'BOTTOM', + 'FIRST', + 'LAST', + 'MAX', + 'MIN', + 'PERCENTILE', + 'SAMPLE', + 'TOP', + 'CEILING', + 'CUMULATIVE_SUM', + 'DERIVATIVE', + 'DIFFERENCE', + 'ELAPSED', + 'FLOOR', + 'HISTOGRAM', + 'MOVING_AVERAGE', + 'NON_NEGATIVE_DERIVATIVE', + 'HOLT_WINTERS', + ] + _FORBIDDEN = ['drop', 'create', 'delete', 'alter', 'into'] + backend_name = 'influxdb' - @cached_property - def db(self): - return InfluxDBClient( - url=f"http://{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}", - token=TIMESERIES_DB['TOKEN'], - org=TIMESERIES_DB['ORG'], - bucket=self.db_name, - ) + def __init__(self, bucket, org, token, url): + self.bucket = bucket + self.org = org + self.token = token + self.url = url + self.client = InfluxDBClient(url=url, token=token, org=org) + self.write_api = self.client.write_api(write_options=SYNCHRONOUS) + self.query_api = self.client.query_api() - @retry def create_database(self): - self.write_api = self.db.write_api(write_options=SYNCHRONOUS) - self.query_api = self.db.query_api() - logger.debug('Initialized APIs for InfluxDB 2.0') + logger.debug('InfluxDB 2.0 does not require explicit database creation.') - @retry def drop_database(self): - pass # Implement as needed for InfluxDB 2.0 + logger.debug('InfluxDB 2.0 does not support dropping databases via the client.') - @retry - def query(self, query): - return self.query_api.query(query) + def create_or_alter_retention_policy(self, name, duration): + logger.debug('InfluxDB 2.0 handles retention policies via bucket settings.') def write(self, name, values, **kwargs): - point = Point(name).time(self._get_timestamp(kwargs.get('timestamp'))) - tags = kwargs.get('tags', {}) - for tag, value in tags.items(): - point.tag(tag, value) - for field, value in values.items(): - point.field(field, value) + timestamp = kwargs.get('timestamp', datetime.utcnow().isoformat()) + point = ( + Point(name) + .tag("object_id", kwargs.get('tags').get('object_id')) + .field(kwargs.get('field'), values) 
+ .time(timestamp) + ) try: - self.write_api.write(bucket=self.db_name, record=point) - except InfluxDBError as e: - raise TimeseriesWriteException(str(e)) - - @retry - def get_list_retention_policies(self, name=None): - bucket = self.db.buckets_api().find_bucket_by_name(name) - if bucket: - return bucket.retention_rules - return [] - - @retry - def create_or_alter_retention_policy(self, name, duration): - bucket = self.db.buckets_api().find_bucket_by_name(name) - retention_rules = [{"type": "expire", "everySeconds": duration}] - if bucket: - bucket.retention_rules = retention_rules - self.db.buckets_api().update_bucket(bucket=bucket) + self.write_api.write(bucket=self.bucket, org=self.org, record=point) + except Exception as exception: + logger.warning(f'got exception while writing to tsdb: {exception}') + raise TimeseriesWriteException + + def batch_write(self, metric_data): + points = [] + for data in metric_data: + timestamp = data.get('timestamp', datetime.utcnow().isoformat()) + point = ( + Point(data.get('name')) + .tag("object_id", data.get('tags').get('object_id')) + .field(data.get('field'), data.get('values')) + .time(timestamp) + ) + points.append(point) + try: + self.write_api.write(bucket=self.bucket, org=self.org, record=points) + except Exception as exception: + logger.warning(f'got exception while writing to tsdb: {exception}') + raise TimeseriesWriteException + + def read(self, key, fields, tags=None, **kwargs): + since = kwargs.get('since') + order = kwargs.get('order') + limit = kwargs.get('limit') + query = ( + f'from(bucket: "{self.bucket}")' + f' |> range(start: {since if since else "-1h"})' # Use since or default + f' |> filter(fn: (r) => r._measurement == "{key}")' + ) + if tags: + tag_query = ' and '.join( + [f'r.{tag} == "{value}"' for tag, value in tags.items()] + ) + query += f' |> filter(fn: (r) => {tag_query})' + if fields: + field_query = ' or '.join([f'r._field == "{field}"' for field in fields]) + query += f' |> filter(fn: (r) => {field_query})' + if order: + query += f' |> sort(columns: ["_time"], desc: {order == "-time"})' + if limit: + query += f' |> limit(n: {limit})' + result = self.query_api.query(org=self.org, query=query) + return [record.values for table in result for record in table.records] + + def delete_metric_data(self, key=None, tags=None): + logger.debug( + 'InfluxDB 2.0 does not support deleting specific data points via the client.' 
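# --- Editorial sketch (not part of the patch): how a single metric write maps
# onto the influxdb-client 2.x API used by DatabaseClient.write() above. The
# url, token, org, bucket and tag values below are illustrative placeholders.
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org')
write_api = client.write_api(write_options=SYNCHRONOUS)
point = (
    Point('ping')                     # measurement name
    .tag('object_id', 'device-uuid')  # tag identifying the monitored object
    .field('uptime', 100.0)           # field value being recorded
)
write_api.write(bucket='openwisp2', org='my-org', record=point)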
+ ) + + def validate_query(self, query): + for word in self._FORBIDDEN: + if word in query.lower(): + msg = _(f'the word "{word.upper()}" is not allowed') + raise ValidationError({'configuration': msg}) + return self._is_aggregate(query) + + def _is_aggregate(self, q): + q = q.upper() + for word in self._AGGREGATE: + if any(['%s(' % word in q, '|%s}' % word in q, '|%s|' % word in q]): + return True + return False + + def get_query( + self, + chart_type, + params, + time, + group_map, + summary=False, + fields=None, + query=None, + timezone=settings.TIME_ZONE, + ): + query = self._fields(fields, query, params['field_name']) + params = self._clean_params(params) + query = query.format(**params) + query = self._group_by(query, time, chart_type, group_map, strip=summary) + if summary: + query = f'{query} |> limit(n: 1)' + return query + + def _fields(self, fields, query, field_name): + matches = re.search(self._fields_regex, query) + if not matches and not fields: + return query + elif matches and not fields: + groups = matches.groupdict() + fields_key = groups.get('group') + fields = [field_name] + if fields and matches: + groups = matches.groupdict() + function = groups['func'] # required + operation = groups.get('op') # optional + fields = [self.__transform_field(f, function, operation) for f in fields] + fields_key = groups.get('group') else: - self.db.buckets_api().create_bucket( - bucket_name=name, - retention_rules=retention_rules, - org=TIMESERIES_DB["ORG"], + fields_key = '{fields}' + if fields: + selected_fields = ', '.join(fields) + return query.replace(fields_key, selected_fields) + + def __transform_field(self, field, function, operation=None): + if operation: + operation = f' {operation}' + else: + operation = '' + return f'{function}("{field}"){operation} AS {field.replace("-", "_")}' + + def _group_by(self, query, time, chart_type, group_map, strip=False): + if not self.validate_query(query): + return query + if not strip and not chart_type == 'histogram': + value = group_map[time] + group_by = ( + f'|> aggregateWindow(every: {value}, fn: mean, createEmpty: false)' ) + else: + group_by = '' + if 'aggregateWindow' not in query: + query = f'{query} {group_by}' + return query + + +# Example usage +if __name__ == "__main__": + bucket = "mybucket" + org = "myorg" + token = "t8Q3Y5mTWuqqTRdGyVxZuyVLO-8pl3I8KaNTR3jV7uTDr_GVECP5Z7LsrZwILGw79Xp4O8pAWkdqTREgIk073Q==" + url = "http://localhost:9086" + + client = DatabaseClient(bucket=bucket, org=org, token=token, url=url) + client.create_database() + + # Write example + client.write( + "example_measurement", 99.5, tags={"object_id": "server_01"}, field="uptime" + ) + + # Read example + result = client.read( + "example_measurement", ["uptime"], tags={"object_id": "server_01"} + ) + print(result) diff --git a/openwisp_monitoring/db/backends/influxdb2/queries.py b/openwisp_monitoring/db/backends/influxdb2/queries.py index 216ad1cf8..a41a0524b 100644 --- a/openwisp_monitoring/db/backends/influxdb2/queries.py +++ b/openwisp_monitoring/db/backends/influxdb2/queries.py @@ -1,277 +1,266 @@ +import logging + +logger = logging.getLogger(__name__) + chart_query = { 'uptime': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with uptime: r._value * 100 }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: 
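# --- Editorial sketch (not part of the patch): executing a Flux query such as
# the chart templates below with the 2.x query API and flattening the result.
# The url, token, org, bucket and measurement are illustrative placeholders.
from influxdb_client import InfluxDBClient

client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org')
flux = (
    'from(bucket: "openwisp2")'
    ' |> range(start: -1d)'
    ' |> filter(fn: (r) => r._measurement == "ping")'
    ' |> mean()'
)
tables = client.query_api().query(org='my-org', query=flux)
rows = [
    {'time': r.get_time(), 'field': r.get_field(), 'value': r.get_value()}
    for table in tables
    for r in table.records
]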
{time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" and r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ r with _value: r._value * 100 })) + |> rename(columns: {_value: "uptime"}) + + ''' }, 'packet_loss': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "loss" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean()' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" and r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> rename(columns: {_value: "packet_loss"}) + + ''' }, 'rtt': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "rtt_avg" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> yield(name: "RTT_average") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "rtt_max" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> yield(name: "RTT_max") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "rtt_min" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> yield(name: "RTT_min")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + RTT_average: r.rtt_avg, + RTT_max: r.rtt_max, + RTT_min: r.rtt_min + })) + ''' }, 'wifi_clients': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}" and ' - 'r["ifname"] == "{ifname}") ' - '|> distinct() ' - '|> count()' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && + r.object_id == "{object_id}" && r.ifname == "{ifname}") + |> group(columns: ["{field_name}"]) + |> count(column: "{field_name}") + |> map(fn: (r) => ({ r with wifi_clients: r._value })) + |> group() // Ungroup to summarize across the selected range + ''' }, 'general_wifi_clients': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}"' - '{organization_id}{location_id}{floorplan_id}) ' - '|> distinct() ' - '|> count()' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r.organization_id == "{organization_id}" && + r.location_id == "{location_id}" && r.floorplan_id == "{floorplan_id}") + |> group(columns: ["{field_name}"]) + |> count(column: "{field_name}") + |> map(fn: (r) => ({ r with wifi_clients: r._value })) + |> group() // Ungroup to summarize across the selected range + ''' }, 'traffic': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "tx_bytes" and ' - 'r["content_type"] == 
"{content_type}" and r["object_id"] == "{object_id}" and ' - 'r["ifname"] == "{ifname}") ' - '|> sum() ' - '|> map(fn: (r) => ({ r with upload: r._value / 1000000000 })) ' - '|> yield(name: "upload") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "rx_bytes" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}" and ' - 'r["ifname"] == "{ifname}") ' - '|> sum() ' - '|> map(fn: (r) => ({ r with download: r._value / 1000000000 })) ' - '|> yield(name: "download")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && + r.object_id == "{object_id}" && r.ifname == "{ifname}") + |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) + |> map(fn: (r) => ({ + upload: r.tx_bytes / 1000000000, + download: r.rx_bytes / 1000000000 + })) + ''' }, 'general_traffic': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "tx_bytes"{organization_id}' - '{location_id}{floorplan_id}{ifname}) ' - '|> sum() ' - '|> map(fn: (r) => ({ r with upload: r._value / 1000000000 })) ' - '|> yield(name: "upload") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "rx_bytes"{organization_id}' - '{location_id}{floorplan_id}{ifname}) ' - '|> sum() ' - '|> map(fn: (r) => ({ r with download: r._value / 1000000000 })) ' - '|> yield(name: "download")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r.organization_id == "{organization_id}" && + r.location_id == "{location_id}" && + r.floorplan_id == "{floorplan_id}" && r.ifname == "{ifname}") + |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) + |> map(fn: (r) => ({ + upload: r.tx_bytes / 1000000000, + download: r.rx_bytes / 1000000000 + })) + ''' }, 'memory': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "percent_used" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with memory_usage: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + memory_usage: r.percent_used + })) + ''' }, 'cpu': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "cpu_usage" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with CPU_load: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + CPU_load: r.cpu_usage + })) + ''' }, 'disk': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "used_disk" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with disk_usage: r._value }))' - ) + 'influxdb2': ''' + from(bucket: 
"{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + disk_usage: r.used_disk + })) + ''' }, 'signal_strength': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "signal_strength" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with signal_strength: round(r._value) })) ' - '|> yield(name: "signal_strength") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "signal_power" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with signal_power: round(r._value) })) ' - '|> yield(name: "signal_power")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + signal_strength: math.round(r.signal_strength), + signal_power: math.round(r.signal_power) + })) + + ''' }, 'signal_quality': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "signal_quality" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with signal_quality: round(r._value) })) ' - '|> yield(name: "signal_quality") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "snr" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with signal_to_noise_ratio: round(r._value) })) ' - '|> yield(name: "signal_to_noise_ratio")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + signal_quality: math.round(r.signal_quality), + signal_to_noise_ratio: math.round(r.snr) + })) + ''' }, 'access_tech': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}{end_date}) ' - '|> filter(fn: (r) => r["_measurement"] == "access_tech" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mode() ' - '|> map(fn: (r) => ({ r with access_tech: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mode, createEmpty: false) + |> map(fn: (r) => ({ + access_tech: r.access_tech + })) + ''' }, 'bandwidth': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "sent_bps_tcp" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with TCP: r._value / 1000000000 })) ' - '|> yield(name: "TCP") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "sent_bps_udp" and ' - 'r["content_type"] == "{content_type}" and 
r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with UDP: r._value / 1000000000 })) ' - '|> yield(name: "UDP")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + TCP: r.sent_bps_tcp / 1000000000, + UDP: r.sent_bps_udp / 1000000000 + })) + ''' }, 'transfer': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "sent_bytes_tcp" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> sum() ' - '|> map(fn: (r) => ({ r with TCP: r._value / 1000000000 })) ' - '|> yield(name: "TCP") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "sent_bytes_udp" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> sum() ' - '|> map(fn: (r) => ({ r with UDP: r._value / 1000000000 })) ' - '|> yield(name: "UDP")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) + |> map(fn: (r) => ({ + TCP: r.sent_bytes_tcp / 1000000000, + UDP: r.sent_bytes_udp / 1000000000 + })) + ''' }, 'retransmits': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "retransmits" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with retransmits: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + retransmits: r.retransmits + })) + ''' }, 'jitter': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "jitter" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with jitter: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + jitter: r.jitter + })) + ''' }, 'datagram': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "lost_packets" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with lost_datagram: r._value })) ' - '|> yield(name: "lost_datagram") ' - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "total_packets" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with total_datagram: r._value })) ' - '|> yield(name: "total_datagram")' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + 
lost_datagram: r.lost_packets, + total_datagram: r.total_packets + })) + ''' }, 'datagram_loss': { - 'influxdb2': ( - 'from(bucket: "{key}") ' - '|> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "lost_percent" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean() ' - '|> map(fn: (r) => ({ r with datagram_loss: r._value }))' - ) + 'influxdb2': ''' + from(bucket: "{key}") + |> range(start: {time}) + |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") + |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) + |> map(fn: (r) => ({ + datagram_loss: r.lost_percent + })) + ''' }, } -default_chart_query = [ - 'from(bucket: "{key}") |> range(start: {time}{end_date}) ', - ( - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' - ), -] +default_chart_query = ''' + from(bucket: "{key}") + |> range(start: {time}{end_date}) + |> filter(fn: (r) => + r._measurement == "{content_type}" && + r.object_id == "{object_id}" + ) + |> keep(columns: ["{field_name}"]) +''' + +device_data_query = ''' + from(bucket: "{key}") + |> range(start: -inf) + |> filter(fn: (r) => + r._measurement == "{content_type}" && + r.pk == "{pk}" + ) + |> last() +''' + + +def get_chart_query(chart_type, **params): + """Fetches and formats a specific chart query based on the chart type and provided parameters.""" + try: + query = chart_query[chart_type].format(**params) + except KeyError: + logger.warning( + f"No specific query found for chart type '{chart_type}'. Using default query." + ) + query = default_chart_query.format(**params) + return query + -device_data_query = ( - 'from(bucket: "{0}") |> range(start: 0) ' - '|> filter(fn: (r) => r["_measurement"] == "{1}" and r["pk"] == "{2}") ' - '|> sort(columns: ["_time"], desc: true) ' - '|> limit(n: 1)' -) +def get_device_data_query(**params): + """Formats the device data query based on provided parameters.""" + return device_data_query.format(**params) diff --git a/openwisp_monitoring/db/backends/influxdb2/tests.py b/openwisp_monitoring/db/backends/influxdb2/tests.py index 9bf8aca98..e69de29bb 100644 --- a/openwisp_monitoring/db/backends/influxdb2/tests.py +++ b/openwisp_monitoring/db/backends/influxdb2/tests.py @@ -1,433 +0,0 @@ -from datetime import datetime, timedelta -from unittest.mock import patch - -from celery.exceptions import Retry -from django.core.exceptions import ValidationError -from django.test import TestCase, tag -from django.utils.timezone import now -from freezegun import freeze_time -from influxdb_client import InfluxDBClient -from influxdb_client.client.exceptions import InfluxDBError -from pytz import timezone as tz -from swapper import load_model - -from openwisp_monitoring.device.settings import ( - DEFAULT_RETENTION_POLICY, - SHORT_RETENTION_POLICY, -) -from openwisp_monitoring.device.utils import ( - DEFAULT_RP, - SHORT_RP, - manage_default_retention_policy, - manage_short_retention_policy, -) -from openwisp_monitoring.monitoring.tests import TestMonitoringMixin -from openwisp_monitoring.settings import MONITORING_TIMESERIES_RETRY_OPTIONS -from openwisp_utils.tests import capture_stderr - -from ...exceptions import TimeseriesWriteException -from .. 
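# --- Editorial sketch (not part of the patch): filling the device_data_query
# template defined in queries.py above. The bucket name, content type and pk
# are illustrative placeholders.
from openwisp_monitoring.db.backends.influxdb2.queries import get_device_data_query

flux = get_device_data_query(
    key='openwisp2',               # bucket holding the device data
    content_type='config.device',  # measurement named after the content type
    pk='device-uuid',              # primary key of the monitored device
)
# The resulting Flux selects the most recent point written for that device:
#   from(bucket: "openwisp2") |> range(start: -inf)
#     |> filter(fn: (r) => r._measurement == "config.device" && r.pk == "device-uuid")
#     |> last()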
import timeseries_db - -Chart = load_model('monitoring', 'Chart') -Notification = load_model('openwisp_notifications', 'Notification') - - -@tag('timeseries_client') -class TestDatabaseClient(TestMonitoringMixin, TestCase): - def test_forbidden_queries(self): - queries = [ - 'DROP DATABASE openwisp2', - 'DROP MEASUREMENT test_metric', - 'CREATE DATABASE test', - 'DELETE MEASUREMENT test_metric', - 'ALTER RETENTION POLICY policy', - 'SELECT * INTO metric2 FROM test_metric', - ] - for q in queries: - try: - timeseries_db.validate_query(q) - except ValidationError as e: - self.assertIn('configuration', e.message_dict) - else: - self.fail('ValidationError not raised') - - def test_get_custom_query(self): - c = self._create_chart(test_data=None) - custom_q = c._default_query.replace('{field_name}', '{fields}') - q = c.get_query(query=custom_q, fields=['SUM(*)']) - self.assertIn('|> sum()', q) - - def test_is_aggregate_bug(self): - m = self._create_object_metric(name='summary_avg') - c = Chart(metric=m, configuration='dummy') - self.assertFalse(timeseries_db._is_aggregate(c.query)) - - def test_is_aggregate_fields_function(self): - m = self._create_object_metric(name='is_aggregate_func') - c = Chart(metric=m, configuration='uptime') - self.assertTrue(timeseries_db._is_aggregate(c.query)) - - def test_get_query_fields_function(self): - c = self._create_chart(test_data=None, configuration='histogram') - q = c.get_query(fields=['ssh', 'http2', 'apple-music']) - expected = ( - '|> sum(column: "ssh") ' - '|> sum(column: "http2") ' - '|> sum(column: "apple-music")' - ) - self.assertIn(expected, q) - - def test_default_query(self): - c = self._create_chart(test_data=False) - q = ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' - ) - self.assertEqual(c.query, q) - - def test_write(self): - timeseries_db.write('test_write', dict(value=2), database=self.TEST_DB) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "test_write")' - ) - measurement = list(result)[0] - self.assertEqual(measurement['_value'], 2) - - def test_general_write(self): - m = self._create_general_metric(name='Sync test') - m.write(1) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "sync_test")' - ) - measurement = list(result)[0] - self.assertEqual(measurement['_value'], 1) - - def test_object_write(self): - om = self._create_object_metric() - om.write(3) - content_type = '.'.join(om.content_type.natural_key()) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "test_metric" and r["object_id"] == "{om.object_id}" ' - f'and r["content_type"] == "{content_type}")' - ) - measurement = list(result)[0] - self.assertEqual(measurement['_value'], 3) - - def test_general_same_key_different_fields(self): - down = self._create_general_metric( - name='traffic (download)', key='traffic', field_name='download' - ) - down.write(200) - up = self._create_general_metric( - name='traffic (upload)', key='traffic', field_name='upload' - ) - up.write(100) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "traffic")' - ) - measurements = list(result) - download_measurement = 
next( - m for m in measurements if m['_field'] == 'download' - ) - upload_measurement = next(m for m in measurements if m['_field'] == 'upload') - self.assertEqual(download_measurement['_value'], 200) - self.assertEqual(upload_measurement['_value'], 100) - - def test_object_same_key_different_fields(self): - user = self._create_user() - user_down = self._create_object_metric( - name='traffic (download)', - key='traffic', - field_name='download', - content_object=user, - ) - user_down.write(200) - user_up = self._create_object_metric( - name='traffic (upload)', - key='traffic', - field_name='upload', - content_object=user, - ) - user_up.write(100) - content_type = '.'.join(user_down.content_type.natural_key()) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "traffic" and ' - f'r["object_id"] == "{user_down.object_id}" and r["content_type"] == "{content_type}")' - ) - measurements = list(result) - download_measurement = next( - m for m in measurements if m['_field'] == 'download' - ) - upload_measurement = next(m for m in measurements if m['_field'] == 'upload') - self.assertEqual(download_measurement['_value'], 200) - self.assertEqual(upload_measurement['_value'], 100) - - def test_delete_metric_data(self): - m = self._create_general_metric(name='test_metric') - m.write(100) - self.assertEqual(m.read()[0]['value'], 100) - timeseries_db.delete_metric_data(key=m.key) - self.assertEqual(m.read(), []) - om = self._create_object_metric(name='dummy') - om.write(50) - m.write(100) - self.assertEqual(m.read()[0]['value'], 100) - self.assertEqual(om.read()[0]['value'], 50) - timeseries_db.delete_metric_data() - self.assertEqual(m.read(), []) - self.assertEqual(om.read(), []) - - def test_get_query_1d(self): - c = self._create_chart(test_data=None, configuration='uptime') - q = c.get_query(time='1d') - last24 = now() - timedelta(days=1) - self.assertIn(str(last24)[0:14], q) - self.assertIn('|> aggregateWindow(every: 10m, fn: mean)', q) - - def test_get_query_30d(self): - c = self._create_chart(test_data=None, configuration='uptime') - q = c.get_query(time='30d') - last30d = now() - timedelta(days=30) - self.assertIn(str(last30d)[0:10], q) - self.assertIn('|> aggregateWindow(every: 24h, fn: mean)', q) - - def test_group_by_tags(self): - self.assertEqual( - timeseries_db._group_by( - 'from(bucket: "measurement") |> range(start: -1d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 1d, fn: count)', - time='30d', - chart_type='stackedbar+lines', - group_map={'30d': '30d'}, - strip=False, - ), - 'from(bucket: "measurement") |> range(start: -30d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 30d, fn: count)', - ) - self.assertEqual( - timeseries_db._group_by( - 'from(bucket: "measurement") |> range(start: -1d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 1d, fn: count)', - time='30d', - chart_type='stackedbar+lines', - group_map={'30d': '30d'}, - strip=True, - ), - 'from(bucket: "measurement") |> range(start: -30d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item")', - ) - self.assertEqual( - timeseries_db._group_by( - 'from(bucket: "measurement") |> range(start: -1d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 1d, fn: count) |> group(columns: ["tag"])', - time='30d', - chart_type='stackedbar+lines', - group_map={'30d': '30d'}, - strip=False, - ), - 
'from(bucket: "measurement") |> range(start: -30d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 30d, fn: count) |> group(columns: ["tag"])', - ) - self.assertEqual( - timeseries_db._group_by( - 'from(bucket: "measurement") |> range(start: -1d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'aggregateWindow(every: 1d, fn: count) |> group(columns: ["tag"])', - time='30d', - chart_type='stackedbar+lines', - group_map={'30d': '30d'}, - strip=True, - ), - 'from(bucket: "measurement") |> range(start: -30d) |> ' - 'filter(fn: (r) => r["_measurement"] == "item") |> ' - 'group(columns: ["tag"])', - ) - - def test_retention_policy(self): - manage_short_retention_policy() - manage_default_retention_policy() - rp = timeseries_db.get_list_retention_policies() - self.assertEqual(len(rp), 2) - self.assertEqual(rp[0].name, DEFAULT_RP) - self.assertEqual(rp[0].every_seconds, DEFAULT_RETENTION_POLICY) - self.assertEqual(rp[1].name, SHORT_RP) - self.assertEqual(rp[1].every_seconds, SHORT_RETENTION_POLICY) - - def test_query_set(self): - c = self._create_chart(configuration='histogram') - expected = ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> aggregateWindow(every: {time}, fn: sum) ' - ) - self.assertEqual(c.query, expected) - self.assertEqual( - ''.join(timeseries_db.queries.default_chart_query[0:2]), c._default_query - ) - c.metric.object_id = None - self.assertEqual(timeseries_db.queries.default_chart_query[0], c._default_query) - - def test_read_order(self): - m = self._create_general_metric(name='dummy') - m.write(30) - m.write(40, time=now() - timedelta(days=2)) - with self.subTest('Test ascending read order'): - metric_data = m.read(limit=2, order='time') - self.assertEqual(metric_data[0]['value'], 40) - self.assertEqual(metric_data[1]['value'], 30) - with self.subTest('Test descending read order'): - metric_data = m.read(limit=2, order='-time') - self.assertEqual(metric_data[0]['value'], 30) - self.assertEqual(metric_data[1]['value'], 40) - with self.subTest('Test invalid read order'): - with self.assertRaises(timeseries_db.client_error) as e: - metric_data = m.read(limit=2, order='invalid') - self.assertIn('Invalid order "invalid" passed.', str(e)) - - def test_read_with_rp(self): - self._create_admin() - manage_short_retention_policy() - with self.subTest( - 'Test metric write on short retention_policy immediate alert' - ): - m = self._create_general_metric(name='dummy') - self._create_alert_settings( - metric=m, custom_operator='<', custom_threshold=1, custom_tolerance=0 - ) - m.write(0, retention_policy=SHORT_RP) - self.assertEqual(m.read(retention_policy=SHORT_RP)[0][m.field_name], 0) - m.refresh_from_db() - self.assertEqual(m.is_healthy, False) - self.assertEqual(m.is_healthy_tolerant, False) - self.assertEqual(Notification.objects.count(), 1) - with self.subTest( - 'Test metric write on short retention_policy with deferred alert' - ): - m2 = self._create_general_metric(name='dummy2') - self._create_alert_settings( - metric=m2, custom_operator='<', custom_threshold=1, custom_tolerance=1 - ) - m.write(0, retention_policy=SHORT_RP, time=now() - timedelta(minutes=2)) - self.assertEqual(m.read(retention_policy=SHORT_RP)[0][m.field_name], 0) - m.refresh_from_db() - self.assertEqual(m.is_healthy, False) - self.assertEqual(m.is_healthy_tolerant, False) - 
self.assertEqual(Notification.objects.count(), 1) - - def test_metric_write_microseconds_precision(self): - m = self._create_object_metric( - name='wlan0', key='wlan0', configuration='clients' - ) - m.write('00:14:5c:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235142)) - m.write('00:23:4a:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235152)) - self.assertEqual(len(m.read()), 2) - - @patch.object( - InfluxDBClient, 'write_api', side_effect=InfluxDBError('Server error') - ) - @capture_stderr() - def test_write_retry(self, mock_write): - with self.assertRaises(TimeseriesWriteException): - timeseries_db.write('test_write', {'value': 1}) - m = self._create_general_metric(name='Test metric') - with self.assertRaises(Retry): - m.write(1) - - @patch.object( - InfluxDBClient, - 'write_api', - side_effect=InfluxDBError( - content='{"error":"partial write: points beyond retention policy dropped=1"}', - code=400, - ), - ) - @capture_stderr() - def test_write_skip_retry_for_retention_policy(self, mock_write): - try: - timeseries_db.write('test_write', {'value': 1}) - except TimeseriesWriteException: - self.fail( - 'TimeseriesWriteException should not be raised when data ' - 'points crosses retention policy' - ) - m = self._create_general_metric(name='Test metric') - try: - m.write(1) - except Retry: - self.fail( - 'Writing metric should not be retried when data ' - 'points crosses retention policy' - ) - - @patch.object( - InfluxDBClient, 'write_api', side_effect=InfluxDBError('Server error') - ) - @capture_stderr() - def test_timeseries_write_params(self, mock_write): - with freeze_time('Jan 14th, 2020') as frozen_datetime: - m = self._create_general_metric(name='Test metric') - with self.assertRaises(Retry) as e: - m.write(1) - frozen_datetime.tick(delta=timedelta(minutes=10)) - self.assertEqual( - now(), datetime(2020, 1, 14, tzinfo=tz('UTC')) + timedelta(minutes=10) - ) - task_signature = e.exception.sig - with patch.object(timeseries_db, 'write') as mock_write: - self._retry_task(task_signature) - mock_write.assert_called_with( - 'test_metric', - {'value': 1}, - database=None, - retention_policy=None, - tags={}, - timestamp=datetime(2020, 1, 14, tzinfo=tz('UTC')).isoformat(), - current=False, - ) - - def _retry_task(self, task_signature): - task_kwargs = task_signature.kwargs - task_signature.type.run(**task_kwargs) - - @patch.object( - InfluxDBClient, 'query_api', side_effect=InfluxDBError('Server error') - ) - def test_retry_mechanism(self, mock_query): - max_retries = MONITORING_TIMESERIES_RETRY_OPTIONS.get('max_retries') - with patch('logging.Logger.info') as mocked_logger: - try: - self.test_get_query_fields_function() - except Exception: - pass - self.assertEqual(mocked_logger.call_count, max_retries) - mocked_logger.assert_called_with( - 'Error while executing method "query":\nServer error\n' - f'Attempt {max_retries} out of {max_retries}.\n' - ) - - -class TestDatabaseClientUdp(TestMonitoringMixin, TestCase): - def test_exceed_udp_packet_limit(self): - # InfluxDB 2.x does not use UDP for writing data, but this is kept - # for backward compatibility reference - timeseries_db.write( - 'test_udp_write', dict(value='O' * 66000), database=self.TEST_DB - ) - result = timeseries_db.query( - f'from(bucket: "{self.TEST_DB}") |> range(start: -1h) |> ' - f'filter(fn: (r) => r["_measurement"] == "test_udp_write")' - ) - measurement = list(result) - self.assertEqual(len(measurement), 1) diff --git a/openwisp_monitoring/db/exceptions.py b/openwisp_monitoring/db/exceptions.py index 
3296400a1..3aef4d377 100644 --- a/openwisp_monitoring/db/exceptions.py +++ b/openwisp_monitoring/db/exceptions.py @@ -1,2 +1,6 @@ class TimeseriesWriteException(Exception): pass + + +class WriteError(Exception): + pass diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/docker-entrypoint.sh b/tests/docker-entrypoint.sh old mode 100644 new mode 100755 diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py index 3d3dbf309..4d0186252 100644 --- a/tests/openwisp2/settings.py +++ b/tests/openwisp2/settings.py @@ -32,16 +32,17 @@ 'OPTIONS': {'udp_writes': False, 'udp_port': 8089}, } +# For InfluxDB 2.x INFLUXDB_2x_DATABASE = { 'BACKEND': 'openwisp_monitoring.db.backends.influxdb2', - 'TOKEN': 'your-influxdb-2.0-token', - 'ORG': 'your-org', - 'BUCKET': 'your-bucket', - 'HOST': os.getenv('INFLUXDB2_HOST', 'localhost'), - 'PORT': '8087', + 'TOKEN': 't8Q3Y5mTWuqqTRdGyVxZuyVLO-8pl3I8KaNTR3jV7uTDr_GVECP5Z7LsrZwILGw79Xp4O8pAWkdqTREgIk073Q==', + 'ORG': 'myorg', + 'BUCKET': 'mybucket', + 'HOST': os.getenv('INFLUXDB_HOST', 'localhost'), + 'PORT': '9086', } -if os.environ.get('USE_INFLUXDB2', False): +if os.environ.get('USE_INFLUXDB2', 'False') == 'True': TIMESERIES_DATABASE = INFLUXDB_2x_DATABASE else: TIMESERIES_DATABASE = INFLUXDB_1x_DATABASE From cfc9b582fbfa8120cc44f1666e1937c4ed73f52a Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Tue, 25 Jun 2024 12:22:35 +0530 Subject: [PATCH 4/7] [monitoring] Changes #274 Fixes #274 --- openwisp_monitoring/db/backends/__init__.py | 48 +- .../db/backends/influxdb2/client.py | 544 +++++++++---- .../db/backends/influxdb2/queries.py | 744 ++++++++++++------ .../db/backends/influxdb2/tests.py | 261 ++++++ openwisp_monitoring/db/exceptions.py | 4 - openwisp_monitoring/device/base/models.py | 22 +- openwisp_monitoring/device/settings.py | 2 +- openwisp_monitoring/device/utils.py | 9 +- openwisp_monitoring/monitoring/base/models.py | 64 +- .../monitoring/migrations/__init__.py | 61 +- .../influxdb/{__ini__.py => __init__.py} | 0 .../migrations/influxdb2/__init__.py | 0 .../influxdb2_alter_structure_0006.py | 112 +++ openwisp_monitoring/monitoring/tasks.py | 53 +- .../monitoring/tests/__init__.py | 41 +- tests/openwisp2/settings.py | 7 +- 16 files changed, 1517 insertions(+), 455 deletions(-) rename openwisp_monitoring/monitoring/migrations/influxdb/{__ini__.py => __init__.py} (100%) create mode 100644 openwisp_monitoring/monitoring/migrations/influxdb2/__init__.py create mode 100644 openwisp_monitoring/monitoring/migrations/influxdb2/influxdb2_alter_structure_0006.py diff --git a/openwisp_monitoring/db/backends/__init__.py b/openwisp_monitoring/db/backends/__init__.py index e2399bf6f..be0cd843c 100644 --- a/openwisp_monitoring/db/backends/__init__.py +++ b/openwisp_monitoring/db/backends/__init__.py @@ -9,19 +9,36 @@ TIMESERIES_DB = getattr(settings, 'TIMESERIES_DATABASE', None) if not TIMESERIES_DB: - TIMESERIES_DB = { - 'BACKEND': 'openwisp_monitoring.db.backends.influxdb', - 'USER': getattr(settings, 'INFLUXDB_USER', 'openwisp'), - 'PASSWORD': getattr(settings, 'INFLUXDB_PASSWORD', 'openwisp'), - 'NAME': getattr(settings, 'INFLUXDB_DATABASE', 'openwisp2'), - 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), - 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), - } - logger.warning( - 'The previous method to define Timeseries Database has been deprecated. 
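# --- Editorial sketch (not part of the patch): a TIMESERIES_DATABASE setting
# for the 2.x backend that reads credentials from the environment instead of
# hard-coding a token. The environment variable names are illustrative.
import os

TIMESERIES_DATABASE = {
    'BACKEND': 'openwisp_monitoring.db.backends.influxdb2',
    'TOKEN': os.getenv('INFLUXDB_TOKEN', ''),
    'ORG': os.getenv('INFLUXDB_ORG', 'openwisp'),
    'BUCKET': os.getenv('INFLUXDB_BUCKET', 'openwisp2'),
    'HOST': os.getenv('INFLUXDB2_HOST', 'localhost'),
    'PORT': os.getenv('INFLUXDB2_PORT', '8086'),
}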
Please refer to the docs:\n' - 'https://github.com/openwisp/openwisp-monitoring#setup-integrate-in-an-existing-django-project' - ) + INFLUXDB_BACKEND = getattr(settings, 'INFLUXDB_BACKEND', 'openwisp_monitoring.db.backends.influxdb') + + if INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb': + # InfluxDB 1.x configuration + TIMESERIES_DB = { + 'BACKEND': INFLUXDB_BACKEND, + 'USER': getattr(settings, 'INFLUXDB_USER', 'openwisp'), + 'PASSWORD': getattr(settings, 'INFLUXDB_PASSWORD', 'openwisp'), + 'NAME': getattr(settings, 'INFLUXDB_DATABASE', 'openwisp2'), + 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), + 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), + } + elif INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb2': + # InfluxDB 2.x configuration + TIMESERIES_DB = { + 'BACKEND': INFLUXDB_BACKEND, + 'TOKEN': getattr(settings, 'INFLUXDB_TOKEN', 'dltiEmsmMKU__9SoBE0ingFdMTS3UksrESwIQDNtW_3WOgn8bQGdyYzPcx_aDtvZkqvR8RbMkwVVlzUJxpm62w=='), + 'ORG': getattr(settings, 'INFLUXDB_ORG', 'myorg'), + 'BUCKET': getattr(settings, 'INFLUXDB_BUCKET', 'mybucket'), + 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), + 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), + } + else: + logger.warning('Invalid INFLUXDB_BACKEND setting. Please check the documentation.') + if INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb': + logger.warning( + 'The previous method to define Timeseries Database has been deprecated. Please refer to the docs:\n' + 'https://github.com/openwisp/openwisp-monitoring#setup-integrate-in-an-existing-django-project' + ) def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): """ @@ -30,7 +47,8 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): """ try: assert 'BACKEND' in TIMESERIES_DB, 'BACKEND' - if 'BACKEND' in TIMESERIES_DB and '2' in TIMESERIES_DB['BACKEND']: + is_influxdb2 = '2' in TIMESERIES_DB['BACKEND'] + if is_influxdb2: # InfluxDB 2.x specific checks assert 'TOKEN' in TIMESERIES_DB, 'TOKEN' assert 'ORG' in TIMESERIES_DB, 'ORG' @@ -75,7 +93,7 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): token=TIMESERIES_DB['TOKEN'], url=f"http://{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}", ) + timeseries_db.queries = load_backend_module(TIMESERIES_DB['BACKEND'], module='queries') else: timeseries_db = load_backend_module(module='client').DatabaseClient() - -timeseries_db.queries = load_backend_module(module='queries') + timeseries_db.queries = load_backend_module(module='queries') \ No newline at end of file diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py index ab21775f7..9af567a57 100644 --- a/openwisp_monitoring/db/backends/influxdb2/client.py +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -1,62 +1,48 @@ -import logging -import re -from datetime import datetime - +from datetime import datetime, time, timezone from django.conf import settings +from influxdb_client import InfluxDBClient, Point, WritePrecision +from influxdb_client.client.write_api import SYNCHRONOUS +import re +import pytz +from django.utils.timezone import now +import logging +from .. 
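# --- Editorial sketch (not part of the patch): the settings validation that
# load_backend_module() performs above, expressed as a helper. The 2.x backend
# requires TOKEN/ORG/BUCKET, the 1.x backend requires USER/PASSWORD/NAME.
def _required_keys(timeseries_db):
    if '2' in timeseries_db['BACKEND']:
        return ('TOKEN', 'ORG', 'BUCKET')
    return ('USER', 'PASSWORD', 'NAME')


def _validate_timeseries_settings(timeseries_db):
    missing = [key for key in _required_keys(timeseries_db) if key not in timeseries_db]
    if missing:
        raise AssertionError(f'TIMESERIES_DATABASE is missing: {", ".join(missing)}')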
import TIMESERIES_DB from django.core.exceptions import ValidationError +from influxdb_client.rest import ApiException as InfluxDBClientError from django.utils.translation import gettext_lazy as _ -from influxdb_client import InfluxDBClient, Point -from influxdb_client.client.write_api import SYNCHRONOUS +from django.utils.dateparse import parse_datetime -from ...exceptions import TimeseriesWriteException logger = logging.getLogger(__name__) - -class DatabaseClient(object): +class DatabaseClient: _AGGREGATE = [ - 'COUNT', - 'DISTINCT', - 'INTEGRAL', - 'MEAN', - 'MEDIAN', - 'MODE', - 'SPREAD', - 'STDDEV', - 'SUM', - 'BOTTOM', - 'FIRST', - 'LAST', - 'MAX', - 'MIN', - 'PERCENTILE', - 'SAMPLE', - 'TOP', - 'CEILING', - 'CUMULATIVE_SUM', - 'DERIVATIVE', - 'DIFFERENCE', - 'ELAPSED', - 'FLOOR', - 'HISTOGRAM', - 'MOVING_AVERAGE', - 'NON_NEGATIVE_DERIVATIVE', - 'HOLT_WINTERS', + 'COUNT', 'DISTINCT', 'INTEGRAL', 'MEAN', 'MEDIAN', 'MODE', + 'SPREAD', 'STDDEV', 'SUM', 'BOTTOM', 'FIRST', 'LAST', + 'MAX', 'MIN', 'PERCENTILE', 'SAMPLE', 'TOP', 'CEILING', + 'CUMULATIVE_SUM', 'DERIVATIVE', 'DIFFERENCE', 'ELAPSED', + 'FLOOR', 'HISTOGRAM', 'MOVING_AVERAGE', 'NON_NEGATIVE_DERIVATIVE', + 'HOLT_WINTERS' ] - _FORBIDDEN = ['drop', 'create', 'delete', 'alter', 'into'] - backend_name = 'influxdb' - - def __init__(self, bucket, org, token, url): - self.bucket = bucket - self.org = org - self.token = token - self.url = url - self.client = InfluxDBClient(url=url, token=token, org=org) + _FORBIDDEN = ['drop', 'delete', 'alter', 'into'] + backend_name = 'influxdb2' + + def __init__(self, bucket=None, org=None, token=None, url=None): + self.bucket = bucket or TIMESERIES_DB['BUCKET'] + self.org = org or TIMESERIES_DB['ORG'] + self.token = token or TIMESERIES_DB['TOKEN'] + self.url = url + self.client = InfluxDBClient(url=self.url, token=self.token, org=self.org) self.write_api = self.client.write_api(write_options=SYNCHRONOUS) self.query_api = self.client.query_api() + self.forbidden_pattern = re.compile( + r'\b(' + '|'.join(self._FORBIDDEN) + r')\b', re.IGNORECASE + ) + self.client_error = InfluxDBClientError def create_database(self): logger.debug('InfluxDB 2.0 does not require explicit database creation.') + # self.create_bucket(self.bucket) def drop_database(self): logger.debug('InfluxDB 2.0 does not support dropping databases via the client.') @@ -64,65 +50,231 @@ def drop_database(self): def create_or_alter_retention_policy(self, name, duration): logger.debug('InfluxDB 2.0 handles retention policies via bucket settings.') - def write(self, name, values, **kwargs): - timestamp = kwargs.get('timestamp', datetime.utcnow().isoformat()) - point = ( - Point(name) - .tag("object_id", kwargs.get('tags').get('object_id')) - .field(kwargs.get('field'), values) - .time(timestamp) - ) + def create_bucket(self, bucket, retention_rules=None): + bucket_api = self.client.buckets_api() + try: + existing_bucket = bucket_api.find_bucket_by_name(bucket) + if existing_bucket: + logger.info(f'Bucket "{bucket}" already exists.') + return + except Exception as e: + logger.error(f"Error checking for existing bucket: {e}") + try: + bucket_api.create_bucket(bucket_name=bucket, retention_rules=retention_rules, org=self.org) + logger.info(f'Created bucket "{bucket}"') + except self.client_error as e: + if "already exists" in str(e): + logger.info(f'Bucket "{bucket}" already exists.') + else: + logger.error(f"Error creating bucket: {e}") + raise + + def drop_bucket(self): + bucket_api = self.client.buckets_api() + bucket = 
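# --- Editorial sketch (not part of the patch): in InfluxDB 2.x retention is a
# property of the bucket, which is why create_or_alter_retention_policy()
# above is a no-op and create_bucket() accepts retention rules. Names and the
# 7-day retention below are illustrative placeholders.
from influxdb_client import BucketRetentionRules, InfluxDBClient

client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org')
buckets_api = client.buckets_api()
retention = BucketRetentionRules(type='expire', every_seconds=7 * 24 * 3600)
if buckets_api.find_bucket_by_name('openwisp2') is None:
    buckets_api.create_bucket(
        bucket_name='openwisp2', retention_rules=retention, org='my-org'
    )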
bucket_api.find_bucket_by_name(self.bucket) + if bucket: + bucket_api.delete_bucket(bucket.id) + logger.debug(f'Dropped InfluxDB bucket "{self.bucket}"') + + def _get_timestamp(self, timestamp=None): + timestamp = timestamp or now() + if isinstance(timestamp, datetime): + return timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + return timestamp + + def write(self, name, values, **kwargs): + timestamp = self._get_timestamp(timestamp=kwargs.get('timestamp')) + try: + tags = kwargs.get('tags', {}) + if 'content_type' in kwargs: + tags['content_type'] = kwargs['content_type'] + if 'object_id' in kwargs: + tags['object_id'] = kwargs['object_id'] + point = { + 'measurement': name, + 'tags': tags, + 'fields': values, + 'time': timestamp, + } + # import pdb; pdb.set_trace() + print(f"Writing point to InfluxDB: {point}") self.write_api.write(bucket=self.bucket, org=self.org, record=point) - except Exception as exception: - logger.warning(f'got exception while writing to tsdb: {exception}') - raise TimeseriesWriteException + print(f"Successfully wrote point to bucket {self.bucket}") + except Exception as e: + print(f"Error writing to InfluxDB: {e}") def batch_write(self, metric_data): + print(f"Batch writing to InfluxDB - Data: {metric_data}") points = [] for data in metric_data: - timestamp = data.get('timestamp', datetime.utcnow().isoformat()) - point = ( - Point(data.get('name')) - .tag("object_id", data.get('tags').get('object_id')) - .field(data.get('field'), data.get('values')) - .time(timestamp) - ) + timestamp = self._get_timestamp(timestamp=data.get('timestamp')) + point = Point(data.get('name')).tag(**data.get('tags', {})).field(**data.get('values')).time(timestamp, WritePrecision.NS) points.append(point) + try: self.write_api.write(bucket=self.bucket, org=self.org, record=points) - except Exception as exception: - logger.warning(f'got exception while writing to tsdb: {exception}') - raise TimeseriesWriteException + logger.debug(f'Written batch of {len(points)} points to bucket {self.bucket}') + except Exception as e: + logger.error(f"Error writing batch to InfluxDB: {e}") + + # def query(self, query): + # print(f"Executing query: {query}") + # try: + # tables = self.query_api.query(query) + # print(f"Query result: {tables}") + # result = [] + # for table in tables: + # for record in table.records: + # record_dict = { + # 'time': record.get_time(), + # 'measurement': record.get_measurement(), + # 'field': record.get_field(), + # 'value': record.get_value() + # } + # result.append(record_dict) + # print(f"Record: {record_dict}") + # print(f"Query result: {result}") + # if not result: + # print("Query returned no data") + # return result + # except Exception as e: + # logger.error(f"Error querying InfluxDB: {e}") + # print(f"Error querying InfluxDB: {e}") + # return [] + def _format_date(self, date_str): + if date_str is None or date_str == 'now()': + return date_str + try: + date = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') + return date.strftime('%Y-%m-%dT%H:%M:%SZ') + except ValueError: + # If the date_str is not in the expected format, return it as is + return date_str + + def get_query(self, chart_type, params, time, group_map, summary=False, fields=None, query=None, timezone=settings.TIME_ZONE): + print(f"get_query called with params: {params}") + measurement = params.get('measurement') or params.get('key') + if not measurement or measurement == 'None': + logger.error(f"Invalid or missing measurement in params: {params}") + return None + + start_date = 
self._format_date(params.get('start_date', f'-{time}')) + end_date = self._format_date(params.get('end_date', 'now()')) + content_type = params.get('content_type') + object_id = params.get('object_id') + - def read(self, key, fields, tags=None, **kwargs): - since = kwargs.get('since') + window = group_map.get(time, '1h') + + flux_query = f''' + from(bucket: "{self.bucket}") + |> range(start: {start_date}, stop: {end_date}) + |> filter(fn: (r) => r["_measurement"] == "{measurement}") + ''' + + if content_type and object_id: + flux_query += f' |> filter(fn: (r) => r.content_type == "{content_type}" and r.object_id == "{object_id}")\n' + + if fields: + field_filters = ' or '.join([f'r["_field"] == "{field}"' for field in fields]) + flux_query += f' |> filter(fn: (r) => {field_filters})\n' + + flux_query += f' |> aggregateWindow(every: {window}, fn: mean, createEmpty: false)\n' + flux_query += ' |> yield(name: "mean")' + + print(f"Generated Flux query: {flux_query}") + return flux_query + + def query(self, query): + print(f"Executing query: {query}") + try: + result = self.query_api.query(query) + return result + except Exception as e: + logger.error(f"Error executing query: {e}") + return None + + def read(self, measurement, fields, tags, **kwargs): + extra_fields = kwargs.get('extra_fields') + since = kwargs.get('since', '-30d') order = kwargs.get('order') limit = kwargs.get('limit') - query = ( - f'from(bucket: "{self.bucket}")' - f' |> range(start: {since if since else "-1h"})' # Use since or default - f' |> filter(fn: (r) => r._measurement == "{key}")' - ) + + flux_query = f''' + from(bucket: "{self.bucket}") + |> range(start: {since}) + |> filter(fn: (r) => r._measurement == "{measurement}") + ''' + if fields and fields != '*': + field_filters = ' or '.join([f'r._field == "{field}"' for field in fields.split(', ')]) + flux_query += f' |> filter(fn: (r) => {field_filters})' + if tags: - tag_query = ' and '.join( - [f'r.{tag} == "{value}"' for tag, value in tags.items()] - ) - query += f' |> filter(fn: (r) => {tag_query})' - if fields: - field_query = ' or '.join([f'r._field == "{field}"' for field in fields]) - query += f' |> filter(fn: (r) => {field_query})' + tag_filters = ' and '.join([f'r["{tag}"] == "{value}"' for tag, value in tags.items()]) + flux_query += f' |> filter(fn: (r) => {tag_filters})' + + flux_query += ''' + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({r with _value: float(v: r._value)})) + |> keep(columns: ["_time", "_value", "_field", "content_type", "object_id"]) + |> rename(columns: {_time: "time"}) + ''' + if order: - query += f' |> sort(columns: ["_time"], desc: {order == "-time"})' + if order == 'time': + flux_query += ' |> sort(columns: ["time"], desc: false)' + elif order == '-time': + flux_query += ' |> sort(columns: ["time"], desc: true)' + else: + raise ValueError(f'Invalid order "{order}" passed.\nYou may pass "time" / "-time" to get result sorted in ascending /descending order respectively.') + if limit: - query += f' |> limit(n: {limit})' - result = self.query_api.query(org=self.org, query=query) - return [record.values for table in result for record in table.records] + flux_query += f' |> limit(n: {limit})' + + return self.query(flux_query) + def get_list_query(self, query, precision=None): + print(f"get_list_query called with query: {query}") + result = self.query(query) + result_points = [] + + if result is None: + print("Query returned None") + return result_points + + for table in result: + for record 
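# --- Editorial sketch (not part of the patch): calling the read() helper
# defined above, assuming Django settings already provide the TIMESERIES_DB
# bucket/org/token. The url, measurement, fields and tag values are
# illustrative placeholders.
from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient

client = DatabaseClient(url='http://localhost:8086')
tables = client.read(
    'ping',                        # measurement
    'uptime, loss',                # comma-separated field names
    {'object_id': 'device-uuid'},  # tag filters
    since='-7d',
    order='-time',                 # newest first
    limit=100,
)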
in table.records: + time = record.get_time() + if precision is not None: + # Truncate the time based on the specified precision + time = time.isoformat()[:precision] + else: + time = time.isoformat() + + values = {col: record.values.get(col) for col in record.values if col != '_time'} + values['time'] = time + values['_value'] = record.get_value() + values['_field'] = record.get_field() + result_points.append(values) + + print(f"get_list_query returned {len(result_points)} points") + print(f"Processed result points: {result_points}") + return result_points + def delete_metric_data(self, key=None, tags=None): - logger.debug( - 'InfluxDB 2.0 does not support deleting specific data points via the client.' - ) + start = "1970-01-01T00:00:00Z" + stop = "2100-01-01T00:00:00Z" + predicate = "" + if key: + predicate += f'r._measurement == "{key}"' + if tags: + tag_filters = ' and '.join([f'r["{tag}"] == "{value}"' for tag, value in tags.items()]) + if predicate: + predicate += f' and {tag_filters}' + else: + predicate = tag_filters + self.client.delete_api().delete(start, stop, predicate, bucket=self.bucket, org=self.org) def validate_query(self, query): for word in self._FORBIDDEN: @@ -137,25 +289,146 @@ def _is_aggregate(self, q): if any(['%s(' % word in q, '|%s}' % word in q, '|%s|' % word in q]): return True return False + + def _clean_params(self, params): + if params.get('end_date'): + params['end_date'] = f"stop: {params['end_date']}" + else: + params['end_date'] = '' + + for key, value in params.items(): + if isinstance(value, (list, tuple)): + params[key] = self._get_filter_query(key, value) + + return params + + def _get_filter_query(self, field, items): + if not items: + return '' + filters = [] + for item in items: + filters.append(f'r["{field}"] == "{item}"') + return f'|> filter(fn: (r) => {" or ".join(filters)})' + + # def get_query(self, chart_type, params, time, group_map, summary=False, fields=None, query=None, timezone=settings.TIME_ZONE): + bucket = self.bucket + measurement = params.get('measurement') + if not measurement or measurement == 'None': + logger.error("Invalid or missing measurement in params") + return None + + start_date = params.get('start_date') + end_date = params.get('end_date') + content_type = params.get('content_type') + object_id = params.get('object_id') + print(f"get_query called with params: {params}") + import pdb; pdb.set_trace() + def format_time(time_str): + if time_str: + try: + if isinstance(time_str, str): + # Try parsing as ISO format first + try: + dt = datetime.fromisoformat(time_str.replace('Z', '+00:00')) + except ValueError: + # If that fails, try parsing as a different format + dt = datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S') + else: + dt = time_str + return dt.strftime('%Y-%m-%dT%H:%M:%SZ') + except Exception as e: + print(f"Error parsing time: {e}") + return None + + start_date = format_time(start_date) if start_date else f'-{time}' + end_date = format_time(end_date) if end_date else 'now()' + + flux_query = f''' + from(bucket: "{bucket}") + |> range(start: {start_date}, stop: {end_date}) + |> filter(fn: (r) => r._measurement == "{measurement}") + |> filter(fn: (r) => r.content_type == "{content_type}" and r.object_id == "{object_id}") + ''' + + if not summary: + window = group_map.get(time, '1h') + flux_query += f'|> aggregateWindow(every: {window}, fn: mean, createEmpty: false)' + + flux_query += ''' + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + ''' - def get_query( - self, - chart_type, - params, - 
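# --- Editorial sketch (not part of the patch): the underlying call used by
# delete_metric_data() above. InfluxDB 2.x deletes by time range plus a delete
# predicate; note that delete predicates use `=` and `AND`, not Flux
# expressions. Bucket, org and the predicate values are placeholders.
from influxdb_client import InfluxDBClient

client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org')
client.delete_api().delete(
    '1970-01-01T00:00:00Z',                             # start of deletion window
    '2100-01-01T00:00:00Z',                             # end of deletion window
    '_measurement="ping" AND object_id="device-uuid"',  # delete predicate
    bucket='openwisp2',
    org='my-org',
)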
time, - group_map, - summary=False, - fields=None, - query=None, - timezone=settings.TIME_ZONE, - ): - query = self._fields(fields, query, params['field_name']) - params = self._clean_params(params) - query = query.format(**params) - query = self._group_by(query, time, chart_type, group_map, strip=summary) if summary: - query = f'{query} |> limit(n: 1)' - return query + flux_query += '|> last()' + + flux_query += '|> yield(name: "result")' + + print(f"Generated Flux query: {flux_query}") + return flux_query + # def get_query( + # self, + # chart_type, + # params, + # time_range, + # group_map, + # summary=False, + # fields=None, + # query=None, + # timezone=settings.TIME_ZONE, + # ): + # flux_query = f'from(bucket: "{self.bucket}")' + + # def format_date(date): + # if date is None: + # return None + # if isinstance(date, str): + # try: + # dt = datetime.strptime(date, "%Y-%m-%d %H:%M:%S") + # return str(int(dt.timestamp())) + # except ValueError: + # return date + # if isinstance(date, datetime): + # return str(int(date.timestamp())) + # return str(date) + + # start_date = format_date(params.get('start_date')) + # end_date = format_date(params.get('end_date')) + + # if start_date: + # flux_query += f' |> range(start: {start_date}' + # else: + # flux_query += f' |> range(start: -{time_range}' + + # if end_date: + # flux_query += f', stop: {end_date})' + # else: + # flux_query += ')' + + # if 'key' in params: + # flux_query += f' |> filter(fn: (r) => r._measurement == "{params["key"]}")' + + # if fields and fields != '*': + # field_filters = ' or '.join([f'r._field == "{field.strip()}"' for field in fields.split(',')]) + # flux_query += f' |> filter(fn: (r) => {field_filters})' + + # if 'content_type' in params and 'object_id' in params: + # flux_query += f' |> filter(fn: (r) => r.content_type == "{params["content_type"]}" and r.object_id == "{params["object_id"]}")' + + # window_period = group_map.get(time_range, '1h') + # if chart_type in ['line', 'stackedbar']: + # flux_query += f' |> aggregateWindow(every: {window_period}, fn: mean, createEmpty: false)' + + # flux_query += ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + + # if summary: + # flux_query += ' |> last()' + + # flux_query = f'import "timezone"\n\noption location = timezone.location(name: "{timezone}")\n\n{flux_query}' + + # flux_query += ' |> yield(name: "result")' + + # print(f"Generated Flux query: {flux_query}") # Debug print + # return flux_query def _fields(self, fields, query, field_name): matches = re.search(self._fields_regex, query) @@ -167,8 +440,8 @@ def _fields(self, fields, query, field_name): fields = [field_name] if fields and matches: groups = matches.groupdict() - function = groups['func'] # required - operation = groups.get('op') # optional + function = groups['func'] + operation = groups.get('op') fields = [self.__transform_field(f, function, operation) for f in fields] fields_key = groups.get('group') else: @@ -179,43 +452,28 @@ def _fields(self, fields, query, field_name): def __transform_field(self, field, function, operation=None): if operation: - operation = f' {operation}' + operation = f' |> {operation}' else: operation = '' - return f'{function}("{field}"){operation} AS {field.replace("-", "_")}' + return f'{function}(r.{field}){operation} |> rename(columns: {{_{field}: "{field}"}})' - def _group_by(self, query, time, chart_type, group_map, strip=False): - if not self.validate_query(query): - return query - if not strip and not chart_type == 'histogram': - value = 
group_map[time] - group_by = ( - f'|> aggregateWindow(every: {value}, fn: mean, createEmpty: false)' - ) - else: - group_by = '' - if 'aggregateWindow' not in query: - query = f'{query} {group_by}' - return query - - -# Example usage -if __name__ == "__main__": - bucket = "mybucket" - org = "myorg" - token = "t8Q3Y5mTWuqqTRdGyVxZuyVLO-8pl3I8KaNTR3jV7uTDr_GVECP5Z7LsrZwILGw79Xp4O8pAWkdqTREgIk073Q==" - url = "http://localhost:9086" - - client = DatabaseClient(bucket=bucket, org=org, token=token, url=url) - client.create_database() - - # Write example - client.write( - "example_measurement", 99.5, tags={"object_id": "server_01"}, field="uptime" - ) - - # Read example - result = client.read( - "example_measurement", ["uptime"], tags={"object_id": "server_01"} - ) - print(result) + def _get_top_fields(self, query, params, chart_type, group_map, number, time, timezone=settings.TIME_ZONE): + q = self.get_query(query=query, params=params, chart_type=chart_type, group_map=group_map, summary=True, fields=['SUM(*)'], time=time, timezone=timezone) + flux_query = f''' + {q} + |> aggregateWindow(every: {time}, fn: sum, createEmpty: false) + |> group(columns: ["_field"]) + |> sum() + |> sort(columns: ["_value"], desc: true) + |> limit(n: {number}) + |> map(fn: (r) => ({{ r with _field: r._field }})) + ''' + result = list(self.query_api.query(flux_query)) + top_fields = [record["_field"] for table in result for record in table.records] + return top_fields + + def close(self): + self.client.close() + +#todo +# bucket_api.find_bucket_by_name("openwisp") diff --git a/openwisp_monitoring/db/backends/influxdb2/queries.py b/openwisp_monitoring/db/backends/influxdb2/queries.py index a41a0524b..057ec6e34 100644 --- a/openwisp_monitoring/db/backends/influxdb2/queries.py +++ b/openwisp_monitoring/db/backends/influxdb2/queries.py @@ -1,266 +1,564 @@ -import logging +# chart_query = { +# 'uptime': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "{field_name}")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> mean()' +# ' |> map(fn: (r) => ({ r with _value: r._value * 100.0 }))' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "uptime")' +# ) +# }, +# 'packet_loss': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "loss")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "packet_loss")' +# ) +# }, +# 'rtt': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "rtt_avg" or r._field == "rtt_max" or r._field == "rtt_min")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> yield(name: "rtt")' +# ) +# }, +# 'wifi_clients': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => 
r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "{field_name}")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> filter(fn: (r) => r.ifname == "{ifname}")' +# ' |> group()' +# ' |> distinct()' +# ' |> count()' +# ' |> set(key: "_field", value: "wifi_clients")' +# ' |> aggregateWindow(every: 1d, fn: max)' +# ) +# }, +# 'general_wifi_clients': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "{field_name}")' +# ' |> filter(fn: (r) => r.organization_id == "{organization_id}")' +# ' |> filter(fn: (r) => r.location_id == "{location_id}")' +# ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' +# ' |> group()' +# ' |> distinct()' +# ' |> count()' +# ' |> set(key: "_field", value: "wifi_clients")' +# ' |> aggregateWindow(every: 1d, fn: max)' +# ) +# }, +# 'traffic': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> filter(fn: (r) => r.ifname == "{ifname}")' +# ' |> sum()' +# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' +# ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' +# ' |> yield(name: "traffic")' +# ) +# }, +# 'general_traffic': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' +# ' |> filter(fn: (r) => r.organization_id == "{organization_id}")' +# ' |> filter(fn: (r) => r.location_id == "{location_id}")' +# ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' +# ' |> filter(fn: (r) => r.ifname == "{ifname}")' +# ' |> sum()' +# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' +# ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' +# ' |> yield(name: "general_traffic")' +# ) +# }, +# 'memory': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "percent_used")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "memory_usage")' +# ) +# }, +# 'cpu': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "cpu_usage")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "CPU_load")' +# ) +# }, +# 'disk': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: 
{time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "used_disk")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "disk_usage")' +# ) +# }, +# 'signal_strength': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "signal_strength" or r._field == "signal_power")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> yield(name: "signal_strength")' +# ) +# }, +# 'signal_quality': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "signal_quality" or r._field == "snr")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> yield(name: "signal_quality")' +# ) +# }, +# 'access_tech': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "access_tech")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: (column) => mode(column: "_value"), createEmpty: false)' +# ' |> yield(name: "access_tech")' +# ) +# }, +# 'bandwidth': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "sent_bps_tcp" or r._field == "sent_bps_udp")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> rename(columns: {sent_bps_tcp: "TCP", sent_bps_udp: "UDP"})' +# ' |> yield(name: "bandwidth")' +# ) +# }, +# 'transfer': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "sent_bytes_tcp" or r._field == "sent_bytes_udp")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> sum()' +# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> rename(columns: {sent_bytes_tcp: "TCP", sent_bytes_udp: "UDP"})' +# ' |> yield(name: "transfer")' +# ) +# }, +# 'retransmits': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> 
range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "retransmits")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "retransmits")' +# ) +# }, +# 'jitter': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "jitter")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "jitter")' +# ) +# }, +# 'datagram': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "lost_packets" or r._field == "total_packets")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' +# ' |> rename(columns: {lost_packets: "lost_datagram", total_packets: "total_datagram"})' +# ' |> yield(name: "datagram")' +# ) +# }, +# 'datagram_loss': { +# 'flux': ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "lost_percent")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' +# ' |> yield(name: "datagram_loss")' +# ) +# } +# } -logger = logging.getLogger(__name__) +# default_chart_query = ( +# 'from(bucket: "mybucket")' +# ' |> range(start: {time}, stop: {end_date})' +# ' |> filter(fn: (r) => r._measurement == "{key}")' +# ' |> filter(fn: (r) => r._field == "{field_name}")' +# ' |> filter(fn: (r) => r.content_type == "{content_type}")' +# ' |> filter(fn: (r) => r.object_id == "{object_id}")' +# ) + +# device_data_query = ( +# 'from(bucket: "mybucket")' +# ' |> range(start: -30d)' +# ' |> filter(fn: (r) => r._measurement == "{0}")' +# ' |> filter(fn: (r) => r.pk == "{1}")' +# ' |> last()' +# ) chart_query = { 'uptime': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" and r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ r with _value: r._value * 100 })) - |> rename(columns: {_value: "uptime"}) - - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> mean()' + ' |> map(fn: (r) => ({ r with _value: r._value * 100.0 }))' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "uptime")' + ) }, 'packet_loss': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" and r.object_id == 
"{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> rename(columns: {_value: "packet_loss"}) - - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "loss")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "packet_loss")' + ) }, 'rtt': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - RTT_average: r.rtt_avg, - RTT_max: r.rtt_max, - RTT_min: r.rtt_min - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "rtt_avg" or r._field == "rtt_max" or r._field == "rtt_min")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> yield(name: "rtt")' + ) }, 'wifi_clients': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && - r.object_id == "{object_id}" && r.ifname == "{ifname}") - |> group(columns: ["{field_name}"]) - |> count(column: "{field_name}") - |> map(fn: (r) => ({ r with wifi_clients: r._value })) - |> group() // Ungroup to summarize across the selected range - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> filter(fn: (r) => r.ifname == "{ifname}")' + ' |> group()' + ' |> distinct()' + ' |> count()' + ' |> set(key: "_field", value: "wifi_clients")' + ' |> aggregateWindow(every: 1d, fn: max)' + ) }, 'general_wifi_clients': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r.organization_id == "{organization_id}" && - r.location_id == "{location_id}" && r.floorplan_id == "{floorplan_id}") - |> group(columns: ["{field_name}"]) - |> count(column: "{field_name}") - |> map(fn: (r) => ({ r with wifi_clients: r._value })) - |> group() // Ungroup to summarize across the selected range - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.organization_id == "{organization_id}")' + ' |> filter(fn: (r) => r.location_id == "{location_id}")' + ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' + ' |> group()' + ' |> distinct()' + ' |> count()' + ' |> set(key: "_field", value: "wifi_clients")' + ' |> aggregateWindow(every: 1d, fn: max)' + ) }, 'traffic': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && - r.object_id 
== "{object_id}" && r.ifname == "{ifname}") - |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) - |> map(fn: (r) => ({ - upload: r.tx_bytes / 1000000000, - download: r.rx_bytes / 1000000000 - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> filter(fn: (r) => r.ifname == "{ifname}")' + ' |> sum()' + ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' + ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' + ' |> yield(name: "traffic")' + ) }, 'general_traffic': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r.organization_id == "{organization_id}" && - r.location_id == "{location_id}" && - r.floorplan_id == "{floorplan_id}" && r.ifname == "{ifname}") - |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) - |> map(fn: (r) => ({ - upload: r.tx_bytes / 1000000000, - download: r.rx_bytes / 1000000000 - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' + ' |> filter(fn: (r) => r.organization_id == "{organization_id}")' + ' |> filter(fn: (r) => r.location_id == "{location_id}")' + ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' + ' |> filter(fn: (r) => r.ifname == "{ifname}")' + ' |> sum()' + ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' + ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' + ' |> yield(name: "general_traffic")' + ) }, 'memory': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - memory_usage: r.percent_used - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "percent_used")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "memory_usage")' + ) }, 'cpu': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - CPU_load: r.cpu_usage - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "cpu_usage")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + 
' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "CPU_load")' + ) }, 'disk': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - disk_usage: r.used_disk - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "used_disk")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "disk_usage")' + ) }, 'signal_strength': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - signal_strength: math.round(r.signal_strength), - signal_power: math.round(r.signal_power) - })) - - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "signal_strength" or r._field == "signal_power")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> yield(name: "signal_strength")' + ) }, 'signal_quality': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - signal_quality: math.round(r.signal_quality), - signal_to_noise_ratio: math.round(r.snr) - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "signal_quality" or r._field == "snr")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> yield(name: "signal_quality")' + ) }, 'access_tech': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mode, createEmpty: false) - |> map(fn: (r) => ({ - access_tech: r.access_tech - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "access_tech")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: (column) => mode(column: "_value"), createEmpty: 
false)' + ' |> yield(name: "access_tech")' + ) }, 'bandwidth': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - TCP: r.sent_bps_tcp / 1000000000, - UDP: r.sent_bps_udp / 1000000000 - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "sent_bps_tcp" or r._field == "sent_bps_udp")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {sent_bps_tcp: "TCP", sent_bps_udp: "UDP"})' + ' |> yield(name: "bandwidth")' + ) }, 'transfer': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: sum, createEmpty: false) - |> map(fn: (r) => ({ - TCP: r.sent_bytes_tcp / 1000000000, - UDP: r.sent_bytes_udp / 1000000000 - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "sent_bytes_tcp" or r._field == "sent_bytes_udp")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> sum()' + ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {sent_bytes_tcp: "TCP", sent_bytes_udp: "UDP"})' + ' |> yield(name: "transfer")' + ) }, 'retransmits': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - retransmits: r.retransmits - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "retransmits")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "retransmits")' + ) }, 'jitter': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - jitter: r.jitter - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "jitter")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "jitter")' + ) }, 'datagram': { - 'influxdb2': ''' - from(bucket: "{key}") - |> 
range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - lost_datagram: r.lost_packets, - total_datagram: r.total_packets - })) - ''' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "lost_packets" or r._field == "total_packets")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {lost_packets: "lost_datagram", total_packets: "total_datagram"})' + ' |> yield(name: "datagram")' + ) }, 'datagram_loss': { - 'influxdb2': ''' - from(bucket: "{key}") - |> range(start: {time}) - |> filter(fn: (r) => r._measurement == "{content_type}" && r.object_id == "{object_id}") - |> aggregateWindow(every: 1d, fn: mean, createEmpty: false) - |> map(fn: (r) => ({ - datagram_loss: r.lost_percent - })) - ''' - }, -} - -default_chart_query = ''' - from(bucket: "{key}") - |> range(start: {time}{end_date}) - |> filter(fn: (r) => - r._measurement == "{content_type}" && - r.object_id == "{object_id}" - ) - |> keep(columns: ["{field_name}"]) -''' - -device_data_query = ''' - from(bucket: "{key}") - |> range(start: -inf) - |> filter(fn: (r) => - r._measurement == "{content_type}" && - r.pk == "{pk}" + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "lost_percent")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' + ' |> yield(name: "datagram_loss")' ) - |> last() -''' - - -def get_chart_query(chart_type, **params): - """Fetches and formats a specific chart query based on the chart type and provided parameters.""" - try: - query = chart_query[chart_type].format(**params) - except KeyError: - logger.warning( - f"No specific query found for chart type '{chart_type}'. Using default query." 
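
The Flux templates above are plain Python strings with named placeholders ({measurement}, {field_name}, {content_type}, {object_id}, {time}, {end_date}, and so on). A minimal sketch of how one of them could be filled in and executed with the influxdb-client query API follows; the URL, token, org and the example parameter values are placeholders, not part of this patch:

from influxdb_client import InfluxDBClient

from openwisp_monitoring.db.backends.influxdb2.queries import chart_query

# example parameter values (assumed, for illustration only)
params = {
    'measurement': 'ping',
    'field_name': 'reachable',
    'content_type': 'config.device',
    'object_id': '<device-uuid>',
    'time': '-7d',
    'end_date': 'now()',
}
flux = chart_query['uptime']['influxdb2']
# substitute placeholders with str.replace() instead of str.format(),
# because the templates also contain literal braces in Flux map() calls
for name, value in params.items():
    flux = flux.replace('{%s}' % name, str(value))

client = InfluxDBClient(url='http://localhost:8086', token='<token>', org='myorg')
for table in client.query_api().query(flux):
    for record in table.records:
        print(record.get_time(), record.get_field(), record.get_value())
client.close()
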
- ) - query = default_chart_query.format(**params) - return query + } +} +default_chart_query = ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' +) -def get_device_data_query(**params): - """Formats the device data query based on provided parameters.""" - return device_data_query.format(**params) +device_data_query = ( + 'from(bucket: "{bucket}")' + ' |> range(start: -30d)' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> last()' + ' |> yield(name: "last")' +) diff --git a/openwisp_monitoring/db/backends/influxdb2/tests.py b/openwisp_monitoring/db/backends/influxdb2/tests.py index e69de29bb..5283bda83 100644 --- a/openwisp_monitoring/db/backends/influxdb2/tests.py +++ b/openwisp_monitoring/db/backends/influxdb2/tests.py @@ -0,0 +1,261 @@ +import unittest +from unittest.mock import patch, MagicMock +from datetime import datetime, timedelta +from django.utils.timezone import now +from django.core.exceptions import ValidationError +from freezegun import freeze_time +from influxdb_client.client.write_api import SYNCHRONOUS +from influxdb_client.rest import ApiException +from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient +from openwisp_monitoring.monitoring.tests import TestMonitoringMixin +from openwisp_monitoring.device.settings import DEFAULT_RETENTION_POLICY, SHORT_RETENTION_POLICY +from openwisp_monitoring.device.utils import DEFAULT_RP, SHORT_RP +from openwisp_monitoring.views import Chart + +from ...exceptions import TimeseriesWriteException +from django.conf import settings + +class TestDatabaseClient(TestMonitoringMixin, unittest.TestCase): + def setUp(self): + self.client = DatabaseClient(bucket="mybucket", org="myorg", token="dltiEmsmMKU__9SoBE0ingFdMTS3UksrESwIQDNtW_3WOgn8bQGdyYzPcx_aDtvZkqvR8RbMkwVVlzUJxpm62w==", url="http://localhost:8086") + + def test_forbidden_queries(self): + queries = [ + 'DROP DATABASE openwisp2', + 'DROP MEASUREMENT test_metric', + 'CREATE DATABASE test', + 'DELETE MEASUREMENT test_metric', + 'ALTER RETENTION POLICY policy', + 'SELECT * INTO metric2 FROM test_metric', + ] + for q in queries: + with self.assertRaises(ValidationError): + self.client.validate_query(q) + + @patch('influxdb_client.InfluxDBClient') + def test_write(self, mock_influxdb_client): + mock_write_api = MagicMock() + mock_influxdb_client.return_value.write_api.return_value = mock_write_api + + self.client.write('test_write', {'value': 2}) + + mock_write_api.write.assert_called_once() + call_args = mock_write_api.write.call_args[1] + self.assertEqual(call_args['bucket'], 'mybucket') + self.assertEqual(call_args['org'], 'myorg') + self.assertIn('record', call_args) + self.assertEqual(call_args['record']['measurement'], 'ping') + self.assertEqual(call_args['record']['fields'], {'value': 2}) + + @patch('influxdb_client.InfluxDBClient') + def test_read(self, mock_influxdb_client): + mock_query_api = MagicMock() + mock_influxdb_client.return_value.query_api.return_value = mock_query_api + + self.client.read('ping', 'field1, field2', {'tag1': 'value1'}) + + mock_query_api.query.assert_called_once() + query = mock_query_api.query.call_args[0][0] + self.assertIn('from(bucket: "mybucket")', query) + self.assertIn('|> filter(fn: (r) => 
r._measurement == "ping")', query) + self.assertIn('|> filter(fn: (r) => r._field == "field1" or r._field == "field2")', query) + self.assertIn('|> filter(fn: (r) => r["tag1"] == "value1")', query) + + def test_validate_query(self): + valid_query = 'from(bucket:"mybucket") |> range(start: -1h) |> filter(fn: (r) => r._measurement == "cpu")' + self.assertTrue(self.client.validate_query(valid_query)) + + invalid_query = 'DROP DATABASE test' + with self.assertRaises(ValidationError): + self.client.validate_query(invalid_query) + + def test_get_query_with_pdb(self): + # Create a metric + metric = self._create_object_metric( + name='Ping', + key='ping', + field_name='rtt_avg', + content_type='config.device', + ) + chart = self._create_chart( + metric=metric, + configuration='line', + test_data=False + ) + + time = '30d' + group_map = Chart._get_group_map(time) + query = chart.get_query( + time=time, + summary=False, + fields=['loss', 'reachable', 'rtt_avg'], + timezone='UTC' + ) + self.assertIsNotNone(query) + self.assertIn('from(bucket: "mybucket")', query) + self.assertIn('range(start: -30d', query) + self.assertIn('filter(fn: (r) => r._measurement == "ping")', query) + + @patch('influxdb_client.InfluxDBClient') + def test_create_database(self, mock_influxdb_client): + mock_bucket_api = MagicMock() + mock_influxdb_client.return_value.buckets_api.return_value = mock_bucket_api + + self.client.create_database() + mock_bucket_api.find_bucket_by_name.assert_called_once_with('mybucket') + mock_bucket_api.create_bucket.assert_called_once() + + @patch('influxdb_client.InfluxDBClient') + def test_drop_database(self, mock_influxdb_client): + mock_bucket_api = MagicMock() + mock_influxdb_client.return_value.buckets_api.return_value = mock_bucket_api + + self.client.drop_database() + + mock_bucket_api.find_bucket_by_name.assert_called_once_with('mybucket') + mock_bucket_api.delete_bucket.assert_called_once() + + @patch('influxdb_client.InfluxDBClient') + def test_query(self, mock_influxdb_client): + mock_query_api = MagicMock() + mock_influxdb_client.return_value.query_api.return_value = mock_query_api + + test_query = 'from(bucket:"mybucket") |> range(start: -1h) |> filter(fn: (r) => r._measurement == "cpu")' + self.client.query(test_query) + + mock_query_api.query.assert_called_once_with(test_query) + + def test_get_timestamp(self): + timestamp = datetime(2023, 1, 1, 12, 0, 0) + result = self.client._get_timestamp(timestamp) + self.assertEqual(result, '2023-01-01T12:00:00.000000') + + @patch('influxdb_client.InfluxDBClient') + def test_write_exception(self, mock_influxdb_client): + mock_write_api = MagicMock() + mock_write_api.write.side_effect = ApiException(status=500, reason="Server Error") + mock_influxdb_client.return_value.write_api.return_value = mock_write_api + + with self.assertRaises(Exception): + self.client.write('ping', {'value': 2}) + + def test_get_custom_query(self): + c = self._create_chart(test_data=None) + custom_q = c._default_query.replace('{field_name}', '{fields}') + q = c.get_query(query=custom_q, fields=['SUM(*)']) + self.assertIn('SELECT SUM(*) FROM', q) + + def test_is_aggregate_bug(self): + m = self._create_object_metric(name='summary_avg') + c = self._create_chart(metric=m, configuration='dummy') + self.assertFalse(self.client._is_aggregate(c.query)) + + def test_is_aggregate_fields_function(self): + m = self._create_object_metric(name='is_aggregate_func') + c = self._create_chart(metric=m, configuration='uptime') + self.assertTrue(self.client._is_aggregate(c.query)) + + 
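
For reference, the calls the mocked tests above exercise can also be run against a live InfluxDB 2.x instance. The short sketch below only uses signatures that appear in the tests; the bucket, org, token and URL are placeholders:

from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient

client = DatabaseClient(
    bucket='mybucket',
    org='myorg',
    token='<influxdb2-api-token>',
    url='http://localhost:8086',
)
client.create_database()  # ensures the bucket exists
client.write('test_write', {'value': 2})
points = client.read('ping', 'field1, field2', {'tag1': 'value1'})
print(points)
client.close()
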
def test_get_query_fields_function(self): + c = self._create_chart(test_data=None, configuration='histogram') + q = c.get_query(fields=['ssh', 'http2', 'apple-music']) + expected = ( + 'SELECT SUM("ssh") / 1 AS ssh, ' + 'SUM("http2") / 1 AS http2, ' + 'SUM("apple-music") / 1 AS apple_music FROM' + ) + self.assertIn(expected, q) + + @patch('influxdb_client.InfluxDBClient') + def test_general_write(self, mock_influxdb_client): + mock_write_api = MagicMock() + mock_influxdb_client.return_value.write_api.return_value = mock_write_api + + m = self._create_general_metric(name='Sync test') + m.write(1) + + mock_write_api.write.assert_called_once() + call_args = mock_write_api.write.call_args[1] + self.assertEqual(call_args['record']['measurement'], 'sync_test') + self.assertEqual(call_args['record']['fields']['value'], 1) + + @patch('influxdb_client.InfluxDBClient') + def test_object_write(self, mock_influxdb_client): + mock_write_api = MagicMock() + mock_influxdb_client.return_value.write_api.return_value = mock_write_api + + om = self._create_object_metric() + om.write(3) + + mock_write_api.write.assert_called_once() + call_args = mock_write_api.write.call_args[1] + self.assertEqual(call_args['record']['measurement'], 'ping') + self.assertEqual(call_args['record']['fields']['value'], 3) + self.assertEqual(call_args['record']['tags']['object_id'], str(om.object_id)) + self.assertEqual(call_args['record']['tags']['content_type'], '.'.join(om.content_type.natural_key())) + + @patch('influxdb_client.InfluxDBClient') + def test_delete_metric_data(self, mock_influxdb_client): + mock_delete_api = MagicMock() + mock_influxdb_client.return_value.delete_api.return_value = mock_delete_api + + self.client.delete_metric_data(key='ping') + + mock_delete_api.delete.assert_called_once() + call_args = mock_delete_api.delete.call_args[1] + self.assertIn('_measurement="ping"', call_args['predicate']) + + def test_get_query_1d(self): + c = self._create_chart(test_data=None, configuration='uptime') + q = c.get_query(time='1d') + last24 = now() - timedelta(days=1) + self.assertIn(str(last24)[0:14], q) + self.assertIn('aggregateWindow(every: 10m', q) + + def test_get_query_30d(self): + c = self._create_chart(test_data=None, configuration='uptime') + q = c.get_query(time='30d') + last30d = now() - timedelta(days=30) + self.assertIn(str(last30d)[0:10], q) + self.assertIn('aggregateWindow(every: 24h', q) + + @patch('influxdb_client.InfluxDBClient') + @freeze_time("2023-01-01") + def test_read_order(self, mock_influxdb_client): + mock_query_api = MagicMock() + mock_influxdb_client.return_value.query_api.return_value = mock_query_api + + m = self._create_general_metric(name='dummy') + m.write(30) + m.write(40, time=now() - timedelta(days=2)) + + # Test ascending read order + m.read(limit=2, order='time') + query = mock_query_api.query.call_args[0][0] + self.assertIn('|> sort(columns: ["_time"], desc: false)', query) + + # Test descending read order + m.read(limit=2, order='-time') + query = mock_query_api.query.call_args[0][0] + self.assertIn('|> sort(columns: ["_time"], desc: true)', query) + + # Test invalid read order + with self.assertRaises(ValueError): + m.read(limit=2, order='invalid') + + @patch('influxdb_client.InfluxDBClient') + def ping_write_microseconds_precision(self, mock_influxdb_client): + mock_write_api = MagicMock() + mock_influxdb_client.return_value.write_api.return_value = mock_write_api + + m = self._create_object_metric(name='wlan0', key='wlan0', configuration='clients') + 
m.write('00:14:5c:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235142)) + m.write('00:23:4a:00:00:00', time=datetime(2020, 7, 31, 22, 5, 47, 235152)) + + self.assertEqual(mock_write_api.write.call_count, 2) + call_args_1 = mock_write_api.write.call_args_list[0][1] + call_args_2 = mock_write_api.write.call_args_list[1][1] + self.assertEqual(call_args_1['record']['time'], '2020-07-31T22:05:47.235142') + self.assertEqual(call_args_2['record']['time'], '2020-07-31T22:05:47.235152') + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/openwisp_monitoring/db/exceptions.py b/openwisp_monitoring/db/exceptions.py index 3aef4d377..3296400a1 100644 --- a/openwisp_monitoring/db/exceptions.py +++ b/openwisp_monitoring/db/exceptions.py @@ -1,6 +1,2 @@ class TimeseriesWriteException(Exception): pass - - -class WriteError(Exception): - pass diff --git a/openwisp_monitoring/device/base/models.py b/openwisp_monitoring/device/base/models.py index 4c7803175..a9a5ced78 100644 --- a/openwisp_monitoring/device/base/models.py +++ b/openwisp_monitoring/device/base/models.py @@ -3,6 +3,7 @@ from collections import OrderedDict from datetime import datetime +from django.conf import settings import swapper from cache_memoize import cache_memoize from dateutil.relativedelta import relativedelta @@ -155,7 +156,18 @@ def data(self): """ if self.__data: return self.__data - q = device_data_query.format(SHORT_RP, self.__key, self.pk) + + if settings.TIMESERIES_DATABASE['BACKEND'] == 'openwisp_monitoring.db.backends.influxdb2': + # InfluxDB 2.x query + q = device_data_query.format( + bucket=settings.TIMESERIES_DATABASE['BUCKET'], + measurement=self.__key, + object_id=self.pk + ) + else: + # InfluxDB 1.x query (kept for backward compatibility) + q = "SELECT data FROM {0}.{1} WHERE pk = '{2}' ORDER BY time DESC LIMIT 1".format(SHORT_RP, self.__key, self.pk) + cache_key = get_device_cache_key(device=self, context='current-data') points = cache.get(cache_key) if not points: @@ -379,11 +391,11 @@ def update_status(self, value): self.full_clean() self.save() # clear device management_ip when device is offline - if self.status == 'critical' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: - self.device.management_ip = None - self.device.save(update_fields=['management_ip']) + # if self.status == 'critical' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: + # self.device.management_ip = None + # self.device.save(update_fields=['management_ip']) - health_status_changed.send(sender=self.__class__, instance=self, status=value) + # health_status_changed.send(sender=self.__class__, instance=self, status=value) @property def related_metrics(self): diff --git a/openwisp_monitoring/device/settings.py b/openwisp_monitoring/device/settings.py index d239e3eac..e4f54da6e 100644 --- a/openwisp_monitoring/device/settings.py +++ b/openwisp_monitoring/device/settings.py @@ -46,7 +46,7 @@ def get_health_status_labels(): DEFAULT_RETENTION_POLICY = get_settings_value('DEFAULT_RETENTION_POLICY', '26280h0m0s') CRITICAL_DEVICE_METRICS = get_critical_device_metrics() HEALTH_STATUS_LABELS = get_health_status_labels() -AUTO_CLEAR_MANAGEMENT_IP = get_settings_value('AUTO_CLEAR_MANAGEMENT_IP', True) +AUTO_CLEAR_MANAGEMENT_IP = get_settings_value('AUTO_CLEAR_MANAGEMENT_IP', False) # Triggers spontaneous recovery of device based on corresponding signals DEVICE_RECOVERY_DETECTION = get_settings_value('DEVICE_RECOVERY_DETECTION', True) MAC_VENDOR_DETECTION = get_settings_value('MAC_VENDOR_DETECTION', True) diff --git 
a/openwisp_monitoring/device/utils.py b/openwisp_monitoring/device/utils.py index 151b62609..ae3c6bb0e 100644 --- a/openwisp_monitoring/device/utils.py +++ b/openwisp_monitoring/device/utils.py @@ -14,7 +14,7 @@ def manage_short_retention_policy(): creates or updates the "short" retention policy """ duration = app_settings.SHORT_RETENTION_POLICY - timeseries_db.create_or_alter_retention_policy(SHORT_RP, duration) + _manage_retention_policy(SHORT_RP, duration) def manage_default_retention_policy(): @@ -22,4 +22,9 @@ def manage_default_retention_policy(): creates or updates the "default" retention policy """ duration = app_settings.DEFAULT_RETENTION_POLICY - timeseries_db.create_or_alter_retention_policy(DEFAULT_RP, duration) + _manage_retention_policy(DEFAULT_RP, duration) + +def _manage_retention_policy(name, duration): + # For InfluxDB 2.x, we're not managing retention policies directly + # Instead, we ensure the bucket exists + timeseries_db.create_bucket(timeseries_db.bucket) diff --git a/openwisp_monitoring/monitoring/base/models.py b/openwisp_monitoring/monitoring/base/models.py index 5d7bf0ebe..89c289159 100644 --- a/openwisp_monitoring/monitoring/base/models.py +++ b/openwisp_monitoring/monitoring/base/models.py @@ -421,9 +421,14 @@ def write( current=current, ) pre_metric_write.send(**signal_kwargs) - timestamp = time or timezone.now() - if isinstance(timestamp, str): - timestamp = parse_date(timestamp) + if time is None: + timestamp = timezone.now() + elif isinstance(time, str): + timestamp = parse_date(time) + else: + timestamp = time + if timezone.is_naive(timestamp): + timestamp = timezone.make_aware(timestamp) options = dict( tags=self.tags, timestamp=timestamp.isoformat(), @@ -467,6 +472,11 @@ def batch_write(cls, raw_data): for metric, kwargs in raw_data: try: write_data.append(metric.write(**kwargs, write=False)) + if kwargs.get('check', True): + check_value = kwargs['value'] + if metric.alert_on_related_field and kwargs.get('extra_values'): + check_value = kwargs['extra_values'][metric.alert_field] + metric.check_threshold(check_value, kwargs.get('time'), kwargs.get('retention_policy'), kwargs.get('send_alert', True)) except ValueError as error: error_dict[metric.key] = str(error) _timeseries_batch_write(write_data) @@ -476,7 +486,7 @@ def batch_write(cls, raw_data): def read(self, **kwargs): """reads timeseries data""" return timeseries_db.read( - key=self.key, fields=self.field_name, tags=self.tags, **kwargs + measurement=self.key, fields=self.field_name, tags=self.tags, **kwargs ) def _notify_users(self, notification_type, alert_settings): @@ -656,6 +666,16 @@ def _get_group_map(cls, time=None): group = '7d' custom_group_map.update({time: group}) return custom_group_map + + def _format_date(self, date_str): + if date_str is None or date_str == 'now()': + return date_str + try: + date = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') + return date.strftime('%Y-%m-%dT%H:%M:%SZ') + except ValueError: + # If the date_str is not in the expected format, return it as is + return date_str def get_query( self, @@ -675,8 +695,13 @@ def get_query( params = self._get_query_params(time, start_date, end_date) params.update(additional_params) params.update({'start_date': start_date, 'end_date': end_date}) + params.update({ + 'start_date': self._format_date(start_date) if start_date else None, + 'end_date': self._format_date(end_date) if end_date else None + }) if not params.get('organization_id') and self.config_dict.get('__all__', False): params['organization_id'] = ['__all__'] + 
params['measurement'] = params.get('measurement') or params.get('key') return timeseries_db.get_query( self.type, params, @@ -707,6 +732,7 @@ def get_top_fields(self, number): def _get_query_params(self, time, start_date=None, end_date=None): m = self.metric params = dict( + measurement=m.key, field_name=m.field_name, key=m.key, time=self._get_time(time, start_date, end_date), @@ -754,8 +780,7 @@ def read( ): additional_query_kwargs = additional_query_kwargs or {} traces = {} - if x_axys: - x = [] + x = [] try: query_kwargs = dict( time=time, timezone=timezone, start_date=start_date, end_date=end_date @@ -771,37 +796,44 @@ def read( data_query = self.get_query(**query_kwargs) summary_query = self.get_query(summary=True, **query_kwargs) points = timeseries_db.get_list_query(data_query) + logging.debug(f"Data points: {points}") + logging.debug(f"Data query: {data_query}") summary = timeseries_db.get_list_query(summary_query) + logging.debug(f"Summary query: {summary_query}") except timeseries_db.client_error as e: logging.error(e, exc_info=True) raise e for point in points: + time_value = point.get('time') or point.get('_time') + if not time_value: + logging.warning(f"Point missing time value: {point}") + continue for key, value in point.items(): - if key == 'time': + if key in ['time', '_time']: continue traces.setdefault(key, []) if decimal_places and isinstance(value, (int, float)): value = self._round(value, decimal_places) traces[key].append(value) - time = datetime.fromtimestamp(point['time'], tz=tz(timezone)).strftime( - '%Y-%m-%d %H:%M' - ) - if x_axys: - x.append(time) + if isinstance(time_value, str): + time = datetime.fromisoformat(time_value.rstrip('Z')).replace(tzinfo=utc).astimezone(tz(timezone)) + else: + time = datetime.fromtimestamp(time_value, tz=tz(timezone)) + formatted_time = time.strftime('%Y-%m-%d %H:%M') + x.append(formatted_time) # prepare result to be returned # (transform chart data so its order is not random) result = {'traces': sorted(traces.items())} - if x_axys: - result['x'] = x + result['x'] = x # add summary if len(summary) > 0: result['summary'] = {} for key, value in summary[0].items(): - if key == 'time': + if key in ['time', '_time']: continue if not timeseries_db.validate_query(self.query): value = None - elif value: + elif value is not None: value = self._round(value, decimal_places) result['summary'][key] = value return result diff --git a/openwisp_monitoring/monitoring/migrations/__init__.py b/openwisp_monitoring/monitoring/migrations/__init__.py index 58c517a90..747840018 100644 --- a/openwisp_monitoring/monitoring/migrations/__init__.py +++ b/openwisp_monitoring/monitoring/migrations/__init__.py @@ -1,7 +1,10 @@ +from asyncio.log import logger + import swapper from django.contrib.auth.models import Permission from openwisp_controller.migrations import create_default_permissions, get_swapped_model +from django.db import transaction def assign_permissions_to_groups(apps, schema_editor): @@ -72,30 +75,42 @@ def create_general_metrics(apps, schema_editor): Chart = swapper.load_model('monitoring', 'Chart') Metric = swapper.load_model('monitoring', 'Metric') - metric, created = Metric._get_or_create( - configuration='general_clients', - name='General Clients', - key='wifi_clients', - object_id=None, - content_type_id=None, - ) - if created: - chart = Chart(metric=metric, configuration='gen_wifi_clients') - chart.full_clean() - chart.save() - - metric, created = Metric._get_or_create( - configuration='general_traffic', - name='General Traffic', - 
key='traffic', - object_id=None, - content_type_id=None, - ) - if created: - chart = Chart(metric=metric, configuration='general_traffic') - chart.full_clean() - chart.save() + # Temporarily disable the validation rules for the Chart model + original_full_clean = Chart.full_clean + + def disabled_full_clean(self): + pass + Chart.full_clean = disabled_full_clean + + try: + with transaction.atomic(): + metric, created = Metric._get_or_create( + configuration='general_clients', + name='General Clients', + key='wifi_clients', + object_id=None, + content_type_id=None, + ) + if created: + chart = Chart(metric=metric, configuration='gen_wifi_clients') + logger.debug(f'Creating chart with configuration: {chart.configuration}') + chart.save() + + metric, created = Metric._get_or_create( + configuration='general_traffic', + name='General Traffic', + key='traffic', + object_id=None, + content_type_id=None, + ) + if created: + chart = Chart(metric=metric, configuration='general_traffic') + logger.debug(f'Creating chart with configuration: {chart.configuration}') + chart.save() + finally: + # Restore the original full_clean method + Chart.full_clean = original_full_clean def delete_general_metrics(apps, schema_editor): Metric = apps.get_model('monitoring', 'Metric') diff --git a/openwisp_monitoring/monitoring/migrations/influxdb/__ini__.py b/openwisp_monitoring/monitoring/migrations/influxdb/__init__.py similarity index 100% rename from openwisp_monitoring/monitoring/migrations/influxdb/__ini__.py rename to openwisp_monitoring/monitoring/migrations/influxdb/__init__.py diff --git a/openwisp_monitoring/monitoring/migrations/influxdb2/__init__.py b/openwisp_monitoring/monitoring/migrations/influxdb2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openwisp_monitoring/monitoring/migrations/influxdb2/influxdb2_alter_structure_0006.py b/openwisp_monitoring/monitoring/migrations/influxdb2/influxdb2_alter_structure_0006.py new file mode 100644 index 000000000..10338cf97 --- /dev/null +++ b/openwisp_monitoring/monitoring/migrations/influxdb2/influxdb2_alter_structure_0006.py @@ -0,0 +1,112 @@ +# openwisp_monitoring/monitoring/migrations/influxdb2/influxdb2_alter_structure_0006.py +import logging +from datetime import datetime, timedelta + +from influxdb_client import InfluxDBClient +from influxdb_client.client.write_api import SYNCHRONOUS +from swapper import load_model + +from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient +from openwisp_monitoring.db.exceptions import TimeseriesWriteException + +SELECT_QUERY_LIMIT = 1000 +WRITE_BATCH_SIZE = 1000 +CHUNK_SIZE = 1000 +EXCLUDED_MEASUREMENTS = [ + 'ping', + 'config_applied', + 'clients', + 'disk', + 'memory', + 'cpu', + 'signal_strength', + 'signal_quality', + 'access_tech', + 'device_data', + 'traffic', + 'wifi_clients', +] + + +logger = logging.getLogger(__name__) + + +def get_influxdb_client(): + db_config = { + 'bucket': 'mybucket', + 'org': 'myorg', + 'token': 'dltiEmsmMKU__9SoBE0ingFdMTS3UksrESwIQDNtW_3WOgn8bQGdyYzPcx_aDtvZkqvR8RbMkwVVlzUJxpm62w==', + 'url': 'http://localhost:8086', + } + return DatabaseClient(**db_config) + + +def requires_migration(): + client = get_influxdb_client() + query_api = client.client.query_api() + query = f'from(bucket: "{client.bucket}") |> range(start: -1h)' + tsdb_measurements = query_api.query(org=client.org, query=query) + for table in tsdb_measurements: + for record in table.records: + if record.get_measurement() not in EXCLUDED_MEASUREMENTS: + return True + return False + + 
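
As a usage note, the migration entry point defined just below can also be invoked manually, for example from a Django shell, instead of going through the Celery task further down in this patch. A short, assumed invocation sketch:

from openwisp_monitoring.monitoring.migrations.influxdb2 import (
    influxdb2_alter_structure_0006 as influxdb2_migration,
)

# runs requires_migration() first and skips the work if nothing needs migrating
influxdb2_migration.migrate_influxdb_structure()
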
+def migrate_influxdb_structure(): + if not requires_migration(): + logger.info( + 'Timeseries data migration is already migrated. Skipping migration!' + ) + return + + # Implement your data migration logic here + logger.info('Starting migration for InfluxDB 2.0...') + migrate_wifi_clients() + migrate_traffic_data() + logger.info('Timeseries data migration completed.') + + +def migrate_influxdb_data(query_api, write_api, read_query, measurement, tags): + logger.debug(f'Executing query: {read_query}') + result = query_api.query(org='myorg', query=read_query) + points = [] + + for table in result: + for record in table.records: + point = { + 'measurement': measurement, + 'tags': tags, + 'fields': record.values, + 'time': record.get_time(), + } + points.append(point) + + write_api.write( + bucket='mybucket', org='myorg', record=points, write_options=SYNCHRONOUS + ) + logger.info(f'Migrated data for measurement: {measurement}') + + +def migrate_wifi_clients(): + client = get_influxdb_client() + query_api = client.client.query_api() + write_api = client.client.write_api(write_options=SYNCHRONOUS) + + read_query = 'from(bucket: "mybucket") |> range(start: -30d) |> filter(fn: (r) => r._measurement == "wifi_clients")' + tags = {'source': 'migration'} + + migrate_influxdb_data(query_api, write_api, read_query, 'wifi_clients', tags) + logger.info('"wifi_clients" measurements successfully migrated.') + + +def migrate_traffic_data(): + client = get_influxdb_client() + query_api = client.client.query_api() + write_api = client.client.write_api(write_options=SYNCHRONOUS) + + read_query = 'from(bucket: "mybucket") |> range(start: -30d) |> filter(fn: (r) => r._measurement == "traffic")' + tags = {'source': 'migration'} + + migrate_influxdb_data(query_api, write_api, read_query, 'traffic', tags) + logger.info('"traffic" measurements successfully migrated.') diff --git a/openwisp_monitoring/monitoring/tasks.py b/openwisp_monitoring/monitoring/tasks.py index 392cb6748..d12fac155 100644 --- a/openwisp_monitoring/monitoring/tasks.py +++ b/openwisp_monitoring/monitoring/tasks.py @@ -1,13 +1,26 @@ +from datetime import timezone +import os + from celery import shared_task +from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from swapper import load_model from openwisp_utils.tasks import OpenwispCeleryTask +from openwisp_monitoring.db.backends.influxdb.client import DatabaseClient as InfluxDB1Client +from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient as InfluxDB2Client + from ..db import timeseries_db from ..db.exceptions import TimeseriesWriteException +from .migrations.influxdb import influxdb_alter_structure_0006 as influxdb_migration +from .migrations.influxdb2 import influxdb2_alter_structure_0006 as influxdb2_migration from .settings import RETRY_OPTIONS from .signals import post_metric_write +from openwisp_monitoring.db.backends.influxdb.client import DatabaseClient as InfluxDB1Client +from openwisp_monitoring.db.backends.influxdb2.client import DatabaseClient as InfluxDB2Client +from django.utils.dateparse import parse_date + def _metric_post_write(name, values, metric, check_threshold_kwargs, **kwargs): @@ -54,18 +67,19 @@ def _timeseries_write(name, values, metric=None, check_threshold_kwargs=None, ** If the timeseries database is using UDP to write data, then write data synchronously. 
""" - if timeseries_db.use_udp: + if hasattr(timeseries_db, 'use_udp') and timeseries_db.use_udp: + # InfluxDB 1.x with UDP support func = timeseries_write + args = (name, values, metric, check_threshold_kwargs) + elif hasattr(timeseries_db, 'write'): + # InfluxDB 2.0 or InfluxDB 1.x without UDP support + func = timeseries_db.write(name, values, **kwargs) + _metric_post_write(name, values, metric, check_threshold_kwargs, **kwargs) else: + # Fallback to delayed write for other cases func = timeseries_write.delay metric = metric.pk if metric else None - func( - name=name, - values=values, - metric=metric, - check_threshold_kwargs=check_threshold_kwargs, - **kwargs - ) + args = (name, values, metric, check_threshold_kwargs) @shared_task( @@ -99,8 +113,18 @@ def _timeseries_batch_write(data): @shared_task(base=OpenwispCeleryTask) def delete_timeseries(key, tags): - timeseries_db.delete_series(key=key, tags=tags) - + backend = settings.TIMESERIES_DATABASE['BACKEND'] + + if backend == 'openwisp_monitoring.db.backends.influxdb': + # InfluxDB 1.x + client = InfluxDB1Client() + client.delete_series(key=key, tags=tags) + elif backend == 'openwisp_monitoring.db.backends.influxdb2': + # InfluxDB 2.x + # No need to perform any action for InfluxDB 2.x + pass + else: + raise ValueError(f"Unsupported backend: {backend}") @shared_task def migrate_timeseries_database(): @@ -111,8 +135,7 @@ def migrate_timeseries_database(): To be removed in 1.1.0 release. """ - from .migrations.influxdb.influxdb_alter_structure_0006 import ( - migrate_influxdb_structure, - ) - - migrate_influxdb_structure() + if os.environ.get('USE_INFLUXDB2', 'False') == 'True': + influxdb2_migration.migrate_influxdb_structure() + else: + influxdb_migration.migrate_influxdb_structure() diff --git a/openwisp_monitoring/monitoring/tests/__init__.py b/openwisp_monitoring/monitoring/tests/__init__.py index eb3e3243c..2c155bce8 100644 --- a/openwisp_monitoring/monitoring/tests/__init__.py +++ b/openwisp_monitoring/monitoring/tests/__init__.py @@ -1,6 +1,7 @@ import time from datetime import timedelta +from django.conf import settings from django.core.cache import cache from django.utils.timezone import now from swapper import load_model @@ -245,17 +246,47 @@ class TestMonitoringMixin(TestOrganizationMixin): - ORIGINAL_DB = TIMESERIES_DB['NAME'] - TEST_DB = f'{ORIGINAL_DB}_test' + INFLUXDB_BACKEND = TIMESERIES_DB.get('BACKEND') + TIMESERIES_DB = getattr(settings, 'TIMESERIES_DATABASE', None) + TEST_DB = f"{TIMESERIES_DB['NAME']}" if 'NAME' in TIMESERIES_DB else 'test_db' + TEST_BUCKET = f"{TIMESERIES_DB['BUCKET']}" + TEST_ORG = f"{TIMESERIES_DB['ORG']}" + TEST_TOKEN = f"{TIMESERIES_DB['TOKEN']}" + + if INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb': + # InfluxDB 1.x configuration + ORIGINAL_DB = TIMESERIES_DB['NAME'] + TEST_DB = f"{ORIGINAL_DB}" + elif INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb2': + # InfluxDB 2.x configuration + ORG_BUCKET = f"{TIMESERIES_DB['ORG']}/{TIMESERIES_DB['BUCKET']}" + ORIGINAL_DB = ORG_BUCKET + TEST_DB = f"{ORG_BUCKET}" + else: + ORIGINAL_DB = None + TEST_DB = None @classmethod def setUpClass(cls): + # import pdb; pdb.set_trace() # By default timeseries_db.db shall connect to the database # defined in settings when apps are loaded. 
We don't want that while testing - timeseries_db.db_name = cls.TEST_DB - del timeseries_db.db - del timeseries_db.dbs + if 'NAME' in cls.TIMESERIES_DB: + # InfluxDB 1.8 configuration + timeseries_db.db_name = cls.TEST_DB + del timeseries_db.db + del timeseries_db.dbs + else: + # InfluxDB 2.0 configuration + timeseries_db.bucket = cls.TEST_BUCKET + timeseries_db.org = cls.TEST_ORG + timeseries_db.token = cls.TEST_TOKEN + + # Create the test database or bucket timeseries_db.create_database() + + # Rest of the setup code... + super().setUpClass() for key, value in metrics.items(): register_metric(key, value) for key, value in charts.items(): diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py index 4d0186252..bbe669167 100644 --- a/tests/openwisp2/settings.py +++ b/tests/openwisp2/settings.py @@ -7,7 +7,8 @@ TESTING = 'test' in sys.argv SHELL = 'shell' in sys.argv or 'shell_plus' in sys.argv BASE_DIR = os.path.dirname(os.path.abspath(__file__)) - +INFLUXDB_BACKEND = 'openwisp_monitoring.db.backends.influxdb2' +INFLUXDB_BACKEND = 'openwisp_monitoring.db.backends.influxdb' DEBUG = True ALLOWED_HOSTS = ['*'] @@ -35,11 +36,11 @@ # For InfluxDB 2.x INFLUXDB_2x_DATABASE = { 'BACKEND': 'openwisp_monitoring.db.backends.influxdb2', - 'TOKEN': 't8Q3Y5mTWuqqTRdGyVxZuyVLO-8pl3I8KaNTR3jV7uTDr_GVECP5Z7LsrZwILGw79Xp4O8pAWkdqTREgIk073Q==', + 'TOKEN': 'dltiEmsmMKU__9SoBE0ingFdMTS3UksrESwIQDNtW_3WOgn8bQGdyYzPcx_aDtvZkqvR8RbMkwVVlzUJxpm62w==', 'ORG': 'myorg', 'BUCKET': 'mybucket', 'HOST': os.getenv('INFLUXDB_HOST', 'localhost'), - 'PORT': '9086', + 'PORT': '8086', } if os.environ.get('USE_INFLUXDB2', 'False') == 'True': From 61300a60cfd2ebb94cbf50b4a7aead13960647ea Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Sat, 29 Jun 2024 17:40:21 +0530 Subject: [PATCH 5/7] [monitoring] Avoided if statements #274 Fixes #274 --- openwisp_monitoring/db/__init__.py | 4 +- openwisp_monitoring/db/backends/__init__.py | 61 +++---------- .../db/backends/influxdb/client.py | 43 ++++++++- .../db/backends/influxdb/queries.py | 9 -- .../db/backends/influxdb2/client.py | 44 +-------- openwisp_monitoring/device/base/models.py | 26 ++---- openwisp_monitoring/monitoring/base/models.py | 91 +++++++------------ .../monitoring/migrations/__init__.py | 63 +++++-------- 8 files changed, 124 insertions(+), 217 deletions(-) diff --git a/openwisp_monitoring/db/__init__.py b/openwisp_monitoring/db/__init__.py index 063d2d8f7..64510ebc5 100644 --- a/openwisp_monitoring/db/__init__.py +++ b/openwisp_monitoring/db/__init__.py @@ -1,7 +1,5 @@ from .backends import timeseries_db chart_query = timeseries_db.queries.chart_query -default_chart_query = timeseries_db.queries.default_chart_query -device_data_query = timeseries_db.queries.device_data_query -__all__ = ['timeseries_db', 'chart_query', 'default_chart_query', 'device_data_query'] +__all__ = ['timeseries_db', 'chart_query'] diff --git a/openwisp_monitoring/db/backends/__init__.py b/openwisp_monitoring/db/backends/__init__.py index be0cd843c..518c469e1 100644 --- a/openwisp_monitoring/db/backends/__init__.py +++ b/openwisp_monitoring/db/backends/__init__.py @@ -9,36 +9,19 @@ TIMESERIES_DB = getattr(settings, 'TIMESERIES_DATABASE', None) if not TIMESERIES_DB: - INFLUXDB_BACKEND = getattr(settings, 'INFLUXDB_BACKEND', 'openwisp_monitoring.db.backends.influxdb') - - if INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb': - # InfluxDB 1.x configuration - TIMESERIES_DB = { - 'BACKEND': INFLUXDB_BACKEND, - 'USER': getattr(settings, 'INFLUXDB_USER', 'openwisp'), - 
'PASSWORD': getattr(settings, 'INFLUXDB_PASSWORD', 'openwisp'), - 'NAME': getattr(settings, 'INFLUXDB_DATABASE', 'openwisp2'), - 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), - 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), - } - elif INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb2': - # InfluxDB 2.x configuration - TIMESERIES_DB = { - 'BACKEND': INFLUXDB_BACKEND, - 'TOKEN': getattr(settings, 'INFLUXDB_TOKEN', 'dltiEmsmMKU__9SoBE0ingFdMTS3UksrESwIQDNtW_3WOgn8bQGdyYzPcx_aDtvZkqvR8RbMkwVVlzUJxpm62w=='), - 'ORG': getattr(settings, 'INFLUXDB_ORG', 'myorg'), - 'BUCKET': getattr(settings, 'INFLUXDB_BUCKET', 'mybucket'), - 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), - 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), - } - else: - logger.warning('Invalid INFLUXDB_BACKEND setting. Please check the documentation.') + TIMESERIES_DB = { + 'BACKEND': 'openwisp_monitoring.db.backends.influxdb', + 'USER': getattr(settings, 'INFLUXDB_USER', 'openwisp'), + 'PASSWORD': getattr(settings, 'INFLUXDB_PASSWORD', 'openwisp'), + 'NAME': getattr(settings, 'INFLUXDB_DATABASE', 'openwisp2'), + 'HOST': getattr(settings, 'INFLUXDB_HOST', 'localhost'), + 'PORT': getattr(settings, 'INFLUXDB_PORT', '8086'), + } + logger.warning( + 'The previous method to define Timeseries Database has been deprecated. Please refer to the docs:\n' + 'https://github.com/openwisp/openwisp-monitoring#setup-integrate-in-an-existing-django-project' + ) - if INFLUXDB_BACKEND == 'openwisp_monitoring.db.backends.influxdb': - logger.warning( - 'The previous method to define Timeseries Database has been deprecated. Please refer to the docs:\n' - 'https://github.com/openwisp/openwisp-monitoring#setup-integrate-in-an-existing-django-project' - ) def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): """ @@ -47,8 +30,7 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): """ try: assert 'BACKEND' in TIMESERIES_DB, 'BACKEND' - is_influxdb2 = '2' in TIMESERIES_DB['BACKEND'] - if is_influxdb2: + if 'BACKEND' in TIMESERIES_DB and '2' in TIMESERIES_DB['BACKEND']: # InfluxDB 2.x specific checks assert 'TOKEN' in TIMESERIES_DB, 'TOKEN' assert 'ORG' in TIMESERIES_DB, 'ORG' @@ -58,8 +40,6 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): assert 'USER' in TIMESERIES_DB, 'USER' assert 'PASSWORD' in TIMESERIES_DB, 'PASSWORD' assert 'NAME' in TIMESERIES_DB, 'NAME' - assert 'HOST' in TIMESERIES_DB, 'HOST' - assert 'PORT' in TIMESERIES_DB, 'PORT' if module: return import_module(f'{backend_name}.{module}') else: @@ -82,18 +62,7 @@ def load_backend_module(backend_name=TIMESERIES_DB['BACKEND'], module=None): "Try using 'openwisp_monitoring.db.backends.XXX', where XXX is one of:\n" f"{builtin_backends}" ) from e - else: - raise e -if '2' in TIMESERIES_DB['BACKEND']: - timeseries_db = load_backend_module(module='client').DatabaseClient( - bucket=TIMESERIES_DB['BUCKET'], - org=TIMESERIES_DB['ORG'], - token=TIMESERIES_DB['TOKEN'], - url=f"http://{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}", - ) - timeseries_db.queries = load_backend_module(TIMESERIES_DB['BACKEND'], module='queries') -else: - timeseries_db = load_backend_module(module='client').DatabaseClient() - timeseries_db.queries = load_backend_module(module='queries') \ No newline at end of file +timeseries_db = load_backend_module(module='client').DatabaseClient() +timeseries_db.queries = load_backend_module(module='queries') diff --git a/openwisp_monitoring/db/backends/influxdb/client.py 
b/openwisp_monitoring/db/backends/influxdb/client.py index 906769a00..583ce1fac 100644 --- a/openwisp_monitoring/db/backends/influxdb/client.py +++ b/openwisp_monitoring/db/backends/influxdb/client.py @@ -56,7 +56,6 @@ class DatabaseClient(object): backend_name = 'influxdb' def __init__(self, db_name=None): - self._db = None self.db_name = db_name or TIMESERIES_DB['NAME'] self.client_error = InfluxDBClientError @@ -255,7 +254,7 @@ def read(self, key, fields, tags, **kwargs): q = f'{q} LIMIT {limit}' return list(self.query(q, precision='s').get_points()) - def get_list_query(self, query, precision='s'): + def get_list_query(self, query, precision='s', **kwargs): result = self.query(query, precision=precision) if not len(result.keys()) or result.keys()[0][1] is None: return list(result.get_points()) @@ -426,6 +425,7 @@ def __transform_field(self, field, function, operation=None): def _get_top_fields( self, + default_query, query, params, chart_type, @@ -433,9 +433,15 @@ def _get_top_fields( number, time, timezone=settings.TIME_ZONE, + get_fields=True, ): + """ + Returns top fields if ``get_fields`` set to ``True`` (default) + else it returns points containing the top fields. + """ + q = default_query.replace('{field_name}', '{fields}') q = self.get_query( - query=query, + query=q, params=params, chart_type=chart_type, group_map=group_map, @@ -444,7 +450,7 @@ def _get_top_fields( time=time, timezone=timezone, ) - res = list(self.query(q, precision='s').get_points()) + res = self.get_list_query(q) if not res: return [] res = res[0] @@ -454,4 +460,31 @@ def _get_top_fields( keys = list(sorted_dict.keys()) keys.reverse() top = keys[0:number] - return [item.replace('sum_', '') for item in top] + top_fields = [item.replace('sum_', '') for item in top] + if get_fields: + return top_fields + query = self.get_query( + query=query, + params=params, + chart_type=chart_type, + group_map=group_map, + summary=True, + fields=top_fields, + time=time, + timezone=timezone, + ) + return self.get_list_query(query) + + def default_chart_query(self, tags): + q = "SELECT {field_name} FROM {key} WHERE time >= '{time}'" + if tags: + q += " AND content_type = '{content_type}' AND object_id = '{object_id}'" + return q + + def _device_data(self, key, tags, rp, **kwargs): + """ returns last snapshot of ``device_data`` """ + query = ( + f"SELECT data FROM {rp}.{key} WHERE pk = '{tags['pk']}' " + "ORDER BY time DESC LIMIT 1" + ) + return self.get_list_query(query, precision=None) diff --git a/openwisp_monitoring/db/backends/influxdb/queries.py b/openwisp_monitoring/db/backends/influxdb/queries.py index f3f64aa2e..185677a9c 100644 --- a/openwisp_monitoring/db/backends/influxdb/queries.py +++ b/openwisp_monitoring/db/backends/influxdb/queries.py @@ -144,12 +144,3 @@ ) }, } - -default_chart_query = [ - "SELECT {field_name} FROM {key} WHERE time >= '{time}' {end_date}", - " AND content_type = '{content_type}' AND object_id = '{object_id}'", -] - -device_data_query = ( - "SELECT data FROM {0}.{1} WHERE pk = '{2}' " "ORDER BY time DESC LIMIT 1" -) diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py index 9af567a57..256c12d9b 100644 --- a/openwisp_monitoring/db/backends/influxdb2/client.py +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -31,7 +31,7 @@ def __init__(self, bucket=None, org=None, token=None, url=None): self.bucket = bucket or TIMESERIES_DB['BUCKET'] self.org = org or TIMESERIES_DB['ORG'] self.token = token or TIMESERIES_DB['TOKEN'] - 
self.url = url + self.url = url or f'http://{TIMESERIES_DB["HOST"]}:{TIMESERIES_DB["PORT"]}' self.client = InfluxDBClient(url=self.url, token=self.token, org=self.org) self.write_api = self.client.write_api(write_options=SYNCHRONOUS) self.query_api = self.client.query_api() @@ -85,63 +85,28 @@ def _get_timestamp(self, timestamp=None): def write(self, name, values, **kwargs): timestamp = self._get_timestamp(timestamp=kwargs.get('timestamp')) - try: - tags = kwargs.get('tags', {}) - if 'content_type' in kwargs: - tags['content_type'] = kwargs['content_type'] - if 'object_id' in kwargs: - tags['object_id'] = kwargs['object_id'] + try: point = { 'measurement': name, - 'tags': tags, + 'tags': kwargs.get('tags'), 'fields': values, 'time': timestamp, } - # import pdb; pdb.set_trace() - print(f"Writing point to InfluxDB: {point}") self.write_api.write(bucket=self.bucket, org=self.org, record=point) - print(f"Successfully wrote point to bucket {self.bucket}") except Exception as e: print(f"Error writing to InfluxDB: {e}") def batch_write(self, metric_data): - print(f"Batch writing to InfluxDB - Data: {metric_data}") points = [] for data in metric_data: timestamp = self._get_timestamp(timestamp=data.get('timestamp')) point = Point(data.get('name')).tag(**data.get('tags', {})).field(**data.get('values')).time(timestamp, WritePrecision.NS) - points.append(point) - + points.append(point) try: self.write_api.write(bucket=self.bucket, org=self.org, record=points) - logger.debug(f'Written batch of {len(points)} points to bucket {self.bucket}') except Exception as e: logger.error(f"Error writing batch to InfluxDB: {e}") - # def query(self, query): - # print(f"Executing query: {query}") - # try: - # tables = self.query_api.query(query) - # print(f"Query result: {tables}") - # result = [] - # for table in tables: - # for record in table.records: - # record_dict = { - # 'time': record.get_time(), - # 'measurement': record.get_measurement(), - # 'field': record.get_field(), - # 'value': record.get_value() - # } - # result.append(record_dict) - # print(f"Record: {record_dict}") - # print(f"Query result: {result}") - # if not result: - # print("Query returned no data") - # return result - # except Exception as e: - # logger.error(f"Error querying InfluxDB: {e}") - # print(f"Error querying InfluxDB: {e}") - # return [] def _format_date(self, date_str): if date_str is None or date_str == 'now()': return date_str @@ -193,7 +158,6 @@ def query(self, query): return result except Exception as e: logger.error(f"Error executing query: {e}") - return None def read(self, measurement, fields, tags, **kwargs): extra_fields = kwargs.get('extra_fields') diff --git a/openwisp_monitoring/device/base/models.py b/openwisp_monitoring/device/base/models.py index a9a5ced78..cdb9ba8e8 100644 --- a/openwisp_monitoring/device/base/models.py +++ b/openwisp_monitoring/device/base/models.py @@ -26,7 +26,7 @@ from openwisp_monitoring.device.settings import get_critical_device_metrics from openwisp_utils.base import TimeStampedEditableModel -from ...db import device_data_query, timeseries_db +from ...db import timeseries_db from ...monitoring.signals import threshold_crossed from ...monitoring.tasks import _timeseries_write from ...settings import CACHE_TIMEOUT @@ -156,22 +156,12 @@ def data(self): """ if self.__data: return self.__data - - if settings.TIMESERIES_DATABASE['BACKEND'] == 'openwisp_monitoring.db.backends.influxdb2': - # InfluxDB 2.x query - q = device_data_query.format( - bucket=settings.TIMESERIES_DATABASE['BUCKET'], - 
measurement=self.__key, - object_id=self.pk - ) - else: - # InfluxDB 1.x query (kept for backward compatibility) - q = "SELECT data FROM {0}.{1} WHERE pk = '{2}' ORDER BY time DESC LIMIT 1".format(SHORT_RP, self.__key, self.pk) - cache_key = get_device_cache_key(device=self, context='current-data') points = cache.get(cache_key) if not points: - points = timeseries_db.get_list_query(q, precision=None) + points = timeseries_db._device_data( + rp=SHORT_RP, tags={'pk': self.pk}, key=self.__key, fields='data' + ) if not points: return None self.data_timestamp = points[0]['time'] @@ -391,11 +381,11 @@ def update_status(self, value): self.full_clean() self.save() # clear device management_ip when device is offline - # if self.status == 'critical' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: - # self.device.management_ip = None - # self.device.save(update_fields=['management_ip']) + if self.status == '' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: + self.device.management_ip = None + self.device.save(update_fields=['management_ip']) - # health_status_changed.send(sender=self.__class__, instance=self, status=value) + health_status_changed.send(sender=self.__class__, instance=self, status=value) @property def related_metrics(self): diff --git a/openwisp_monitoring/monitoring/base/models.py b/openwisp_monitoring/monitoring/base/models.py index 89c289159..f3cd7141e 100644 --- a/openwisp_monitoring/monitoring/base/models.py +++ b/openwisp_monitoring/monitoring/base/models.py @@ -24,7 +24,7 @@ from openwisp_monitoring.monitoring.utils import clean_timeseries_data_key from openwisp_utils.base import TimeStampedEditableModel -from ...db import default_chart_query, timeseries_db +from ...db import timeseries_db from ...settings import CACHE_TIMEOUT, DEFAULT_CHART_TIME from ..configuration import ( CHART_CONFIGURATION_CHOICES, @@ -421,14 +421,9 @@ def write( current=current, ) pre_metric_write.send(**signal_kwargs) - if time is None: - timestamp = timezone.now() - elif isinstance(time, str): - timestamp = parse_date(time) - else: - timestamp = time - if timezone.is_naive(timestamp): - timestamp = timezone.make_aware(timestamp) + timestamp = time or timezone.now() + if isinstance(timestamp, str): + timestamp = parse_date(timestamp) options = dict( tags=self.tags, timestamp=timestamp.isoformat(), @@ -472,11 +467,6 @@ def batch_write(cls, raw_data): for metric, kwargs in raw_data: try: write_data.append(metric.write(**kwargs, write=False)) - if kwargs.get('check', True): - check_value = kwargs['value'] - if metric.alert_on_related_field and kwargs.get('extra_values'): - check_value = kwargs['extra_values'][metric.alert_field] - metric.check_threshold(check_value, kwargs.get('time'), kwargs.get('retention_policy'), kwargs.get('send_alert', True)) except ValueError as error: error_dict[metric.key] = str(error) _timeseries_batch_write(write_data) @@ -486,7 +476,7 @@ def batch_write(cls, raw_data): def read(self, **kwargs): """reads timeseries data""" return timeseries_db.read( - measurement=self.key, fields=self.field_name, tags=self.tags, **kwargs + key=self.key, fields=self.field_name, tags=self.tags, **kwargs ) def _notify_users(self, notification_type, alert_settings): @@ -627,10 +617,8 @@ def top_fields(self): @property def _default_query(self): - q = default_chart_query[0] - if self.metric.object_id: - q += default_chart_query[1] - return q + tags = True if self.metric.object_id else False + return timeseries_db.default_chart_query(tags) @classmethod def _get_group_map(cls, time=None): @@ -666,16 +654,6 @@ def 
_get_group_map(cls, time=None): group = '7d' custom_group_map.update({time: group}) return custom_group_map - - def _format_date(self, date_str): - if date_str is None or date_str == 'now()': - return date_str - try: - date = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') - return date.strftime('%Y-%m-%dT%H:%M:%SZ') - except ValueError: - # If the date_str is not in the expected format, return it as is - return date_str def get_query( self, @@ -695,13 +673,8 @@ def get_query( params = self._get_query_params(time, start_date, end_date) params.update(additional_params) params.update({'start_date': start_date, 'end_date': end_date}) - params.update({ - 'start_date': self._format_date(start_date) if start_date else None, - 'end_date': self._format_date(end_date) if end_date else None - }) if not params.get('organization_id') and self.config_dict.get('__all__', False): params['organization_id'] = ['__all__'] - params['measurement'] = params.get('measurement') or params.get('key') return timeseries_db.get_query( self.type, params, @@ -718,10 +691,10 @@ def get_top_fields(self, number): Returns list of top ``number`` of fields (highest sum) of a measurement in the specified time range (descending order). """ - q = self._default_query.replace('{field_name}', '{fields}') params = self._get_query_params(self.DEFAULT_TIME) return timeseries_db._get_top_fields( - query=q, + default_query=self._default_query, + query=self.get_query(), chart_type=self.type, group_map=self._get_group_map(params['days']), number=number, @@ -732,7 +705,6 @@ def get_top_fields(self, number): def _get_query_params(self, time, start_date=None, end_date=None): m = self.metric params = dict( - measurement=m.key, field_name=m.field_name, key=m.key, time=self._get_time(time, start_date, end_date), @@ -780,26 +752,31 @@ def read( ): additional_query_kwargs = additional_query_kwargs or {} traces = {} - x = [] + if x_axys: + x = [] try: query_kwargs = dict( time=time, timezone=timezone, start_date=start_date, end_date=end_date ) query_kwargs.update(additional_query_kwargs) if self.top_fields: - fields = self.get_top_fields(self.top_fields) - data_query = self.get_query(fields=fields, **query_kwargs) - summary_query = self.get_query( - fields=fields, summary=True, **query_kwargs + points = summary = timeseries_db._get_top_fields( + default_query=self._default_query, + chart_type=self.type, + group_map=self.GROUP_MAP, + number=self.top_fields, + params=self._get_query_params(self.DEFAULT_TIME), + time=time, + query=self.query, + get_fields=False, ) else: data_query = self.get_query(**query_kwargs) summary_query = self.get_query(summary=True, **query_kwargs) - points = timeseries_db.get_list_query(data_query) - logging.debug(f"Data points: {points}") - logging.debug(f"Data query: {data_query}") - summary = timeseries_db.get_list_query(summary_query) - logging.debug(f"Summary query: {summary_query}") + points = timeseries_db.get_list_query(data_query, key=self.metric.key) + summary = timeseries_db.get_list_query( + summary_query, key=self.metric.key + ) except timeseries_db.client_error as e: logging.error(e, exc_info=True) raise e @@ -809,31 +786,31 @@ def read( logging.warning(f"Point missing time value: {point}") continue for key, value in point.items(): - if key in ['time', '_time']: + if key == 'time': continue traces.setdefault(key, []) if decimal_places and isinstance(value, (int, float)): value = self._round(value, decimal_places) traces[key].append(value) - if isinstance(time_value, str): - time = 
datetime.fromisoformat(time_value.rstrip('Z')).replace(tzinfo=utc).astimezone(tz(timezone)) - else: - time = datetime.fromtimestamp(time_value, tz=tz(timezone)) - formatted_time = time.strftime('%Y-%m-%d %H:%M') - x.append(formatted_time) + time = datetime.fromtimestamp(point['time'], tz=tz(timezone)).strftime( + '%Y-%m-%d %H:%M' + ) + if x_axys: + x.append(time) # prepare result to be returned # (transform chart data so its order is not random) result = {'traces': sorted(traces.items())} - result['x'] = x + if x_axys: + result['x'] = x # add summary if len(summary) > 0: result['summary'] = {} for key, value in summary[0].items(): - if key in ['time', '_time']: + if key == 'time': continue if not timeseries_db.validate_query(self.query): value = None - elif value is not None: + elif value: value = self._round(value, decimal_places) result['summary'][key] = value return result diff --git a/openwisp_monitoring/monitoring/migrations/__init__.py b/openwisp_monitoring/monitoring/migrations/__init__.py index 747840018..d8198ede3 100644 --- a/openwisp_monitoring/monitoring/migrations/__init__.py +++ b/openwisp_monitoring/monitoring/migrations/__init__.py @@ -1,10 +1,7 @@ -from asyncio.log import logger - import swapper from django.contrib.auth.models import Permission from openwisp_controller.migrations import create_default_permissions, get_swapped_model -from django.db import transaction def assign_permissions_to_groups(apps, schema_editor): @@ -75,42 +72,30 @@ def create_general_metrics(apps, schema_editor): Chart = swapper.load_model('monitoring', 'Chart') Metric = swapper.load_model('monitoring', 'Metric') - # Temporarily disable the validation rules for the Chart model - original_full_clean = Chart.full_clean - - def disabled_full_clean(self): - pass - - Chart.full_clean = disabled_full_clean - - try: - with transaction.atomic(): - metric, created = Metric._get_or_create( - configuration='general_clients', - name='General Clients', - key='wifi_clients', - object_id=None, - content_type_id=None, - ) - if created: - chart = Chart(metric=metric, configuration='gen_wifi_clients') - logger.debug(f'Creating chart with configuration: {chart.configuration}') - chart.save() - - metric, created = Metric._get_or_create( - configuration='general_traffic', - name='General Traffic', - key='traffic', - object_id=None, - content_type_id=None, - ) - if created: - chart = Chart(metric=metric, configuration='general_traffic') - logger.debug(f'Creating chart with configuration: {chart.configuration}') - chart.save() - finally: - # Restore the original full_clean method - Chart.full_clean = original_full_clean + + metric, created = Metric._get_or_create( + configuration='general_clients', + name='General Clients', + key='wifi_clients', + object_id=None, + content_type_id=None, + ) + if created: + chart = Chart(metric=metric, configuration='gen_wifi_clients') + logger.debug(f'Creating chart with configuration: {chart.configuration}') + chart.save() + + metric, created = Metric._get_or_create( + configuration='general_traffic', + name='General Traffic', + key='traffic', + object_id=None, + content_type_id=None, + ) + if created: + chart = Chart(metric=metric, configuration='general_traffic') + logger.debug(f'Creating chart with configuration: {chart.configuration}') + chart.save() def delete_general_metrics(apps, schema_editor): Metric = apps.get_model('monitoring', 'Metric') From bff44f1564f1f78ad33190bda8007fe451c754aa Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Sun, 30 Jun 2024 21:57:13 +0530 Subject: 
[PATCH 6/7] [monitoring] Chart generation code modified #274 Fixes #274 --- .../db/backends/influxdb2/client.py | 387 +++++++++--------- .../db/backends/influxdb2/queries.py | 300 -------------- openwisp_monitoring/device/base/models.py | 1 - openwisp_monitoring/monitoring/base/models.py | 108 +++-- .../monitoring/migrations/__init__.py | 1 + openwisp_monitoring/views.py | 5 + tests/openwisp2/settings.py | 2 - 7 files changed, 287 insertions(+), 517 deletions(-) diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py index 256c12d9b..e0327a825 100644 --- a/openwisp_monitoring/db/backends/influxdb2/client.py +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -92,7 +92,9 @@ def write(self, name, values, **kwargs): 'fields': values, 'time': timestamp, } + print(f"Writing point to InfluxDB: {point}") self.write_api.write(bucket=self.bucket, org=self.org, record=point) + print("Successfully wrote point to InfluxDB") except Exception as e: print(f"Error writing to InfluxDB: {e}") @@ -117,114 +119,135 @@ def _format_date(self, date_str): # If the date_str is not in the expected format, return it as is return date_str - def get_query(self, chart_type, params, time, group_map, summary=False, fields=None, query=None, timezone=settings.TIME_ZONE): - print(f"get_query called with params: {params}") - measurement = params.get('measurement') or params.get('key') - if not measurement or measurement == 'None': - logger.error(f"Invalid or missing measurement in params: {params}") - return None - - start_date = self._format_date(params.get('start_date', f'-{time}')) - end_date = self._format_date(params.get('end_date', 'now()')) - content_type = params.get('content_type') - object_id = params.get('object_id') - - - window = group_map.get(time, '1h') - - flux_query = f''' - from(bucket: "{self.bucket}") - |> range(start: {start_date}, stop: {end_date}) - |> filter(fn: (r) => r["_measurement"] == "{measurement}") - ''' - - if content_type and object_id: - flux_query += f' |> filter(fn: (r) => r.content_type == "{content_type}" and r.object_id == "{object_id}")\n' - - if fields: - field_filters = ' or '.join([f'r["_field"] == "{field}"' for field in fields]) - flux_query += f' |> filter(fn: (r) => {field_filters})\n' - - flux_query += f' |> aggregateWindow(every: {window}, fn: mean, createEmpty: false)\n' - flux_query += ' |> yield(name: "mean")' - - print(f"Generated Flux query: {flux_query}") - return flux_query - def query(self, query): print(f"Executing query: {query}") try: result = self.query_api.query(query) + print(f"Query result: {result}") return result except Exception as e: + print(f"Error executing query: {e}") logger.error(f"Error executing query: {e}") - - def read(self, measurement, fields, tags, **kwargs): + return [] + + def _parse_query_result(self, result): + print("Parsing query result") + parsed_result = [] + for table in result: + for record in table.records: + parsed_record = { + 'time': record.get_time().isoformat(), + } + for key, value in record.values.items(): + if key not in ['_time', '_start', '_stop', '_measurement']: + parsed_record[key] = value + parsed_result.append(parsed_record) + print(f"Parsed result: {parsed_result}") + return parsed_result + + def read(self, key, fields, tags, **kwargs): extra_fields = kwargs.get('extra_fields') - since = kwargs.get('since', '-30d') + since = kwargs.get('since', '-30d') # Default to last 30 days if not specified order = kwargs.get('order') limit = 
kwargs.get('limit') + bucket = self.bucket - flux_query = f''' - from(bucket: "{self.bucket}") - |> range(start: {since}) - |> filter(fn: (r) => r._measurement == "{measurement}") - ''' - if fields and fields != '*': - field_filters = ' or '.join([f'r._field == "{field}"' for field in fields.split(', ')]) - flux_query += f' |> filter(fn: (r) => {field_filters})' - + # Start building the Flux query + flux_query = f'from(bucket:"{bucket}")' + + # Add time range + flux_query += f'\n |> range(start: {since})' + + # Filter by measurement (key) + flux_query += f'\n |> filter(fn: (r) => r["_measurement"] == "{key}")' + + # Filter by fields + if fields != '*': + if extra_fields and extra_fields != '*': + all_fields = [fields] + extra_fields if isinstance(extra_fields, list) else [fields, extra_fields] + field_filter = ' or '.join([f'r["_field"] == "{field}"' for field in all_fields]) + else: + field_filter = f'r["_field"] == "{fields}"' + flux_query += f'\n |> filter(fn: (r) => {field_filter})' + + # Filter by tags if tags: tag_filters = ' and '.join([f'r["{tag}"] == "{value}"' for tag, value in tags.items()]) - flux_query += f' |> filter(fn: (r) => {tag_filters})' - - flux_query += ''' - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") - |> map(fn: (r) => ({r with _value: float(v: r._value)})) - |> keep(columns: ["_time", "_value", "_field", "content_type", "object_id"]) - |> rename(columns: {_time: "time"}) - ''' + flux_query += f'\n |> filter(fn: (r) => {tag_filters})' + # Add ordering if order: - if order == 'time': - flux_query += ' |> sort(columns: ["time"], desc: false)' - elif order == '-time': - flux_query += ' |> sort(columns: ["time"], desc: true)' + if order in ['time', '-time']: + desc = 'true' if order == '-time' else 'false' + flux_query += f'\n |> sort(columns: ["_time"], desc: {desc})' else: - raise ValueError(f'Invalid order "{order}" passed.\nYou may pass "time" / "-time" to get result sorted in ascending /descending order respectively.') - + raise self.client_error( + f'Invalid order "{order}" passed.\nYou may pass "time" / "-time" to get ' + 'result sorted in ascending /descending order respectively.' 
+ ) + + # Add limit if limit: - flux_query += f' |> limit(n: {limit})' + flux_query += f'\n |> limit(n: {limit})' - return self.query(flux_query) + # Pivot the result to make it similar to InfluxDB 1.x output + flux_query += '\n |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' - def get_list_query(self, query, precision=None): - print(f"get_list_query called with query: {query}") - result = self.query(query) - result_points = [] - - if result is None: - print("Query returned None") - return result_points + # Execute the query + try: + result = self.query_api.query(flux_query) + return self._parse_read_result(result) + except Exception as e: + logger.error(f"Error executing read query: {e}") + return [] + def _parse_read_result(self, result): + parsed_result = [] for table in result: for record in table.records: - time = record.get_time() - if precision is not None: - # Truncate the time based on the specified precision - time = time.isoformat()[:precision] - else: - time = time.isoformat() - - values = {col: record.values.get(col) for col in record.values if col != '_time'} - values['time'] = time - values['_value'] = record.get_value() - values['_field'] = record.get_field() - result_points.append(values) - - print(f"get_list_query returned {len(result_points)} points") - print(f"Processed result points: {result_points}") - return result_points + parsed_record = { + 'time': record.get_time().isoformat(), + } + for key, value in record.values.items(): + if key not in ['_time', '_start', '_stop', '_measurement']: + parsed_record[key] = value + parsed_result.append(parsed_record) + return parsed_result + + def get_ping_data_query(self, bucket, start, stop, device_ids): + device_filter = ' or '.join([f'r["object_id"] == "{id}"' for id in device_ids]) + query = f''' + from(bucket: "{bucket}") + |> range(start: {start}, stop: {stop}) + |> filter(fn: (r) => r["_measurement"] == "ping") + |> filter(fn: (r) => r["_field"] == "loss" or r["_field"] == "reachable" or r["_field"] == "rtt_avg" or r["_field"] == "rtt_max" or r["_field"] == "rtt_min") + |> filter(fn: (r) => r["content_type"] == "config.device") + |> filter(fn: (r) => {device_filter}) + |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) + |> yield(name: "mean") + ''' + return query + + def execute_query(self, query): + try: + result = self.query_api.query(query) + return self._parse_result(result) + except Exception as e: + logger.error(f"Error executing query: {e}") + return [] + + def _parse_result(self, result): + parsed_result = [] + for table in result: + for record in table.records: + parsed_record = { + 'time': record.get_time().isoformat(), + 'device_id': record.values.get('object_id'), + 'field': record.values.get('_field'), + 'value': record.values.get('_value') + } + parsed_result.append(parsed_record) + return parsed_result def delete_metric_data(self, key=None, tags=None): start = "1970-01-01T00:00:00Z" @@ -274,7 +297,17 @@ def _get_filter_query(self, field, items): filters.append(f'r["{field}"] == "{item}"') return f'|> filter(fn: (r) => {" or ".join(filters)})' - # def get_query(self, chart_type, params, time, group_map, summary=False, fields=None, query=None, timezone=settings.TIME_ZONE): + def get_query( + self, + chart_type, + params, + time, + group_map, + summary=False, + fields=None, + query=None, + timezone=settings.TIME_ZONE + ): bucket = self.bucket measurement = params.get('measurement') if not measurement or measurement == 'None': @@ -283,117 +316,49 @@ def 
_get_filter_query(self, field, items): start_date = params.get('start_date') end_date = params.get('end_date') + + # Set default values for start and end dates if they're None + if start_date is None: + start_date = f'-{time}' + if end_date is None: + end_date = 'now()' + content_type = params.get('content_type') object_id = params.get('object_id') - print(f"get_query called with params: {params}") - import pdb; pdb.set_trace() - def format_time(time_str): - if time_str: - try: - if isinstance(time_str, str): - # Try parsing as ISO format first - try: - dt = datetime.fromisoformat(time_str.replace('Z', '+00:00')) - except ValueError: - # If that fails, try parsing as a different format - dt = datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S') - else: - dt = time_str - return dt.strftime('%Y-%m-%dT%H:%M:%SZ') - except Exception as e: - print(f"Error parsing time: {e}") - return None - - start_date = format_time(start_date) if start_date else f'-{time}' - end_date = format_time(end_date) if end_date else 'now()' + field_name = params.get('field_name') or fields + + object_id_filter = f' and r.object_id == "{object_id}"' if object_id else "" flux_query = f''' from(bucket: "{bucket}") |> range(start: {start_date}, stop: {end_date}) |> filter(fn: (r) => r._measurement == "{measurement}") - |> filter(fn: (r) => r.content_type == "{content_type}" and r.object_id == "{object_id}") + |> filter(fn: (r) => r.content_type == "{content_type}"{object_id_filter}) ''' - if not summary: - window = group_map.get(time, '1h') - flux_query += f'|> aggregateWindow(every: {window}, fn: mean, createEmpty: false)' + if field_name: + if isinstance(field_name, (list, tuple)): + field_filter = ' or '.join([f'r._field == "{field}"' for field in field_name]) + else: + field_filter = f'r._field == "{field_name}"' + flux_query += f' |> filter(fn: (r) => {field_filter})\n' - flux_query += ''' - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") - ''' + logger.debug(f"Time: {time}") + logger.debug(f"Group map: {group_map}") + window = group_map.get(time, '1h') + logger.debug(f"Window: {window}") - if summary: - flux_query += '|> last()' + if not summary: + flux_query += f' |> aggregateWindow(every: {window}, fn: mean, createEmpty: false)\n' + else: + flux_query += ' |> last()\n' - flux_query += '|> yield(name: "result")' + flux_query += ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")\n' + flux_query += ' |> yield(name: "result")' - print(f"Generated Flux query: {flux_query}") + logger.debug(f"Generated Flux query: {flux_query}") return flux_query - # def get_query( - # self, - # chart_type, - # params, - # time_range, - # group_map, - # summary=False, - # fields=None, - # query=None, - # timezone=settings.TIME_ZONE, - # ): - # flux_query = f'from(bucket: "{self.bucket}")' - - # def format_date(date): - # if date is None: - # return None - # if isinstance(date, str): - # try: - # dt = datetime.strptime(date, "%Y-%m-%d %H:%M:%S") - # return str(int(dt.timestamp())) - # except ValueError: - # return date - # if isinstance(date, datetime): - # return str(int(date.timestamp())) - # return str(date) - - # start_date = format_date(params.get('start_date')) - # end_date = format_date(params.get('end_date')) - - # if start_date: - # flux_query += f' |> range(start: {start_date}' - # else: - # flux_query += f' |> range(start: -{time_range}' - - # if end_date: - # flux_query += f', stop: {end_date})' - # else: - # flux_query += ')' - - # if 'key' in params: - # flux_query += f' |> 
filter(fn: (r) => r._measurement == "{params["key"]}")' - - # if fields and fields != '*': - # field_filters = ' or '.join([f'r._field == "{field.strip()}"' for field in fields.split(',')]) - # flux_query += f' |> filter(fn: (r) => {field_filters})' - - # if 'content_type' in params and 'object_id' in params: - # flux_query += f' |> filter(fn: (r) => r.content_type == "{params["content_type"]}" and r.object_id == "{params["object_id"]}")' - - # window_period = group_map.get(time_range, '1h') - # if chart_type in ['line', 'stackedbar']: - # flux_query += f' |> aggregateWindow(every: {window_period}, fn: mean, createEmpty: false)' - - # flux_query += ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' - - # if summary: - # flux_query += ' |> last()' - - # flux_query = f'import "timezone"\n\noption location = timezone.location(name: "{timezone}")\n\n{flux_query}' - - # flux_query += ' |> yield(name: "result")' - - # print(f"Generated Flux query: {flux_query}") # Debug print - # return flux_query - + def _fields(self, fields, query, field_name): matches = re.search(self._fields_regex, query) if not matches and not fields: @@ -436,6 +401,60 @@ def _get_top_fields(self, query, params, chart_type, group_map, number, time, ti top_fields = [record["_field"] for table in result for record in table.records] return top_fields + def default_chart_query(self, tags): + q = f''' + from(bucket: "{self.bucket}") + |> range(start: {{time}}) + |> filter(fn: (r) => r._measurement == "{{key}}") + |> filter(fn: (r) => r._field == "{{field_name}}") + ''' + if tags: + q += ''' + |> filter(fn: (r) => r.content_type == "{{content_type}}") + |> filter(fn: (r) => r.object_id == "{{object_id}}") + ''' + if '{{end_date}}' in tags: + q += ' |> range(stop: {{end_date}})' + return q + + def _device_data(self, key, tags, rp, **kwargs): + """ returns last snapshot of ``device_data`` """ + query = f''' + from(bucket: "{self.bucket}") + |> range(start: -30d) + |> filter(fn: (r) => r._measurement == "ping") + |> filter(fn: (r) => r.pk == "{tags['pk']}") + |> last() + |> yield(name: "last") + ''' + print(f"Modified _device_data query: {query}") + return self.get_list_query(query, precision=None) + + def get_list_query(self, query, precision='s', **kwargs): + print(f"get_list_query called with query: {query}") + result = self.query(query) + parsed_result = self._parse_query_result(result) if result else [] + print(f"get_list_query result: {parsed_result}") + return parsed_result + + def get_device_data_structure(self, device_pk): + query = f''' + from(bucket: "{self.bucket}") + |> range(start: -30d) + |> filter(fn: (r) => r._measurement == "ping") + |> filter(fn: (r) => r.pk == "{device_pk}") + |> limit(n: 1) + ''' + print(f"Checking device data structure: {query}") + result = self.query(query) + if result: + for table in result: + for record in table.records: + print(f"Sample record: {record}") + print(f"Available fields: {record.values.keys()}") + else: + print("No data found for this device") + def close(self): self.client.close() diff --git a/openwisp_monitoring/db/backends/influxdb2/queries.py b/openwisp_monitoring/db/backends/influxdb2/queries.py index 057ec6e34..2b62d03cf 100644 --- a/openwisp_monitoring/db/backends/influxdb2/queries.py +++ b/openwisp_monitoring/db/backends/influxdb2/queries.py @@ -1,285 +1,3 @@ -# chart_query = { -# 'uptime': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> 
filter(fn: (r) => r._field == "{field_name}")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> mean()' -# ' |> map(fn: (r) => ({ r with _value: r._value * 100.0 }))' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "uptime")' -# ) -# }, -# 'packet_loss': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "loss")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "packet_loss")' -# ) -# }, -# 'rtt': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "rtt_avg" or r._field == "rtt_max" or r._field == "rtt_min")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> yield(name: "rtt")' -# ) -# }, -# 'wifi_clients': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "{field_name}")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> filter(fn: (r) => r.ifname == "{ifname}")' -# ' |> group()' -# ' |> distinct()' -# ' |> count()' -# ' |> set(key: "_field", value: "wifi_clients")' -# ' |> aggregateWindow(every: 1d, fn: max)' -# ) -# }, -# 'general_wifi_clients': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "{field_name}")' -# ' |> filter(fn: (r) => r.organization_id == "{organization_id}")' -# ' |> filter(fn: (r) => r.location_id == "{location_id}")' -# ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' -# ' |> group()' -# ' |> distinct()' -# ' |> count()' -# ' |> set(key: "_field", value: "wifi_clients")' -# ' |> aggregateWindow(every: 1d, fn: max)' -# ) -# }, -# 'traffic': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> filter(fn: (r) => r.ifname == "{ifname}")' -# ' |> sum()' -# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' -# ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' -# ' |> yield(name: "traffic")' -# ) -# }, -# 'general_traffic': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "tx_bytes" or r._field == "rx_bytes")' -# ' |> filter(fn: (r) => r.organization_id == 
"{organization_id}")' -# ' |> filter(fn: (r) => r.location_id == "{location_id}")' -# ' |> filter(fn: (r) => r.floorplan_id == "{floorplan_id}")' -# ' |> filter(fn: (r) => r.ifname == "{ifname}")' -# ' |> sum()' -# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' -# ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' -# ' |> yield(name: "general_traffic")' -# ) -# }, -# 'memory': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "percent_used")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "memory_usage")' -# ) -# }, -# 'cpu': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "cpu_usage")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "CPU_load")' -# ) -# }, -# 'disk': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "used_disk")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "disk_usage")' -# ) -# }, -# 'signal_strength': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "signal_strength" or r._field == "signal_power")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> yield(name: "signal_strength")' -# ) -# }, -# 'signal_quality': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "signal_quality" or r._field == "snr")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> yield(name: "signal_quality")' -# ) -# }, -# 'access_tech': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "access_tech")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: (column) => 
mode(column: "_value"), createEmpty: false)' -# ' |> yield(name: "access_tech")' -# ) -# }, -# 'bandwidth': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "sent_bps_tcp" or r._field == "sent_bps_udp")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> rename(columns: {sent_bps_tcp: "TCP", sent_bps_udp: "UDP"})' -# ' |> yield(name: "bandwidth")' -# ) -# }, -# 'transfer': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "sent_bytes_tcp" or r._field == "sent_bytes_udp")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> sum()' -# ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> rename(columns: {sent_bytes_tcp: "TCP", sent_bytes_udp: "UDP"})' -# ' |> yield(name: "transfer")' -# ) -# }, -# 'retransmits': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "retransmits")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "retransmits")' -# ) -# }, -# 'jitter': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "jitter")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "jitter")' -# ) -# }, -# 'datagram': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "lost_packets" or r._field == "total_packets")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' -# ' |> rename(columns: {lost_packets: "lost_datagram", total_packets: "total_datagram"})' -# ' |> yield(name: "datagram")' -# ) -# }, -# 'datagram_loss': { -# 'flux': ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "lost_percent")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' -# ' |> yield(name: "datagram_loss")' -# ) -# } -# } - -# default_chart_query = ( -# 'from(bucket: "mybucket")' -# ' |> range(start: {time}, stop: {end_date})' -# ' |> 
filter(fn: (r) => r._measurement == "{key}")' -# ' |> filter(fn: (r) => r._field == "{field_name}")' -# ' |> filter(fn: (r) => r.content_type == "{content_type}")' -# ' |> filter(fn: (r) => r.object_id == "{object_id}")' -# ) - -# device_data_query = ( -# 'from(bucket: "mybucket")' -# ' |> range(start: -30d)' -# ' |> filter(fn: (r) => r._measurement == "{0}")' -# ' |> filter(fn: (r) => r.pk == "{1}")' -# ' |> last()' -# ) - chart_query = { 'uptime': { 'influxdb2': ( @@ -544,21 +262,3 @@ ) } } - -default_chart_query = ( - 'from(bucket: "mybucket")' - ' |> range(start: {time}, stop: {end_date})' - ' |> filter(fn: (r) => r._measurement == "{measurement}")' - ' |> filter(fn: (r) => r._field == "{field_name}")' - ' |> filter(fn: (r) => r.content_type == "{content_type}")' - ' |> filter(fn: (r) => r.object_id == "{object_id}")' -) - -device_data_query = ( - 'from(bucket: "{bucket}")' - ' |> range(start: -30d)' - ' |> filter(fn: (r) => r._measurement == "{measurement}")' - ' |> filter(fn: (r) => r.object_id == "{object_id}")' - ' |> last()' - ' |> yield(name: "last")' -) diff --git a/openwisp_monitoring/device/base/models.py b/openwisp_monitoring/device/base/models.py index cdb9ba8e8..52e45e744 100644 --- a/openwisp_monitoring/device/base/models.py +++ b/openwisp_monitoring/device/base/models.py @@ -384,7 +384,6 @@ def update_status(self, value): if self.status == '' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: self.device.management_ip = None self.device.save(update_fields=['management_ip']) - health_status_changed.send(sender=self.__class__, instance=self, status=value) @property diff --git a/openwisp_monitoring/monitoring/base/models.py b/openwisp_monitoring/monitoring/base/models.py index f3cd7141e..247815df0 100644 --- a/openwisp_monitoring/monitoring/base/models.py +++ b/openwisp_monitoring/monitoring/base/models.py @@ -4,6 +4,9 @@ from copy import deepcopy from datetime import date, datetime, timedelta +from dateutil.parser import parse +from django.utils.timezone import make_aware, is_aware + from cache_memoize import cache_memoize from dateutil.parser import parse as parse_date from django.conf import settings @@ -672,20 +675,25 @@ def get_query( additional_params = additional_params or {} params = self._get_query_params(time, start_date, end_date) params.update(additional_params) - params.update({'start_date': start_date, 'end_date': end_date}) + params.update({ + 'start_date': start_date, + 'end_date': end_date, + 'measurement': self.config_dict.get('measurement', self.metric.key), + 'field_name': fields or self.config_dict.get('field_name'), + }) if not params.get('organization_id') and self.config_dict.get('__all__', False): params['organization_id'] = ['__all__'] return timeseries_db.get_query( - self.type, - params, - time, - self._get_group_map(time), - summary, - fields, - query, - timezone, + chart_type=self.type, + params=params, + time=time, + group_map=self._get_group_map(time), + summary=summary, + fields=fields, + query=query, + timezone=timezone, ) - + def get_top_fields(self, number): """ Returns list of top ``number`` of fields (highest sum) of a @@ -752,8 +760,9 @@ def read( ): additional_query_kwargs = additional_query_kwargs or {} traces = {} - if x_axys: - x = [] + x = [] + result = {'traces': [], 'summary': {}} # Initialize result dictionary + try: query_kwargs = dict( time=time, timezone=timezone, start_date=start_date, end_date=end_date @@ -778,42 +787,81 @@ def read( summary_query, key=self.metric.key ) except timeseries_db.client_error as e: - logging.error(e, 
exc_info=True) - raise e + logger.error(f"Error fetching data: {e}", exc_info=True) + raise + for point in points: time_value = point.get('time') or point.get('_time') if not time_value: - logging.warning(f"Point missing time value: {point}") + logger.warning(f"Point missing time value: {point}") + continue + + try: + formatted_time = self._parse_and_format_time(time_value, timezone) + except ValueError as e: + logger.warning(f"Error parsing time value: {time_value}. Error: {e}") continue + for key, value in point.items(): - if key == 'time': + if key in ('time', '_time', 'result', 'table', 'content_type', 'object_id'): continue traces.setdefault(key, []) - if decimal_places and isinstance(value, (int, float)): + if decimal_places is not None and value is not None: value = self._round(value, decimal_places) traces[key].append(value) - time = datetime.fromtimestamp(point['time'], tz=tz(timezone)).strftime( - '%Y-%m-%d %H:%M' - ) + if x_axys: - x.append(time) - # prepare result to be returned - # (transform chart data so its order is not random) - result = {'traces': sorted(traces.items())} + x.append(formatted_time) + + # Prepare result + result['traces'] = sorted(traces.items()) if x_axys: result['x'] = x - # add summary - if len(summary) > 0: - result['summary'] = {} + + # Handle summary calculation + if summary: for key, value in summary[0].items(): - if key == 'time': + if key in ('time', '_time', 'result', 'table', 'content_type', 'object_id'): continue if not timeseries_db.validate_query(self.query): value = None - elif value: + elif value is not None: value = self._round(value, decimal_places) - result['summary'][key] = value + result['summary'][key] = 'N/A' if value is None else value + return result + + def _round(self, value, decimal_places): + logger.debug(f"Rounding value: {value}, type: {type(value)}") + if value is None: + logger.debug("Value is None, returning None") + return None + try: + float_value = float(value) + rounded = round(float_value, decimal_places) + logger.debug(f"Rounded value: {rounded}") + return rounded + except (ValueError, TypeError) as e: + logger.warning(f"Could not round value: {value}. 
Error: {e}") + return value + + def _parse_and_format_time(self, time_str, timezone): + time_obj = parse(time_str) + if not is_aware(time_obj): + time_obj = make_aware(time_obj, timezone=tz(timezone)) + return time_obj.strftime('%Y-%m-%d %H:%M') + + def _safe_round(self, value, decimal_places): + if isinstance(value, (int, float)): + return self._round(value, decimal_places) + return value + + def _round(self, value, decimal_places): + try: + control = 10 ** decimal_places + return round(float(value) * control) / control + except (ValueError, TypeError): + return value def json(self, time=DEFAULT_TIME, **kwargs): try: diff --git a/openwisp_monitoring/monitoring/migrations/__init__.py b/openwisp_monitoring/monitoring/migrations/__init__.py index d8198ede3..d7d3f6fc1 100644 --- a/openwisp_monitoring/monitoring/migrations/__init__.py +++ b/openwisp_monitoring/monitoring/migrations/__init__.py @@ -1,3 +1,4 @@ +from asyncio.log import logger import swapper from django.contrib.auth.models import Permission diff --git a/openwisp_monitoring/views.py b/openwisp_monitoring/views.py index 840042a5a..21ab811e7 100644 --- a/openwisp_monitoring/views.py +++ b/openwisp_monitoring/views.py @@ -135,6 +135,11 @@ def _get_charts_data(self, charts, time, timezone, start_date, end_date): chart_dict['connect_points'] = chart.connect_points if chart.trace_labels: chart_dict['trace_labels'] = chart.trace_labels + # Handle None values in summary + if 'summary' in chart_dict: + for key, value in chart_dict['summary'].items(): + if value is None: + chart_dict['summary'][key] = 'N/A' except InvalidChartConfigException: logger.exception(f'Skipped chart for metric {chart.metric}') continue diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py index bbe669167..bd27777df 100644 --- a/tests/openwisp2/settings.py +++ b/tests/openwisp2/settings.py @@ -7,8 +7,6 @@ TESTING = 'test' in sys.argv SHELL = 'shell' in sys.argv or 'shell_plus' in sys.argv BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -INFLUXDB_BACKEND = 'openwisp_monitoring.db.backends.influxdb2' -INFLUXDB_BACKEND = 'openwisp_monitoring.db.backends.influxdb' DEBUG = True ALLOWED_HOSTS = ['*'] From 3859434d5b9189222dc23e8f6862f50980a6d24c Mon Sep 17 00:00:00 2001 From: Prapti Sharma Date: Thu, 4 Jul 2024 17:33:18 +0530 Subject: [PATCH 7/7] [monitoring] Some changes updated #274 Fixes #274 --- Dockerfile | 1 - .../db/backends/influxdb2/client.py | 16 +--- .../db/backends/influxdb2/queries.py | 24 +++-- .../db/backends/influxdb2/tests.py | 6 +- openwisp_monitoring/device/base/models.py | 1 + openwisp_monitoring/monitoring/base/models.py | 37 +++----- .../monitoring/tests/__init__.py | 93 +++++++++++++------ .../monitoring/tests/test_configuration.py | 13 ++- openwisp_monitoring/views.py | 5 - 9 files changed, 106 insertions(+), 90 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8cf5a1a1e..12aafb97d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,4 +27,3 @@ ENV NAME=openwisp-monitoring \ REDIS_HOST=redis CMD ["sh", "docker-entrypoint.sh"] EXPOSE 8000 -# Command to run the application diff --git a/openwisp_monitoring/db/backends/influxdb2/client.py b/openwisp_monitoring/db/backends/influxdb2/client.py index e0327a825..b2401548f 100644 --- a/openwisp_monitoring/db/backends/influxdb2/client.py +++ b/openwisp_monitoring/db/backends/influxdb2/client.py @@ -214,20 +214,6 @@ def _parse_read_result(self, result): parsed_result.append(parsed_record) return parsed_result - def get_ping_data_query(self, bucket, start, stop, device_ids): - 
device_filter = ' or '.join([f'r["object_id"] == "{id}"' for id in device_ids]) - query = f''' - from(bucket: "{bucket}") - |> range(start: {start}, stop: {stop}) - |> filter(fn: (r) => r["_measurement"] == "ping") - |> filter(fn: (r) => r["_field"] == "loss" or r["_field"] == "reachable" or r["_field"] == "rtt_avg" or r["_field"] == "rtt_max" or r["_field"] == "rtt_min") - |> filter(fn: (r) => r["content_type"] == "config.device") - |> filter(fn: (r) => {device_filter}) - |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) - |> yield(name: "mean") - ''' - return query - def execute_query(self, query): try: result = self.query_api.query(query) @@ -309,7 +295,7 @@ def get_query( timezone=settings.TIME_ZONE ): bucket = self.bucket - measurement = params.get('measurement') + measurement = params.get('key') if not measurement or measurement == 'None': logger.error("Invalid or missing measurement in params") return None diff --git a/openwisp_monitoring/db/backends/influxdb2/queries.py b/openwisp_monitoring/db/backends/influxdb2/queries.py index 2b62d03cf..0af4588da 100644 --- a/openwisp_monitoring/db/backends/influxdb2/queries.py +++ b/openwisp_monitoring/db/backends/influxdb2/queries.py @@ -34,7 +34,8 @@ ' |> filter(fn: (r) => r.content_type == "{content_type}")' ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> yield(name: "rtt")' ) }, @@ -82,7 +83,8 @@ ' |> sum()' ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' ' |> yield(name: "traffic")' ) @@ -100,7 +102,8 @@ ' |> sum()' ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' ' |> aggregateWindow(every: 1d, fn: sum, createEmpty: false)' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> rename(columns: {tx_bytes: "upload", rx_bytes: "download"})' ' |> yield(name: "general_traffic")' ) @@ -151,7 +154,8 @@ ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> yield(name: "signal_strength")' ) }, @@ -165,7 +169,8 @@ ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' ' |> map(fn: (r) => ({ r with _value: float(v: int(v: r._value)) }))' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> yield(name: "signal_quality")' ) }, @@ -191,7 +196,8 @@ ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> aggregateWindow(every: 1d, 
fn: mean, createEmpty: false)' ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> rename(columns: {sent_bps_tcp: "TCP", sent_bps_udp: "UDP"})' ' |> yield(name: "bandwidth")' ) @@ -206,7 +212,8 @@ ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> sum()' ' |> map(fn: (r) => ({ r with _value: r._value / 1000000000.0 }))' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> rename(columns: {sent_bytes_tcp: "TCP", sent_bytes_udp: "UDP"})' ' |> yield(name: "transfer")' ) @@ -244,7 +251,8 @@ ' |> filter(fn: (r) => r.content_type == "{content_type}")' ' |> filter(fn: (r) => r.object_id == "{object_id}")' ' |> aggregateWindow(every: 1d, fn: mean, createEmpty: false)' - ' |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' + ' |> rename(columns: {_time: "time"})' + ' |> pivot(rowKey:["time"], columnKey: ["_field"], valueColumn: "_value")' ' |> rename(columns: {lost_packets: "lost_datagram", total_packets: "total_datagram"})' ' |> yield(name: "datagram")' ) diff --git a/openwisp_monitoring/db/backends/influxdb2/tests.py b/openwisp_monitoring/db/backends/influxdb2/tests.py index 5283bda83..77cd30010 100644 --- a/openwisp_monitoring/db/backends/influxdb2/tests.py +++ b/openwisp_monitoring/db/backends/influxdb2/tests.py @@ -231,12 +231,12 @@ def test_read_order(self, mock_influxdb_client): # Test ascending read order m.read(limit=2, order='time') query = mock_query_api.query.call_args[0][0] - self.assertIn('|> sort(columns: ["_time"], desc: false)', query) + self.assertIn('|> sort(columns: ["time"], desc: false)', query) # Test descending read order m.read(limit=2, order='-time') query = mock_query_api.query.call_args[0][0] - self.assertIn('|> sort(columns: ["_time"], desc: true)', query) + self.assertIn('|> sort(columns: ["time"], desc: true)', query) # Test invalid read order with self.assertRaises(ValueError): @@ -258,4 +258,4 @@ def ping_write_microseconds_precision(self, mock_influxdb_client): self.assertEqual(call_args_2['record']['time'], '2020-07-31T22:05:47.235152') if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/openwisp_monitoring/device/base/models.py b/openwisp_monitoring/device/base/models.py index 52e45e744..cdb9ba8e8 100644 --- a/openwisp_monitoring/device/base/models.py +++ b/openwisp_monitoring/device/base/models.py @@ -384,6 +384,7 @@ def update_status(self, value): if self.status == '' and app_settings.AUTO_CLEAR_MANAGEMENT_IP: self.device.management_ip = None self.device.save(update_fields=['management_ip']) + health_status_changed.send(sender=self.__class__, instance=self, status=value) @property diff --git a/openwisp_monitoring/monitoring/base/models.py b/openwisp_monitoring/monitoring/base/models.py index 247815df0..1cd15a71f 100644 --- a/openwisp_monitoring/monitoring/base/models.py +++ b/openwisp_monitoring/monitoring/base/models.py @@ -678,8 +678,8 @@ def get_query( params.update({ 'start_date': start_date, 'end_date': end_date, - 'measurement': self.config_dict.get('measurement', self.metric.key), - 'field_name': fields or self.config_dict.get('field_name'), + # 'measurement': self.config_dict.get('measurement', self.metric.key), + 
# 'field_name': fields or self.config_dict.get('field_name'), }) if not params.get('organization_id') and self.config_dict.get('__all__', False): params['organization_id'] = ['__all__'] @@ -769,33 +769,22 @@ def read( ) query_kwargs.update(additional_query_kwargs) if self.top_fields: - points = summary = timeseries_db._get_top_fields( - default_query=self._default_query, - chart_type=self.type, - group_map=self.GROUP_MAP, - number=self.top_fields, - params=self._get_query_params(self.DEFAULT_TIME), - time=time, - query=self.query, - get_fields=False, + fields = self.get_top_fields(self.top_fields) + data_query = self.get_query(fields=fields, **query_kwargs) + summary_query = self.get_query( + fields=fields, summary=True, **query_kwargs ) else: data_query = self.get_query(**query_kwargs) summary_query = self.get_query(summary=True, **query_kwargs) - points = timeseries_db.get_list_query(data_query, key=self.metric.key) - summary = timeseries_db.get_list_query( - summary_query, key=self.metric.key - ) + points = timeseries_db.get_list_query(data_query) + summary = timeseries_db.get_list_query(summary_query) except timeseries_db.client_error as e: logger.error(f"Error fetching data: {e}", exc_info=True) - raise + raise e for point in points: - time_value = point.get('time') or point.get('_time') - if not time_value: - logger.warning(f"Point missing time value: {point}") - continue - + time_value = point.get('time') try: formatted_time = self._parse_and_format_time(time_value, timezone) except ValueError as e: @@ -803,10 +792,10 @@ def read( continue for key, value in point.items(): - if key in ('time', '_time', 'result', 'table', 'content_type', 'object_id'): + if key in ('time', 'result', 'table', 'content_type', 'object_id'): continue traces.setdefault(key, []) - if decimal_places is not None and value is not None: + if decimal_places and isinstance(value, (int, float)): value = self._round(value, decimal_places) traces[key].append(value) @@ -821,7 +810,7 @@ def read( # Handle summary calculation if summary: for key, value in summary[0].items(): - if key in ('time', '_time', 'result', 'table', 'content_type', 'object_id'): + if key in ('time', 'result', 'table', 'content_type', 'object_id'): continue if not timeseries_db.validate_query(self.query): value = None diff --git a/openwisp_monitoring/monitoring/tests/__init__.py b/openwisp_monitoring/monitoring/tests/__init__.py index 2c155bce8..3018ddd2b 100644 --- a/openwisp_monitoring/monitoring/tests/__init__.py +++ b/openwisp_monitoring/monitoring/tests/__init__.py @@ -94,10 +94,14 @@ "'{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> sum()' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> sum()' + ' |> yield(name: "histogram")' ), }, }, @@ -129,9 +133,13 @@ "content_type = '{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + 'from(bucket: "mybucket")' + ' |> 
range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> yield(name: "default")' ), }, }, @@ -147,11 +155,13 @@ "content_type = '{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" or ' - 'r["_measurement"] == "value2" and ' - 'r["content_type"] == "{content_type}" and ' - 'r["object_id"] == "{object_id}")' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}" or r._field == "value2")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> yield(name: "multiple_test")' ), }, }, @@ -167,9 +177,15 @@ " GROUP BY time(1d), metric_num" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}") ' - '|> group(columns: ["metric_num"]) |> sum() |> cumulativeSum() |> window(every: 1d)' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> group(columns: ["metric_num"])' + ' |> sum()' + ' |> cumulativeSum()' + ' |> window(every: 1d)' + ' |> yield(name: "group_by_tag")' ), }, 'summary_query': { @@ -178,9 +194,14 @@ " GROUP BY time(30d), metric_num" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}") ' - '|> group(columns: ["metric_num"]) |> sum() |> window(every: 30d)' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "ping")' + ' |> filter(fn: (r) => r._field == "loss")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> mean()' + ' |> yield(name: "summary")' ), }, }, @@ -196,10 +217,14 @@ "content_type = '{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean()' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> mean()' + ' |> yield(name: "mean_test")' ), }, }, @@ -215,10 +240,14 @@ "content_type = '{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> sum()' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == 
"{object_id}")' + ' |> sum()' + ' |> yield(name: "sum_test")' ), }, }, @@ -235,10 +264,14 @@ "'{content_type}' AND object_id = '{object_id}'" ), 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}") ' - '|> mean()' + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> mean()' + ' |> yield(name: "top_fields_mean")' ), }, }, diff --git a/openwisp_monitoring/monitoring/tests/test_configuration.py b/openwisp_monitoring/monitoring/tests/test_configuration.py index bd276870c..202c8d2e6 100644 --- a/openwisp_monitoring/monitoring/tests/test_configuration.py +++ b/openwisp_monitoring/monitoring/tests/test_configuration.py @@ -35,10 +35,15 @@ def _get_new_metric(self): "WHERE time >= '{time}' AND content_type = " "'{content_type}' AND object_id = '{object_id}'" ), - 'influxdb2': ( - 'from(bucket: "{key}") |> range(start: {time}) ' - '|> filter(fn: (r) => r["_measurement"] == "{field_name}" and ' - 'r["content_type"] == "{content_type}" and r["object_id"] == "{object_id}")' + 'influxdb2': ( + 'from(bucket: "mybucket")' + ' |> range(start: {time}, stop: {end_date})' + ' |> filter(fn: (r) => r._measurement == "{measurement}")' + ' |> filter(fn: (r) => r._field == "{field_name}")' + ' |> filter(fn: (r) => r.content_type == "{content_type}")' + ' |> filter(fn: (r) => r.object_id == "{object_id}")' + ' |> sum()' + ' |> yield(name: "histogram")' ), }, } diff --git a/openwisp_monitoring/views.py b/openwisp_monitoring/views.py index 21ab811e7..840042a5a 100644 --- a/openwisp_monitoring/views.py +++ b/openwisp_monitoring/views.py @@ -135,11 +135,6 @@ def _get_charts_data(self, charts, time, timezone, start_date, end_date): chart_dict['connect_points'] = chart.connect_points if chart.trace_labels: chart_dict['trace_labels'] = chart.trace_labels - # Handle None values in summary - if 'summary' in chart_dict: - for key, value in chart_dict['summary'].items(): - if value is None: - chart_dict['summary'][key] = 'N/A' except InvalidChartConfigException: logger.exception(f'Skipped chart for metric {chart.metric}') continue