Implement running scheduled rules and check aggregations

2023-01-15 17:59:12 +00:00
parent 435d9b5571
commit 6bfa0aa73b
15 changed files with 600 additions and 258 deletions

View File

@@ -2,12 +2,75 @@ import msgpack
from django.core.management.base import BaseCommand
from redis import StrictRedis
from core.lib.rules import process_rules
from core.lib.rules import rule_matched
from core.models import NotificationRule
from core.util import logs
log = logs.get_logger("processing")
def process_rules(data):
    all_rules = NotificationRule.objects.filter(enabled=True)

    for index, index_messages in data.items():
        for message in index_messages:
            for rule in all_rules:
                parsed_rule = rule.parse()
                matched = {}
                if "index" not in parsed_rule:
                    continue
                if "source" not in parsed_rule:
                    continue
                rule_index = parsed_rule["index"]
                rule_source = parsed_rule["source"]
                # if not type(rule_index) == list:
                #     rule_index = [rule_index]
                # if not type(rule_source) == list:
                #     rule_source = [rule_source]
                if index not in rule_index:
                    continue
                if message["src"] not in rule_source:
                    continue
                matched["index"] = index
                matched["source"] = message["src"]

                rule_field_length = len(parsed_rule.keys())
                matched_field_number = 0
                for field, value in parsed_rule.items():
                    # if not type(value) == list:
                    #     value = [value]
                    if field == "src":
                        continue
                    if field == "tokens":
                        for token in value:
                            if "tokens" in message:
                                if token in message["tokens"]:
                                    matched_field_number += 1
                                    matched[field] = token
                                    # Break out of the token matching loop
                                    break
                        # Continue to next field
                        continue

                    # Allow partial matches for msg
                    if field == "msg":
                        for msg in value:
                            if "msg" in message:
                                if msg.lower() in message["msg"].lower():
                                    matched_field_number += 1
                                    matched[field] = msg
                                    # Break out of the msg matching loop
                                    break
                        # Continue to next field
                        continue

                    if field in message and message[field] in value:
                        matched_field_number += 1
                        matched[field] = message[field]

                # Every rule field apart from index and source must have
                # matched, hence the offset of two.
                if matched_field_number == rule_field_length - 2:
                    rule_matched(rule, message, matched)


class Command(BaseCommand):
    def handle(self, *args, **options):
        r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
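For reference, here is a minimal standalone sketch that approximately mirrors the per-message matching in process_rules(), with the Django model lookup and the rule_matched() callback stripped out so it can be run in isolation. The example rule and message values are illustrative assumptions, not taken from this repository.

# Standalone sketch (assumption: not part of this commit) of the matching logic.
def matches(parsed_rule, index, message):
    """Return the matched-fields dict if the message satisfies the rule, else None."""
    if index not in parsed_rule.get("index", []):
        return None
    if message.get("src") not in parsed_rule.get("source", []):
        return None
    matched = {"index": index, "source": message["src"]}
    hits = 0
    for field, value in parsed_rule.items():
        if field in ("index", "source"):
            continue
        if field == "tokens":
            # Any one token must appear in the message's token list
            hit = next((t for t in value if t in message.get("tokens", [])), None)
        elif field == "msg":
            # Case-insensitive substring match against the message text
            hit = next((m for m in value if m.lower() in message.get("msg", "").lower()), None)
        else:
            # All other fields require an exact value match
            hit = message[field] if field in message and message[field] in value else None
        if hit is not None:
            hits += 1
            matched[field] = hit
    # Every rule field apart from index and source must have matched.
    return matched if hits == len(parsed_rule) - 2 else None

# Example with hypothetical field values:
rule = {"index": ["main"], "source": ["irc"], "tokens": ["error"]}
msg = {"src": "irc", "tokens": ["error", "timeout"], "msg": "disk error"}
print(matches(rule, "main", msg))  # {'index': 'main', 'source': 'irc', 'tokens': 'error'}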

View File

@@ -1,25 +1,18 @@
import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from asgiref.sync import sync_to_async
from django.core.management.base import BaseCommand
# from core.db.storage import db
# from core.models import NotificationRule
from core.db.storage import db
from core.lib.parsing import QueryError
from core.lib.rules import NotificationRuleData
from core.models import NotificationRule
from core.util import logs
log = logs.get_logger("scheduling")
# INTERVAL_CHOICES = (
# (0, "On demand"),
# (60, "Every minute"),
# (900, "Every 15 minutes"),
# (1800, "Every 30 minutes"),
# (3600, "Every hour"),
# (14400, "Every 4 hours"),
# (86400, "Every day"),
# )
INTERVALS = [60, 900, 1800, 3600, 14400, 86400]
INTERVALS = [5, 60, 900, 1800, 3600, 14400, 86400]
async def job(interval_seconds):
@@ -27,10 +20,17 @@ async def job(interval_seconds):
    Run all schedules matching the given interval.
    :param interval_seconds: The interval to run.
    """
    print("Running schedule", interval_seconds)
    # matching_rules = NotificationRule.objects.filter(
    #     enabled=True, interval=interval_seconds
    # )
    # The Django ORM is synchronous, so resolve the queryset in a worker
    # thread via sync_to_async before iterating in async code.
    matching_rules = await sync_to_async(list)(
        NotificationRule.objects.filter(enabled=True, interval=interval_seconds)
    )
    for rule in matching_rules:
        log.debug(f"Running rule {rule}")
        try:
            rule = NotificationRuleData(rule.user, rule, db=db)
            await rule.run_schedule()
            # results = await db.schedule_query_results(rule.user, rule)
        except QueryError as e:
            log.error(f"Error running rule {rule}: {e}")

class Command(BaseCommand):
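The handle() body of the scheduler command is cut off in this excerpt. As a rough sketch only, and assuming the command simply registers one recurring job per interval with APScheduler, it could look something like the following; this is not taken from the commit.

# Hypothetical wiring only -- the real handle() body is not shown in this diff.
class Command(BaseCommand):
    def handle(self, *args, **options):
        scheduler = AsyncIOScheduler()
        for interval in INTERVALS:
            # One recurring job per supported interval; the interval is passed
            # through so job() can select the rules scheduled at that cadence.
            scheduler.add_job(job, "interval", seconds=interval, args=(interval,))
        scheduler.start()
        try:
            asyncio.get_event_loop().run_forever()
        except (KeyboardInterrupt, SystemExit):
            pass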