neptune/core/management/commands/processing.py
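"""
Django management command that listens for new message batches published on
Redis and immediately runs all enabled on-demand (interval=0) notification
rules against them.
"""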

import msgpack
from asgiref.sync import async_to_sync
from django.core.management.base import BaseCommand
from redis import StrictRedis

from core.db.storage import db
from core.lib.rules import NotificationRuleData
from core.models import NotificationRule
from core.util import logs

log = logs.get_logger("processing")


def process_rules(data):
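    """
    Check a batch of new messages against all enabled on-demand (interval=0)
    notification rules, dispatching a notification for every rule whose
    fields all match a message.

    `data` is expected to map index names to lists of message dicts, e.g.
    {"<index>": [{"src": "<source>", "msg": "...", "tokens": [...]}, ...]}.
    """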
    all_rules = NotificationRule.objects.filter(enabled=True, interval=0)
    for index, index_messages in data.items():
        for message in index_messages:
            for rule in all_rules:
                # Quicker helper to get the data without spinning up a full
                # NotificationRuleData object
                parsed_rule = rule.parse()
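                # Assumed shape (inferred from the matching below): a parsed
                # rule maps field names to lists of accepted values, e.g.
                # {"index": [...], "source": [...], "msg": [...], "tokens": [...]}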
                matched = {}
                # Rule is invalid; this shouldn't happen
                if "index" not in parsed_rule:
                    continue
                if "source" not in parsed_rule:
                    continue
                rule_index = parsed_rule["index"]
                rule_source = parsed_rule["source"]
                # NOTE: both are assumed to already be lists; scalar values
                # would need wrapping in a list first
                if index not in rule_index:
                    # This rule doesn't watch this index; try the next rule
                    continue
                if message["src"] not in rule_source:
                    # This rule doesn't watch this source; try the next rule
                    continue
                matched["index"] = index
                matched["source"] = message["src"]
                rule_field_length = len(parsed_rule)
                matched_field_number = 0
                for field, value in parsed_rule.items():
                    # NOTE: each value is assumed to be a list of accepted
                    # values for the field
                    if field in ("index", "source", "src"):
                        # Already matched above; these must not count towards
                        # matched_field_number
                        continue
                    if field == "tokens":
                        # Check whether any of the rule's tokens appear in the
                        # message; *at least one* match is enough
                        for token in value:
                            if "tokens" in message and token in message["tokens"]:
                                matched_field_number += 1
                                matched[field] = token
                                # Break out of the token matching loop
                                break
                        # Continue to the next field
                        continue
                    if field == "msg":
                        # Allow partial, case-insensitive matches for msg
                        for msg in value:
                            if "msg" in message and msg.lower() in message["msg"].lower():
                                matched_field_number += 1
                                matched[field] = msg
                                # Break out of the msg matching loop
                                break
                        # Continue to the next field
                        continue
                    if field in message and message[field] in value:
                        # Do exact matches for all other fields
                        matched_field_number += 1
                        matched[field] = message[field]
                # Require every field to have matched. Subtract 2 from the
                # rule's field count: 1 for "source" and 1 for "index", which
                # were checked before the field loop
                if matched_field_number == rule_field_length - 2:
                    meta = {"matched": matched, "total_hits": 1}
                    # Parse the rule properly. We saved some work above by
                    # avoiding this, but it makes delivering messages
                    # significantly easier, as we can use the same code as
                    # for scheduling
                    rule_data_object = NotificationRuleData(rule.user, rule, db=db)
                    log.debug("On-demand rule matched: %s", rule)
                    rule_matched = async_to_sync(rule_data_object.rule_matched)
                    rule_matched(index, message, meta=meta, mode="ondemand")


class Command(BaseCommand):
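    """
    Listen for message batches on the Redis "messages" pub/sub channel and
    feed each msgpack-decoded batch to process_rules().

    Run like any other management command: python manage.py processing
    """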
    def handle(self, *args, **options):
        r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
        p = r.pubsub()
        p.psubscribe("messages")
        for message in p.listen():
            if message and message["channel"] == b"messages":
                data = message["data"]
                try:
                    unpacked = msgpack.unpackb(data, raw=False)
                except TypeError:
                    # Subscription confirmations carry an integer payload
                    # that msgpack cannot unpack; skip them
                    continue
                process_rules(unpacked)
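
# A minimal sketch of the producer side this command expects (assumptions:
# the same Redis socket path and msgpack framing as above; the index, source
# and message values are purely illustrative):
#
#     import msgpack
#     from redis import StrictRedis
#
#     r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
#     batch = {"example_index": [{"src": "example_source", "msg": "hello"}]}
#     r.publish("messages", msgpack.packb(batch, use_bin_type=True))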