Compare commits

...

14 Commits

38 changed files with 1740 additions and 505 deletions

.gitignore vendored
View File

@@ -58,7 +58,6 @@ cover/
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
@@ -156,3 +155,6 @@ cython_debug/
.bash_history
.python_history
.vscode/
stack.env
static/

View File

@@ -2,17 +2,17 @@
FROM python:3
ARG OPERATION
RUN useradd -d /code pathogen
RUN useradd -d /code xf
RUN mkdir -p /code
RUN chown -R pathogen:pathogen /code
RUN chown -R xf:xf /code
RUN mkdir -p /conf/static
RUN chown -R pathogen:pathogen /conf
RUN chown -R xf:xf /conf
RUN mkdir /venv
RUN chown pathogen:pathogen /venv
RUN chown xf:xf /venv
USER pathogen
USER xf
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
WORKDIR /code

View File

@@ -1,20 +1,20 @@
run:
docker-compose --env-file=stack.env up -d
docker-compose -f docker-compose.prod.yml --env-file=stack.env up -d
build:
docker-compose --env-file=stack.env build
docker-compose -f docker-compose.prod.yml --env-file=stack.env build
stop:
docker-compose --env-file=stack.env down
docker-compose -f docker-compose.prod.yml --env-file=stack.env down
log:
docker-compose --env-file=stack.env logs -f
docker-compose -f docker-compose.prod.yml --env-file=stack.env logs -f --names
migrate:
docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py migrate"
docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py migrate"
makemigrations:
docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py makemigrations"
docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py makemigrations"
auth:
docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py createsuperuser"
docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py createsuperuser"

View File

@@ -1,3 +1,5 @@
from os import getenv
# Elasticsearch settings
ELASTICSEARCH_URL = "10.1.0.1"
ELASTICSEARCH_PORT = 9200
@@ -104,3 +106,8 @@ META_QUERY_SIZE = 10000
DEBUG = True
PROFILER = False
REDIS_HOST = getenv("REDIS_HOST", "redis_fisk_dev")
REDIS_PASSWORD = getenv("REDIS_PASSWORD", "changeme")
REDIS_DB = int(getenv("REDIS_DB", "10"))
REDIS_PORT = int(getenv("REDIS_PORT", "6379"))

app/local_settings.py Normal file
View File

@@ -0,0 +1,87 @@
from os import getenv
trues = ("t", "true", "yes", "y", "1")
# Elasticsearch settings
ELASTICSEARCH_URL = getenv("ELASTICSEARCH_URL", "10.1.0.1")
ELASTICSEARCH_PORT = int(getenv("ELASTICSEARCH_PORT", "9200"))
ELASTICSEARCH_TLS = getenv("ELASTICSEARCH_TLS", "True").lower() in trues
ELASTICSEARCH_USERNAME = getenv("ELASTICSEARCH_USERNAME", "admin")
ELASTICSEARCH_PASSWORD = getenv("ELASTICSEARCH_PASSWORD", "secret")
# Manticore settings
MANTICORE_URL = getenv("MANTICORE_URL", "http://example-db-1:9308")
DB_BACKEND = getenv("DB_BACKEND", "MANTICORE")
# Common DB settings
INDEX_MAIN = getenv("INDEX_MAIN", "main")
INDEX_RESTRICTED = getenv("INDEX_RESTRICTED", "restricted")
INDEX_META = getenv("INDEX_META", "meta")
INDEX_INT = getenv("INDEX_INT", "internal")
INDEX_RULE_STORAGE = getenv("INDEX_RULE_STORAGE", "rule_storage")
MAIN_SIZES = getenv("MAIN_SIZES", "1,5,15,30,50,100,250,500,1000").split(",")
MAIN_SIZES_ANON = getenv("MAIN_SIZES_ANON", "1,5,15,30,50,100").split(",")
MAIN_SOURCES = getenv("MAIN_SOURCES", "dis,4ch,all").split(",")
SOURCES_RESTRICTED = getenv("SOURCES_RESTRICTED", "irc").split(",")
CACHE = getenv("CACHE", "False").lower() in trues
CACHE_TIMEOUT = int(getenv("CACHE_TIMEOUT", "2"))
DRILLDOWN_RESULTS_PER_PAGE = int(getenv("DRILLDOWN_RESULTS_PER_PAGE", "15"))
DRILLDOWN_DEFAULT_PARAMS = {
"size": getenv("DRILLDOWN_DEFAULT_SIZE", "15"),
"index": getenv("DRILLDOWN_DEFAULT_INDEX", "main"),
"sorting": getenv("DRILLDOWN_DEFAULT_SORTING", "desc"),
"source": getenv("DRILLDOWN_DEFAULT_SOURCE", "all"),
}
# URLs
DOMAIN = getenv("DOMAIN", "example.com")
URL = getenv("URL", f"https://{DOMAIN}")
# Access control
ALLOWED_HOSTS = getenv("ALLOWED_HOSTS", f"127.0.0.1,{DOMAIN}").split(",")
# CSRF
CSRF_TRUSTED_ORIGINS = getenv("CSRF_TRUSTED_ORIGINS", URL).split(",")
# Stripe
BILLING_ENABLED = getenv("BILLING_ENABLED", "false").lower() in trues
STRIPE_TEST = getenv("STRIPE_TEST", "True").lower() in trues
STRIPE_API_KEY_TEST = getenv("STRIPE_API_KEY_TEST", "")
STRIPE_PUBLIC_API_KEY_TEST = getenv("STRIPE_PUBLIC_API_KEY_TEST", "")
STRIPE_API_KEY_PROD = getenv("STRIPE_API_KEY_PROD", "")
STRIPE_PUBLIC_API_KEY_PROD = getenv("STRIPE_PUBLIC_API_KEY_PROD", "")
STRIPE_ENDPOINT_SECRET = getenv("STRIPE_ENDPOINT_SECRET", "")
STATIC_ROOT = getenv("STATIC_ROOT", "")
SECRET_KEY = getenv("SECRET_KEY", "a")
STRIPE_ADMIN_COUPON = getenv("STRIPE_ADMIN_COUPON", "")
# Threshold
THRESHOLD_ENDPOINT = getenv("THRESHOLD_ENDPOINT", "http://threshold:13869")
THRESHOLD_API_KEY = getenv("THRESHOLD_API_KEY", "api_1")
THRESHOLD_API_TOKEN = getenv("THRESHOLD_API_TOKEN", "")
THRESHOLD_API_COUNTER = getenv("THRESHOLD_API_COUNTER", "")
# NickTrace
NICKTRACE_MAX_ITERATIONS = int(getenv("NICKTRACE_MAX_ITERATIONS", "4"))
NICKTRACE_MAX_CHUNK_SIZE = int(getenv("NICKTRACE_MAX_CHUNK_SIZE", "500"))
NICKTRACE_QUERY_SIZE = int(getenv("NICKTRACE_QUERY_SIZE", "10000"))
# Meta
META_MAX_ITERATIONS = int(getenv("META_MAX_ITERATIONS", "4"))
META_MAX_CHUNK_SIZE = int(getenv("META_MAX_CHUNK_SIZE", "500"))
META_QUERY_SIZE = int(getenv("META_QUERY_SIZE", "10000"))
DEBUG = getenv("DEBUG", "True").lower() in trues
PROFILER = getenv("PROFILER", "False").lower() in trues
REDIS_HOST = getenv("REDIS_HOST", "redis_neptune_dev")
REDIS_PASSWORD = getenv("REDIS_PASSWORD", "changeme")
REDIS_DB = int(getenv("REDIS_DB", "1"))
REDIS_DB_CACHE = int(getenv("REDIS_DB_CACHE", "10"))
REDIS_PORT = int(getenv("REDIS_PORT", "6379"))
# Elasticsearch blacklist
ELASTICSEARCH_BLACKLISTED = {}
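
For reference, a minimal sketch of how the trues tuple above casts environment strings to booleans (variable names hypothetical):

from os import getenv

trues = ("t", "true", "yes", "y", "1")

# Environment variables arrive as strings, so membership in `trues`
# does the boolean cast after lowercasing:
assert "True".lower() in trues        # e.g. ELASTICSEARCH_TLS=True -> True
assert "0".lower() not in trues       # e.g. CACHE=0 -> False
# Unset variables fall back to the default string passed to getenv:
assert getenv("SOME_UNSET_VAR", "false").lower() not in trues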

View File

@@ -47,19 +47,6 @@ INSTALLED_APPS = [
"cachalot",
]
# Performance optimisations
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.redis.RedisCache",
"LOCATION": "unix:///var/run/socks/redis.sock",
"OPTIONS": {
"db": "10",
"parser_class": "redis.connection.HiredisParser",
"pool_class": "redis.BlockingConnectionPool",
},
}
}
CRISPY_TEMPLATE_PACK = "bulma"
CRISPY_ALLOWED_TEMPLATE_PACKS = ("bulma",)
DJANGO_TABLES2_TEMPLATE = "django-tables2/bulma.html"
@@ -163,7 +150,7 @@ REST_FRAMEWORK = {
INTERNAL_IPS = [
"127.0.0.1",
"10.1.10.11",
# "10.1.10.11",
]
DEBUG_TOOLBAR_PANELS = [
@@ -187,6 +174,22 @@ DEBUG_TOOLBAR_PANELS = [
from app.local_settings import * # noqa
# Performance optimisations
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": "unix:///var/run/socks/redis.sock",
# "LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}",
"LOCATION": "unix:///var/run/neptune-redis.sock",
"OPTIONS": {
"db": REDIS_DB,
# "parser_class": "django_redis.cache.RedisCache",
# "PASSWORD": REDIS_PASSWORD,
"pool_class": "redis.BlockingConnectionPool",
},
}
}
if PROFILER: # noqa - trust me, it's there
import pyroscope
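
A quick way to sanity-check the Unix socket and database that the cache LOCATION above points at, sketched with redis-py (already a project dependency); the db value assumes the REDIS_DB default of 1 from local_settings.py:

from redis import StrictRedis

# Connect over the same socket the django_redis backend uses
r = StrictRedis(unix_socket_path="/var/run/neptune-redis.sock", db=1)
r.ping()  # raises redis.exceptions.ConnectionError if unreachable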

View File

@@ -58,6 +58,9 @@ from core.views.manage.threshold.threshold import (
ThresholdIRCOverview,
)
# Stats
from core.views.manage.monolith import stats
# Main tool pages
from core.views.ui.drilldown import ( # DrilldownTableView,; Drilldown,
DrilldownContextModal,
@@ -92,7 +95,7 @@ urlpatterns = [
),
path("cancel/", TemplateView.as_view(template_name="cancel.html"), name="cancel"),
path("portal", Portal.as_view(), name="portal"),
path("admin/", admin.site.urls),
path("sapp/", admin.site.urls),
path("accounts/", include("django.contrib.auth.urls")),
path("accounts/signup/", Signup.as_view(), name="signup"),
##
@@ -311,4 +314,14 @@ urlpatterns = [
notifications.RuleClear.as_view(),
name="rule_clear",
),
path(
"manage/monolith/stats/",
stats.MonolithStats.as_view(),
name="monolith_stats",
),
path(
"manage/monolith/stats_db/<str:type>/",
stats.MonolithDBStats.as_view(),
name="monolith_stats_db",
)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
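
The new routes resolve as follows (a sketch using Django's reverse):

from django.urls import reverse

reverse("monolith_stats")
# -> "/manage/monolith/stats/"
reverse("monolith_stats_db", kwargs={"type": "page"})
# -> "/manage/monolith/stats_db/page/"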

View File

@@ -6,8 +6,15 @@ from redis import StrictRedis
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
# /var/run/neptune-redis.sock
# use the socket
r = StrictRedis(unix_socket_path="/var/run/neptune-redis.sock", db=settings.REDIS_DB)
# r = StrictRedis(
# host=settings.REDIS_HOST,
# port=settings.REDIS_PORT,
# password=settings.REDIS_PASSWORD,
# db=settings.REDIS_DB
# )
if settings.STRIPE_TEST:
stripe.api_key = settings.STRIPE_API_KEY_TEST

View File

@@ -168,6 +168,71 @@ class StorageBackend(ABC):
# Actually get rid of all the things we set to None
response["hits"]["hits"] = [hit for hit in response["hits"]["hits"] if hit]
def add_bool(self, search_query, add_bool):
"""
Add the specified boolean matches to search query.
"""
if not add_bool:
return
for item in add_bool:
search_query["query"]["bool"]["must"].append({"match_phrase": item})
def add_top(self, search_query, add_top, negative=False):
"""
Merge add_top with the base of the search_query.
"""
if not add_top:
return
if negative:
for item in add_top:
if "must_not" in search_query["query"]["bool"]:
search_query["query"]["bool"]["must_not"].append(item)
else:
search_query["query"]["bool"]["must_not"] = [item]
else:
for item in add_top:
if "query" not in search_query:
search_query["query"] = {"bool": {"must": []}}
search_query["query"]["bool"]["must"].append(item)
def schedule_check_aggregations(self, rule_object, result_map):
"""
Check the results of a scheduled query for aggregations.
"""
if rule_object.aggs is None:
return result_map
for index, (meta, result) in result_map.items():
# Default to true, if no aggs are found, we still want to match
match = True
for agg_name, (operator, number) in rule_object.aggs.items():
if agg_name in meta["aggs"]:
agg_value = meta["aggs"][agg_name]["value"]
# TODO: simplify this, match defaults to True
if operator == ">":
if agg_value > number:
match = True
else:
match = False
elif operator == "<":
if agg_value < number:
match = True
else:
match = False
elif operator == "=":
if agg_value == number:
match = True
else:
match = False
else:
match = False
else:
# No aggregation found, but it is required
match = False
result_map[index][0]["aggs"][agg_name]["match"] = match
return result_map
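
A sketch of the inputs this expects: rule_object.aggs maps an aggregation name to an (operator, number) pair, and result_map maps an index to a (meta, results) pair (values hypothetical):

rule_aggs = {"avg_sentiment": (">", 0.5)}
result_map = {
    "main": (
        {"aggs": {"avg_sentiment": {"value": 0.72}}},  # meta
        [],                                            # results
    ),
}
# After schedule_check_aggregations, meta["aggs"]["avg_sentiment"]["match"]
# is True, since 0.72 > 0.5; a missing aggregation would force False.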
def query(self, user, search_query, **kwargs):
# For time tracking
start = time.process_time()
@@ -198,7 +263,20 @@ class StorageBackend(ABC):
if "took" in response:
if response["took"] is None:
return None
if len(response["hits"]["hits"]) == 0:
if "error" in response:
message = f"Error: {response['error']}"
message_class = "danger"
time_took = (time.process_time() - start) * 1000
# Round to 3 significant figures
time_took_rounded = round(
time_took, 3 - int(floor(log10(abs(time_took)))) - 1
)
return {
"message": message,
"class": message_class,
"took": time_took_rounded,
}
elif len(response["hits"]["hits"]) == 0:
message = "No results."
message_class = "danger"
time_took = (time.process_time() - start) * 1000
@@ -213,7 +291,7 @@ class StorageBackend(ABC):
}
# For Druid
if "error" in response:
elif "error" in response:
if "errorMessage" in response:
context = {
"message": response["errorMessage"],
@@ -240,6 +318,106 @@ class StorageBackend(ABC):
time_took_rounded = round(time_took, 3 - int(floor(log10(abs(time_took)))) - 1)
return {"object_list": response_parsed, "took": time_took_rounded}
def construct_context_query(
self, index, net, channel, src, num, size, type=None, nicks=None
):
# Get the initial query
query = self.construct_query(None, size, blank=True)
extra_must = []
extra_should = []
extra_should2 = []
if num:
extra_must.append({"match_phrase": {"num": num}})
if net:
extra_must.append({"match_phrase": {"net": net}})
if channel:
extra_must.append({"match": {"channel": channel}})
if nicks:
for nick in nicks:
extra_should2.append({"match": {"nick": nick}})
types = ["msg", "notice", "action", "kick", "topic", "mode"]
fields = [
"nick",
"ident",
"host",
"channel",
"ts",
"msg",
"type",
"net",
"src",
"tokens",
]
query["fields"] = fields
if index == "internal":
fields.append("mtype")
if channel == "*status" or type == "znc":
if {"match": {"channel": channel}} in extra_must:
extra_must.remove({"match": {"channel": channel}})
extra_should2 = []
# Type is one of msg or notice
# extra_should.append({"match": {"mtype": "msg"}})
# extra_should.append({"match": {"mtype": "notice"}})
extra_should.append({"match": {"type": "znc"}})
extra_should.append({"match": {"type": "self"}})
extra_should2.append({"match": {"type": "znc"}})
extra_should2.append({"match": {"nick": channel}})
elif type == "auth":
if {"match": {"channel": channel}} in extra_must:
extra_must.remove({"match": {"channel": channel}})
extra_should2 = []
extra_should2.append({"match": {"nick": channel}})
# extra_should2.append({"match": {"mtype": "msg"}})
# extra_should2.append({"match": {"mtype": "notice"}})
extra_should.append({"match": {"type": "query"}})
extra_should2.append({"match": {"type": "self"}})
extra_should.append({"match": {"nick": channel}})
else:
for ctype in types:
extra_should.append({"match": {"mtype": ctype}})
else:
for ctype in types:
extra_should.append({"match": {"type": ctype}})
# query = {
# "index": index,
# "limit": size,
# "query": {
# "bool": {
# "must": [
# # {"equals": {"src": src}},
# # {
# # "bool": {
# # "should": [*extra_should],
# # }
# # },
# # {
# # "bool": {
# # "should": [*extra_should2],
# # }
# # },
# *extra_must,
# ]
# }
# },
# "fields": fields,
# # "_source": False,
# }
if extra_must:
for x in extra_must:
query["query"]["bool"]["must"].append(x)
if extra_should:
query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should]}})
if extra_should2:
query["query"]["bool"]["must"].append(
{"bool": {"should": [*extra_should2]}}
)
return query
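
A sketch of calling the context-query builder above (argument values hypothetical):

query = backend.construct_context_query(
    index="main",
    net="libera",
    channel="#example",
    src="irc",
    num=42,
    size=15,
)
# -> a bool query whose "must" clauses pin num/net/channel, with "should"
#    clauses over the message types, and query["fields"] set to the field list.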
@abstractmethod
def query_results(self, **kwargs):
pass

View File

@@ -338,8 +338,15 @@ class ElasticsearchBackend(StorageBackend):
{"match_phrase": {"src": source_iter}}
)
add_top.append(add_top_tmp)
if "tokens" in data:
add_top_tmp = {"bool": {"should": []}}
for token in data["tokens"]:
add_top_tmp["bool"]["should"].append(
{"match_phrase": {"tokens": token}}
)
add_top.append(add_top_tmp)
for field, values in data.items():
if field not in ["source", "index", "tags", "query", "sentiment"]:
if field not in ["source", "index", "tags", "query", "sentiment", "tokens"]:
for value in values:
add_top.append({"match": {field: value}})
# Bypass the check for query and tags membership since we can search by msg, etc
@@ -367,44 +374,6 @@ class ElasticsearchBackend(StorageBackend):
return search_query
def schedule_check_aggregations(self, rule_object, result_map):
"""
Check the results of a scheduled query for aggregations.
"""
if rule_object.aggs is None:
return result_map
for index, (meta, result) in result_map.items():
# Default to true, if no aggs are found, we still want to match
match = True
for agg_name, (operator, number) in rule_object.aggs.items():
if agg_name in meta["aggs"]:
agg_value = meta["aggs"][agg_name]["value"]
# TODO: simplify this, match is default to True
if operator == ">":
if agg_value > number:
match = True
else:
match = False
elif operator == "<":
if agg_value < number:
match = True
else:
match = False
elif operator == "=":
if agg_value == number:
match = True
else:
match = False
else:
match = False
else:
# No aggregation found, but it is required
match = False
result_map[index][0]["aggs"][agg_name]["match"] = match
return result_map
def schedule_query_results_test_sync(self, rule_object):
"""
Helper to run a scheduled query test with reduced functionality.
@@ -595,24 +564,28 @@ class ElasticsearchBackend(StorageBackend):
if isinstance(sentiment_r, dict):
return sentiment_r
if sentiment_r:
if rule_object is not None:
sentiment_index = "meta.aggs.avg_sentiment.value"
else:
sentiment_index = "sentiment"
sentiment_method, sentiment = sentiment_r
range_query_compare = {"range": {"sentiment": {}}}
range_query_compare = {"range": {sentiment_index: {}}}
range_query_precise = {
"match": {
"sentiment": None,
sentiment_index: None,
}
}
if sentiment_method == "below":
range_query_compare["range"]["sentiment"]["lt"] = sentiment
range_query_compare["range"][sentiment_index]["lt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "above":
range_query_compare["range"]["sentiment"]["gt"] = sentiment
range_query_compare["range"][sentiment_index]["gt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "exact":
range_query_precise["match"]["sentiment"] = sentiment
range_query_precise["match"][sentiment_index] = sentiment
add_top.append(range_query_precise)
elif sentiment_method == "nonzero":
range_query_precise["match"]["sentiment"] = 0
range_query_precise["match"][sentiment_index] = 0
add_top_negative.append(range_query_precise)
# Add in the additional information we already populated
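
For clarity, the clause built by the branch above differs only in the field it targets (values hypothetical):

# sentiment_method="above", sentiment=0.5, with a rule object:
{"range": {"meta.aggs.avg_sentiment.value": {"gt": 0.5}}}
# the same inputs without a rule object:
{"range": {"sentiment": {"gt": 0.5}}}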

View File

@@ -1,12 +1,24 @@
import logging
from datetime import datetime
from pprint import pprint
import httpx
import orjson
import requests
from django.conf import settings
from core.db import StorageBackend, add_defaults, dedup_list
from core.db.processing import annotate_results, parse_results
from core.db.processing import parse_results
from core.lib.parsing import (
QueryError,
parse_date_time,
parse_index,
parse_rule,
parse_sentiment,
parse_size,
parse_sort,
parse_source,
)
logger = logging.getLogger(__name__)
@@ -21,14 +33,27 @@ class ManticoreBackend(StorageBackend):
"""
pass # we use requests
def construct_query(self, query, size, index, blank=False):
async def async_initialise(self, **kwargs):
"""
Initialise the Manticore client in async mode
"""
pass # we use requests
def delete_rule_entries(self, rule_id):
"""
Delete all entries for a given rule.
:param rule_id: The rule ID to delete.
"""
# TODO
def construct_query(self, query, size=None, blank=False, **kwargs):
"""
Accept some query parameters and construct an OpenSearch query.
"""
if not size:
size = 5
query_base = {
"index": index,
"index": kwargs.get("index"),
"limit": size,
"query": {"bool": {"must": []}},
}
@@ -39,11 +64,79 @@ class ManticoreBackend(StorageBackend):
query_base["query"]["bool"]["must"].append(query_string)
return query_base
def run_query(self, client, user, search_query):
response = requests.post(
f"{settings.MANTICORE_URL}/json/search", json=search_query
)
return response
def parse(self, response, **kwargs):
parsed = parse_results(response, **kwargs)
return parsed
def run_query(self, user, search_query, **kwargs):
"""
Low level helper to run Manticore query.
"""
index = kwargs.get("index")
raw = kwargs.get("raw")
if search_query and not raw:
search_query["index"] = index
path = kwargs.get("path", "json/search")
if raw:
response = requests.post(
f"{settings.MANTICORE_URL}/{path}", search_query
)
else:
response = requests.post(
f"{settings.MANTICORE_URL}/{path}", json=search_query
)
return orjson.loads(response.text)
async def async_run_query(self, user, search_query, **kwargs):
"""
Low level helper to run Manticore query asynchronously.
"""
index = kwargs.get("index")
search_query["index"] = index
async with httpx.AsyncClient() as client:
response = await client.post(
f"{settings.MANTICORE_URL}/json/search", json=search_query
)
return orjson.loads(response.text)
async def async_store_matches(self, matches):
"""
Store a list of matches in Manticore.
:param index: The index to store the matches in.
:param matches: A list of matches to store.
"""
# TODO
def store_matches(self, matches):
"""
Store a list of matches in Manticore.
:param index: The index to store the matches in.
:param matches: A list of matches to store.
"""
# TODO
def prepare_schedule_query(self, rule_object):
"""
Helper to run a scheduled query with reduced functionality.
"""
# TODO
def schedule_query_results_test_sync(self, rule_object):
"""
Helper to run a scheduled query test with reduced functionality.
Sync version for running from Django forms.
Does not return results.
"""
# TODO
async def schedule_query_results(self, rule_object):
"""
Helper to run a scheduled query with reduced functionality and async.
"""
# TODO
def query_results(
self,
@@ -67,117 +160,77 @@ class ManticoreBackend(StorageBackend):
query_created = False
source = None
add_defaults(query_params)
# Check size
# Now, run the helpers for SIQTSRSS/ADR
# S - Size
# I - Index
# Q - Query
# T - Tags
# S - Source
# R - Ranges
# S - Sort
# S - Sentiment
# A - Annotate
# D - Dedup
# R - Reverse
# S - Size
if request.user.is_anonymous:
sizes = settings.MANTICORE_MAIN_SIZES_ANON
sizes = settings.MAIN_SIZES_ANON
else:
sizes = settings.MANTICORE_MAIN_SIZES
sizes = settings.MAIN_SIZES
if not size:
if "size" in query_params:
size = query_params["size"]
if size not in sizes:
message = "Size is not permitted"
message_class = "danger"
return {"message": message, "class": message_class}
size = int(size)
else:
size = 20
size = parse_size(query_params, sizes)
if isinstance(size, dict):
return size
# Check index
if "index" in query_params:
index = query_params["index"]
if index == "main":
index = settings.MANTICORE_INDEX_MAIN
else:
if not request.user.has_perm(f"core.index_{index}"):
message = "Not permitted to search by this index"
message_class = "danger"
return {
"message": message,
"class": message_class,
}
if index == "meta":
index = settings.MANTICORE_INDEX_META
elif index == "internal":
index = settings.MANTICORE_INDEX_INT
else:
message = "Index is not valid."
message_class = "danger"
return {
"message": message,
"class": message_class,
}
rule_object = parse_rule(request.user, query_params)
if isinstance(rule_object, dict):
return rule_object
if rule_object is not None:
index = settings.INDEX_RULE_STORAGE
add_bool.append({"rule_id": str(rule_object.id)})
else:
index = settings.MANTICORE_INDEX_MAIN
# I - Index
index = parse_index(request.user, query_params)
if isinstance(index, dict):
return index
# Create the search query
if "query" in query_params:
query = query_params["query"]
search_query = self.construct_query(query, size, index)
query_created = True
else:
if custom_query:
search_query = custom_query
# Q/T - Query/Tags
search_query = self.parse_query(
query_params, tags, size, custom_query, add_bool
)
# Query should be a dict, so check if it contains message here
if "message" in search_query:
return search_query
if tags:
# Get a blank search query
if not query_created:
search_query = self.construct_query(None, size, index, blank=True)
query_created = True
for tagname, tagvalue in tags.items():
add_bool.append({tagname: tagvalue})
# S - Sources
sources = parse_source(request.user, query_params)
if isinstance(sources, dict):
return sources
total_count = len(sources)
# Total is -1 due to the "all" source
total_sources = (
len(settings.MAIN_SOURCES) - 1 + len(settings.SOURCES_RESTRICTED)
)
required_any = ["query_full", "query", "tags"]
if not any([field in query_params.keys() for field in required_any]):
if not custom_query:
message = "Empty query!"
message_class = "warning"
return {"message": message, "class": message_class}
# If the sources the user has access to are equal to all
# possible sources, then we don't need to add the source
# filter to the query.
if total_count != total_sources:
add_top_tmp = {"bool": {"should": []}}
for source_iter in sources:
add_top_tmp["bool"]["should"].append(
{"match_phrase": {"src": source_iter}}
)
if query_params["source"] != "all":
add_top.append(add_top_tmp)
# Check for a source
if "source" in query_params:
source = query_params["source"]
if source in settings.SOURCES_RESTRICTED:
if not request.user.has_perm("core.restricted_sources"):
message = "Access denied"
message_class = "danger"
return {"message": message, "class": message_class}
elif source not in settings.MAIN_SOURCES:
message = "Invalid source"
message_class = "danger"
return {"message": message, "class": message_class}
if source == "all":
source = None # the next block will populate it
if source:
sources = [source]
else:
sources = list(settings.MAIN_SOURCES)
if request.user.has_perm("core.restricted_sources"):
for source_iter in settings.SOURCES_RESTRICTED:
sources.append(source_iter)
add_top_tmp = {"bool": {"should": []}}
total_count = 0
for source_iter in sources:
add_top_tmp["bool"]["should"].append({"equals": {"src": source_iter}})
total_count += 1
total_sources = len(settings.MAIN_SOURCES) + len(settings.SOURCES_RESTRICTED)
if not total_count == total_sources:
add_top.append(add_top_tmp)
# Date/time range
if set({"from_date", "to_date", "from_time", "to_time"}).issubset(
query_params.keys()
):
from_ts = f"{query_params['from_date']}T{query_params['from_time']}Z"
to_ts = f"{query_params['to_date']}T{query_params['to_time']}Z"
from_ts = datetime.strptime(from_ts, "%Y-%m-%dT%H:%MZ")
to_ts = datetime.strptime(to_ts, "%Y-%m-%dT%H:%MZ")
from_ts = int(from_ts.timestamp())
to_ts = int(to_ts.timestamp())
# R - Ranges
# date_query = False
from_ts, to_ts = parse_date_time(query_params)
if from_ts:
range_query = {
"range": {
"ts": {
@@ -188,115 +241,87 @@ class ManticoreBackend(StorageBackend):
}
add_top.append(range_query)
# Sorting
if "sorting" in query_params:
sorting = query_params["sorting"]
if sorting not in ("asc", "desc", "none"):
message = "Invalid sort"
message_class = "danger"
return {"message": message, "class": message_class}
if sorting in ("asc", "desc"):
sort = [
{
"ts": {
"order": sorting,
}
}
]
# S - Sort
sort = parse_sort(query_params)
if isinstance(sort, dict):
return sort
# Sentiment handling
if "check_sentiment" in query_params:
if "sentiment_method" not in query_params:
message = "No sentiment method"
message_class = "danger"
return {"message": message, "class": message_class}
if "sentiment" in query_params:
sentiment = query_params["sentiment"]
try:
sentiment = float(sentiment)
except ValueError:
message = "Sentiment is not a float"
message_class = "danger"
return {"message": message, "class": message_class}
sentiment_method = query_params["sentiment_method"]
range_query_compare = {"range": {"sentiment": {}}}
if rule_object is not None:
field = "match_ts"
else:
field = "ts"
if sort:
# For Druid compatibility
sort_map = {"ascending": "asc", "descending": "desc"}
sorting = [
{
field: {
"order": sort_map[sort],
}
}
]
search_query["sort"] = sorting
# S - Sentiment
sentiment_r = parse_sentiment(query_params)
if isinstance(sentiment_r, dict):
return sentiment_r
if sentiment_r:
if rule_object is not None:
sentiment_index = "meta.aggs.avg_sentiment.value"
else:
sentiment_index = "sentiment"
sentiment_method, sentiment = sentiment_r
range_query_compare = {"range": {sentiment_index: {}}}
range_query_precise = {
"match": {
"sentiment": None,
sentiment_index: None,
}
}
if sentiment_method == "below":
range_query_compare["range"]["sentiment"]["lt"] = sentiment
range_query_compare["range"][sentiment_index]["lt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "above":
range_query_compare["range"]["sentiment"]["gt"] = sentiment
range_query_compare["range"][sentiment_index]["gt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "exact":
range_query_precise["match"]["sentiment"] = sentiment
range_query_precise["match"][sentiment_index] = sentiment
add_top.append(range_query_precise)
elif sentiment_method == "nonzero":
range_query_precise["match"]["sentiment"] = 0
range_query_precise["match"][sentiment_index] = 0
add_top_negative.append(range_query_precise)
if add_bool:
# if "bool" not in search_query["query"]:
# search_query["query"]["bool"] = {}
# if "must" not in search_query["query"]["bool"]:
# search_query["query"]["bool"] = {"must": []}
# Add in the additional information we already populated
self.add_bool(search_query, add_bool)
self.add_top(search_query, add_top)
self.add_top(search_query, add_top_negative, negative=True)
for item in add_bool:
search_query["query"]["bool"]["must"].append({"match": item})
if add_top:
for item in add_top:
search_query["query"]["bool"]["must"].append(item)
if add_top_negative:
for item in add_top_negative:
if "must_not" in search_query["query"]["bool"]:
search_query["query"]["bool"]["must_not"].append(item)
else:
search_query["query"]["bool"]["must_not"] = [item]
if sort:
search_query["sort"] = sort
pprint(search_query)
results = self.run_query(
self.client,
request.user, # passed through run_main_query to filter_blacklisted
response = self.query(
request.user,
search_query,
index=index,
)
if not results:
if not response:
message = "Error running query"
message_class = "danger"
return {"message": message, "class": message_class}
# results = results.to_dict()
if "error" in results:
message = results["error"]
if "error" in response:
message = response["error"]
message_class = "danger"
return {"message": message, "class": message_class}
results_parsed = parse_results(results)
if annotate:
annotate_results(results_parsed)
if "dedup" in query_params:
if query_params["dedup"] == "on":
dedup = True
else:
dedup = False
else:
dedup = False
if "message" in response:
return response
if reverse:
results_parsed = results_parsed[::-1]
# A/D/R - Annotate/Dedup/Reverse
response["object_list"] = self.process_results(
response["object_list"],
annotate=annotate,
dedup=dedup,
dedup_fields=dedup_fields,
reverse=reverse,
)
if dedup:
if not dedup_fields:
dedup_fields = ["msg", "nick", "ident", "host", "net", "channel"]
results_parsed = dedup_list(results_parsed, dedup_fields)
context = {
"object_list": results_parsed,
"card": results["hits"]["total"],
"took": results["took"],
}
if "cache" in results:
context["cache"] = results["cache"]
context = response
return context

core/db/manticore_orig.py Normal file
View File

@@ -0,0 +1,302 @@
import logging
from datetime import datetime
from pprint import pprint
import requests
from django.conf import settings
from core.db import StorageBackend, add_defaults, dedup_list
from core.db.processing import annotate_results, parse_results
logger = logging.getLogger(__name__)
class ManticoreBackend(StorageBackend):
def __init__(self):
super().__init__("manticore")
def initialise(self, **kwargs):
"""
Initialise the Manticore client
"""
pass # we use requests
def construct_query(self, query, size, index, blank=False):
"""
Accept some query parameters and construct an OpenSearch query.
"""
if not size:
size = 5
query_base = {
"index": index,
"limit": size,
"query": {"bool": {"must": []}},
}
query_string = {
"query_string": query,
}
if not blank:
query_base["query"]["bool"]["must"].append(query_string)
return query_base
def run_query(self, client, user, search_query):
response = requests.post(
f"{settings.MANTICORE_URL}/json/search", json=search_query
)
return response
def query_results(
self,
request,
query_params,
size=None,
annotate=True,
custom_query=False,
reverse=False,
dedup=False,
dedup_fields=None,
tags=None,
):
query = None
message = None
message_class = None
add_bool = []
add_top = []
add_top_negative = []
sort = None
query_created = False
source = None
add_defaults(query_params)
# Check size
if request.user.is_anonymous:
sizes = settings.MANTICORE_MAIN_SIZES_ANON
else:
sizes = settings.MANTICORE_MAIN_SIZES
if not size:
if "size" in query_params:
size = query_params["size"]
if size not in sizes:
message = "Size is not permitted"
message_class = "danger"
return {"message": message, "class": message_class}
size = int(size)
else:
size = 20
# Check index
if "index" in query_params:
index = query_params["index"]
if index == "main":
index = settings.MANTICORE_INDEX_MAIN
else:
if not request.user.has_perm(f"core.index_{index}"):
message = "Not permitted to search by this index"
message_class = "danger"
return {
"message": message,
"class": message_class,
}
if index == "meta":
index = settings.MANTICORE_INDEX_META
elif index == "internal":
index = settings.MANTICORE_INDEX_INT
else:
message = "Index is not valid."
message_class = "danger"
return {
"message": message,
"class": message_class,
}
else:
index = settings.MANTICORE_INDEX_MAIN
# Create the search query
if "query" in query_params:
query = query_params["query"]
search_query = self.construct_query(query, size, index)
query_created = True
else:
if custom_query:
search_query = custom_query
if tags:
# Get a blank search query
if not query_created:
search_query = self.construct_query(None, size, index, blank=True)
query_created = True
for tagname, tagvalue in tags.items():
add_bool.append({tagname: tagvalue})
required_any = ["query_full", "query", "tags"]
if not any([field in query_params.keys() for field in required_any]):
if not custom_query:
message = "Empty query!"
message_class = "warning"
return {"message": message, "class": message_class}
# Check for a source
if "source" in query_params:
source = query_params["source"]
if source in settings.SOURCES_RESTRICTED:
if not request.user.has_perm("core.restricted_sources"):
message = "Access denied"
message_class = "danger"
return {"message": message, "class": message_class}
elif source not in settings.MAIN_SOURCES:
message = "Invalid source"
message_class = "danger"
return {"message": message, "class": message_class}
if source == "all":
source = None # the next block will populate it
if source:
sources = [source]
else:
sources = list(settings.MAIN_SOURCES)
if request.user.has_perm("core.restricted_sources"):
for source_iter in settings.SOURCES_RESTRICTED:
sources.append(source_iter)
add_top_tmp = {"bool": {"should": []}}
total_count = 0
for source_iter in sources:
add_top_tmp["bool"]["should"].append({"equals": {"src": source_iter}})
total_count += 1
total_sources = len(settings.MAIN_SOURCES) + len(settings.SOURCES_RESTRICTED)
if not total_count == total_sources:
add_top.append(add_top_tmp)
# Date/time range
if set({"from_date", "to_date", "from_time", "to_time"}).issubset(
query_params.keys()
):
from_ts = f"{query_params['from_date']}T{query_params['from_time']}Z"
to_ts = f"{query_params['to_date']}T{query_params['to_time']}Z"
from_ts = datetime.strptime(from_ts, "%Y-%m-%dT%H:%MZ")
to_ts = datetime.strptime(to_ts, "%Y-%m-%dT%H:%MZ")
from_ts = int(from_ts.timestamp())
to_ts = int(to_ts.timestamp())
range_query = {
"range": {
"ts": {
"gt": from_ts,
"lt": to_ts,
}
}
}
add_top.append(range_query)
# Sorting
if "sorting" in query_params:
sorting = query_params["sorting"]
if sorting not in ("asc", "desc", "none"):
message = "Invalid sort"
message_class = "danger"
return {"message": message, "class": message_class}
if sorting in ("asc", "desc"):
sort = [
{
"ts": {
"order": sorting,
}
}
]
# Sentiment handling
if "check_sentiment" in query_params:
if "sentiment_method" not in query_params:
message = "No sentiment method"
message_class = "danger"
return {"message": message, "class": message_class}
if "sentiment" in query_params:
sentiment = query_params["sentiment"]
try:
sentiment = float(sentiment)
except ValueError:
message = "Sentiment is not a float"
message_class = "danger"
return {"message": message, "class": message_class}
sentiment_method = query_params["sentiment_method"]
range_query_compare = {"range": {"sentiment": {}}}
range_query_precise = {
"match": {
"sentiment": None,
}
}
if sentiment_method == "below":
range_query_compare["range"]["sentiment"]["lt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "above":
range_query_compare["range"]["sentiment"]["gt"] = sentiment
add_top.append(range_query_compare)
elif sentiment_method == "exact":
range_query_precise["match"]["sentiment"] = sentiment
add_top.append(range_query_precise)
elif sentiment_method == "nonzero":
range_query_precise["match"]["sentiment"] = 0
add_top_negative.append(range_query_precise)
if add_bool:
# if "bool" not in search_query["query"]:
# search_query["query"]["bool"] = {}
# if "must" not in search_query["query"]["bool"]:
# search_query["query"]["bool"] = {"must": []}
for item in add_bool:
search_query["query"]["bool"]["must"].append({"match": item})
if add_top:
for item in add_top:
search_query["query"]["bool"]["must"].append(item)
if add_top_negative:
for item in add_top_negative:
if "must_not" in search_query["query"]["bool"]:
search_query["query"]["bool"]["must_not"].append(item)
else:
search_query["query"]["bool"]["must_not"] = [item]
if sort:
search_query["sort"] = sort
pprint(search_query)
results = self.run_query(
self.client,
request.user, # passed through run_main_query to filter_blacklisted
search_query,
)
if not results:
message = "Error running query"
message_class = "danger"
return {"message": message, "class": message_class}
# results = results.to_dict()
if "error" in results:
message = results["error"]
message_class = "danger"
return {"message": message, "class": message_class}
results_parsed = parse_results(results)
if annotate:
annotate_results(results_parsed)
if "dedup" in query_params:
if query_params["dedup"] == "on":
dedup = True
else:
dedup = False
else:
dedup = False
if reverse:
results_parsed = results_parsed[::-1]
if dedup:
if not dedup_fields:
dedup_fields = ["msg", "nick", "ident", "host", "net", "channel"]
results_parsed = dedup_list(results_parsed, dedup_fields)
context = {
"object_list": results_parsed,
"card": results["hits"]["total"],
"took": results["took"],
}
if "cache" in results:
context["cache"] = results["cache"]
return context

View File

@@ -1,5 +1,5 @@
from datetime import datetime
import ast
from core.lib.threshold import annotate_num_chans, annotate_num_users, annotate_online
@@ -92,6 +92,11 @@ def parse_results(results, meta=None):
for field in list(element.keys()):
if element[field] == "":
del element[field]
# Unfold the tokens
if "tokens" in element:
if element["tokens"].startswith('["') or element["tokens"].startswith("['"):
tokens_parsed = ast.literal_eval(element["tokens"])
element["tokens"] = tokens_parsed
# Split the timestamp into date and time
if "ts" not in element:

View File

@@ -88,6 +88,7 @@ class NotificationRuleForm(RestrictedFormMixin, ModelForm):
"url",
"service",
"policy",
"ingest",
"enabled",
)
help_texts = {
@@ -102,6 +103,7 @@ class NotificationRuleForm(RestrictedFormMixin, ModelForm):
"window": "Time window to search: 1d, 1h, 1m, 1s, etc.",
"amount": "Amount of matches to be returned for scheduled queries. Cannot be used with on-demand queries.",
"policy": "When to trigger this policy.",
"ingest": "Whether to ingest matches.",
}
def clean(self):

View File

@@ -4,7 +4,7 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None):
extra_should = []
extra_should2 = []
if num:
extra_must.append({"match_phrase": {"num": num}})
extra_must.append({"equals": {"num": num}})
if net:
extra_must.append({"match_phrase": {"net": net}})
if channel:
@@ -52,7 +52,7 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None):
extra_should.append({"match": {"nick": channel}})
else:
for ctype in types:
extra_should.append({"match": {"mtype": ctype}})
extra_should.append({"equals": {"mtype": ctype}})
else:
for ctype in types:
extra_should.append({"match": {"type": ctype}})
@@ -84,4 +84,5 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None):
query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should]}})
if extra_should2:
query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should2]}})
return query

View File

@@ -90,6 +90,7 @@ def parse_index(user, query_params, raise_error=False):
}
else:
index = settings.INDEX_MAIN
return index

View File

@@ -234,6 +234,16 @@ class NotificationRuleData(object):
break
# Continue to next field
continue
if field == "tokens":
# Allow partial matches for tokens
for token in value:
if "tokens" in message:
if token.lower() in [x.lower() for x in message["tokens"]]:
matched[field] = token
# Break out of the token matching loop
break
# Continue to next field
continue
if field in message and message[field] in value:
# Do exact matches for all other fields
matched[field] = message[field]
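
A sketch of the case-insensitive partial token match added above (values hypothetical):

value = ["Foo"]                         # tokens the rule looks for
message = {"tokens": ["foo", "baz"]}    # tokens on the incoming message
matched = {}
for token in value:
    if "tokens" in message:
        if token.lower() in [x.lower() for x in message["tokens"]]:
            matched["tokens"] = token   # records the rule's token, not the message's
            break
# matched -> {"tokens": "Foo"}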
@@ -325,7 +335,8 @@ class NotificationRuleData(object):
if not isinstance(matches, list):
matches = [matches]
matches_copy = matches.copy()
match_ts = datetime.utcnow().isoformat()
# match_ts = datetime.utcnow().isoformat()
match_ts = int(datetime.utcnow().timestamp())
batch_id = uuid.uuid4()
# Filter empty fields in meta
@@ -347,7 +358,8 @@ class NotificationRuleData(object):
:param matches: the matches to store
"""
# new_matches = self.reform_matches(index, matches, meta, mode)
await self.db.async_store_matches(matches)
if self.object.ingest:
await self.db.async_store_matches(matches)
def ingest_matches_sync(self, index, matches, meta, mode):
"""
@@ -356,7 +368,8 @@ class NotificationRuleData(object):
:param matches: the matches to store
"""
# new_matches = self.reform_matches(index, matches, meta, mode)
self.db.store_matches(matches)
if self.object.ingest:
self.db.store_matches(matches)
async def rule_matched(self, index, message, meta, mode):
"""

View File

@@ -1,5 +1,6 @@
import msgpack
from django.core.management.base import BaseCommand
from django.conf import settings
from redis import StrictRedis
from core.db.storage import db
@@ -93,7 +94,13 @@ def process_rules(data):
class Command(BaseCommand):
def handle(self, *args, **options):
r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
r = StrictRedis(unix_socket_path="/var/run/neptune-redis.sock", db=10) # To match Monolith DB
# r = StrictRedis(
# host=settings.REDIS_HOST,
# port=settings.REDIS_PORT,
# password=settings.REDIS_PASSWORD,
# db=settings.REDIS_DB
# )
p = r.pubsub()
p.psubscribe("messages")
for message in p.listen():

View File

@@ -44,8 +44,11 @@ class Command(BaseCommand):
for interval in INTERVALS:
log.debug(f"Scheduling {interval} second job")
scheduler.add_job(job, "interval", seconds=interval, args=[interval])
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
scheduler._eventloop = loop
scheduler.start()
loop = asyncio.get_event_loop()
try:
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
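
The change above creates the event loop explicitly instead of relying on asyncio.get_event_loop(), which is deprecated outside a running loop on recent Python; a minimal sketch, assuming APScheduler's AsyncIOScheduler (consistent with the _eventloop attribute used here):

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
scheduler = AsyncIOScheduler(event_loop=loop)  # equivalent to setting _eventloop
scheduler.start()
try:
    loop.run_forever()
except (KeyboardInterrupt, SystemExit):
    pass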

View File

@@ -0,0 +1,33 @@
# Generated by Django 4.1.6 on 2023-02-13 21:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0027_alter_notificationrule_policy_and_more'),
]
operations = [
migrations.RenameField(
model_name='notificationrule',
old_name='send_empty',
new_name='ingest',
),
migrations.AlterField(
model_name='notificationrule',
name='interval',
field=models.IntegerField(choices=[(0, 'On demand'), (5, 'Every 5 seconds'), (60, 'Every minute'), (900, 'Every 15 minutes'), (1800, 'Every 30 minutes'), (3600, 'Every hour'), (14400, 'Every 4 hours'), (86400, 'Every day')], default=60),
),
migrations.AlterField(
model_name='notificationrule',
name='service',
field=models.CharField(choices=[('ntfy', 'NTFY'), ('webhook', 'Custom webhook'), ('none', 'Disabled')], default='webhook', max_length=255),
),
migrations.AlterField(
model_name='notificationrule',
name='window',
field=models.CharField(blank=True, default='30d', max_length=255, null=True),
),
]

View File

@@ -78,8 +78,9 @@ class User(AbstractUser):
"""
Override the save function to create a Stripe customer.
"""
if not self.stripe_id: # stripe ID not stored
self.stripe_id = get_or_create(self.email, self.first_name, self.last_name)
if settings.BILLING_ENABLED:
if not self.stripe_id: # stripe ID not stored
self.stripe_id = get_or_create(self.email, self.first_name, self.last_name)
to_update = {}
if self.email != self._original.email:
@@ -89,14 +90,16 @@ class User(AbstractUser):
if self.last_name != self._original.last_name:
to_update["last_name"] = self.last_name
update_customer_fields(self.stripe_id, **to_update)
if settings.BILLING_ENABLED:
update_customer_fields(self.stripe_id, **to_update)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
if self.stripe_id:
stripe.Customer.delete(self.stripe_id)
logger.info(f"Deleted Stripe customer {self.stripe_id}")
if settings.BILLING_ENABLED:
if self.stripe_id:
stripe.Customer.delete(self.stripe_id)
logger.info(f"Deleted Stripe customer {self.stripe_id}")
super().delete(*args, **kwargs)
def has_plan(self, plan):
@@ -194,14 +197,16 @@ class NotificationRule(models.Model):
priority = models.IntegerField(choices=PRIORITY_CHOICES, default=1)
topic = models.CharField(max_length=2048, null=True, blank=True)
url = models.CharField(max_length=1024, null=True, blank=True)
interval = models.IntegerField(choices=INTERVAL_CHOICES, default=0)
window = models.CharField(max_length=255, null=True, blank=True)
interval = models.IntegerField(choices=INTERVAL_CHOICES, default=60)
window = models.CharField(max_length=255, default="30d", null=True, blank=True)
amount = models.PositiveIntegerField(default=1, null=True, blank=True)
enabled = models.BooleanField(default=True)
data = models.TextField()
match = models.JSONField(null=True, blank=True)
service = models.CharField(choices=SERVICE_CHOICES, max_length=255, default="ntfy")
send_empty = models.BooleanField(default=False)
service = models.CharField(
choices=SERVICE_CHOICES, max_length=255, default="webhook"
)
ingest = models.BooleanField(default=False)
policy = models.CharField(choices=POLICY_CHOICES, max_length=255, default="default")
def __str__(self):
@@ -238,8 +243,6 @@ class NotificationRule(models.Model):
user_settings["url"] = self.url
if self.service is not None:
user_settings["service"] = self.service
if self.send_empty is not None:
user_settings["send_empty"] = self.send_empty
if check:
if user_settings["service"] == "ntfy" and user_settings["topic"] is None:

View File

@@ -280,7 +280,7 @@
{% if user.is_superuser %}
<div class="navbar-item has-dropdown is-hoverable">
<a class="navbar-link">
Threshold
Manage
</a>
<div class="navbar-dropdown">
@@ -290,6 +290,9 @@
<a class="navbar-item" href="#">
Discord
</a>
<a class="navbar-item" href="{% url 'monolith_stats' %}">
Stats
</a>
</div>
</div>
{% endif %}

View File

@@ -0,0 +1,15 @@
{% extends "base.html" %}
{% block content %}
<div
style="display: none;"
hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}'
hx-get="{% url 'monolith_stats_db' type='page' %}"
hx-trigger="load, every 5s"
hx-target="#stats"
hx-swap="innerHTML">
</div>
<div class="box">
<div id="stats">
</div>
</div>
{% endblock %}

View File

@@ -0,0 +1,14 @@
{% extends 'mixins/partials/generic-detail.html' %}
{% block tbody %}
{% for item in object %}
{% if item.data %}
{% for row in item.data %}
<tr>
<th>{{ row.Variable_name }}</th>
<td>{{ row.Value }}</td>
</tr>
{% endfor %}
{% endif %}
{% endfor %}
{% endblock %}

View File

@@ -174,10 +174,11 @@
</td>
{% elif column.name == 'match_ts' %}
<td class="{{ column.name }}">
{% with match_ts=cell|splitstr:'T' %}
<!-- {# with match_ts=cell|splitstr:'T' %}
<p>{{ match_ts.0 }}</p>
<p>{{ match_ts.1 }}</p>
{% endwith %}
{% endwith #} -->
<p>{{ match_ts }}</p>
</td>
{% elif column.name == 'type' or column.name == 'mtype' %}
<td class="{{ column.name }}">

View File

@@ -19,6 +19,7 @@
<th>priority</th>
<th>topic</th>
<th>enabled</th>
<th>ingest</th>
<th>data length</th>
<th>match</th>
<th>actions</th>
@@ -43,6 +44,17 @@
</span>
{% endif %}
</td>
<td>
{% if item.ingest %}
<span class="icon">
<i class="fa-solid fa-check"></i>
</span>
{% else %}
<span class="icon">
<i class="fa-solid fa-xmark"></i>
</span>
{% endif %}
</td>
<td>{{ item.data|length }}</td>
<td>{{ item.matches }}</td>
<td>

View File

@@ -3,7 +3,7 @@
</div>
{% if params.index != 'int' and params.index != 'meta' %}
<div id="sentiment-container" {% if params.show_sentiment is None %} class="is-hidden" {% endif %}>
<div id="sentiment-container" {% if params.graph is None %} class="is-hidden" {% endif %}>
<canvas id="sentiment-chart"></canvas>
</div>
<script src="{% static 'chart.js' %}"></script>

View File

@@ -258,7 +258,7 @@
id="sentiment_graph_switch"
type="checkbox"
class="switch is-rounded is-info"
name="show_sentiment"
name="graph"
data-script="on click toggle .is-hidden on #sentiment-container">
<label

View File

@@ -5,4 +5,6 @@ register = template.Library()
@register.filter
def splitstr(value, arg):
if type(value) == int:
raise Exception(f"Attempt to split {value} with separator {arg}")
return value.split(arg)

View File

View File

@@ -0,0 +1,36 @@
from django.shortcuts import render
from django.views import View
from rest_framework.parsers import FormParser
from rest_framework.views import APIView
from core.db.storage import db
from mixins.views import ObjectRead
from core.views.manage.permissions import SuperUserRequiredMixin
class MonolithStats(SuperUserRequiredMixin, View):
template_name = "manage/monolith/stats/index.html"
def get(self, request):
return render(request, self.template_name)
class MonolithDBStats(SuperUserRequiredMixin, ObjectRead):
detail_template = "manage/monolith/stats/overview.html"
context_object_name_singular = "Status"
context_object_name = "Status"
detail_url_name = "monolith_stats_db"
detail_url_args = ["type"]
def get_object(self, **kwargs):
search_query = "SHOW TABLE main STATUS"
stats = db.run_query(
self.request.user,
search_query=search_query,
path="sql?mode=raw",
raw=True,
#method="get",
)
return stats
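
Under the hood this goes through run_query's raw mode added above, which POSTs the SQL string as the request body; the equivalent direct call is roughly:

import orjson
import requests

resp = requests.post(
    f"{settings.MANTICORE_URL}/sql?mode=raw",  # path passed as path="sql?mode=raw"
    "SHOW TABLE main STATUS",                  # sent raw, not as JSON
)
stats = orjson.loads(resp.text)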

View File

@@ -81,15 +81,21 @@ def make_graph(results):
graph = []
for index, item in enumerate(results):
date = str(index)
sentiment = None
if "meta" in item:
if "aggs" in item["meta"]:
if "avg_sentiment" in item["meta"]["aggs"]:
sentiment = item["meta"]["aggs"]["avg_sentiment"]["value"]
else:
if "sentiment" in item:
sentiment = item["sentiment"]
graph.append(
{
"text": item.get("words_noun", None)
or item.get("msg", None)
or item.get("id"),
"text": item.get("msg", None) or item.get("id"),
"nick": item.get("nick", None),
"channel": item.get("channel", None),
"net": item.get("net", None),
"value": item.get("sentiment", None) or None,
"value": sentiment,
"date": date,
}
)
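
The lookup above copes with two result shapes (sketched; keys per the code):

# Rule-storage hit: sentiment lives under the stored meta aggregations
item = {"meta": {"aggs": {"avg_sentiment": {"value": 0.3}}}, "msg": "hi"}
# Plain search hit: sentiment is a top-level field
item = {"sentiment": 0.3, "msg": "hi"}
# Either way the graph entry's "value" becomes 0.3.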
@@ -113,16 +119,20 @@ class DrilldownTableView(SingleTableView):
sizes = settings.MAIN_SIZES
if request.GET:
print("GET")
self.template_name = "index.html"
# GET arguments in URL like ?query=xyz
query_params = request.GET.dict()
print("QUERY_PARAMS GET", query_params)
if request.htmx:
if request.resolver_match.url_name == "search_partial":
self.template_name = "partials/results_table.html"
elif request.POST:
print("POST")
query_params = request.POST.dict()
else:
self.template_name = "index.html"
print("FRESH")
# No query, this is a fresh page load
# Don't try to search, since there's clearly nothing to do
params_with_defaults = {}

docker-compose.prod.yml Normal file
View File

@@ -0,0 +1,579 @@
version: "2.2"
services:
app:
image: xf/neptune:latest
container_name: neptune
build:
context: .
args:
OPERATION: ${OPERATION}
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
#- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- type: bind
source: /code/run
target: /var/run
# env_file:
# - stack.env
environment:
# General application settings
APP_PORT: "${APP_PORT}"
PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
STATIC_ROOT: "${STATIC_ROOT}"
OPERATION: "${OPERATION}"
# Elasticsearch settings
ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
# Manticore settings
MANTICORE_URL: "${MANTICORE_URL}"
# Database settings
DB_BACKEND: "${DB_BACKEND}"
INDEX_MAIN: "${INDEX_MAIN}"
INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
INDEX_META: "${INDEX_META}"
INDEX_INT: "${INDEX_INT}"
INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
MAIN_SIZES: "${MAIN_SIZES}"
MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
MAIN_SOURCES: "${MAIN_SOURCES}"
SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
CACHE: "${CACHE}"
CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
# URLs
DOMAIN: "${DOMAIN}"
URL: "${URL}"
# Access control
ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
# CSRF
CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
# Stripe settings
BILLING_ENABLED: "${BILLING_ENABLED}"
STRIPE_TEST: "${STRIPE_TEST}"
STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
# Threshold settings
THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
# NickTrace settings
NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
# Meta settings
META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
META_QUERY_SIZE: "${META_QUERY_SIZE}"
# Debugging and profiling
DEBUG: "${DEBUG}"
PROFILER: "${PROFILER}"
# Redis settings
REDIS_HOST: "${REDIS_HOST}"
REDIS_PASSWORD: "${REDIS_PASSWORD}"
REDIS_DB: "${REDIS_DB}"
REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
REDIS_PORT: "${REDIS_PORT}"
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
# networks:
# - default
# - xf
# - db
network_mode: host
processing:
image: xf/neptune:latest
container_name: processing_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py processing'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
#- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- type: bind
source: /code/run
target: /var/run
environment:
# General application settings
APP_PORT: "${APP_PORT}"
PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
STATIC_ROOT: "${STATIC_ROOT}"
OPERATION: "${OPERATION}"
# Elasticsearch settings
ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
# Manticore settings
MANTICORE_URL: "${MANTICORE_URL}"
# Database settings
DB_BACKEND: "${DB_BACKEND}"
INDEX_MAIN: "${INDEX_MAIN}"
INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
INDEX_META: "${INDEX_META}"
INDEX_INT: "${INDEX_INT}"
INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
MAIN_SIZES: "${MAIN_SIZES}"
MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
MAIN_SOURCES: "${MAIN_SOURCES}"
SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
CACHE: "${CACHE}"
CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
# URLs
DOMAIN: "${DOMAIN}"
URL: "${URL}"
# Access control
ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
# CSRF
CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
# Stripe settings
BILLING_ENABLED: "${BILLING_ENABLED}"
STRIPE_TEST: "${STRIPE_TEST}"
STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
# Threshold settings
THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
# NickTrace settings
NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
# Meta settings
META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
META_QUERY_SIZE: "${META_QUERY_SIZE}"
# Debugging and profiling
DEBUG: "${DEBUG}"
PROFILER: "${PROFILER}"
# Redis settings
REDIS_HOST: "${REDIS_HOST}"
REDIS_PASSWORD: "${REDIS_PASSWORD}"
REDIS_DB: "${REDIS_DB}"
REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
REDIS_PORT: "${REDIS_PORT}"
# volumes_from:
# - tmp
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
# networks:
# - default
# - xf
# - db
network_mode: host
scheduling:
image: xf/neptune:latest
container_name: scheduling_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py scheduling'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
#- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- type: bind
source: /code/run
target: /var/run
environment:
# General application settings
APP_PORT: "${APP_PORT}"
PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
STATIC_ROOT: "${STATIC_ROOT}"
OPERATION: "${OPERATION}"
# Elasticsearch settings
ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
# Manticore settings
MANTICORE_URL: "${MANTICORE_URL}"
# Database settings
DB_BACKEND: "${DB_BACKEND}"
INDEX_MAIN: "${INDEX_MAIN}"
INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
INDEX_META: "${INDEX_META}"
INDEX_INT: "${INDEX_INT}"
INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
MAIN_SIZES: "${MAIN_SIZES}"
MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
MAIN_SOURCES: "${MAIN_SOURCES}"
SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
CACHE: "${CACHE}"
CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
# URLs
DOMAIN: "${DOMAIN}"
URL: "${URL}"
# Access control
ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
# CSRF
CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
# Stripe settings
BILLING_ENABLED: "${BILLING_ENABLED}"
STRIPE_TEST: "${STRIPE_TEST}"
STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
# Threshold settings
THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
# NickTrace settings
NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
# Meta settings
META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
META_QUERY_SIZE: "${META_QUERY_SIZE}"
# Debugging and profiling
DEBUG: "${DEBUG}"
PROFILER: "${PROFILER}"
# Redis settings
REDIS_HOST: "${REDIS_HOST}"
REDIS_PASSWORD: "${REDIS_PASSWORD}"
REDIS_DB: "${REDIS_DB}"
REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
REDIS_PORT: "${REDIS_PORT}"
# volumes_from:
# - tmp
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
# networks:
# - default
# - xf
# - db
network_mode: host
migration:
image: xf/neptune:latest
container_name: migration_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py migrate --noinput'
volumes:
- ${PORTAINER_GIT_DIR}:/code
#- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- type: bind
source: /code/run
target: /var/run
environment:
# General application settings
APP_PORT: "${APP_PORT}"
PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
STATIC_ROOT: "${STATIC_ROOT}"
OPERATION: "${OPERATION}"
# Elasticsearch settings
ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
# Manticore settings
MANTICORE_URL: "${MANTICORE_URL}"
# Database settings
DB_BACKEND: "${DB_BACKEND}"
INDEX_MAIN: "${INDEX_MAIN}"
INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
INDEX_META: "${INDEX_META}"
INDEX_INT: "${INDEX_INT}"
INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
MAIN_SIZES: "${MAIN_SIZES}"
MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
MAIN_SOURCES: "${MAIN_SOURCES}"
SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
CACHE: "${CACHE}"
CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
# URLs
DOMAIN: "${DOMAIN}"
URL: "${URL}"
# Access control
ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
# CSRF
CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
# Stripe settings
BILLING_ENABLED: "${BILLING_ENABLED}"
STRIPE_TEST: "${STRIPE_TEST}"
STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
# Threshold settings
THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
# NickTrace settings
NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
# Meta settings
META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
META_QUERY_SIZE: "${META_QUERY_SIZE}"
# Debugging and profiling
DEBUG: "${DEBUG}"
PROFILER: "${PROFILER}"
# Redis settings
REDIS_HOST: "${REDIS_HOST}"
REDIS_PASSWORD: "${REDIS_PASSWORD}"
REDIS_DB: "${REDIS_DB}"
REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
REDIS_PORT: "${REDIS_PORT}"
# volumes_from:
# - tmp
depends_on:
redis:
condition: service_healthy
# networks:
# - default
# - xf
# - db
network_mode: host
collectstatic:
image: xf/neptune:latest
container_name: collectstatic_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py collectstatic --noinput'
volumes:
- ${PORTAINER_GIT_DIR}:/code
#- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- type: bind
source: /code/run
target: /var/run
# volumes_from:
# - tmp
environment:
# General application settings
APP_PORT: "${APP_PORT}"
PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
STATIC_ROOT: "${STATIC_ROOT}"
OPERATION: "${OPERATION}"
# Elasticsearch settings
ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
# Manticore settings
MANTICORE_URL: "${MANTICORE_URL}"
# Database settings
DB_BACKEND: "${DB_BACKEND}"
INDEX_MAIN: "${INDEX_MAIN}"
INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
INDEX_META: "${INDEX_META}"
INDEX_INT: "${INDEX_INT}"
INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
MAIN_SIZES: "${MAIN_SIZES}"
MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
MAIN_SOURCES: "${MAIN_SOURCES}"
SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
CACHE: "${CACHE}"
CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
# URLs
DOMAIN: "${DOMAIN}"
URL: "${URL}"
# Access control
ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
# CSRF
CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
# Stripe settings
BILLING_ENABLED: "${BILLING_ENABLED}"
STRIPE_TEST: "${STRIPE_TEST}"
STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
# Threshold settings
THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
# NickTrace settings
NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
# Meta settings
META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
META_QUERY_SIZE: "${META_QUERY_SIZE}"
# Debugging and profiling
DEBUG: "${DEBUG}"
PROFILER: "${PROFILER}"
# Redis settings
REDIS_HOST: "${REDIS_HOST}"
REDIS_PASSWORD: "${REDIS_PASSWORD}"
REDIS_DB: "${REDIS_DB}"
REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
REDIS_PORT: "${REDIS_PORT}"
depends_on:
redis:
condition: service_healthy
# networks:
# - default
# - xf
# - db
network_mode: host
# nginx:
# image: nginx:latest
# container_name: nginx_neptune
# ports:
# - ${APP_PORT}:9999
# ulimits:
# nproc: 65535
# nofile:
# soft: 65535
# hard: 65535
# volumes:
# - ${PORTAINER_GIT_DIR}:/code
# - ${PORTAINER_GIT_DIR}/docker/nginx/conf.d/${OPERATION}.conf:/etc/nginx/conf.d/default.conf
# - neptune_static:${STATIC_ROOT}
# # volumes_from:
# # - tmp
# networks:
# - default
# - xf
# depends_on:
# app:
# condition: service_started
# tmp:
# image: busybox
# container_name: tmp_neptune
# command: chmod -R 777 /var/run/socks
# volumes:
# - /var/run/socks
redis:
image: redis
container_name: redis_neptune
command: redis-server /etc/redis.conf
# ulimits:
# nproc: 65535
# nofile:
# soft: 65535
# hard: 65535
volumes:
- ${PORTAINER_GIT_DIR}/docker/redis.conf:/etc/redis.conf
- neptune_redis_data:/data
- type: bind
source: /code/run
target: /var/run
# volumes_from:
# - tmp
healthcheck:
test: "redis-cli ping"
interval: 2s
timeout: 2s
retries: 15
# networks:
# - default
# - xf
# networks:
# default:
# driver: bridge
# xf:
# external: true
# db:
# external: true
volumes:
# neptune_static: {}
neptune_redis_data: {}
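
Every service above now bind-mounts /code/run from the host into /var/run and runs with host networking instead of the old bridge networks. A minimal pre-flight sketch (a hypothetical helper, not part of this change) to confirm the socket directory exists before `make run`:

import os
import sys

# Hypothetical pre-flight check: each service bind-mounts /code/run into
# /var/run, so the directory must already exist on the host.
RUN_DIR = "/code/run"
if not os.path.isdir(RUN_DIR):
    sys.exit(f"{RUN_DIR} is missing; create it so the redis/uwsgi sockets have somewhere to live")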

View File

@@ -1,194 +0,0 @@
version: "2.2"
services:
app:
image: pathogen/neptune:latest
container_name: neptune
build:
context: .
args:
OPERATION: ${OPERATION}
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- neptune_static:${STATIC_ROOT}
env_file:
- stack.env
volumes_from:
- tmp
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
networks:
- default
- pathogen
- elastic
processing:
image: pathogen/neptune:latest
container_name: processing_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py processing'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- neptune_static:${STATIC_ROOT}
env_file:
- stack.env
volumes_from:
- tmp
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
networks:
- default
- pathogen
- elastic
scheduling:
image: pathogen/neptune:latest
container_name: scheduling_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py scheduling'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini
- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- neptune_static:${STATIC_ROOT}
env_file:
- stack.env
volumes_from:
- tmp
depends_on:
redis:
condition: service_healthy
migration:
condition: service_started
collectstatic:
condition: service_started
networks:
- default
- pathogen
- elastic
migration:
image: pathogen/neptune:latest
container_name: migration_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py migrate --noinput'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- neptune_static:${STATIC_ROOT}
volumes_from:
- tmp
depends_on:
redis:
condition: service_healthy
collectstatic:
image: pathogen/neptune:latest
container_name: collectstatic_neptune
build:
context: .
args:
OPERATION: ${OPERATION}
command: sh -c '. /venv/bin/activate && python manage.py collectstatic --noinput'
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
- ${APP_DATABASE_FILE}:/conf/db.sqlite3
- neptune_static:${STATIC_ROOT}
volumes_from:
- tmp
env_file:
- stack.env
depends_on:
redis:
condition: service_healthy
nginx:
image: nginx:latest
container_name: nginx_neptune
ports:
- ${APP_PORT}:9999
ulimits:
nproc: 65535
nofile:
soft: 65535
hard: 65535
volumes:
- ${PORTAINER_GIT_DIR}:/code
- ${PORTAINER_GIT_DIR}/docker/nginx/conf.d/${OPERATION}.conf:/etc/nginx/conf.d/default.conf
- neptune_static:${STATIC_ROOT}
volumes_from:
- tmp
networks:
- default
- pathogen
depends_on:
app:
condition: service_started
tmp:
image: busybox
container_name: tmp_neptune
command: chmod -R 777 /var/run/socks
volumes:
- /var/run/socks
redis:
image: redis
container_name: redis_neptune
command: redis-server /etc/redis.conf
ulimits:
nproc: 65535
nofile:
soft: 65535
hard: 65535
volumes:
- ${PORTAINER_GIT_DIR}/docker/redis.conf:/etc/redis.conf
volumes_from:
- tmp
healthcheck:
test: "redis-cli -s /var/run/socks/redis.sock ping"
interval: 2s
timeout: 2s
retries: 15
networks:
- default
- pathogen
networks:
default:
driver: bridge
pathogen:
external: true
elastic:
external: true
volumes:
neptune_static: {}

View File

@@ -1,5 +1,5 @@
unixsocket /var/run/socks/redis.sock
unixsocket /var/run/neptune-redis.sock
unixsocketperm 777
# For Monolith PubSub
port 6379
port 0
# port 6379
# requirepass changeme
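
The server now listens on the /var/run/neptune-redis.sock unix socket, with TCP port 6379 kept open for Monolith PubSub. A minimal connectivity sketch using the redis package already in requirements.txt; the socket path assumes the /code/run-to-/var/run bind mount from docker-compose.prod.yml:

import redis

# Connect over the unix socket the new config exposes; db=1 mirrors the
# REDIS_DB default in stack.env. Assumes this runs inside a container
# that bind-mounts /code/run to /var/run.
r = redis.Redis(unix_socket_path="/var/run/neptune-redis.sock", db=1)
assert r.ping()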

View File

@@ -4,9 +4,19 @@ module=app.wsgi:application
env=DJANGO_SETTINGS_MODULE=app.settings
master=1
pidfile=/tmp/project-master.pid
socket=0.0.0.0:8000
#socket=0.0.0.0:8000
socket=/var/run/uwsgi-neptune.sock
# Make the socket world-writable
chmod-socket=777
harakiri=20
max-requests=100000
#max-requests=100000
# Set a lower value for max-requests to prevent memory leaks from building up over time
max-requests=1000
# Ensure old worker processes are cleaned up properly
reload-on-as=512
reload-on-rss=256
vacuum=1
home=/venv
processes=12
processes=4
threads=2
log-level=debug
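
reload-on-as and reload-on-rss are expressed in megabytes, so a worker is recycled once its address space tops 512 MB or its resident set tops 256 MB, which backs up the lower max-requests value. A rough sketch of the quantity involved (peak RSS, Linux units):

import resource

# ru_maxrss is this process's peak resident set size; Linux reports KiB.
# uWSGI applies a comparable limit to each worker's current RSS and
# recycles the worker once it passes reload-on-rss=256 (MB).
peak_rss_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
print(f"peak RSS so far: {peak_rss_mb:.0f} MB")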

View File

@@ -26,3 +26,5 @@ git+https://git.zm.is/XF/django-crud-mixins
redis
hiredis
django-cachalot
django_redis
httpx
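
django_redis pairs with the django-cachalot entry above; a sketch of the cache wiring it enables (the socket path and the REDIS_DB_CACHE default are assumptions drawn from the other files in this change, not confirmed app code):

from os import getenv

# Hypothetical local_settings.py fragment: point Django's cache at the
# new unix socket, using the cache-specific database number.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"unix:///var/run/neptune-redis.sock?db={getenv('REDIS_DB_CACHE', '10')}",
    }
}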

View File

@@ -1,6 +1,86 @@
# General application settings
APP_PORT=5000
PORTAINER_GIT_DIR=.
APP_LOCAL_SETTINGS=./app/local_settings.py
APP_DATABASE_FILE=./db.sqlite3
STATIC_ROOT=/conf/static
OPERATION=dev
STATIC_ROOT=/code/static
OPERATION=uwsgi
# Elasticsearch settings
ELASTICSEARCH_URL=10.1.0.1
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_TLS=True
ELASTICSEARCH_USERNAME=admin
ELASTICSEARCH_PASSWORD=secret
# Manticore settings
MANTICORE_URL=http://127.0.0.1:9308
# Database settings
DB_BACKEND=MANTICORE
INDEX_MAIN=main
INDEX_RESTRICTED=restricted
INDEX_META=meta
INDEX_INT=internal
INDEX_RULE_STORAGE=rule_storage
MAIN_SIZES=1,5,15,30,50,100,250,500,1000
MAIN_SIZES_ANON=1,5,15,30,50,100
MAIN_SOURCES=dis,4ch,all
SOURCES_RESTRICTED=irc
CACHE=True
CACHE_TIMEOUT=2
# Drilldown settings
DRILLDOWN_RESULTS_PER_PAGE=15
DRILLDOWN_DEFAULT_SIZE=15
DRILLDOWN_DEFAULT_INDEX=main
DRILLDOWN_DEFAULT_SORTING=desc
DRILLDOWN_DEFAULT_SOURCE=all
# URLs
DOMAIN=spy.zm.is
URL=https://spy.zm.is
# Access control
ALLOWED_HOSTS=spy.zm.is
# CSRF
CSRF_TRUSTED_ORIGINS=https://spy.zm.is
# Stripe settings
BILLING_ENABLED=False
STRIPE_TEST=True
STRIPE_API_KEY_TEST=
STRIPE_PUBLIC_API_KEY_TEST=
STRIPE_API_KEY_PROD=
STRIPE_PUBLIC_API_KEY_PROD=
STRIPE_ENDPOINT_SECRET=
STRIPE_ADMIN_COUPON=
# Threshold settings
THRESHOLD_ENDPOINT=http://threshold:13869
THRESHOLD_API_KEY=api_1
THRESHOLD_API_TOKEN=
THRESHOLD_API_COUNTER=
# NickTrace settings
NICKTRACE_MAX_ITERATIONS=4
NICKTRACE_MAX_CHUNK_SIZE=500
NICKTRACE_QUERY_SIZE=10000
# Meta settings
META_MAX_ITERATIONS=4
META_MAX_CHUNK_SIZE=500
META_QUERY_SIZE=10000
# Debugging and profiling
DEBUG=n
PROFILER=False
# Redis settings
REDIS_HOST=redis_neptune
REDIS_PASSWORD=changeme
REDIS_DB=1
REDIS_DB_CACHE=10
REDIS_PORT=6379
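
Several of the Stripe and Threshold keys above are intentionally left blank; a small hypothetical check to surface unset values before a deploy:

from pathlib import Path

# Hypothetical helper: list stack.env entries that are still empty so a
# deploy with BILLING_ENABLED=True does not ship blank Stripe keys.
for line in Path("stack.env").read_text().splitlines():
    if line and not line.startswith("#") and line.endswith("="):
        print(f"unset: {line[:-1]}")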