diff --git a/Makefile b/Makefile
index 5b10324..c683cd3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,20 +1,20 @@
 run:
-	docker-compose --env-file=stack.env up -d
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env up -d
 
 build:
-	docker-compose --env-file=stack.env build
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env build
 
 stop:
-	docker-compose --env-file=stack.env down
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env down
 
 log:
-	docker-compose --env-file=stack.env logs -f
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env logs -f
 
 migrate:
-	docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py migrate"
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py migrate"
 
 makemigrations:
-	docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py makemigrations"
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py makemigrations"
 
 auth:
-	docker-compose --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py createsuperuser"
+	docker-compose -f docker-compose.prod.yml --env-file=stack.env run --rm app sh -c ". /venv/bin/activate && python manage.py createsuperuser"
diff --git a/app/local_settings.example.py b/app/local_settings.example.py
index ab02dd5..83e17fe 100644
--- a/app/local_settings.example.py
+++ b/app/local_settings.example.py
@@ -1,3 +1,5 @@
+from os import getenv
+
 # Elasticsearch settings
 ELASTICSEARCH_URL = "10.1.0.1"
 ELASTICSEARCH_PORT = 9200
@@ -104,3 +106,9 @@ META_QUERY_SIZE = 10000
 
 DEBUG = True
 PROFILER = False
+
+REDIS_HOST = getenv("REDIS_HOST", "redis_neptune_dev")
+REDIS_PASSWORD = getenv("REDIS_PASSWORD", "changeme")
+REDIS_DB = int(getenv("REDIS_DB", "1"))
+REDIS_DB_CACHE = int(getenv("REDIS_DB_CACHE", "10"))
+REDIS_PORT = int(getenv("REDIS_PORT", "6379"))
\ No newline at end of file
diff --git a/app/local_settings.py b/app/local_settings.py
new file mode 100644
index 0000000..8fda441
--- /dev/null
+++ b/app/local_settings.py
@@ -0,0 +1,87 @@
+from os import getenv
+
+trues = ("t", "true", "yes", "y", "1")
+
+# Elasticsearch settings
+ELASTICSEARCH_URL = getenv("ELASTICSEARCH_URL", "10.1.0.1")
+ELASTICSEARCH_PORT = int(getenv("ELASTICSEARCH_PORT", "9200"))
+ELASTICSEARCH_TLS = getenv("ELASTICSEARCH_TLS", "True").lower() in trues
+ELASTICSEARCH_USERNAME = getenv("ELASTICSEARCH_USERNAME", "admin")
+ELASTICSEARCH_PASSWORD = getenv("ELASTICSEARCH_PASSWORD", "secret")
+
+# Manticore settings
+MANTICORE_URL = getenv("MANTICORE_URL", "http://example-db-1:9308")
+
+DB_BACKEND = getenv("DB_BACKEND", "MANTICORE")
+
+# Common DB settings
+INDEX_MAIN = getenv("INDEX_MAIN", "main")
+INDEX_RESTRICTED = getenv("INDEX_RESTRICTED", "restricted")
+INDEX_META = getenv("INDEX_META", "meta")
+INDEX_INT = getenv("INDEX_INT", "internal")
+INDEX_RULE_STORAGE = getenv("INDEX_RULE_STORAGE", "rule_storage")
+
+MAIN_SIZES = getenv("MAIN_SIZES", "1,5,15,30,50,100,250,500,1000").split(",")
+MAIN_SIZES_ANON = getenv("MAIN_SIZES_ANON", "1,5,15,30,50,100").split(",")
+MAIN_SOURCES = getenv("MAIN_SOURCES", "dis,4ch,all").split(",")
+SOURCES_RESTRICTED = getenv("SOURCES_RESTRICTED", "irc").split(",")
+CACHE = getenv("CACHE", "False").lower() in trues
+CACHE_TIMEOUT = int(getenv("CACHE_TIMEOUT", "2"))
+
+DRILLDOWN_RESULTS_PER_PAGE = int(getenv("DRILLDOWN_RESULTS_PER_PAGE", "15"))
+DRILLDOWN_DEFAULT_PARAMS = {
+    "size": getenv("DRILLDOWN_DEFAULT_SIZE",
"15"), + "index": getenv("DRILLDOWN_DEFAULT_INDEX", "main"), + "sorting": getenv("DRILLDOWN_DEFAULT_SORTING", "desc"), + "source": getenv("DRILLDOWN_DEFAULT_SOURCE", "all"), +} + +# URLs +DOMAIN = getenv("DOMAIN", "example.com") +URL = getenv("URL", f"https://{DOMAIN}") + +# Access control +ALLOWED_HOSTS = getenv("ALLOWED_HOSTS", f"127.0.0.1,{DOMAIN}").split(",") + +# CSRF +CSRF_TRUSTED_ORIGINS = getenv("CSRF_TRUSTED_ORIGINS", URL).split(",") + +# Stripe +BILLING_ENABLED = getenv("BILLING_ENABLED", "false").lower() in trues +STRIPE_TEST = getenv("STRIPE_TEST", "True").lower() in trues +STRIPE_API_KEY_TEST = getenv("STRIPE_API_KEY_TEST", "") +STRIPE_PUBLIC_API_KEY_TEST = getenv("STRIPE_PUBLIC_API_KEY_TEST", "") +STRIPE_API_KEY_PROD = getenv("STRIPE_API_KEY_PROD", "") +STRIPE_PUBLIC_API_KEY_PROD = getenv("STRIPE_PUBLIC_API_KEY_PROD", "") +STRIPE_ENDPOINT_SECRET = getenv("STRIPE_ENDPOINT_SECRET", "") +STATIC_ROOT = getenv("STATIC_ROOT", "") +SECRET_KEY = getenv("SECRET_KEY", "a") +STRIPE_ADMIN_COUPON = getenv("STRIPE_ADMIN_COUPON", "") + +# Threshold +THRESHOLD_ENDPOINT = getenv("THRESHOLD_ENDPOINT", "http://threshold:13869") +THRESHOLD_API_KEY = getenv("THRESHOLD_API_KEY", "api_1") +THRESHOLD_API_TOKEN = getenv("THRESHOLD_API_TOKEN", "") +THRESHOLD_API_COUNTER = getenv("THRESHOLD_API_COUNTER", "") + +# NickTrace +NICKTRACE_MAX_ITERATIONS = int(getenv("NICKTRACE_MAX_ITERATIONS", "4")) +NICKTRACE_MAX_CHUNK_SIZE = int(getenv("NICKTRACE_MAX_CHUNK_SIZE", "500")) +NICKTRACE_QUERY_SIZE = int(getenv("NICKTRACE_QUERY_SIZE", "10000")) + +# Meta +META_MAX_ITERATIONS = int(getenv("META_MAX_ITERATIONS", "4")) +META_MAX_CHUNK_SIZE = int(getenv("META_MAX_CHUNK_SIZE", "500")) +META_QUERY_SIZE = int(getenv("META_QUERY_SIZE", "10000")) + +DEBUG = getenv("DEBUG", "True").lower() in trues +PROFILER = getenv("PROFILER", "False").lower() in trues + +REDIS_HOST = getenv("REDIS_HOST", "redis_neptune_dev") +REDIS_PASSWORD = getenv("REDIS_PASSWORD", "changeme") +REDIS_DB = int(getenv("REDIS_DB", "1")) +REDIS_DB_CACHE = int(getenv("REDIS_DB_CACHE", "10")) +REDIS_PORT = int(getenv("REDIS_PORT", "6379")) + +# Elasticsearch blacklist +ELASTICSEARCH_BLACKLISTED = {} diff --git a/app/settings.py b/app/settings.py index fe1878e..4878b6e 100644 --- a/app/settings.py +++ b/app/settings.py @@ -47,19 +47,6 @@ INSTALLED_APPS = [ "cachalot", ] -# Performance optimisations -CACHES = { - "default": { - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "unix:///var/run/socks/redis.sock", - "OPTIONS": { - "db": "10", - # "parser_class": "django_redis.cache.RedisCache", - "pool_class": "redis.BlockingConnectionPool", - }, - } -} - CRISPY_TEMPLATE_PACK = "bulma" CRISPY_ALLOWED_TEMPLATE_PACKS = ("bulma",) DJANGO_TABLES2_TEMPLATE = "django-tables2/bulma.html" @@ -163,7 +150,7 @@ REST_FRAMEWORK = { INTERNAL_IPS = [ "127.0.0.1", - "10.1.10.11", + # "10.1.10.11", ] DEBUG_TOOLBAR_PANELS = [ @@ -187,6 +174,21 @@ DEBUG_TOOLBAR_PANELS = [ from app.local_settings import * # noqa +# Performance optimisations +CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + # "LOCATION": "unix:///var/run/socks/redis.sock", + "LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}", + "OPTIONS": { + "db": REDIS_DB_CACHE, + # "parser_class": "django_redis.cache.RedisCache", + "PASSWORD": REDIS_PASSWORD, + "pool_class": "redis.BlockingConnectionPool", + }, + } +} + if PROFILER: # noqa - trust me its there import pyroscope diff --git a/app/urls.py b/app/urls.py index 2a9b15d..fb82682 100644 --- a/app/urls.py +++ b/app/urls.py @@ -58,6 +58,9 
@@ from core.views.manage.threshold.threshold import (
     ThresholdIRCOverview,
 )
 
+# Stats
+from core.views.manage.monolith import stats
+
 # Main tool pages
 from core.views.ui.drilldown import (  # DrilldownTableView,; Drilldown,
     DrilldownContextModal,
@@ -311,4 +314,14 @@ urlpatterns = [
         notifications.RuleClear.as_view(),
         name="rule_clear",
     ),
+    path(
+        "manage/monolith/stats/",
+        stats.MonolithStats.as_view(),
+        name="monolith_stats",
+    ),
+    path(
+        "manage/monolith/stats_db/<str:type>/",
+        stats.MonolithDBStats.as_view(),
+        name="monolith_stats_db",
+    ),
 ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
diff --git a/core/__init__.py b/core/__init__.py
index ce498ab..795035f 100644
--- a/core/__init__.py
+++ b/core/__init__.py
@@ -7,7 +7,12 @@ from redis import StrictRedis
 
 os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
 
-r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0)
+r = StrictRedis(
+    host=settings.REDIS_HOST,
+    port=settings.REDIS_PORT,
+    password=settings.REDIS_PASSWORD,
+    db=settings.REDIS_DB,
+)
 
 if settings.STRIPE_TEST:
     stripe.api_key = settings.STRIPE_API_KEY_TEST
diff --git a/core/db/__init__.py b/core/db/__init__.py
index 0c1f716..a7f5ac5 100644
--- a/core/db/__init__.py
+++ b/core/db/__init__.py
@@ -168,6 +168,71 @@ class StorageBackend(ABC):
         # Actually get rid of all the things we set to None
         response["hits"]["hits"] = [hit for hit in response["hits"]["hits"] if hit]
 
+    def add_bool(self, search_query, add_bool):
+        """
+        Add the specified boolean matches to the search query.
+        """
+        if not add_bool:
+            return
+        for item in add_bool:
+            search_query["query"]["bool"]["must"].append({"match_phrase": item})
+
+    def add_top(self, search_query, add_top, negative=False):
+        """
+        Merge add_top with the base of the search_query.
+        """
+        if not add_top:
+            return
+        if negative:
+            for item in add_top:
+                if "must_not" in search_query["query"]["bool"]:
+                    search_query["query"]["bool"]["must_not"].append(item)
+                else:
+                    search_query["query"]["bool"]["must_not"] = [item]
+        else:
+            for item in add_top:
+                if "query" not in search_query:
+                    search_query["query"] = {"bool": {"must": []}}
+                search_query["query"]["bool"]["must"].append(item)
+
+    def schedule_check_aggregations(self, rule_object, result_map):
+        """
+        Check the results of a scheduled query for aggregations.
+ """ + if rule_object.aggs is None: + return result_map + for index, (meta, result) in result_map.items(): + # Default to true, if no aggs are found, we still want to match + match = True + for agg_name, (operator, number) in rule_object.aggs.items(): + if agg_name in meta["aggs"]: + agg_value = meta["aggs"][agg_name]["value"] + + # TODO: simplify this, match is default to True + if operator == ">": + if agg_value > number: + match = True + else: + match = False + elif operator == "<": + if agg_value < number: + match = True + else: + match = False + elif operator == "=": + if agg_value == number: + match = True + else: + match = False + else: + match = False + else: + # No aggregation found, but it is required + match = False + result_map[index][0]["aggs"][agg_name]["match"] = match + + return result_map + def query(self, user, search_query, **kwargs): # For time tracking start = time.process_time() @@ -188,6 +253,7 @@ class StorageBackend(ABC): "took": time_took_rounded, "cache": True, } + print("S2", search_query) response = self.run_query(user, search_query, **kwargs) # For Elasticsearch @@ -198,7 +264,20 @@ class StorageBackend(ABC): if "took" in response: if response["took"] is None: return None - if len(response["hits"]["hits"]) == 0: + if "error" in response: + message = f"Error: {response['error']}" + message_class = "danger" + time_took = (time.process_time() - start) * 1000 + # Round to 3 significant figures + time_took_rounded = round( + time_took, 3 - int(floor(log10(abs(time_took)))) - 1 + ) + return { + "message": message, + "class": message_class, + "took": time_took_rounded, + } + elif len(response["hits"]["hits"]) == 0: message = "No results." message_class = "danger" time_took = (time.process_time() - start) * 1000 @@ -213,7 +292,7 @@ class StorageBackend(ABC): } # For Druid - if "error" in response: + elif "error" in response: if "errorMessage" in response: context = { "message": response["errorMessage"], @@ -240,6 +319,106 @@ class StorageBackend(ABC): time_took_rounded = round(time_took, 3 - int(floor(log10(abs(time_took)))) - 1) return {"object_list": response_parsed, "took": time_took_rounded} + def construct_context_query( + self, index, net, channel, src, num, size, type=None, nicks=None + ): + # Get the initial query + query = self.construct_query(None, size, blank=True) + + extra_must = [] + extra_should = [] + extra_should2 = [] + if num: + extra_must.append({"match_phrase": {"num": num}}) + if net: + extra_must.append({"match_phrase": {"net": net}}) + if channel: + extra_must.append({"match": {"channel": channel}}) + if nicks: + for nick in nicks: + extra_should2.append({"match": {"nick": nick}}) + + types = ["msg", "notice", "action", "kick", "topic", "mode"] + fields = [ + "nick", + "ident", + "host", + "channel", + "ts", + "msg", + "type", + "net", + "src", + "tokens", + ] + query["fields"] = fields + + if index == "internal": + fields.append("mtype") + if channel == "*status" or type == "znc": + if {"match": {"channel": channel}} in extra_must: + extra_must.remove({"match": {"channel": channel}}) + extra_should2 = [] + # Type is one of msg or notice + # extra_should.append({"match": {"mtype": "msg"}}) + # extra_should.append({"match": {"mtype": "notice"}}) + extra_should.append({"match": {"type": "znc"}}) + extra_should.append({"match": {"type": "self"}}) + + extra_should2.append({"match": {"type": "znc"}}) + extra_should2.append({"match": {"nick": channel}}) + elif type == "auth": + if {"match": {"channel": channel}} in extra_must: + 
extra_must.remove({"match": {"channel": channel}}) + extra_should2 = [] + extra_should2.append({"match": {"nick": channel}}) + # extra_should2.append({"match": {"mtype": "msg"}}) + # extra_should2.append({"match": {"mtype": "notice"}}) + + extra_should.append({"match": {"type": "query"}}) + extra_should2.append({"match": {"type": "self"}}) + extra_should.append({"match": {"nick": channel}}) + else: + for ctype in types: + extra_should.append({"match": {"mtype": ctype}}) + else: + for ctype in types: + extra_should.append({"match": {"type": ctype}}) + # query = { + # "index": index, + # "limit": size, + # "query": { + # "bool": { + # "must": [ + # # {"equals": {"src": src}}, + # # { + # # "bool": { + # # "should": [*extra_should], + # # } + # # }, + # # { + # # "bool": { + # # "should": [*extra_should2], + # # } + # # }, + # *extra_must, + # ] + # } + # }, + # "fields": fields, + # # "_source": False, + # } + if extra_must: + for x in extra_must: + query["query"]["bool"]["must"].append(x) + if extra_should: + query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should]}}) + if extra_should2: + query["query"]["bool"]["must"].append( + {"bool": {"should": [*extra_should2]}} + ) + return query + @abstractmethod def query_results(self, **kwargs): pass diff --git a/core/db/elastic.py b/core/db/elastic.py index 002fe35..c5ff940 100644 --- a/core/db/elastic.py +++ b/core/db/elastic.py @@ -374,44 +374,6 @@ class ElasticsearchBackend(StorageBackend): return search_query - def schedule_check_aggregations(self, rule_object, result_map): - """ - Check the results of a scheduled query for aggregations. - """ - if rule_object.aggs is None: - return result_map - for index, (meta, result) in result_map.items(): - # Default to true, if no aggs are found, we still want to match - match = True - for agg_name, (operator, number) in rule_object.aggs.items(): - if agg_name in meta["aggs"]: - agg_value = meta["aggs"][agg_name]["value"] - - # TODO: simplify this, match is default to True - if operator == ">": - if agg_value > number: - match = True - else: - match = False - elif operator == "<": - if agg_value < number: - match = True - else: - match = False - elif operator == "=": - if agg_value == number: - match = True - else: - match = False - else: - match = False - else: - # No aggregation found, but it is required - match = False - result_map[index][0]["aggs"][agg_name]["match"] = match - - return result_map - def schedule_query_results_test_sync(self, rule_object): """ Helper to run a scheduled query test with reduced functionality. 
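Reviewer note: the schedule_check_aggregations chain removed from the Elasticsearch backend above now lives on the shared StorageBackend (see core/db/__init__.py earlier in this patch) and still carries its "TODO: simplify this, match is default to True" marker. A minimal sketch of one possible simplification, assuming the same rule_object.aggs shape of name -> (operator, number) tuples; the helper name aggs_match is hypothetical and this is an illustration, not part of the patch:

import operator

# Map the rule's comparison symbols onto the stdlib comparison functions.
OPS = {">": operator.gt, "<": operator.lt, "=": operator.eq}

def aggs_match(aggs, meta):
    """Return True only if every required aggregation is present and passes."""
    for agg_name, (op, number) in aggs.items():
        if agg_name not in meta["aggs"]:
            return False  # a required aggregation is missing
        compare = OPS.get(op)
        if compare is None or not compare(meta["aggs"][agg_name]["value"], number):
            return False
    return True

This collapses the if/elif ladder into a table lookup while keeping the same semantics: an unknown operator or a missing aggregation fails the match.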
diff --git a/core/db/manticore.py b/core/db/manticore.py index b884998..f897af8 100644 --- a/core/db/manticore.py +++ b/core/db/manticore.py @@ -1,12 +1,24 @@ import logging from datetime import datetime from pprint import pprint +import httpx +import orjson import requests from django.conf import settings from core.db import StorageBackend, add_defaults, dedup_list -from core.db.processing import annotate_results, parse_results +from core.db.processing import parse_results +from core.lib.parsing import ( + QueryError, + parse_date_time, + parse_index, + parse_rule, + parse_sentiment, + parse_size, + parse_sort, + parse_source, +) logger = logging.getLogger(__name__) @@ -21,29 +33,113 @@ class ManticoreBackend(StorageBackend): """ pass # we use requests - def construct_query(self, query, size, index, blank=False): + async def async_initialise(self, **kwargs): + """ + Initialise the Manticore client in async mode + """ + pass # we use requests + + def delete_rule_entries(self, rule_id): + """ + Delete all entries for a given rule. + :param rule_id: The rule ID to delete. + """ + # TODO + + def construct_query(self, query, size=None, blank=False, **kwargs): """ Accept some query parameters and construct an OpenSearch query. """ if not size: size = 5 query_base = { - "index": index, + "index": kwargs.get("index"), "limit": size, "query": {"bool": {"must": []}}, } + print("BASE", query_base) query_string = { "query_string": query, } if not blank: query_base["query"]["bool"]["must"].append(query_string) return query_base + + def parse(self, response, **kwargs): + parsed = parse_results(response, **kwargs) + return parsed - def run_query(self, client, user, search_query): - response = requests.post( - f"{settings.MANTICORE_URL}/json/search", json=search_query - ) - return response + def run_query(self, user, search_query, **kwargs): + """ + Low level helper to run Manticore query. + """ + index = kwargs.get("index") + raw = kwargs.get("raw") + if search_query and not raw: + search_query["index"] = index + pprint(search_query) + + + path = kwargs.get("path", "json/search") + if raw: + response = requests.post( + f"{settings.MANTICORE_URL}/{path}", search_query + ) + else: + response = requests.post( + f"{settings.MANTICORE_URL}/{path}", json=search_query + ) + + return orjson.loads(response.text) + + async def async_run_query(self, user, search_query, **kwargs): + """ + Low level helper to run Manticore query asynchronously. + """ + index = kwargs.get("index") + search_query["index"] = index + pprint(search_query) + async with httpx.AsyncClient() as client: + response = await client.post( + f"{settings.MANTICORE_URL}/json/search", json=search_query + ) + return orjson.loads(response.text) + + async def async_store_matches(self, matches): + """ + Store a list of matches in Manticore. + :param index: The index to store the matches in. + :param matches: A list of matches to store. + """ + # TODO + + def store_matches(self, matches): + """ + Store a list of matches in Manticore. + :param index: The index to store the matches in. + :param matches: A list of matches to store. + """ + # TODO + + def prepare_schedule_query(self, rule_object): + """ + Helper to run a scheduled query with reduced functionality. + """ + # TODO + + def schedule_query_results_test_sync(self, rule_object): + """ + Helper to run a scheduled query test with reduced functionality. + Sync version for running from Django forms. + Does not return results. 
+ """ + # TODO + + async def schedule_query_results(self, rule_object): + """ + Helper to run a scheduled query with reduced functionality and async. + """ + # TODO def query_results( self, @@ -67,117 +163,77 @@ class ManticoreBackend(StorageBackend): query_created = False source = None add_defaults(query_params) - # Check size + + # Now, run the helpers for SIQTSRSS/ADR + # S - Size + # I - Index + # Q - Query + # T - Tags + # S - Source + # R - Ranges + # S - Sort + # S - Sentiment + # A - Annotate + # D - Dedup + # R - Reverse + + # S - Size if request.user.is_anonymous: - sizes = settings.MANTICORE_MAIN_SIZES_ANON + sizes = settings.MAIN_SIZES_ANON else: - sizes = settings.MANTICORE_MAIN_SIZES + sizes = settings.MAIN_SIZES if not size: - if "size" in query_params: - size = query_params["size"] - if size not in sizes: - message = "Size is not permitted" - message_class = "danger" - return {"message": message, "class": message_class} - size = int(size) - else: - size = 20 + size = parse_size(query_params, sizes) + if isinstance(size, dict): + return size + + rule_object = parse_rule(request.user, query_params) + if isinstance(rule_object, dict): + return rule_object - # Check index - if "index" in query_params: - index = query_params["index"] - if index == "main": - index = settings.MANTICORE_INDEX_MAIN - else: - if not request.user.has_perm(f"core.index_{index}"): - message = "Not permitted to search by this index" - message_class = "danger" - return { - "message": message, - "class": message_class, - } - if index == "meta": - index = settings.MANTICORE_INDEX_META - elif index == "internal": - index = settings.MANTICORE_INDEX_INT - else: - message = "Index is not valid." - message_class = "danger" - return { - "message": message, - "class": message_class, - } + if rule_object is not None: + index = settings.INDEX_RULE_STORAGE + add_bool.append({"rule_id": str(rule_object.id)}) else: - index = settings.MANTICORE_INDEX_MAIN + # I - Index + index = parse_index(request.user, query_params) + if isinstance(index, dict): + return index - # Create the search query - if "query" in query_params: - query = query_params["query"] - search_query = self.construct_query(query, size, index) - query_created = True - else: - if custom_query: - search_query = custom_query + # Q/T - Query/Tags + search_query = self.parse_query( + query_params, tags, size, custom_query, add_bool + ) + # Query should be a dict, so check if it contains message here + if "message" in search_query: + return search_query - if tags: - # Get a blank search query - if not query_created: - search_query = self.construct_query(None, size, index, blank=True) - query_created = True - for tagname, tagvalue in tags.items(): - add_bool.append({tagname: tagvalue}) + # S - Sources + sources = parse_source(request.user, query_params) + if isinstance(sources, dict): + return sources + total_count = len(sources) + # Total is -1 due to the "all" source + total_sources = ( + len(settings.MAIN_SOURCES) - 1 + len(settings.SOURCES_RESTRICTED) + ) - required_any = ["query_full", "query", "tags"] - if not any([field in query_params.keys() for field in required_any]): - if not custom_query: - message = "Empty query!" - message_class = "warning" - return {"message": message, "class": message_class} + # If the sources the user has access to are equal to all + # possible sources, then we don't need to add the source + # filter to the query. 
+ if total_count != total_sources: + add_top_tmp = {"bool": {"should": []}} + for source_iter in sources: + add_top_tmp["bool"]["should"].append( + {"match_phrase": {"src": source_iter}} + ) + if query_params["source"] != "all": + add_top.append(add_top_tmp) - # Check for a source - if "source" in query_params: - source = query_params["source"] - - if source in settings.SOURCES_RESTRICTED: - if not request.user.has_perm("core.restricted_sources"): - message = "Access denied" - message_class = "danger" - return {"message": message, "class": message_class} - elif source not in settings.MAIN_SOURCES: - message = "Invalid source" - message_class = "danger" - return {"message": message, "class": message_class} - - if source == "all": - source = None # the next block will populate it - - if source: - sources = [source] - else: - sources = list(settings.MAIN_SOURCES) - if request.user.has_perm("core.restricted_sources"): - for source_iter in settings.SOURCES_RESTRICTED: - sources.append(source_iter) - - add_top_tmp = {"bool": {"should": []}} - total_count = 0 - for source_iter in sources: - add_top_tmp["bool"]["should"].append({"equals": {"src": source_iter}}) - total_count += 1 - total_sources = len(settings.MAIN_SOURCES) + len(settings.SOURCES_RESTRICTED) - if not total_count == total_sources: - add_top.append(add_top_tmp) - - # Date/time range - if set({"from_date", "to_date", "from_time", "to_time"}).issubset( - query_params.keys() - ): - from_ts = f"{query_params['from_date']}T{query_params['from_time']}Z" - to_ts = f"{query_params['to_date']}T{query_params['to_time']}Z" - from_ts = datetime.strptime(from_ts, "%Y-%m-%dT%H:%MZ") - to_ts = datetime.strptime(to_ts, "%Y-%m-%dT%H:%MZ") - from_ts = int(from_ts.timestamp()) - to_ts = int(to_ts.timestamp()) + # R - Ranges + # date_query = False + from_ts, to_ts = parse_date_time(query_params) + if from_ts: range_query = { "range": { "ts": { @@ -188,115 +244,87 @@ class ManticoreBackend(StorageBackend): } add_top.append(range_query) - # Sorting - if "sorting" in query_params: - sorting = query_params["sorting"] - if sorting not in ("asc", "desc", "none"): - message = "Invalid sort" - message_class = "danger" - return {"message": message, "class": message_class} - if sorting in ("asc", "desc"): - sort = [ - { - "ts": { - "order": sorting, - } - } - ] + # S - Sort + sort = parse_sort(query_params) + if isinstance(sort, dict): + return sort - # Sentiment handling - if "check_sentiment" in query_params: - if "sentiment_method" not in query_params: - message = "No sentiment method" - message_class = "danger" - return {"message": message, "class": message_class} - if "sentiment" in query_params: - sentiment = query_params["sentiment"] - try: - sentiment = float(sentiment) - except ValueError: - message = "Sentiment is not a float" - message_class = "danger" - return {"message": message, "class": message_class} - sentiment_method = query_params["sentiment_method"] - range_query_compare = {"range": {"sentiment": {}}} + if rule_object is not None: + field = "match_ts" + else: + field = "ts" + if sort: + # For Druid compatibility + sort_map = {"ascending": "asc", "descending": "desc"} + sorting = [ + { + field: { + "order": sort_map[sort], + } + } + ] + search_query["sort"] = sorting + + # S - Sentiment + sentiment_r = parse_sentiment(query_params) + if isinstance(sentiment_r, dict): + return sentiment_r + if sentiment_r: + if rule_object is not None: + sentiment_index = "meta.aggs.avg_sentiment.value" + else: + sentiment_index = "sentiment" + 
sentiment_method, sentiment = sentiment_r + range_query_compare = {"range": {sentiment_index: {}}} range_query_precise = { "match": { - "sentiment": None, + sentiment_index: None, } } if sentiment_method == "below": - range_query_compare["range"]["sentiment"]["lt"] = sentiment + range_query_compare["range"][sentiment_index]["lt"] = sentiment add_top.append(range_query_compare) elif sentiment_method == "above": - range_query_compare["range"]["sentiment"]["gt"] = sentiment + range_query_compare["range"][sentiment_index]["gt"] = sentiment add_top.append(range_query_compare) elif sentiment_method == "exact": - range_query_precise["match"]["sentiment"] = sentiment + range_query_precise["match"][sentiment_index] = sentiment add_top.append(range_query_precise) elif sentiment_method == "nonzero": - range_query_precise["match"]["sentiment"] = 0 + range_query_precise["match"][sentiment_index] = 0 add_top_negative.append(range_query_precise) - if add_bool: - # if "bool" not in search_query["query"]: - # search_query["query"]["bool"] = {} - # if "must" not in search_query["query"]["bool"]: - # search_query["query"]["bool"] = {"must": []} + # Add in the additional information we already populated + self.add_bool(search_query, add_bool) + self.add_top(search_query, add_top) + self.add_top(search_query, add_top_negative, negative=True) - for item in add_bool: - search_query["query"]["bool"]["must"].append({"match": item}) - - if add_top: - for item in add_top: - search_query["query"]["bool"]["must"].append(item) - if add_top_negative: - for item in add_top_negative: - if "must_not" in search_query["query"]["bool"]: - search_query["query"]["bool"]["must_not"].append(item) - else: - search_query["query"]["bool"]["must_not"] = [item] - if sort: - search_query["sort"] = sort - - pprint(search_query) - results = self.run_query( - self.client, - request.user, # passed through run_main_query to filter_blacklisted + response = self.query( + request.user, search_query, + index=index, ) - if not results: + if not response: message = "Error running query" message_class = "danger" return {"message": message, "class": message_class} + # results = results.to_dict() - if "error" in results: - message = results["error"] + if "error" in response: + message = response["error"] message_class = "danger" return {"message": message, "class": message_class} - results_parsed = parse_results(results) - if annotate: - annotate_results(results_parsed) - if "dedup" in query_params: - if query_params["dedup"] == "on": - dedup = True - else: - dedup = False - else: - dedup = False + if "message" in response: + return response - if reverse: - results_parsed = results_parsed[::-1] + # A/D/R - Annotate/Dedup/Reverse + response["object_list"] = self.process_results( + response["object_list"], + annotate=annotate, + dedup=dedup, + dedup_fields=dedup_fields, + reverse=reverse, + ) - if dedup: - if not dedup_fields: - dedup_fields = ["msg", "nick", "ident", "host", "net", "channel"] - results_parsed = dedup_list(results_parsed, dedup_fields) - context = { - "object_list": results_parsed, - "card": results["hits"]["total"], - "took": results["took"], - } - if "cache" in results: - context["cache"] = results["cache"] + context = response return context diff --git a/core/db/manticore_orig.py b/core/db/manticore_orig.py new file mode 100644 index 0000000..b884998 --- /dev/null +++ b/core/db/manticore_orig.py @@ -0,0 +1,302 @@ +import logging +from datetime import datetime +from pprint import pprint + +import requests +from django.conf import 
settings + +from core.db import StorageBackend, add_defaults, dedup_list +from core.db.processing import annotate_results, parse_results + +logger = logging.getLogger(__name__) + + +class ManticoreBackend(StorageBackend): + def __init__(self): + super().__init__("manticore") + + def initialise(self, **kwargs): + """ + Initialise the Manticore client + """ + pass # we use requests + + def construct_query(self, query, size, index, blank=False): + """ + Accept some query parameters and construct an OpenSearch query. + """ + if not size: + size = 5 + query_base = { + "index": index, + "limit": size, + "query": {"bool": {"must": []}}, + } + query_string = { + "query_string": query, + } + if not blank: + query_base["query"]["bool"]["must"].append(query_string) + return query_base + + def run_query(self, client, user, search_query): + response = requests.post( + f"{settings.MANTICORE_URL}/json/search", json=search_query + ) + return response + + def query_results( + self, + request, + query_params, + size=None, + annotate=True, + custom_query=False, + reverse=False, + dedup=False, + dedup_fields=None, + tags=None, + ): + query = None + message = None + message_class = None + add_bool = [] + add_top = [] + add_top_negative = [] + sort = None + query_created = False + source = None + add_defaults(query_params) + # Check size + if request.user.is_anonymous: + sizes = settings.MANTICORE_MAIN_SIZES_ANON + else: + sizes = settings.MANTICORE_MAIN_SIZES + if not size: + if "size" in query_params: + size = query_params["size"] + if size not in sizes: + message = "Size is not permitted" + message_class = "danger" + return {"message": message, "class": message_class} + size = int(size) + else: + size = 20 + + # Check index + if "index" in query_params: + index = query_params["index"] + if index == "main": + index = settings.MANTICORE_INDEX_MAIN + else: + if not request.user.has_perm(f"core.index_{index}"): + message = "Not permitted to search by this index" + message_class = "danger" + return { + "message": message, + "class": message_class, + } + if index == "meta": + index = settings.MANTICORE_INDEX_META + elif index == "internal": + index = settings.MANTICORE_INDEX_INT + else: + message = "Index is not valid." + message_class = "danger" + return { + "message": message, + "class": message_class, + } + else: + index = settings.MANTICORE_INDEX_MAIN + + # Create the search query + if "query" in query_params: + query = query_params["query"] + search_query = self.construct_query(query, size, index) + query_created = True + else: + if custom_query: + search_query = custom_query + + if tags: + # Get a blank search query + if not query_created: + search_query = self.construct_query(None, size, index, blank=True) + query_created = True + for tagname, tagvalue in tags.items(): + add_bool.append({tagname: tagvalue}) + + required_any = ["query_full", "query", "tags"] + if not any([field in query_params.keys() for field in required_any]): + if not custom_query: + message = "Empty query!" 
+ message_class = "warning" + return {"message": message, "class": message_class} + + # Check for a source + if "source" in query_params: + source = query_params["source"] + + if source in settings.SOURCES_RESTRICTED: + if not request.user.has_perm("core.restricted_sources"): + message = "Access denied" + message_class = "danger" + return {"message": message, "class": message_class} + elif source not in settings.MAIN_SOURCES: + message = "Invalid source" + message_class = "danger" + return {"message": message, "class": message_class} + + if source == "all": + source = None # the next block will populate it + + if source: + sources = [source] + else: + sources = list(settings.MAIN_SOURCES) + if request.user.has_perm("core.restricted_sources"): + for source_iter in settings.SOURCES_RESTRICTED: + sources.append(source_iter) + + add_top_tmp = {"bool": {"should": []}} + total_count = 0 + for source_iter in sources: + add_top_tmp["bool"]["should"].append({"equals": {"src": source_iter}}) + total_count += 1 + total_sources = len(settings.MAIN_SOURCES) + len(settings.SOURCES_RESTRICTED) + if not total_count == total_sources: + add_top.append(add_top_tmp) + + # Date/time range + if set({"from_date", "to_date", "from_time", "to_time"}).issubset( + query_params.keys() + ): + from_ts = f"{query_params['from_date']}T{query_params['from_time']}Z" + to_ts = f"{query_params['to_date']}T{query_params['to_time']}Z" + from_ts = datetime.strptime(from_ts, "%Y-%m-%dT%H:%MZ") + to_ts = datetime.strptime(to_ts, "%Y-%m-%dT%H:%MZ") + from_ts = int(from_ts.timestamp()) + to_ts = int(to_ts.timestamp()) + range_query = { + "range": { + "ts": { + "gt": from_ts, + "lt": to_ts, + } + } + } + add_top.append(range_query) + + # Sorting + if "sorting" in query_params: + sorting = query_params["sorting"] + if sorting not in ("asc", "desc", "none"): + message = "Invalid sort" + message_class = "danger" + return {"message": message, "class": message_class} + if sorting in ("asc", "desc"): + sort = [ + { + "ts": { + "order": sorting, + } + } + ] + + # Sentiment handling + if "check_sentiment" in query_params: + if "sentiment_method" not in query_params: + message = "No sentiment method" + message_class = "danger" + return {"message": message, "class": message_class} + if "sentiment" in query_params: + sentiment = query_params["sentiment"] + try: + sentiment = float(sentiment) + except ValueError: + message = "Sentiment is not a float" + message_class = "danger" + return {"message": message, "class": message_class} + sentiment_method = query_params["sentiment_method"] + range_query_compare = {"range": {"sentiment": {}}} + range_query_precise = { + "match": { + "sentiment": None, + } + } + if sentiment_method == "below": + range_query_compare["range"]["sentiment"]["lt"] = sentiment + add_top.append(range_query_compare) + elif sentiment_method == "above": + range_query_compare["range"]["sentiment"]["gt"] = sentiment + add_top.append(range_query_compare) + elif sentiment_method == "exact": + range_query_precise["match"]["sentiment"] = sentiment + add_top.append(range_query_precise) + elif sentiment_method == "nonzero": + range_query_precise["match"]["sentiment"] = 0 + add_top_negative.append(range_query_precise) + + if add_bool: + # if "bool" not in search_query["query"]: + # search_query["query"]["bool"] = {} + # if "must" not in search_query["query"]["bool"]: + # search_query["query"]["bool"] = {"must": []} + + for item in add_bool: + search_query["query"]["bool"]["must"].append({"match": item}) + + if add_top: + for item in 
add_top: + search_query["query"]["bool"]["must"].append(item) + if add_top_negative: + for item in add_top_negative: + if "must_not" in search_query["query"]["bool"]: + search_query["query"]["bool"]["must_not"].append(item) + else: + search_query["query"]["bool"]["must_not"] = [item] + if sort: + search_query["sort"] = sort + + pprint(search_query) + results = self.run_query( + self.client, + request.user, # passed through run_main_query to filter_blacklisted + search_query, + ) + if not results: + message = "Error running query" + message_class = "danger" + return {"message": message, "class": message_class} + # results = results.to_dict() + if "error" in results: + message = results["error"] + message_class = "danger" + return {"message": message, "class": message_class} + results_parsed = parse_results(results) + if annotate: + annotate_results(results_parsed) + if "dedup" in query_params: + if query_params["dedup"] == "on": + dedup = True + else: + dedup = False + else: + dedup = False + + if reverse: + results_parsed = results_parsed[::-1] + + if dedup: + if not dedup_fields: + dedup_fields = ["msg", "nick", "ident", "host", "net", "channel"] + results_parsed = dedup_list(results_parsed, dedup_fields) + context = { + "object_list": results_parsed, + "card": results["hits"]["total"], + "took": results["took"], + } + if "cache" in results: + context["cache"] = results["cache"] + return context diff --git a/core/db/processing.py b/core/db/processing.py index 77416a6..2d1b4b4 100644 --- a/core/db/processing.py +++ b/core/db/processing.py @@ -1,5 +1,5 @@ from datetime import datetime - +import ast from core.lib.threshold import annotate_num_chans, annotate_num_users, annotate_online @@ -92,6 +92,11 @@ def parse_results(results, meta=None): for field in list(element.keys()): if element[field] == "": del element[field] + # Unfold the tokens + if "tokens" in element: + if element["tokens"].startswith('["'): + tokens_parsed = ast.literal_eval(element["tokens"]) + element["tokens"] = tokens_parsed # Split the timestamp into date and time if "ts" not in element: diff --git a/core/lib/context.py b/core/lib/context.py index 53d31bd..d9fc58b 100644 --- a/core/lib/context.py +++ b/core/lib/context.py @@ -4,7 +4,7 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None): extra_should = [] extra_should2 = [] if num: - extra_must.append({"match_phrase": {"num": num}}) + extra_must.append({"equals": {"num": num}}) if net: extra_must.append({"match_phrase": {"net": net}}) if channel: @@ -52,7 +52,7 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None): extra_should.append({"match": {"nick": channel}}) else: for ctype in types: - extra_should.append({"match": {"mtype": ctype}}) + extra_should.append({"equals": {"mtype": ctype}}) else: for ctype in types: extra_should.append({"match": {"type": ctype}}) @@ -84,4 +84,6 @@ def construct_query(index, net, channel, src, num, size, type=None, nicks=None): query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should]}}) if extra_should2: query["query"]["bool"]["must"].append({"bool": {"should": [*extra_should2]}}) + + print("CONTEXT QUERY", query) return query diff --git a/core/lib/parsing.py b/core/lib/parsing.py index ec7d283..2d5076f 100644 --- a/core/lib/parsing.py +++ b/core/lib/parsing.py @@ -90,6 +90,8 @@ def parse_index(user, query_params, raise_error=False): } else: index = settings.INDEX_MAIN + + print("GOT INDEX", index) return index diff --git a/core/lib/rules.py b/core/lib/rules.py 
index 20faa42..89bc98d 100644 --- a/core/lib/rules.py +++ b/core/lib/rules.py @@ -335,7 +335,8 @@ class NotificationRuleData(object): if not isinstance(matches, list): matches = [matches] matches_copy = matches.copy() - match_ts = datetime.utcnow().isoformat() + # match_ts = datetime.utcnow().isoformat() + match_ts = int(datetime.utcnow().timestamp()) batch_id = uuid.uuid4() # Filter empty fields in meta diff --git a/core/management/commands/processing.py b/core/management/commands/processing.py index 88e261f..5e75b0b 100644 --- a/core/management/commands/processing.py +++ b/core/management/commands/processing.py @@ -1,5 +1,6 @@ import msgpack from django.core.management.base import BaseCommand +from django.conf import settings from redis import StrictRedis from core.db.storage import db @@ -93,7 +94,12 @@ def process_rules(data): class Command(BaseCommand): def handle(self, *args, **options): - r = StrictRedis(unix_socket_path="/var/run/socks/redis.sock", db=0) + r = StrictRedis( + host=settings.REDIS_HOST, + port=settings.REDIS_PORT, + password=settings.REDIS_PASSWORD, + db=settings.REDIS_DB + ) p = r.pubsub() p.psubscribe("messages") for message in p.listen(): diff --git a/core/models.py b/core/models.py index ffc40a3..699187c 100644 --- a/core/models.py +++ b/core/models.py @@ -78,8 +78,9 @@ class User(AbstractUser): """ Override the save function to create a Stripe customer. """ - if not self.stripe_id: # stripe ID not stored - self.stripe_id = get_or_create(self.email, self.first_name, self.last_name) + if settings.BILLING_ENABLED: + if not self.stripe_id: # stripe ID not stored + self.stripe_id = get_or_create(self.email, self.first_name, self.last_name) to_update = {} if self.email != self._original.email: @@ -89,14 +90,16 @@ class User(AbstractUser): if self.last_name != self._original.last_name: to_update["last_name"] = self.last_name - update_customer_fields(self.stripe_id, **to_update) + if settings.BILLING_ENABLED: + update_customer_fields(self.stripe_id, **to_update) super().save(*args, **kwargs) def delete(self, *args, **kwargs): - if self.stripe_id: - stripe.Customer.delete(self.stripe_id) - logger.info(f"Deleted Stripe customer {self.stripe_id}") + if settings.BILLING_ENABLED: + if self.stripe_id: + stripe.Customer.delete(self.stripe_id) + logger.info(f"Deleted Stripe customer {self.stripe_id}") super().delete(*args, **kwargs) def has_plan(self, plan): diff --git a/core/templates/base.html b/core/templates/base.html index 754d894..4eca79b 100644 --- a/core/templates/base.html +++ b/core/templates/base.html @@ -280,7 +280,7 @@ {% if user.is_superuser %} {% endif %} diff --git a/core/templates/manage/monolith/stats/index.html b/core/templates/manage/monolith/stats/index.html new file mode 100644 index 0000000..7e86044 --- /dev/null +++ b/core/templates/manage/monolith/stats/index.html @@ -0,0 +1,15 @@ +{% extends "base.html" %} +{% block content %} +
+
+
+
+
+
+{% endblock %} \ No newline at end of file diff --git a/core/templates/manage/monolith/stats/overview.html b/core/templates/manage/monolith/stats/overview.html new file mode 100644 index 0000000..7d5ee21 --- /dev/null +++ b/core/templates/manage/monolith/stats/overview.html @@ -0,0 +1,14 @@ +{% extends 'mixins/partials/generic-detail.html' %} + +{% block tbody %} + {% for item in object %} + {% if item.data %} + {% for row in item.data %} + + {{ row.Variable_name }} + {{ row.Value }} + + {% endfor %} + {% endif %} + {% endfor %} +{% endblock %} diff --git a/core/templates/partials/results_table.html b/core/templates/partials/results_table.html index 1d9ecf3..ea20efb 100644 --- a/core/templates/partials/results_table.html +++ b/core/templates/partials/results_table.html @@ -174,10 +174,11 @@ {% elif column.name == 'match_ts' %} - {% with match_ts=cell|splitstr:'T' %} + +

{{ match_ts }}

 {% elif column.name == 'type' or column.name == 'mtype' %}
diff --git a/core/templatetags/splitstr.py b/core/templatetags/splitstr.py
index 2519895..5781fb8 100644
--- a/core/templatetags/splitstr.py
+++ b/core/templatetags/splitstr.py
@@ -5,4 +5,6 @@ register = template.Library()
 
 @register.filter
 def splitstr(value, arg):
+    if isinstance(value, int):
+        raise Exception(f"Attempt to split {value} with separator {arg}")
     return value.split(arg)
diff --git a/core/views/manage/monolith/__init__.py b/core/views/manage/monolith/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/core/views/manage/monolith/stats.py b/core/views/manage/monolith/stats.py
new file mode 100644
index 0000000..5ac82e3
--- /dev/null
+++ b/core/views/manage/monolith/stats.py
@@ -0,0 +1,34 @@
+from django.shortcuts import render
+from django.views import View
+from core.db.storage import db
+from mixins.views import ObjectRead
+
+from core.views.manage.permissions import SuperUserRequiredMixin
+
+class MonolithStats(SuperUserRequiredMixin, View):
+    template_name = "manage/monolith/stats/index.html"
+
+    def get(self, request):
+        return render(request, self.template_name)
+
+class MonolithDBStats(SuperUserRequiredMixin, ObjectRead):
+    detail_template = "manage/monolith/stats/overview.html"
+
+    context_object_name_singular = "Status"
+    context_object_name = "Status"
+
+    detail_url_name = "monolith_stats_db"
+    detail_url_args = ["type"]
+
+    def get_object(self, **kwargs):
+        search_query = "SHOW TABLE main STATUS"
+
+        stats = db.run_query(
+            self.request.user,
+            search_query=search_query,
+            path="sql?mode=raw",
+            raw=True,
+            # method="get",
+        )
+
+        return stats
\ No newline at end of file
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
new file mode 100644
index 0000000..a8e9ffc
--- /dev/null
+++ b/docker-compose.prod.yml
@@ -0,0 +1,561 @@
+version: "2.2"
+
+services:
+  app:
+    image: pathogen/neptune:latest
+    container_name: neptune
+    build:
+      context: .
+ args: + OPERATION: ${OPERATION} + volumes: + - ${PORTAINER_GIT_DIR}:/code + - ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini + - ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py + - ${APP_DATABASE_FILE}:/conf/db.sqlite3 + - neptune_static:${STATIC_ROOT} + # env_file: + # - stack.env + environment: + # General application settings + APP_PORT: "${APP_PORT}" + PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}" + APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}" + APP_DATABASE_FILE: "${APP_DATABASE_FILE}" + STATIC_ROOT: "${STATIC_ROOT}" + OPERATION: "${OPERATION}" + # Elasticsearch settings + ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}" + ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}" + ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}" + ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}" + ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}" + # Manticore settings + MANTICORE_URL: "${MANTICORE_URL}" + # Database settings + DB_BACKEND: "${DB_BACKEND}" + INDEX_MAIN: "${INDEX_MAIN}" + INDEX_RESTRICTED: "${INDEX_RESTRICTED}" + INDEX_META: "${INDEX_META}" + INDEX_INT: "${INDEX_INT}" + INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}" + MAIN_SIZES: "${MAIN_SIZES}" + MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}" + MAIN_SOURCES: "${MAIN_SOURCES}" + SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}" + CACHE: "${CACHE}" + CACHE_TIMEOUT: "${CACHE_TIMEOUT}" + # Drilldown settings + DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}" + DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}" + DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}" + DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}" + DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}" + # URLs: "${# URLs}" + DOMAIN: "${DOMAIN}" + URL: "${URL}" + # Access control + ALLOWED_HOSTS: "${ALLOWED_HOSTS}" + # CSRF + CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}" + # Stripe settings + BILLING_ENABLED: "${BILLING_ENABLED}" + STRIPE_TEST: "${STRIPE_TEST}" + STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}" + STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}" + STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}" + STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}" + STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}" + STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}" + # Threshold settings + THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}" + THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}" + THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}" + THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}" + # NickTrace settings + NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}" + NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}" + NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}" + # Meta settings + META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}" + META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}" + META_QUERY_SIZE: "${META_QUERY_SIZE}" + # Debugging and profiling + DEBUG: "${DEBUG}" + PROFILER: "${PROFILER}" + # Redis settings + REDIS_HOST: "${REDIS_HOST}" + REDIS_PASSWORD: "${REDIS_PASSWORD}" + REDIS_DB: "${REDIS_DB}" + REDIS_DB_CACHE: "${REDIS_DB_CACHE}" + REDIS_PORT: "${REDIS_PORT}" + depends_on: + redis: + condition: service_healthy + migration: + condition: service_started + collectstatic: + condition: service_started + networks: + - default + - xf + - db + + processing: + image: pathogen/neptune:latest + container_name: processing_neptune + build: + context: . + args: + OPERATION: ${OPERATION} + command: sh -c '. 
/venv/bin/activate && python manage.py processing' + volumes: + - ${PORTAINER_GIT_DIR}:/code + - ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini + - ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py + - ${APP_DATABASE_FILE}:/conf/db.sqlite3 + - neptune_static:${STATIC_ROOT} + environment: + # General application settings + APP_PORT: "${APP_PORT}" + PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}" + APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}" + APP_DATABASE_FILE: "${APP_DATABASE_FILE}" + STATIC_ROOT: "${STATIC_ROOT}" + OPERATION: "${OPERATION}" + # Elasticsearch settings + ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}" + ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}" + ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}" + ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}" + ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}" + # Manticore settings + MANTICORE_URL: "${MANTICORE_URL}" + # Database settings + DB_BACKEND: "${DB_BACKEND}" + INDEX_MAIN: "${INDEX_MAIN}" + INDEX_RESTRICTED: "${INDEX_RESTRICTED}" + INDEX_META: "${INDEX_META}" + INDEX_INT: "${INDEX_INT}" + INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}" + MAIN_SIZES: "${MAIN_SIZES}" + MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}" + MAIN_SOURCES: "${MAIN_SOURCES}" + SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}" + CACHE: "${CACHE}" + CACHE_TIMEOUT: "${CACHE_TIMEOUT}" + # Drilldown settings + DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}" + DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}" + DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}" + DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}" + DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}" + # URLs: "${# URLs}" + DOMAIN: "${DOMAIN}" + URL: "${URL}" + # Access control + ALLOWED_HOSTS: "${ALLOWED_HOSTS}" + # CSRF + CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}" + # Stripe settings + BILLING_ENABLED: "${BILLING_ENABLED}" + STRIPE_TEST: "${STRIPE_TEST}" + STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}" + STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}" + STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}" + STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}" + STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}" + STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}" + # Threshold settings + THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}" + THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}" + THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}" + THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}" + # NickTrace settings + NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}" + NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}" + NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}" + # Meta settings + META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}" + META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}" + META_QUERY_SIZE: "${META_QUERY_SIZE}" + # Debugging and profiling + DEBUG: "${DEBUG}" + PROFILER: "${PROFILER}" + # Redis settings + REDIS_HOST: "${REDIS_HOST}" + REDIS_PASSWORD: "${REDIS_PASSWORD}" + REDIS_DB: "${REDIS_DB}" + REDIS_DB_CACHE: "${REDIS_DB_CACHE}" + REDIS_PORT: "${REDIS_PORT}" + # volumes_from: + # - tmp + depends_on: + redis: + condition: service_healthy + migration: + condition: service_started + collectstatic: + condition: service_started + networks: + - default + - xf + - db + + scheduling: + image: pathogen/neptune:latest + container_name: scheduling_neptune + build: + context: . + args: + OPERATION: ${OPERATION} + command: sh -c '. 
/venv/bin/activate && python manage.py scheduling' + volumes: + - ${PORTAINER_GIT_DIR}:/code + - ${PORTAINER_GIT_DIR}/docker/uwsgi.ini:/conf/uwsgi.ini + - ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py + - ${APP_DATABASE_FILE}:/conf/db.sqlite3 + - neptune_static:${STATIC_ROOT} + environment: + # General application settings + APP_PORT: "${APP_PORT}" + PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}" + APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}" + APP_DATABASE_FILE: "${APP_DATABASE_FILE}" + STATIC_ROOT: "${STATIC_ROOT}" + OPERATION: "${OPERATION}" + # Elasticsearch settings + ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}" + ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}" + ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}" + ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}" + ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}" + # Manticore settings + MANTICORE_URL: "${MANTICORE_URL}" + # Database settings + DB_BACKEND: "${DB_BACKEND}" + INDEX_MAIN: "${INDEX_MAIN}" + INDEX_RESTRICTED: "${INDEX_RESTRICTED}" + INDEX_META: "${INDEX_META}" + INDEX_INT: "${INDEX_INT}" + INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}" + MAIN_SIZES: "${MAIN_SIZES}" + MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}" + MAIN_SOURCES: "${MAIN_SOURCES}" + SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}" + CACHE: "${CACHE}" + CACHE_TIMEOUT: "${CACHE_TIMEOUT}" + # Drilldown settings + DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}" + DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}" + DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}" + DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}" + DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}" + # URLs: "${# URLs}" + DOMAIN: "${DOMAIN}" + URL: "${URL}" + # Access control + ALLOWED_HOSTS: "${ALLOWED_HOSTS}" + # CSRF + CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}" + # Stripe settings + BILLING_ENABLED: "${BILLING_ENABLED}" + STRIPE_TEST: "${STRIPE_TEST}" + STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}" + STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}" + STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}" + STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}" + STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}" + STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}" + # Threshold settings + THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}" + THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}" + THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}" + THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}" + # NickTrace settings + NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}" + NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}" + NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}" + # Meta settings + META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}" + META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}" + META_QUERY_SIZE: "${META_QUERY_SIZE}" + # Debugging and profiling + DEBUG: "${DEBUG}" + PROFILER: "${PROFILER}" + # Redis settings + REDIS_HOST: "${REDIS_HOST}" + REDIS_PASSWORD: "${REDIS_PASSWORD}" + REDIS_DB: "${REDIS_DB}" + REDIS_DB_CACHE: "${REDIS_DB_CACHE}" + REDIS_PORT: "${REDIS_PORT}" + # volumes_from: + # - tmp + depends_on: + redis: + condition: service_healthy + migration: + condition: service_started + collectstatic: + condition: service_started + networks: + - default + - xf + - db + + migration: + image: pathogen/neptune:latest + container_name: migration_neptune + build: + context: . + args: + OPERATION: ${OPERATION} + command: sh -c '. 
/venv/bin/activate && python manage.py migrate --noinput' + volumes: + - ${PORTAINER_GIT_DIR}:/code + - ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py + - ${APP_DATABASE_FILE}:/conf/db.sqlite3 + - neptune_static:${STATIC_ROOT} + environment: + # General application settings + APP_PORT: "${APP_PORT}" + PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}" + APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}" + APP_DATABASE_FILE: "${APP_DATABASE_FILE}" + STATIC_ROOT: "${STATIC_ROOT}" + OPERATION: "${OPERATION}" + # Elasticsearch settings + ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}" + ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}" + ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}" + ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}" + ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}" + # Manticore settings + MANTICORE_URL: "${MANTICORE_URL}" + # Database settings + DB_BACKEND: "${DB_BACKEND}" + INDEX_MAIN: "${INDEX_MAIN}" + INDEX_RESTRICTED: "${INDEX_RESTRICTED}" + INDEX_META: "${INDEX_META}" + INDEX_INT: "${INDEX_INT}" + INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}" + MAIN_SIZES: "${MAIN_SIZES}" + MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}" + MAIN_SOURCES: "${MAIN_SOURCES}" + SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}" + CACHE: "${CACHE}" + CACHE_TIMEOUT: "${CACHE_TIMEOUT}" + # Drilldown settings + DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}" + DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}" + DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}" + DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}" + DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}" + # URLs: "${# URLs}" + DOMAIN: "${DOMAIN}" + URL: "${URL}" + # Access control + ALLOWED_HOSTS: "${ALLOWED_HOSTS}" + # CSRF + CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}" + # Stripe settings + BILLING_ENABLED: "${BILLING_ENABLED}" + STRIPE_TEST: "${STRIPE_TEST}" + STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}" + STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}" + STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}" + STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}" + STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}" + STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}" + # Threshold settings + THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}" + THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}" + THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}" + THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}" + # NickTrace settings + NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}" + NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}" + NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}" + # Meta settings + META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}" + META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}" + META_QUERY_SIZE: "${META_QUERY_SIZE}" + # Debugging and profiling + DEBUG: "${DEBUG}" + PROFILER: "${PROFILER}" + # Redis settings + REDIS_HOST: "${REDIS_HOST}" + REDIS_PASSWORD: "${REDIS_PASSWORD}" + REDIS_DB: "${REDIS_DB}" + REDIS_DB_CACHE: "${REDIS_DB_CACHE}" + REDIS_PORT: "${REDIS_PORT}" + # volumes_from: + # - tmp + depends_on: + redis: + condition: service_healthy + networks: + - default + - xf + - db + + collectstatic: + image: pathogen/neptune:latest + container_name: collectstatic_neptune + build: + context: . + args: + OPERATION: ${OPERATION} + command: sh -c '. 
+    volumes:
+      - ${PORTAINER_GIT_DIR}:/code
+      - ${APP_LOCAL_SETTINGS}:/code/app/local_settings.py
+      - ${APP_DATABASE_FILE}:/conf/db.sqlite3
+      - neptune_static:${STATIC_ROOT}
+    # volumes_from:
+    #   - tmp
+    environment:
+      # General application settings
+      APP_PORT: "${APP_PORT}"
+      PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
+      APP_LOCAL_SETTINGS: "${APP_LOCAL_SETTINGS}"
+      APP_DATABASE_FILE: "${APP_DATABASE_FILE}"
+      STATIC_ROOT: "${STATIC_ROOT}"
+      OPERATION: "${OPERATION}"
+      # Elasticsearch settings
+      ELASTICSEARCH_URL: "${ELASTICSEARCH_URL}"
+      ELASTICSEARCH_PORT: "${ELASTICSEARCH_PORT}"
+      ELASTICSEARCH_TLS: "${ELASTICSEARCH_TLS}"
+      ELASTICSEARCH_USERNAME: "${ELASTICSEARCH_USERNAME}"
+      ELASTICSEARCH_PASSWORD: "${ELASTICSEARCH_PASSWORD}"
+      # Manticore settings
+      MANTICORE_URL: "${MANTICORE_URL}"
+      # Database settings
+      DB_BACKEND: "${DB_BACKEND}"
+      INDEX_MAIN: "${INDEX_MAIN}"
+      INDEX_RESTRICTED: "${INDEX_RESTRICTED}"
+      INDEX_META: "${INDEX_META}"
+      INDEX_INT: "${INDEX_INT}"
+      INDEX_RULE_STORAGE: "${INDEX_RULE_STORAGE}"
+      MAIN_SIZES: "${MAIN_SIZES}"
+      MAIN_SIZES_ANON: "${MAIN_SIZES_ANON}"
+      MAIN_SOURCES: "${MAIN_SOURCES}"
+      SOURCES_RESTRICTED: "${SOURCES_RESTRICTED}"
+      CACHE: "${CACHE}"
+      CACHE_TIMEOUT: "${CACHE_TIMEOUT}"
+      # Drilldown settings
+      DRILLDOWN_RESULTS_PER_PAGE: "${DRILLDOWN_RESULTS_PER_PAGE}"
+      DRILLDOWN_DEFAULT_SIZE: "${DRILLDOWN_DEFAULT_SIZE}"
+      DRILLDOWN_DEFAULT_INDEX: "${DRILLDOWN_DEFAULT_INDEX}"
+      DRILLDOWN_DEFAULT_SORTING: "${DRILLDOWN_DEFAULT_SORTING}"
+      DRILLDOWN_DEFAULT_SOURCE: "${DRILLDOWN_DEFAULT_SOURCE}"
+      # URLs
+      DOMAIN: "${DOMAIN}"
+      URL: "${URL}"
+      # Access control
+      ALLOWED_HOSTS: "${ALLOWED_HOSTS}"
+      # CSRF
+      CSRF_TRUSTED_ORIGINS: "${CSRF_TRUSTED_ORIGINS}"
+      # Stripe settings
+      BILLING_ENABLED: "${BILLING_ENABLED}"
+      STRIPE_TEST: "${STRIPE_TEST}"
+      STRIPE_API_KEY_TEST: "${STRIPE_API_KEY_TEST}"
+      STRIPE_PUBLIC_API_KEY_TEST: "${STRIPE_PUBLIC_API_KEY_TEST}"
+      STRIPE_API_KEY_PROD: "${STRIPE_API_KEY_PROD}"
+      STRIPE_PUBLIC_API_KEY_PROD: "${STRIPE_PUBLIC_API_KEY_PROD}"
+      STRIPE_ENDPOINT_SECRET: "${STRIPE_ENDPOINT_SECRET}"
+      STRIPE_ADMIN_COUPON: "${STRIPE_ADMIN_COUPON}"
+      # Threshold settings
+      THRESHOLD_ENDPOINT: "${THRESHOLD_ENDPOINT}"
+      THRESHOLD_API_KEY: "${THRESHOLD_API_KEY}"
+      THRESHOLD_API_TOKEN: "${THRESHOLD_API_TOKEN}"
+      THRESHOLD_API_COUNTER: "${THRESHOLD_API_COUNTER}"
+      # NickTrace settings
+      NICKTRACE_MAX_ITERATIONS: "${NICKTRACE_MAX_ITERATIONS}"
+      NICKTRACE_MAX_CHUNK_SIZE: "${NICKTRACE_MAX_CHUNK_SIZE}"
+      NICKTRACE_QUERY_SIZE: "${NICKTRACE_QUERY_SIZE}"
+      # Meta settings
+      META_MAX_ITERATIONS: "${META_MAX_ITERATIONS}"
+      META_MAX_CHUNK_SIZE: "${META_MAX_CHUNK_SIZE}"
+      META_QUERY_SIZE: "${META_QUERY_SIZE}"
+      # Debugging and profiling
+      DEBUG: "${DEBUG}"
+      PROFILER: "${PROFILER}"
+      # Redis settings
+      REDIS_HOST: "${REDIS_HOST}"
+      REDIS_PASSWORD: "${REDIS_PASSWORD}"
+      REDIS_DB: "${REDIS_DB}"
+      REDIS_DB_CACHE: "${REDIS_DB_CACHE}"
+      REDIS_PORT: "${REDIS_PORT}"
+    depends_on:
+      redis:
+        condition: service_healthy
+    networks:
+      - default
+      - xf
+      - db
+
+  nginx:
+    image: nginx:latest
+    container_name: nginx_neptune
+    ports:
+      - ${APP_PORT}:9999
+    ulimits:
+      nproc: 65535
+      nofile:
+        soft: 65535
+        hard: 65535
+    volumes:
+      - ${PORTAINER_GIT_DIR}:/code
+      - ${PORTAINER_GIT_DIR}/docker/nginx/conf.d/${OPERATION}.conf:/etc/nginx/conf.d/default.conf
+      - neptune_static:${STATIC_ROOT}
+    # volumes_from:
+    #   - tmp
+    networks:
+      - default
+      - xf
+    depends_on:
+      app:
+        condition: service_started
+
+  # tmp:
+  #   image: busybox
+  #   container_name: tmp_neptune
+  #   command: chmod -R 777 /var/run/socks
+  #   volumes:
+  #     - /var/run/socks
+
+  redis:
+    image: redis
+    container_name: redis_neptune
+    command: redis-server /etc/redis.conf
+    ulimits:
+      nproc: 65535
+      nofile:
+        soft: 65535
+        hard: 65535
+    volumes:
+      - ${PORTAINER_GIT_DIR}/docker/redis.conf:/etc/redis.conf
+      - neptune_redis_data:/data
+    # volumes_from:
+    #   - tmp
+    healthcheck:
+      test: "redis-cli ping"
+      interval: 2s
+      timeout: 2s
+      retries: 15
+    networks:
+      - default
+      - xf
+
+networks:
+  default:
+    driver: bridge
+  xf:
+    external: true
+  db:
+    external: true
+
+volumes:
+  neptune_static: {}
+  neptune_redis_data: {}
\ No newline at end of file
diff --git a/docker/redis.conf b/docker/redis.conf
index 7f886a2..b4acb37 100644
--- a/docker/redis.conf
+++ b/docker/redis.conf
@@ -1,5 +1,4 @@
-unixsocket /var/run/socks/redis.sock
-unixsocketperm 777
-
-# For Monolith PubSub
-port 6379
\ No newline at end of file
+# unixsocket /var/run/socks/redis.sock
+# unixsocketperm 777
+port 6379
+requirepass changeme
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ede10dc..2a28de9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,3 +27,4 @@ redis
 hiredis
 django-cachalot
 django_redis
+httpx
\ No newline at end of file
diff --git a/stack.env b/stack.env
index 9b544e0..d8967ac 100644
--- a/stack.env
+++ b/stack.env
@@ -1,6 +1,86 @@
+# General application settings
 APP_PORT=5000
 PORTAINER_GIT_DIR=.
 APP_LOCAL_SETTINGS=./app/local_settings.py
 APP_DATABASE_FILE=./db.sqlite3
 STATIC_ROOT=/conf/static
-OPERATION=dev
\ No newline at end of file
+OPERATION=dev
+
+# Elasticsearch settings
+ELASTICSEARCH_URL=10.1.0.1
+ELASTICSEARCH_PORT=9200
+ELASTICSEARCH_TLS=True
+ELASTICSEARCH_USERNAME=admin
+ELASTICSEARCH_PASSWORD=secret
+
+# Manticore settings
+MANTICORE_URL=http://monolith_db:9308
+
+# Database settings
+DB_BACKEND=MANTICORE
+INDEX_MAIN=main
+INDEX_RESTRICTED=restricted
+INDEX_META=meta
+INDEX_INT=internal
+INDEX_RULE_STORAGE=rule_storage
+
+MAIN_SIZES=1,5,15,30,50,100,250,500,1000
+MAIN_SIZES_ANON=1,5,15,30,50,100
+MAIN_SOURCES=dis,4ch,all
+SOURCES_RESTRICTED=irc
+CACHE=True
+CACHE_TIMEOUT=2
+
+# Drilldown settings
+DRILLDOWN_RESULTS_PER_PAGE=15
+DRILLDOWN_DEFAULT_SIZE=15
+DRILLDOWN_DEFAULT_INDEX=main
+DRILLDOWN_DEFAULT_SORTING=desc
+DRILLDOWN_DEFAULT_SOURCE=all
+
+# URLs
+DOMAIN=qi
+URL=http://10.0.0.10:5000
+
+# Access control
+ALLOWED_HOSTS=127.0.0.1,localhost,10.0.0.10,qi
+
+# CSRF
+CSRF_TRUSTED_ORIGINS=http://127.0.0.1:5000,http://localhost:5000,http://qi:5000,http://10.0.0.10:5000
+
+# Stripe settings
+BILLING_ENABLED=False
+STRIPE_TEST=True
+STRIPE_API_KEY_TEST=
+STRIPE_PUBLIC_API_KEY_TEST=
+STRIPE_API_KEY_PROD=
+STRIPE_PUBLIC_API_KEY_PROD=
+STRIPE_ENDPOINT_SECRET=
+STRIPE_ADMIN_COUPON=
+
+# Threshold settings
+THRESHOLD_ENDPOINT=http://threshold:13869
+THRESHOLD_API_KEY=api_1
+THRESHOLD_API_TOKEN=
+THRESHOLD_API_COUNTER=
+
+# NickTrace settings
+NICKTRACE_MAX_ITERATIONS=4
+NICKTRACE_MAX_CHUNK_SIZE=500
+NICKTRACE_QUERY_SIZE=10000
+
+# Meta settings
+META_MAX_ITERATIONS=4
+META_MAX_CHUNK_SIZE=500
+META_QUERY_SIZE=10000
+
+# Debugging and profiling
+DEBUG=True
+PROFILER=False
+
+# Redis settings
+REDIS_HOST=redis_neptune
+REDIS_PASSWORD=changeme
+REDIS_DB=1
+REDIS_DB_CACHE=10
+REDIS_PORT=6379
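
A quick way to sanity-check the wiring above, since each value now flows from stack.env through the compose `environment:` blocks into the mounted `app/local_settings.py`. This is a minimal sketch, not part of the diff; it assumes a Python shell inside the running `app` container, where `/code` is on the import path:

    # Hypothetical smoke test: confirm the REDIS_* values from stack.env
    # reached the mounted settings module. With the defaults above this
    # prints: redis_neptune 6379 1 10
    from app import local_settings as ls

    print(ls.REDIS_HOST, ls.REDIS_PORT, ls.REDIS_DB, ls.REDIS_DB_CACHE)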