From 2d3b8fdac6f4c33cff3a12db9c2bd34baa8f9747 Mon Sep 17 00:00:00 2001 From: Mark Veidemanis Date: Sun, 15 Feb 2026 04:27:28 +0000 Subject: [PATCH] Implement AI workspace and mitigation workflow --- app/local_settings.py | 2 +- app/urls.py | 160 +- auth_django.py | 17 +- core/clients/__init__.py | 1 + core/clients/signal.py | 181 +- core/clients/signalapi.py | 56 +- core/clients/xmpp.py | 460 ++- core/db/sql.py | 19 +- core/forms.py | 74 +- core/lib/deferred.py | 103 +- core/lib/prompts/bases.py | 2 +- core/lib/prompts/functions.py | 149 +- core/management/commands/scheduling.py | 1 - core/management/commands/ur.py | 15 +- core/messaging/ai.py | 11 +- core/messaging/analysis.py | 30 +- core/messaging/history.py | 141 +- core/messaging/natural.py | 16 +- core/messaging/replies.py | 46 +- core/messaging/utils.py | 3 +- ...son_group_persona_manipulation_and_more.py | 3 +- ...alter_ai_user_alter_group_user_and_more.py | 3 +- ...entifier_alter_manipulation_ai_and_more.py | 3 +- core/migrations/0014_queuedmessage.py | 3 +- ...airesult_workspaceconversation_and_more.py | 95 + ...017_remove_airesult_risk_flags_and_more.py | 359 +++ ...nplan_patternmitigationmessage_and_more.py | 84 + .../0019_patternmitigationcorrection.py | 28 + ...ationcorrection_language_style_and_more.py | 38 + ...gationcorrection_clarification_and_more.py | 43 + .../0022_patternmitigationautosettings.py | 38 + core/models.py | 1129 ++++++- core/modules/router.py | 7 +- core/templates/base.html | 12 +- core/templates/index.html | 12 +- core/templates/pages/ai-workspace.html | 11 + core/templates/pages/signal.html | 2 +- .../partials/ai-workspace-ai-result.html | 213 ++ .../ai-workspace-mitigation-panel.html | 660 ++++ .../ai-workspace-mitigation-status.html | 5 + .../partials/ai-workspace-person-widget.html | 698 ++++ .../partials/ai-workspace-send-status.html | 3 + .../partials/ai-workspace-widget.html | 54 + core/templates/partials/group-list.html | 10 +- .../templates/partials/manipulation-list.html | 10 +- core/templates/partials/message-list.html | 8 +- core/templates/partials/person-list.html | 10 +- core/templates/partials/persona-list.html | 10 +- core/templates/partials/queue-list.html | 150 +- core/templates/partials/session-list.html | 4 +- core/templates/partials/signal-accounts.html | 60 +- .../templates/partials/signal-chats-list.html | 6 +- .../partials/signal-contacts-list.html | 92 +- core/views/ais.py | 11 +- core/views/groups.py | 11 +- core/views/identifiers.py | 22 +- core/views/manipulations.py | 11 +- core/views/messages.py | 8 +- core/views/people.py | 11 +- core/views/personas.py | 11 +- core/views/queues.py | 98 +- core/views/sessions.py | 12 +- core/views/signal.py | 29 +- core/views/workspace.py | 2864 +++++++++++++++++ 64 files changed, 7669 insertions(+), 769 deletions(-) create mode 100644 core/migrations/0016_airequest_airesult_workspaceconversation_and_more.py create mode 100644 core/migrations/0017_remove_airesult_risk_flags_and_more.py create mode 100644 core/migrations/0018_patternmitigationplan_patternmitigationmessage_and_more.py create mode 100644 core/migrations/0019_patternmitigationcorrection.py create mode 100644 core/migrations/0020_patternmitigationcorrection_language_style_and_more.py create mode 100644 core/migrations/0021_alter_patternmitigationcorrection_clarification_and_more.py create mode 100644 core/migrations/0022_patternmitigationautosettings.py create mode 100644 core/templates/pages/ai-workspace.html create mode 100644 core/templates/partials/ai-workspace-ai-result.html 
create mode 100644 core/templates/partials/ai-workspace-mitigation-panel.html create mode 100644 core/templates/partials/ai-workspace-mitigation-status.html create mode 100644 core/templates/partials/ai-workspace-person-widget.html create mode 100644 core/templates/partials/ai-workspace-send-status.html create mode 100644 core/templates/partials/ai-workspace-widget.html create mode 100644 core/views/workspace.py diff --git a/app/local_settings.py b/app/local_settings.py index f5b999f..f9aa407 100644 --- a/app/local_settings.py +++ b/app/local_settings.py @@ -52,4 +52,4 @@ SIGNAL_NUMBER = getenv("SIGNAL_NUMBER") XMPP_ADDRESS = getenv("XMPP_ADDRESS") XMPP_JID = getenv("XMPP_JID") XMPP_PORT = getenv("XMPP_PORT") -XMPP_SECRET = getenv("XMPP_SECRET") \ No newline at end of file +XMPP_SECRET = getenv("XMPP_SECRET") diff --git a/app/urls.py b/app/urls.py index c7ade4e..bedf419 100644 --- a/app/urls.py +++ b/app/urls.py @@ -21,7 +21,21 @@ from django.urls import include, path from django.views.generic import TemplateView from two_factor.urls import urlpatterns as tf_urls -from core.views import base, notifications, signal, people, ais, groups, personas, manipulations, identifiers, sessions, messages, queues +from core.views import ( + ais, + base, + groups, + identifiers, + manipulations, + messages, + notifications, + people, + personas, + queues, + sessions, + signal, + workspace, +) urlpatterns = [ path("__debug__/", include("debug_toolbar.urls")), @@ -68,6 +82,86 @@ urlpatterns = [ name="signal_account_add", ), # AIs + path( + "ai/workspace/", + workspace.AIWorkspace.as_view(), + name="ai_workspace", + ), + path( + "ai/workspace//contacts/", + workspace.AIWorkspaceContactsWidget.as_view(), + name="ai_workspace_contacts", + ), + path( + "ai/workspace//person//", + workspace.AIWorkspacePersonWidget.as_view(), + name="ai_workspace_person", + ), + path( + "ai/workspace//person//run//", + workspace.AIWorkspaceRunOperation.as_view(), + name="ai_workspace_run", + ), + path( + "ai/workspace//person//send/", + workspace.AIWorkspaceSendDraft.as_view(), + name="ai_workspace_send", + ), + path( + "ai/workspace//person//queue/", + workspace.AIWorkspaceQueueDraft.as_view(), + name="ai_workspace_queue", + ), + path( + "ai/workspace//person//mitigation/create/", + workspace.AIWorkspaceCreateMitigation.as_view(), + name="ai_workspace_mitigation_create", + ), + path( + "ai/workspace//person//mitigation//chat/", + workspace.AIWorkspaceMitigationChat.as_view(), + name="ai_workspace_mitigation_chat", + ), + path( + "ai/workspace//person//mitigation//export/", + workspace.AIWorkspaceExportArtifact.as_view(), + name="ai_workspace_mitigation_export", + ), + path( + "ai/workspace//person//mitigation//artifact/create//", + workspace.AIWorkspaceCreateArtifact.as_view(), + name="ai_workspace_mitigation_artifact_create", + ), + path( + "ai/workspace//person//mitigation//artifact///save/", + workspace.AIWorkspaceUpdateArtifact.as_view(), + name="ai_workspace_mitigation_artifact_save", + ), + path( + "ai/workspace//person//mitigation//artifact///delete/", + workspace.AIWorkspaceDeleteArtifact.as_view(), + name="ai_workspace_mitigation_artifact_delete", + ), + path( + "ai/workspace//person//mitigation//artifact//delete-all/", + workspace.AIWorkspaceDeleteArtifactList.as_view(), + name="ai_workspace_mitigation_artifact_delete_all", + ), + path( + "ai/workspace//person//mitigation//engage/share/", + workspace.AIWorkspaceEngageShare.as_view(), + name="ai_workspace_mitigation_engage_share", + ), + path( + 
"ai/workspace//person//mitigation//auto/", + workspace.AIWorkspaceAutoSettings.as_view(), + name="ai_workspace_mitigation_auto", + ), + path( + "ai/workspace//person//mitigation//fundamentals/save/", + workspace.AIWorkspaceUpdateFundamentals.as_view(), + name="ai_workspace_mitigation_fundamentals_save", + ), path( "ai//", ais.AIList.as_view(), @@ -88,7 +182,6 @@ urlpatterns = [ ais.AIDelete.as_view(), name="ai_delete", ), - # People path( "person//", @@ -110,7 +203,6 @@ urlpatterns = [ people.PersonDelete.as_view(), name="person_delete", ), - # Groups path( "group//", @@ -132,7 +224,6 @@ urlpatterns = [ groups.GroupDelete.as_view(), name="group_delete", ), - # Personas path( "persona//", @@ -154,7 +245,6 @@ urlpatterns = [ personas.PersonaDelete.as_view(), name="persona_delete", ), - # Manipulations path( "manipulation//", @@ -198,19 +288,59 @@ urlpatterns = [ name="session_delete", ), # Identifiers - path("person//identifiers//", identifiers.PersonIdentifierList.as_view(), name="person_identifiers"), - path("person//identifiers/create/", identifiers.PersonIdentifierCreate.as_view(), name="person_identifier_create"), - path("person//identifiers/update///", identifiers.PersonIdentifierUpdate.as_view(), name="person_identifier_update"), - path("person//identifiers/delete///", identifiers.PersonIdentifierDelete.as_view(), name="person_identifier_delete"), + path( + "person//identifiers//", + identifiers.PersonIdentifierList.as_view(), + name="person_identifiers", + ), + path( + "person//identifiers/create/", + identifiers.PersonIdentifierCreate.as_view(), + name="person_identifier_create", + ), + path( + "person//identifiers/update///", + identifiers.PersonIdentifierUpdate.as_view(), + name="person_identifier_update", + ), + path( + "person//identifiers/delete///", + identifiers.PersonIdentifierDelete.as_view(), + name="person_identifier_delete", + ), # Messages - path("session//messages//", messages.MessageList.as_view(), name="messages"), - path("session//messages/create/", messages.MessageCreate.as_view(), name="message_create"), - path("session//messages/update///", messages.MessageUpdate.as_view(), name="message_update"), - path("session//messages/delete///", messages.MessageDelete.as_view(), name="message_delete"), + path( + "session//messages//", + messages.MessageList.as_view(), + name="messages", + ), + path( + "session//messages/create/", + messages.MessageCreate.as_view(), + name="message_create", + ), + path( + "session//messages/update///", + messages.MessageUpdate.as_view(), + name="message_update", + ), + path( + "session//messages/delete///", + messages.MessageDelete.as_view(), + name="message_delete", + ), # API # Queues - path("api/v1/queue/message/accept//", queues.AcceptMessageAPI.as_view(), name="message_accept_api"), - path("api/v1/queue/message/reject//", queues.RejectMessageAPI.as_view(), name="message_reject_api"), + path( + "api/v1/queue/message/accept//", + queues.AcceptMessageAPI.as_view(), + name="message_accept_api", + ), + path( + "api/v1/queue/message/reject//", + queues.RejectMessageAPI.as_view(), + name="message_reject_api", + ), path( "queue//", queues.QueueList.as_view(), diff --git a/auth_django.py b/auth_django.py index 6c08eb9..856971c 100755 --- a/auth_django.py +++ b/auth_django.py @@ -1,16 +1,18 @@ - # Create a debug log to confirm script execution -import sys -import django import os +import sys + +import django LOG_PATH = "auth_debug.log" + def log(data): with open(LOG_PATH, "a") as f: f.write(f"{data}\n") + # Set up Django environment 
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings") # Adjust if needed django.setup() @@ -18,11 +20,13 @@ django.setup() from django.contrib.auth import authenticate from django.contrib.auth.models import User + def check_credentials(username, password): """Authenticate user via Django""" user = authenticate(username=username, password=password) return user is not None and user.is_active + def main(): """Process authentication requests from Prosody""" while True: @@ -42,7 +46,9 @@ def main(): continue command, username, domain = parts[:3] - password = ":".join(parts[3:]) if len(parts) > 3 else None # Reconstruct password + password = ( + ":".join(parts[3:]) if len(parts) > 3 else None + ) # Reconstruct password if command == "auth": if password and check_credentials(username, password): @@ -71,5 +77,6 @@ def main(): log(f"Error: {str(e)}\n") print("0", flush=True) # Return failure for any error + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/core/clients/__init__.py b/core/clients/__init__.py index 4327ec3..fe5350f 100644 --- a/core/clients/__init__.py +++ b/core/clients/__init__.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod + from core.util import logs diff --git a/core/clients/signal.py b/core/clients/signal.py index 10c37c0..0a14ac1 100644 --- a/core/clients/signal.py +++ b/core/clients/signal.py @@ -8,7 +8,6 @@ from django.urls import reverse from signalbot import Command, Context, SignalBot from core.clients import ClientBase, signalapi -from core.lib.prompts.functions import delete_messages, truncate_and_summarize from core.messaging import ai, history, natural, replies, utils from core.models import Chat, Manipulation, PersonIdentifier, QueuedMessage from core.util import logs @@ -25,11 +24,90 @@ SIGNAL_PORT = 8080 SIGNAL_URL = f"{SIGNAL_HOST}:{SIGNAL_PORT}" + +def _get_nested(payload, path): + current = payload + for key in path: + if not isinstance(current, dict): + return None + current = current.get(key) + return current + + +def _looks_like_signal_attachment(entry): + return isinstance(entry, dict) and ( + "id" in entry or "attachmentId" in entry or "contentType" in entry + ) + + +def _normalize_attachment(entry): + attachment_id = entry.get("id") or entry.get("attachmentId") + if attachment_id is None: + return None + return { + "id": attachment_id, + "content_type": entry.get("contentType", "application/octet-stream"), + "filename": entry.get("filename") or str(attachment_id), + "size": entry.get("size") or 0, + "width": entry.get("width"), + "height": entry.get("height"), + } + + +def _extract_attachments(raw_payload): + envelope = raw_payload.get("envelope", {}) + candidate_paths = [ + ("dataMessage", "attachments"), + ("syncMessage", "sentMessage", "attachments"), + ("syncMessage", "editMessage", "dataMessage", "attachments"), + ] + results = [] + seen = set() + + for path in candidate_paths: + found = _get_nested(envelope, path) + if not isinstance(found, list): + continue + for entry in found: + normalized = _normalize_attachment(entry) + if not normalized: + continue + key = str(normalized["id"]) + if key in seen: + continue + seen.add(key) + results.append(normalized) + + # Fallback: scan for attachment-shaped lists under envelope. 
+ if not results: + stack = [envelope] + while stack: + node = stack.pop() + if isinstance(node, dict): + for value in node.values(): + stack.append(value) + elif isinstance(node, list): + if node and all(_looks_like_signal_attachment(item) for item in node): + for entry in node: + normalized = _normalize_attachment(entry) + if not normalized: + continue + key = str(normalized["id"]) + if key in seen: + continue + seen.add(key) + results.append(normalized) + else: + for value in node: + stack.append(value) + return results + + class NewSignalBot(SignalBot): def __init__(self, ur, service, config): self.ur = ur self.service = service - self.signal_rest = config["signal_service"] # keep your own copy + self.signal_rest = config["signal_service"] # keep your own copy self.phone_number = config["phone_number"] super().__init__(config) self.log = logs.get_logger("signalI") @@ -46,7 +124,9 @@ class NewSignalBot(SignalBot): try: resp = await session.get(uri) if resp.status != 200: - self.log.error(f"contacts lookup failed: {resp.status} {await resp.text()}") + self.log.error( + f"contacts lookup failed: {resp.status} {await resp.text()}" + ) return None contacts_data = await resp.json() @@ -95,6 +175,7 @@ class HandleMessage(Command): self.ur = ur self.service = service return super().__init__(*args, **kwargs) + async def handle(self, c: Context): msg = { "source": c.message.source, @@ -106,10 +187,15 @@ class HandleMessage(Command): "group": c.message.group, "reaction": c.message.reaction, "mentions": c.message.mentions, - "raw_message": c.message.raw_message + "raw_message": c.message.raw_message, } raw = json.loads(c.message.raw_message) - dest = raw.get("envelope", {}).get("syncMessage", {}).get("sentMessage", {}).get("destinationUuid") + dest = ( + raw.get("envelope", {}) + .get("syncMessage", {}) + .get("sentMessage", {}) + .get("destinationUuid") + ) account = raw.get("account", "") source_name = raw.get("envelope", {}).get("sourceName", "") @@ -125,9 +211,9 @@ class HandleMessage(Command): is_from_bot = source_uuid == c.bot.bot_uuid is_to_bot = dest == c.bot.bot_uuid or dest is None - reply_to_self = same_recipient and is_from_bot # Reply - reply_to_others = is_to_bot and not same_recipient # Reply - is_outgoing_message = is_from_bot and not is_to_bot # Do not reply + reply_to_self = same_recipient and is_from_bot # Reply + reply_to_others = is_to_bot and not same_recipient # Reply + is_outgoing_message = is_from_bot and not is_to_bot # Do not reply # Determine the identifier to use identifier_uuid = dest if is_from_bot else source_uuid @@ -135,20 +221,8 @@ class HandleMessage(Command): log.warning("No Signal identifier available for message routing.") return - # Handle attachments - attachments = raw.get("envelope", {}).get("syncMessage", {}).get("sentMessage", {}).get("attachments", []) - if not attachments: - attachments = raw.get("envelope", {}).get("dataMessage", {}).get("attachments", []) - attachment_list = [] - for attachment in attachments: - attachment_list.append({ - "id": attachment["id"], - "content_type": attachment["contentType"], - "filename": attachment["filename"], - "size": attachment["size"], - "width": attachment.get("width"), - "height": attachment.get("height"), - }) + # Handle attachments across multiple Signal payload variants. + attachment_list = _extract_attachments(raw) # Get users/person identifiers for this Signal sender/recipient. 
identifiers = await sync_to_async(list)( @@ -160,9 +234,16 @@ class HandleMessage(Command): xmpp_attachments = [] # Asynchronously fetch all attachments - tasks = [signalapi.fetch_signal_attachment(att["id"]) for att in attachment_list] - fetched_attachments = await asyncio.gather(*tasks) log.info(f"ATTACHMENT LIST {attachment_list}") + if attachment_list: + tasks = [ + signalapi.fetch_signal_attachment(att["id"]) for att in attachment_list + ] + fetched_attachments = await asyncio.gather(*tasks) + else: + envelope = raw.get("envelope", {}) + log.info(f"No attachments found. Envelope keys: {list(envelope.keys())}") + fetched_attachments = [] for fetched, att in zip(fetched_attachments, attachment_list): if not fetched: @@ -170,12 +251,14 @@ class HandleMessage(Command): continue # Attach fetched file to XMPP - xmpp_attachments.append({ - "content": fetched["content"], - "content_type": fetched["content_type"], - "filename": fetched["filename"], - "size": fetched["size"], - }) + xmpp_attachments.append( + { + "content": fetched["content"], + "content_type": fetched["content_type"], + "filename": fetched["filename"], + "size": fetched["size"], + } + ) # Forward incoming Signal messages to XMPP and apply mutate rules. for identifier in identifiers: @@ -200,7 +283,9 @@ class HandleMessage(Command): ) log.info("Running Signal mutate prompt") result = await ai.run_prompt(prompt, manip.ai) - log.info(f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP.") + log.info( + f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP." + ) await self.ur.xmpp.client.send_from_external( user, identifier, @@ -209,7 +294,9 @@ class HandleMessage(Command): attachments=xmpp_attachments, ) else: - log.info(f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP.") + log.info( + f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP." + ) await self.ur.xmpp.client.send_from_external( user, identifier, @@ -219,9 +306,7 @@ class HandleMessage(Command): ) # TODO: Permission checks - manips = await sync_to_async(list)( - Manipulation.objects.filter(enabled=True) - ) + manips = await sync_to_async(list)(Manipulation.objects.filter(enabled=True)) session_cache = {} stored_messages = set() for manip in manips: @@ -233,7 +318,9 @@ class HandleMessage(Command): person__in=manip.group.people.all(), ) except PersonIdentifier.DoesNotExist: - log.warning(f"{manip.name}: Message from unknown identifier {identifier_uuid}.") + log.warning( + f"{manip.name}: Message from unknown identifier {identifier_uuid}." + ) continue # Find/create ChatSession once per user/person. @@ -241,7 +328,9 @@ class HandleMessage(Command): if session_key in session_cache: chat_session = session_cache[session_key] else: - chat_session = await history.get_chat_session(manip.user, person_identifier) + chat_session = await history.get_chat_session( + manip.user, person_identifier + ) session_cache[session_key] = chat_session # Store each incoming/outgoing event once per session. 
@@ -270,10 +359,7 @@ class HandleMessage(Command): elif manip.mode in ["active", "notify", "instant"]: await utils.update_last_interaction(chat_session) prompt = replies.generate_reply_prompt( - msg, - person_identifier.person, - manip, - chat_history + msg, person_identifier.person, manip, chat_history ) log.info("Running context prompt") @@ -307,14 +393,13 @@ class HandleMessage(Command): custom_author="BOT", ) - await delete_messages(existing_queue) + await history.delete_queryset(existing_queue) qm = await history.store_own_message( session=chat_session, text=result, ts=ts + 1, manip=manip, queue=True, - ) accept = reverse( "message_accept_api", kwargs={"message_id": qm.id} @@ -333,9 +418,6 @@ class HandleMessage(Command): else: log.error(f"Mode {manip.mode} is not implemented") - # Manage truncation & summarization - await truncate_and_summarize(chat_session, manip.ai) - await sync_to_async(Chat.objects.update_or_create)( source_uuid=source_uuid, defaults={ @@ -353,9 +435,10 @@ class SignalClient(ClientBase): ur, self.service, { - "signal_service": SIGNAL_URL, - "phone_number": "+447490296227", - }) + "signal_service": SIGNAL_URL, + "phone_number": "+447490296227", + }, + ) self.client.register(HandleMessage(self.ur, self.service)) diff --git a/core/clients/signalapi.py b/core/clients/signalapi.py index 9348bce..6a38923 100644 --- a/core/clients/signalapi.py +++ b/core/clients/signalapi.py @@ -1,12 +1,12 @@ -from rest_framework import status - -import requests -from requests.exceptions import RequestException -import orjson -from django.conf import settings -import aiohttp -import base64 import asyncio +import base64 + +import aiohttp +import orjson +import requests +from django.conf import settings +from requests.exceptions import RequestException +from rest_framework import status async def start_typing(uuid): @@ -18,6 +18,7 @@ async def start_typing(uuid): async with session.put(url, json=data) as response: return await response.text() # Optional: Return response content + async def stop_typing(uuid): base = getattr(settings, "SIGNAL_HTTP_URL", "http://signal:8080").rstrip("/") url = f"{base}/v1/typing_indicator/{settings.SIGNAL_NUMBER}" @@ -27,6 +28,7 @@ async def stop_typing(uuid): async with session.delete(url, json=data) as response: return await response.text() # Optional: Return response content + async def download_and_encode_base64(file_url, filename, content_type): """ Downloads a file from a given URL asynchronously, converts it to Base64, @@ -51,12 +53,15 @@ async def download_and_encode_base64(file_url, filename, content_type): base64_encoded = base64.b64encode(file_data).decode("utf-8") # Format according to Signal's expected structure - return f"data:{content_type};filename={filename};base64,{base64_encoded}" + return ( + f"data:{content_type};filename={filename};base64,{base64_encoded}" + ) except aiohttp.ClientError as e: # log.error(f"Failed to download file: {file_url}, error: {e}") return None + async def send_message_raw(recipient_uuid, text=None, attachments=[]): """ Sends a message using the Signal REST API, ensuring attachment links are not included in the text body. 
@@ -75,11 +80,14 @@ async def send_message_raw(recipient_uuid, text=None, attachments=[]): data = { "recipients": [recipient_uuid], "number": settings.SIGNAL_NUMBER, - "base64_attachments": [] + "base64_attachments": [], } # Asynchronously download and encode all attachments - tasks = [download_and_encode_base64(att["url"], att["filename"], att["content_type"]) for att in attachments] + tasks = [ + download_and_encode_base64(att["url"], att["filename"], att["content_type"]) + for att in attachments + ] encoded_attachments = await asyncio.gather(*tasks) # Filter out failed downloads (None values) @@ -87,7 +95,7 @@ async def send_message_raw(recipient_uuid, text=None, attachments=[]): # Remove the message body if it only contains an attachment link if text and (text.strip() in [att["url"] for att in attachments]): - #log.info("Removing message body since it only contains an attachment link.") + # log.info("Removing message body since it only contains an attachment link.") text = None # Don't send the link as text if text: @@ -103,6 +111,7 @@ async def send_message_raw(recipient_uuid, text=None, attachments=[]): return ts if ts else False return False + async def fetch_signal_attachment(attachment_id): """ Asynchronously fetches an attachment from Signal. @@ -111,7 +120,7 @@ async def fetch_signal_attachment(attachment_id): attachment_id (str): The Signal attachment ID. Returns: - dict | None: + dict | None: { "content": , "content_type": , @@ -128,7 +137,9 @@ async def fetch_signal_attachment(attachment_id): if response.status != 200: return None # Failed request - content_type = response.headers.get("Content-Type", "application/octet-stream") + content_type = response.headers.get( + "Content-Type", "application/octet-stream" + ) content = await response.read() size = int(response.headers.get("Content-Length", len(content))) @@ -150,7 +161,6 @@ async def fetch_signal_attachment(attachment_id): return None # Network error - def download_and_encode_base64_sync(file_url, filename, content_type): """ Downloads a file from a given URL, converts it to Base64, and returns it in Signal's expected format. 
@@ -173,7 +183,7 @@ def download_and_encode_base64_sync(file_url, filename, content_type): # Format according to Signal's expected structure return f"data:{content_type};filename={filename};base64,{base64_encoded}" except requests.RequestException as e: - #log.error(f"Failed to download file: {file_url}, error: {e}") + # log.error(f"Failed to download file: {file_url}, error: {e}") return None @@ -193,18 +203,20 @@ def send_message_raw_sync(recipient_uuid, text=None, attachments=[]): data = { "recipients": [recipient_uuid], "number": settings.SIGNAL_NUMBER, - "base64_attachments": [] + "base64_attachments": [], } # Convert attachments to Base64 for att in attachments: - base64_data = download_and_encode_base64_sync(att["url"], att["filename"], att["content_type"]) + base64_data = download_and_encode_base64_sync( + att["url"], att["filename"], att["content_type"] + ) if base64_data: data["base64_attachments"].append(base64_data) # Remove the message body if it only contains an attachment link if text and (text.strip() in [att["url"] for att in attachments]): - #log.info("Removing message body since it only contains an attachment link.") + # log.info("Removing message body since it only contains an attachment link.") text = None # Don't send the link as text if text: @@ -214,10 +226,12 @@ def send_message_raw_sync(recipient_uuid, text=None, attachments=[]): response = requests.post(url, json=data, timeout=10) response.raise_for_status() except requests.RequestException as e: - #log.error(f"Failed to send Signal message: {e}") + # log.error(f"Failed to send Signal message: {e}") return False - if response.status_code == status.HTTP_201_CREATED: # Signal server returns 201 on success + if ( + response.status_code == status.HTTP_201_CREATED + ): # Signal server returns 201 on success try: ts = orjson.loads(response.text).get("timestamp", None) return ts if ts else False diff --git a/core/clients/xmpp.py b/core/clients/xmpp.py index c7fd767..cd4a791 100644 --- a/core/clients/xmpp.py +++ b/core/clients/xmpp.py @@ -1,20 +1,30 @@ -from core.clients import ClientBase -from django.conf import settings -from slixmpp.componentxmpp import ComponentXMPP -from django.conf import settings -from core.models import User, Person, PersonIdentifier, ChatSession, Manipulation -from asgiref.sync import sync_to_async -from django.utils.timezone import now import asyncio -from core.clients import signalapi -from slixmpp.xmlstream import register_stanza_plugin -from slixmpp.plugins.xep_0085.stanza import Active, Composing, Paused, Inactive, Gone -from slixmpp.stanza import Message -from slixmpp.xmlstream.stanzabase import ET + import aiohttp -from core.messaging import history +from asgiref.sync import sync_to_async +from django.conf import settings +from django.utils.timezone import now +from slixmpp.componentxmpp import ComponentXMPP +from slixmpp.plugins.xep_0085.stanza import Active, Composing, Gone, Inactive, Paused +from slixmpp.stanza import Message +from slixmpp.xmlstream import register_stanza_plugin +from slixmpp.xmlstream.stanzabase import ET + +from core.clients import ClientBase, signalapi +from core.messaging import ai, history, replies, utils +from core.models import ( + ChatSession, + Manipulation, + PatternMitigationAutoSettings, + PatternMitigationGame, + PatternMitigationPlan, + PatternMitigationRule, + Person, + PersonIdentifier, + User, + WorkspaceConversation, +) from core.util import logs -from core.messaging import replies, utils, ai class XMPPComponent(ComponentXMPP): @@ -51,7 +61,9 @@ class 
XMPPComponent(ComponentXMPP): self.add_event_handler("presence_subscribed", self.on_presence_subscribed) self.add_event_handler("presence_unsubscribe", self.on_presence_unsubscribe) self.add_event_handler("presence_unsubscribed", self.on_presence_unsubscribed) - self.add_event_handler("roster_subscription_request", self.on_roster_subscription_request) + self.add_event_handler( + "roster_subscription_request", self.on_roster_subscription_request + ) # Chat state handlers self.add_event_handler("chatstate_active", self.on_chatstate_active) @@ -73,13 +85,15 @@ class XMPPComponent(ComponentXMPP): def get_identifier(self, msg): # Extract sender JID (full format: user@domain/resource) sender_jid = str(msg["from"]) - + # Split into username@domain and optional resource sender_parts = sender_jid.split("/", 1) sender_bare_jid = sender_parts[0] # Always present: user@domain sender_username, sender_domain = sender_bare_jid.split("@", 1) - - sender_resource = sender_parts[1] if len(sender_parts) > 1 else None # Extract resource if present + + sender_resource = ( + sender_parts[1] if len(sender_parts) > 1 else None + ) # Extract resource if present # Extract recipient JID (should match component JID format) recipient_jid = str(msg["to"]) @@ -100,7 +114,6 @@ class XMPPComponent(ComponentXMPP): person_name = recipient_username.title() service = None - try: # Lookup user in Django self.log.info(f"User {sender_username}") @@ -112,22 +125,255 @@ class XMPPComponent(ComponentXMPP): # Ensure a PersonIdentifier exists for this user, person, and service self.log.info(f"Identifier {service}") - identifier = PersonIdentifier.objects.get(user=user, person=person, service=service) - + identifier = PersonIdentifier.objects.get( + user=user, person=person, service=service + ) return identifier - - except (User.DoesNotExist, Person.DoesNotExist, PersonIdentifier.DoesNotExist): - # If any lookup fails, reject the subscription + except Exception as e: + self.log.error(f"Failed to resolve identifier from XMPP message: {e}") return None + def _get_workspace_conversation(self, user, person): + conversation, _ = WorkspaceConversation.objects.get_or_create( + user=user, + platform_type="signal", + title=f"{person.name} Workspace", + defaults={"platform_thread_id": str(person.id)}, + ) + conversation.participants.add(person) + return conversation + + def _get_or_create_plan(self, user, person): + conversation = self._get_workspace_conversation(user, person) + plan = conversation.mitigation_plans.order_by("-updated_at").first() + if plan is None: + plan = PatternMitigationPlan.objects.create( + user=user, + conversation=conversation, + title=f"{person.name} Pattern Mitigation", + objective="Mitigate repeated friction loops.", + fundamental_items=[], + creation_mode="guided", + status="draft", + ) + PatternMitigationRule.objects.create( + user=user, + plan=plan, + title="Safety Before Analysis", + content="Prioritize de-escalation before analysis.", + enabled=True, + ) + PatternMitigationGame.objects.create( + user=user, + plan=plan, + title="Two-Turn Pause", + instructions="Use two short turns then pause with a return time.", + enabled=True, + ) + return plan + + async def _handle_mitigation_command(self, sender_user, body, sym): + def parse_parts(raw): + return [part.strip() for part in raw.split("|")] + + command = body.strip() + if command == ".mitigation help": + sym( + "Mitigation commands: " + ".mitigation list | " + ".mitigation show | " + ".mitigation rule-add ||<content> | " + ".mitigation rule-del <person>|<title> | 
" + ".mitigation game-add <person>|<title>|<instructions> | " + ".mitigation game-del <person>|<title> | " + ".mitigation auto <person>|on|off | " + ".mitigation auto-status <person>" + ) + return True + + if command == ".mitigation list": + plans = await sync_to_async(list)( + PatternMitigationPlan.objects.filter(user=sender_user) + .select_related("conversation") + .order_by("-updated_at")[:15] + ) + if not plans: + sym("No mitigation plans found.") + return True + rows = [] + for plan in plans: + person_name = ( + plan.conversation.participants.order_by("name").first().name + if plan.conversation.participants.exists() + else "Unknown" + ) + rows.append(f"{person_name}: {plan.title}") + sym("Plans: " + " | ".join(rows)) + return True + + if command.startswith(".mitigation show "): + person_name = command.replace(".mitigation show ", "", 1).strip().title() + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + plan = await sync_to_async(self._get_or_create_plan)(sender_user, person) + rule_count = await sync_to_async(plan.rules.count)() + game_count = await sync_to_async(plan.games.count)() + sym(f"{person.name}: {plan.title} | rules={rule_count} games={game_count}") + return True + + if command.startswith(".mitigation rule-add "): + payload = command.replace(".mitigation rule-add ", "", 1) + parts = parse_parts(payload) + if len(parts) < 3: + sym("Usage: .mitigation rule-add <person>|<title>|<content>") + return True + person_name, title, content = parts[0].title(), parts[1], "|".join(parts[2:]) + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + plan = await sync_to_async(self._get_or_create_plan)(sender_user, person) + await sync_to_async(PatternMitigationRule.objects.create)( + user=sender_user, + plan=plan, + title=title[:255], + content=content, + enabled=True, + ) + sym("Rule added.") + return True + + if command.startswith(".mitigation rule-del "): + payload = command.replace(".mitigation rule-del ", "", 1) + parts = parse_parts(payload) + if len(parts) < 2: + sym("Usage: .mitigation rule-del <person>|<title>") + return True + person_name, title = parts[0].title(), "|".join(parts[1:]) + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + plan = await sync_to_async(self._get_or_create_plan)(sender_user, person) + deleted, _ = await sync_to_async( + lambda: PatternMitigationRule.objects.filter( + user=sender_user, + plan=plan, + title__iexact=title, + ).delete() + )() + sym("Rule deleted." 
if deleted else "Rule not found.") + return True + + if command.startswith(".mitigation game-add "): + payload = command.replace(".mitigation game-add ", "", 1) + parts = parse_parts(payload) + if len(parts) < 3: + sym("Usage: .mitigation game-add <person>|<title>|<instructions>") + return True + person_name, title, content = parts[0].title(), parts[1], "|".join(parts[2:]) + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + plan = await sync_to_async(self._get_or_create_plan)(sender_user, person) + await sync_to_async(PatternMitigationGame.objects.create)( + user=sender_user, + plan=plan, + title=title[:255], + instructions=content, + enabled=True, + ) + sym("Game added.") + return True + + if command.startswith(".mitigation game-del "): + payload = command.replace(".mitigation game-del ", "", 1) + parts = parse_parts(payload) + if len(parts) < 2: + sym("Usage: .mitigation game-del <person>|<title>") + return True + person_name, title = parts[0].title(), "|".join(parts[1:]) + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + plan = await sync_to_async(self._get_or_create_plan)(sender_user, person) + deleted, _ = await sync_to_async( + lambda: PatternMitigationGame.objects.filter( + user=sender_user, + plan=plan, + title__iexact=title, + ).delete() + )() + sym("Game deleted." if deleted else "Game not found.") + return True + + if command.startswith(".mitigation auto "): + payload = command.replace(".mitigation auto ", "", 1) + parts = parse_parts(payload) + if len(parts) < 2: + sym("Usage: .mitigation auto <person>|on|off") + return True + person_name, state = parts[0].title(), parts[1].lower() + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + conversation = await sync_to_async(self._get_workspace_conversation)(sender_user, person) + auto_obj, _ = await sync_to_async(PatternMitigationAutoSettings.objects.get_or_create)( + user=sender_user, + conversation=conversation, + ) + auto_obj.enabled = state in {"on", "true", "1", "yes"} + await sync_to_async(auto_obj.save)(update_fields=["enabled", "updated_at"]) + sym(f"Automation {'enabled' if auto_obj.enabled else 'disabled'} for {person.name}.") + return True + + if command.startswith(".mitigation auto-status "): + person_name = command.replace(".mitigation auto-status ", "", 1).strip().title() + person = await sync_to_async( + lambda: Person.objects.filter(user=sender_user, name__iexact=person_name).first() + )() + if not person: + sym("Unknown person.") + return True + conversation = await sync_to_async(self._get_workspace_conversation)(sender_user, person) + auto_obj, _ = await sync_to_async(PatternMitigationAutoSettings.objects.get_or_create)( + user=sender_user, + conversation=conversation, + ) + sym( + f"{person.name}: auto={'on' if auto_obj.enabled else 'off'}, " + f"pattern={'on' if auto_obj.auto_pattern_recognition else 'off'}, " + f"corrections={'on' if auto_obj.auto_create_corrections else 'off'}" + ) + return True + + return False + def update_roster(self, jid, name=None): """ Adds or updates a user in the roster. 
""" iq = self.Iq() - iq['type'] = 'set' - iq['roster']['items'] = {jid: {'name': name or jid}} - + iq["type"] = "set" + iq["roster"]["items"] = {jid: {"name": name or jid}} + iq.send() self.log.info(f"Updated roster: Added {jid} ({name})") @@ -171,7 +417,6 @@ class XMPPComponent(ComponentXMPP): identifier = self.get_identifier(msg) - def on_presence_available(self, pres): """ Handle when a user becomes available. @@ -214,10 +459,12 @@ class XMPPComponent(ComponentXMPP): Accept only if the recipient has a contact matching the sender. """ - sender_jid = str(pres['from']).split('/')[0] # Bare JID (user@domain) - recipient_jid = str(pres['to']).split('/')[0] + sender_jid = str(pres["from"]).split("/")[0] # Bare JID (user@domain) + recipient_jid = str(pres["to"]).split("/")[0] - self.log.info(f"Received subscription request from {sender_jid} to {recipient_jid}") + self.log.info( + f"Received subscription request from {sender_jid} to {recipient_jid}" + ) try: # Extract sender and recipient usernames @@ -248,7 +495,9 @@ class XMPPComponent(ComponentXMPP): # Accept the subscription self.send_presence(ptype="subscribed", pto=sender_jid, pfrom=component_jid) - self.log.info(f"Accepted subscription from {sender_jid}, sent from {component_jid}") + self.log.info( + f"Accepted subscription from {sender_jid}, sent from {component_jid}" + ) # Send a presence request **from the recipient to the sender** (ASKS THEM TO ACCEPT BACK) # self.send_presence(ptype="subscribe", pto=sender_jid, pfrom=component_jid) @@ -262,16 +511,16 @@ class XMPPComponent(ComponentXMPP): self.send_presence(ptype="available", pto=sender_jid, pfrom=component_jid) self.log.info(f"Sent presence update from {component_jid} to {sender_jid}") - except (User.DoesNotExist, Person.DoesNotExist, PersonIdentifier.DoesNotExist): # If any lookup fails, reject the subscription - self.log.warning(f"Subscription request from {sender_jid} rejected (recipient does not have this contact).") + self.log.warning( + f"Subscription request from {sender_jid} rejected (recipient does not have this contact)." + ) self.send_presence(ptype="unsubscribed", pto=sender_jid) except ValueError: return - def on_presence_subscribed(self, pres): """ Handle successful subscription confirmations. @@ -325,16 +574,16 @@ class XMPPComponent(ComponentXMPP): # self.log.error("No XEP-0363 upload service found.") # return None - #self.log.info(f"Upload service: {upload_service}") + # self.log.info(f"Upload service: {upload_service}") upload_service_jid = "share.zm.is" try: - slot = await self['xep_0363'].request_slot( + slot = await self["xep_0363"].request_slot( jid=upload_service_jid, filename=filename, content_type=content_type, - size=size + size=size, ) if slot is None: @@ -350,8 +599,12 @@ class XMPPComponent(ComponentXMPP): put_url = put_element.attrib.get("url") # Extract the Authorization header correctly - header_element = put_element.find(f"./{namespace}header[@name='Authorization']") - auth_header = header_element.text.strip() if header_element is not None else None + header_element = put_element.find( + f"./{namespace}header[@name='Authorization']" + ) + auth_header = ( + header_element.text.strip() if header_element is not None else None + ) if not get_url or not put_url: self.log.error(f"Missing URLs in upload slot: {slot}") @@ -363,7 +616,6 @@ class XMPPComponent(ComponentXMPP): self.log.error(f"Exception while requesting upload slot: {e}") return None - async def message(self, msg): """ Process incoming XMPP messages. 
@@ -374,13 +626,15 @@ class XMPPComponent(ComponentXMPP): # Extract sender JID (full format: user@domain/resource) sender_jid = str(msg["from"]) - + # Split into username@domain and optional resource sender_parts = sender_jid.split("/", 1) sender_bare_jid = sender_parts[0] # Always present: user@domain sender_username, sender_domain = sender_bare_jid.split("@", 1) - - sender_resource = sender_parts[1] if len(sender_parts) > 1 else None # Extract resource if present + + sender_resource = ( + sender_parts[1] if len(sender_parts) > 1 else None + ) # Extract resource if present # Extract recipient JID (should match component JID format) recipient_jid = str(msg["to"]) @@ -399,19 +653,23 @@ class XMPPComponent(ComponentXMPP): # Extract attachments from standard XMPP <attachments> (if present) for att in msg.xml.findall(".//{urn:xmpp:attachments}attachment"): - attachments.append({ - "url": att.attrib.get("url"), - "filename": att.attrib.get("filename"), - "content_type": att.attrib.get("content_type"), - }) + attachments.append( + { + "url": att.attrib.get("url"), + "filename": att.attrib.get("filename"), + "content_type": att.attrib.get("content_type"), + } + ) # Extract attachments from XEP-0066 <x><url> format (Out of Band Data) for oob in msg.xml.findall(".//{jabber:x:oob}x/{jabber:x:oob}url"): - attachments.append({ - "url": oob.text, - "filename": oob.text.split("/")[-1], # Extract filename from URL - "content_type": "application/octet-stream", # Generic content-type - }) + attachments.append( + { + "url": oob.text, + "filename": oob.text.split("/")[-1], # Extract filename from URL + "content_type": "application/octet-stream", # Generic content-type + } + ) self.log.info(f"Extracted {len(attachments)} attachments from XMPP message.") # Log extracted information with variable name annotations @@ -426,7 +684,9 @@ class XMPPComponent(ComponentXMPP): # Ensure recipient domain matches our configured component expected_domain = settings.XMPP_JID # 'jews.zm.is' in your config if recipient_domain != expected_domain: - self.log.warning(f"Invalid recipient domain: {recipient_domain}, expected {expected_domain}") + self.log.warning( + f"Invalid recipient domain: {recipient_domain}, expected {expected_domain}" + ) return # Lookup sender in Django's User model @@ -452,6 +712,16 @@ class XMPPComponent(ComponentXMPP): contact_names = [person.name for person in persons] response_text = f"Contacts: " + ", ".join(contact_names) sym(response_text) + elif body == ".help": + sym("Commands: .contacts, .whoami, .mitigation help") + elif body.startswith(".mitigation"): + handled = await self._handle_mitigation_command( + sender_user, + body, + sym, + ) + if not handled: + sym("Unknown mitigation command. 
Try .mitigation help") elif body == ".whoami": sym(str(sender_user.__dict__)) else: @@ -468,7 +738,7 @@ class XMPPComponent(ComponentXMPP): recipient_service = None recipient_name = recipient_name.title() - + try: person = Person.objects.get(user=sender_user, name=recipient_name) except Person.DoesNotExist: @@ -476,21 +746,22 @@ class XMPPComponent(ComponentXMPP): if recipient_service: try: - identifier = PersonIdentifier.objects.get(user=sender_user, - person=person, - service=recipient_service) + identifier = PersonIdentifier.objects.get( + user=sender_user, person=person, service=recipient_service + ) except PersonIdentifier.DoesNotExist: sym("This service identifier does not exist.") else: # Get a random identifier - identifier = PersonIdentifier.objects.filter(user=sender_user, - person=person).first() + identifier = PersonIdentifier.objects.filter( + user=sender_user, person=person + ).first() recipient_service = identifier.service # sym(str(person.__dict__)) # sym(f"Service: {recipient_service}") - #tss = await identifier.send(body, attachments=attachments) + # tss = await identifier.send(body, attachments=attachments) # AM FIXING https://git.zm.is/XF/GIA/issues/5 session, _ = await sync_to_async(ChatSession.objects.get_or_create)( identifier=identifier, @@ -502,7 +773,7 @@ class XMPPComponent(ComponentXMPP): sender="XMPP", text=body, ts=int(now().timestamp() * 1000), - #outgoing=detail.is_outgoing_message, ????????? TODO: + # outgoing=detail.is_outgoing_message, ????????? TODO: ) self.log.info("Stored a message sent from XMPP in the history.") @@ -526,11 +797,11 @@ class XMPPComponent(ComponentXMPP): chat_history = await history.get_chat_history(session) await utils.update_last_interaction(session) prompt = replies.generate_mutate_reply_prompt( - body, - identifier.person, - manip, - chat_history, - ) + body, + identifier.person, + manip, + chat_history, + ) self.log.info("Running XMPP context prompt") result = await ai.run_prompt(prompt, manip.ai) self.log.info(f"RESULT {result}") @@ -546,17 +817,21 @@ class XMPPComponent(ComponentXMPP): ) self.log.info(f"Message sent with modifications") - - async def request_upload_slots(self, recipient_jid, attachments): """Requests upload slots for multiple attachments concurrently.""" upload_tasks = [ - self.request_upload_slot(recipient_jid, att["filename"], att["content_type"], att["size"]) + self.request_upload_slot( + recipient_jid, att["filename"], att["content_type"], att["size"] + ) for att in attachments ] upload_slots = await asyncio.gather(*upload_tasks) - return [(att, slot) for att, slot in zip(attachments, upload_slots) if slot is not None] + return [ + (att, slot) + for att, slot in zip(attachments, upload_slots) + if slot is not None + ] async def upload_and_send(self, att, upload_slot, recipient_jid, sender_jid): """Uploads a file and immediately sends the corresponding XMPP message.""" @@ -567,19 +842,29 @@ class XMPPComponent(ComponentXMPP): async with aiohttp.ClientSession() as session: try: - async with session.put(put_url, data=att["content"], headers=headers) as response: + async with session.put( + put_url, data=att["content"], headers=headers + ) as response: if response.status not in (200, 201): - self.log.error(f"Upload failed: {response.status} {await response.text()}") + self.log.error( + f"Upload failed: {response.status} {await response.text()}" + ) return - self.log.info(f"Successfully uploaded {att['filename']} to {upload_url}") + self.log.info( + f"Successfully uploaded {att['filename']} to {upload_url}" + ) # 
Send XMPP message immediately after successful upload - await self.send_xmpp_message(recipient_jid, sender_jid, upload_url, attachment_url=upload_url) + await self.send_xmpp_message( + recipient_jid, sender_jid, upload_url, attachment_url=upload_url + ) except Exception as e: self.log.error(f"Error uploading {att['filename']} to XMPP: {e}") - async def send_xmpp_message(self, recipient_jid, sender_jid, body_text, attachment_url=None): + async def send_xmpp_message( + self, recipient_jid, sender_jid, body_text, attachment_url=None + ): """Sends an XMPP message with either text or an attachment URL.""" msg = self.make_message(mto=recipient_jid, mfrom=sender_jid, mtype="chat") msg["body"] = body_text # Body must contain only text or the URL @@ -594,7 +879,9 @@ class XMPPComponent(ComponentXMPP): self.log.info(f"Sending XMPP message: {msg.xml}") msg.send() - async def send_from_external(self, user, person_identifier, text, is_outgoing_message, attachments=[]): + async def send_from_external( + self, user, person_identifier, text, is_outgoing_message, attachments=[] + ): """Handles sending XMPP messages with text and attachments.""" sender_jid = f"{person_identifier.person.name.lower()}|{person_identifier.service}@{settings.XMPP_JID}" @@ -614,11 +901,12 @@ class XMPPComponent(ComponentXMPP): self.log.info(f"Got upload slots") if not valid_uploads: self.log.warning("No valid upload slots obtained.") - #return + # return # Step 3: Upload each file and send its message immediately after upload upload_tasks = [ - self.upload_and_send(att, slot, recipient_jid, sender_jid) for att, slot in valid_uploads + self.upload_and_send(att, slot, recipient_jid, sender_jid) + for att, slot in valid_uploads ] await asyncio.gather(*upload_tasks) # Upload files concurrently @@ -634,12 +922,12 @@ class XMPPClient(ClientBase): port=settings.XMPP_PORT, ) - self.client.register_plugin('xep_0030') # Service Discovery - self.client.register_plugin('xep_0004') # Data Forms - self.client.register_plugin('xep_0060') # PubSub - self.client.register_plugin('xep_0199') # XMPP Ping + self.client.register_plugin("xep_0030") # Service Discovery + self.client.register_plugin("xep_0004") # Data Forms + self.client.register_plugin("xep_0060") # PubSub + self.client.register_plugin("xep_0199") # XMPP Ping self.client.register_plugin("xep_0085") # Chat State Notifications - self.client.register_plugin('xep_0363') # HTTP File Upload + self.client.register_plugin("xep_0363") # HTTP File Upload def start(self): self.log.info("XMPP client starting...") @@ -648,4 +936,4 @@ class XMPPClient(ClientBase): self.client.loop = self.loop self.client.connect() - #self.client.process() \ No newline at end of file + # self.client.process() diff --git a/core/db/sql.py b/core/db/sql.py index 1c74112..3cf487e 100644 --- a/core/db/sql.py +++ b/core/db/sql.py @@ -1,26 +1,25 @@ import aiomysql -from core.util import logs from core.schemas import mc_s +from core.util import logs mysql_pool = None log = logs.get_logger("sql") DB_URL = "giadb" + + async def init_mysql_pool(): """ Initialize the MySQL connection pool. 
""" global mysql_pool mysql_pool = await aiomysql.create_pool( - host=DB_URL, - port=9306, - db="Manticore", - minsize=1, - maxsize=10 + host=DB_URL, port=9306, db="Manticore", minsize=1, maxsize=10 ) + async def close_mysql_pool(): """Close the MySQL connection pool properly.""" global mysql_pool @@ -42,11 +41,9 @@ async def create_index(): for name, schema in schemas.items(): schema_types = ", ".join([f"{k} {v}" for k, v in schema.items()]) - create_query = ( - f"create table if not exists {name}({schema_types}) engine='columnar'" - ) + create_query = f"create table if not exists {name}({schema_types}) engine='columnar'" log.info(f"Schema types {create_query}") - await cur.execute(create_query) # SQLi + await cur.execute(create_query) # SQLi except aiomysql.Error as e: log.error(f"MySQL error: {e}") @@ -60,4 +57,4 @@ async def main(): created = True except Exception as e: log.error(f"Error creating index: {e}") - await asyncio.sleep(1) # Block the thread, just wait for the DB \ No newline at end of file + await asyncio.sleep(1) # Block the thread, just wait for the DB diff --git a/core/forms.py b/core/forms.py index c0ad540..2ff4d4d 100644 --- a/core/forms.py +++ b/core/forms.py @@ -3,7 +3,19 @@ from django.contrib.auth.forms import UserCreationForm from django.forms import ModelForm from mixins.restrictions import RestrictedFormMixin -from .models import NotificationSettings, User, AI, PersonIdentifier, Person, Group, Persona, Manipulation, ChatSession, Message, QueuedMessage +from .models import ( + AI, + ChatSession, + Group, + Manipulation, + Message, + NotificationSettings, + Person, + Persona, + PersonIdentifier, + QueuedMessage, + User, +) # Create your forms here. @@ -48,6 +60,7 @@ class CustomUserCreationForm(UserCreationForm): model = User fields = "__all__" + class AIForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = AI @@ -61,6 +74,7 @@ class AIForm(RestrictedFormMixin, forms.ModelForm): "model": "Select the AI model to be used.", } + class PersonIdentifierForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = PersonIdentifier @@ -70,10 +84,21 @@ class PersonIdentifierForm(RestrictedFormMixin, forms.ModelForm): "service": "The platform associated with this identifier (e.g., Signal, Instagram).", } + class PersonForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = Person - fields = ("name", "summary", "profile", "revealed", "dislikes", "likes", "sentiment", "timezone", "last_interaction") + fields = ( + "name", + "summary", + "profile", + "revealed", + "dislikes", + "likes", + "sentiment", + "timezone", + "last_interaction", + ) help_texts = { "name": "The full name of the person.", "summary": "A brief summary or description of this person.", @@ -86,6 +111,7 @@ class PersonForm(RestrictedFormMixin, forms.ModelForm): "last_interaction": "The date and time of the last recorded interaction.", } + class GroupForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = Group @@ -94,6 +120,7 @@ class GroupForm(RestrictedFormMixin, forms.ModelForm): "name": "The name of the group.", "people": "People who are part of this group.", } + people = forms.ModelMultipleChoiceField( queryset=Person.objects.all(), widget=forms.CheckboxSelectMultiple, @@ -101,13 +128,27 @@ class GroupForm(RestrictedFormMixin, forms.ModelForm): required=False, ) + class PersonaForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = Persona fields = ( - "alias", "mbti", "mbti_identity", "inner_story", "core_values", "communication_style", - "flirting_style", 
"humor_style", "likes", "dislikes", "tone", - "response_tactics", "persuasion_tactics", "boundaries", "trust", "adaptability" + "alias", + "mbti", + "mbti_identity", + "inner_story", + "core_values", + "communication_style", + "flirting_style", + "humor_style", + "likes", + "dislikes", + "tone", + "response_tactics", + "persuasion_tactics", + "boundaries", + "trust", + "adaptability", ) help_texts = { "alias": "The preferred name or identity for this persona.", @@ -128,6 +169,7 @@ class PersonaForm(RestrictedFormMixin, forms.ModelForm): "adaptability": "How easily this persona shifts tones or styles (0-100).", } + class ManipulationForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = Manipulation @@ -135,7 +177,7 @@ class ManipulationForm(RestrictedFormMixin, forms.ModelForm): help_texts = { "name": "The name of this manipulation strategy.", "group": "The group involved in this manipulation strategy.", - #"self": "Group for own UUIDs.", + # "self": "Group for own UUIDs.", "ai": "The AI associated with this manipulation.", "persona": "The persona used for this manipulation.", "enabled": "Whether this manipulation is enabled.", @@ -153,6 +195,7 @@ class SessionForm(RestrictedFormMixin, forms.ModelForm): "summary": "Summary of chat transcript.", } + class MessageForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = Message @@ -164,6 +207,7 @@ class MessageForm(RestrictedFormMixin, forms.ModelForm): "custom_author": "For detecting USER and BOT messages.", } + class QueueForm(RestrictedFormMixin, forms.ModelForm): class Meta: model = QueuedMessage @@ -172,4 +216,20 @@ class QueueForm(RestrictedFormMixin, forms.ModelForm): "session": "Chat session this message will be sent in.", "manipulation": "Manipulation that generated the message.", "text": "Content of the proposed message.", - } \ No newline at end of file + } + + +class AIWorkspaceWindowForm(forms.Form): + """Controls the message window size for AI workspace previews.""" + + limit = forms.ChoiceField( + choices=( + ("20", "Last 20"), + ("50", "Last 50"), + ("100", "Last 100"), + ), + initial="20", + required=True, + help_text="How many most-recent messages to load for the selected person.", + widget=forms.Select(attrs={"class": "is-fullwidth"}), + ) diff --git a/core/lib/deferred.py b/core/lib/deferred.py index 45d2f5c..270c18f 100644 --- a/core/lib/deferred.py +++ b/core/lib/deferred.py @@ -1,26 +1,26 @@ # Deferred processing library -from core.util import logs -from pydantic import BaseModel +import asyncio from typing import Annotated, Optional from uuid import UUID -from pydantic import ValidationError -from core.models import QueuedMessage, Message, PersonIdentifier, User -from core.clients import signal -from core.lib.prompts.functions import delete_messages + from asgiref.sync import sync_to_async from django.conf import settings -from core.clients import signalapi -import asyncio - +from pydantic import BaseModel, ValidationError +from core.clients import signal, signalapi +from core.lib.prompts.functions import delete_messages +from core.models import Message, PersonIdentifier, QueuedMessage, User +from core.util import logs log = logs.get_logger("deferred") + class DeferredDetail(BaseModel): reply_to_self: bool reply_to_others: bool is_outgoing_message: bool + class DeferredRequest(BaseModel): type: str method: str @@ -32,33 +32,35 @@ class DeferredRequest(BaseModel): detail: Optional[DeferredDetail] = None attachments: Optional[list] = None + async def send_message(db_obj): - recipient_uuid = 
db_obj.session.identifier.identifier - text = db_obj.text + recipient_uuid = db_obj.session.identifier.identifier + text = db_obj.text - send = lambda x: signalapi.send_message_raw(recipient_uuid, x) # returns ts - start_t = lambda: signalapi.start_typing(recipient_uuid) - stop_t = lambda: signalapi.stop_typing(recipient_uuid) + send = lambda x: signalapi.send_message_raw(recipient_uuid, x) # returns ts + start_t = lambda: signalapi.start_typing(recipient_uuid) + stop_t = lambda: signalapi.stop_typing(recipient_uuid) + + tss = await natural.natural_send_message( + text, + send, + start_t, + stop_t, + ) # list of ts + # result = await send_message_raw(recipient_uuid, text) + await sync_to_async(db_obj.delete)() + result = [x for x in tss if x] # all trueish ts + if result: # if at least one message was sent + ts1 = result.pop() # pick a time + log.info(f"signal message create {text}") + await sync_to_async(Message.objects.create)( + user=db_obj.session.user, + session=db_obj.session, + custom_author="BOT", + text=text, + ts=ts1, # use that time in db + ) - tss = await natural.natural_send_message( - text, - send, - start_t, - stop_t, - ) # list of ts - #result = await send_message_raw(recipient_uuid, text) - await sync_to_async(db_obj.delete)() - result = [x for x in tss if x] # all trueish ts - if result: # if at least one message was sent - ts1 = result.pop() # pick a time - log.info(f"signal message create {text}") - await sync_to_async(Message.objects.create)( - user=db_obj.session.user, - session=db_obj.session, - custom_author="BOT", - text=text, - ts=ts1, # use that time in db - ) async def process_deferred(data: dict, **kwargs): try: @@ -68,12 +70,11 @@ async def process_deferred(data: dict, **kwargs): except ValidationError as e: log.info(f"Validation Error: {e}") return - + method = validated_data.method user_id = validated_data.user_id message_id = validated_data.message_id - if method == "accept_message": try: message = await sync_to_async(QueuedMessage.objects.get)( @@ -91,7 +92,7 @@ async def process_deferred(data: dict, **kwargs): else: log.warning(f"Protocol not supported: {message.session.identifier.service}") return - elif method == "xmpp": # send xmpp message + elif method == "xmpp": # send xmpp message xmpp = kwargs.get("xmpp") service = validated_data.service msg = validated_data.msg @@ -115,18 +116,28 @@ async def process_deferred(data: dict, **kwargs): continue # Attach fetched file to XMPP - xmpp_attachments.append({ - "content": fetched["content"], - "content_type": fetched["content_type"], - "filename": fetched["filename"], - "size": fetched["size"], - }) + xmpp_attachments.append( + { + "content": fetched["content"], + "content_type": fetched["content_type"], + "filename": fetched["filename"], + "size": fetched["size"], + } + ) for identifier in identifiers: - #recipient_jid = f"{identifier.user.username}@{settings.XMPP_ADDRESS}" + # recipient_jid = f"{identifier.user.username}@{settings.XMPP_ADDRESS}" user = identifier.user - log.info(f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP.") - await xmpp.send_from_external(user, identifier, msg, validated_data.detail, attachments=xmpp_attachments) + log.info( + f"Sending {len(xmpp_attachments)} attachments from Signal to XMPP." 
+ ) + await xmpp.send_from_external( + user, + identifier, + msg, + validated_data.detail, + attachments=xmpp_attachments, + ) else: log.warning(f"Method not yet supported: {method}") - return \ No newline at end of file + return diff --git a/core/lib/prompts/bases.py b/core/lib/prompts/bases.py index c3e4844..7ceadb2 100644 --- a/core/lib/prompts/bases.py +++ b/core/lib/prompts/bases.py @@ -339,4 +339,4 @@ To make comments about being messaged late, keep in mind THEIR time zone. Contact: hi (their time zone is latvia and my current time is 22:30) Me: hi, it’s late there. what’s up? -""" \ No newline at end of file +""" diff --git a/core/lib/prompts/functions.py b/core/lib/prompts/functions.py index 5e20675..3322adb 100644 --- a/core/lib/prompts/functions.py +++ b/core/lib/prompts/functions.py @@ -1,147 +1,12 @@ -from core.lib.prompts import bases -from openai import AsyncOpenAI +""" +Prompt utility helpers. + +Legacy summarization-based history compaction was intentionally removed. +History is now preserved in storage and bounded only at prompt-build time. +""" + from asgiref.sync import sync_to_async -from core.models import Message, ChatSession, AI, Person, Manipulation -from core.util import logs -import json -from django.utils import timezone -from core.messaging import ai -from core.messaging.utils import messages_to_string - -SUMMARIZE_WHEN_EXCEEDING = 10 -SUMMARIZE_BY = 5 - -MAX_SUMMARIES = 3 # Keep last 5 summaries - -log = logs.get_logger("prompts") async def delete_messages(queryset): await sync_to_async(queryset.delete, thread_sensitive=True)() - -async def truncate_and_summarize( - chat_session: ChatSession, - ai_obj: AI, - ): - """ - Summarizes messages in chunks to prevent unchecked growth. - - Summarizes only non-summary messages. - - Deletes older summaries if too many exist. - - Ensures only messages belonging to `chat_session.user` are modified. - """ - user = chat_session.user # Store the user for ownership checks - - # 🔹 Get non-summary messages owned by the session's user - messages = await sync_to_async(list)( - Message.objects.filter(session=chat_session, user=user) - .exclude(custom_author="SUM") - .order_by("ts") - ) - - num_messages = len(messages) - - if num_messages >= SUMMARIZE_WHEN_EXCEEDING: - log.info(f"Summarizing {SUMMARIZE_BY} messages for session {chat_session.id}") - - # Get the first `SUMMARIZE_BY` non-summary messages - chunk_to_summarize = messages[:SUMMARIZE_BY] - - if not chunk_to_summarize: - log.warning("No messages available to summarize (only summaries exist). 
Skipping summarization.") - return - - last_ts = chunk_to_summarize[-1].ts # Preserve timestamp - - # 🔹 Get past summaries, keeping only the last few (owned by the session user) - summary_messages = await sync_to_async(list)( - Message.objects.filter(session=chat_session, user=user, custom_author="SUM") - .order_by("ts") - ) - - # Delete old summaries if there are too many - if len(summary_messages) >= MAX_SUMMARIES: - summary_text = await summarize_conversation(chat_session, summary_messages, ai_obj, is_summary=True) - - chat_session.summary = summary_text - await sync_to_async(chat_session.save)() - log.info(f"Updated ChatSession summary with {len(summary_messages)} consolidated summaries.") - - num_to_delete = len(summary_messages) - MAX_SUMMARIES - # await sync_to_async( - # Message.objects.filter(session=chat_session, user=user, id__in=[msg.id for msg in summary_messages[:num_to_delete]]) - # .delete() - # )() - await delete_messages( - Message.objects.filter( - session=chat_session, - user=user, - id__in=[msg.id for msg in summary_messages[:num_to_delete]] - ) - ) - log.info(f"Deleted {num_to_delete} old summaries.") - - # 🔹 Summarize conversation chunk - summary_text = await summarize_conversation(chat_session, chunk_to_summarize, ai_obj) - - # 🔹 Replace old messages with the summary - # await sync_to_async( - # Message.objects.filter(session=chat_session, user=user, id__in=[msg.id for msg in chunk_to_summarize]) - # .delete() - # )() - await delete_messages(Message.objects.filter(session=chat_session, user=user, id__in=[msg.id for msg in chunk_to_summarize])) - log.info(f"Deleted {len(chunk_to_summarize)} messages, replacing with summary.") - - # 🔹 Store new summary message (ensuring session=user consistency) - await sync_to_async(Message.objects.create)( - user=user, - session=chat_session, - custom_author="SUM", - text=summary_text, - ts=last_ts, # Preserve timestamp - ) - - # 🔹 Update ChatSession summary with latest merged summary - # chat_session.summary = summary_text - # await sync_to_async(chat_session.save)() - - - - -async def summarize_conversation( - chat_session: ChatSession, - messages: list[Message], - ai_obj, - is_summary=False, -): - """ - Summarizes all stored messages into a single summary. - - - If `is_summary=True`, treats input as previous summaries and merges them while keeping detail. - - If `is_summary=False`, summarizes raw chat messages concisely. - """ - - log.info(f"Summarizing messages for session {chat_session.id}") - - # Convert messages to structured text format - message_texts = messages_to_string(messages) - #log.info(f"Raw messages to summarize:\n{message_texts}") - - # Select appropriate summarization instruction - instruction = ( - "Merge and refine these past summaries, keeping critical details and structure intact." - if is_summary - else "Summarize this conversation concisely, maintaining important details and tone." 
- ) - - summary_prompt = [ - {"role": "system", "content": instruction}, - {"role": "user", "content": f"Conversation:\n{message_texts}\n\nProvide a clear and structured summary:"}, - ] - - # Generate AI-based summary - summary_text = await ai.run_prompt(summary_prompt, ai_obj) - #log.info(f"Generated Summary: {summary_text}") - - return f"Summary: {summary_text}" - - diff --git a/core/management/commands/scheduling.py b/core/management/commands/scheduling.py index 2a42c5f..481bc88 100644 --- a/core/management/commands/scheduling.py +++ b/core/management/commands/scheduling.py @@ -18,7 +18,6 @@ async def job(interval_seconds): """ - class Command(BaseCommand): def handle(self, *args, **options): """ diff --git a/core/management/commands/ur.py b/core/management/commands/ur.py index c6436a5..c54fec6 100644 --- a/core/management/commands/ur.py +++ b/core/management/commands/ur.py @@ -1,11 +1,14 @@ -from core.util import logs -from django.core.management.base import BaseCommand -from django.conf import settings -from core.modules.router import UnifiedRouter import asyncio +from django.conf import settings +from django.core.management.base import BaseCommand + +from core.modules.router import UnifiedRouter +from core.util import logs + log = logs.get_logger("UR") + class Command(BaseCommand): def handle(self, *args, **options): loop = asyncio.new_event_loop() @@ -13,6 +16,6 @@ class Command(BaseCommand): instance = UnifiedRouter(loop) - #instance.start() + # instance.start() - instance.run() \ No newline at end of file + instance.run() diff --git a/core/messaging/ai.py b/core/messaging/ai.py index f8e426e..bd421f3 100644 --- a/core/messaging/ai.py +++ b/core/messaging/ai.py @@ -1,14 +1,15 @@ from openai import AsyncOpenAI, OpenAI -from core.models import Message, ChatSession, AI, Person, Manipulation + +from core.models import AI, ChatSession, Manipulation, Message, Person async def run_prompt( - prompt: list[str], - ai: AI, - ): + prompt: list[str], + ai: AI, +): cast = {"api_key": ai.api_key} if ai.base_url is not None: - cast["api_key"] = ai.base_url + cast["base_url"] = ai.base_url client = AsyncOpenAI(**cast) response = await client.chat.completions.create( model=ai.model, diff --git a/core/messaging/analysis.py b/core/messaging/analysis.py index 2b8f17a..24ba7d4 100644 --- a/core/messaging/analysis.py +++ b/core/messaging/analysis.py @@ -1,13 +1,16 @@ -from core.lib.prompts import bases -from openai import AsyncOpenAI -from asgiref.sync import sync_to_async -from core.models import Message, ChatSession, AI, Person, Manipulation -from core.util import logs -import json import asyncio -from django.utils import timezone +import json import random +from asgiref.sync import sync_to_async +from django.utils import timezone +from openai import AsyncOpenAI + +from core.lib.prompts import bases +from core.models import AI, ChatSession, Manipulation, Message, Person +from core.util import logs + + def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str): """ Generate a structured prompt using the attributes of the provided Person and Manipulation models. 
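The core/messaging/ai.py hunk above fixes a real defect: when an AI row had base_url set, the old code overwrote cast["api_key"] with the base URL instead of adding a separate base_url entry, so custom endpoints never took effect and the key was clobbered. The same stale assignment still appears in run_context_prompt later in core/messaging/analysis.py. A minimal sketch of the corrected client construction follows, assuming an AI instance exposing api_key, base_url and model; build_client and run_prompt_sketch are illustrative names, not part of the patch.

# Sketch only: mirrors the corrected cast-building in core/messaging/ai.py.
from openai import AsyncOpenAI


def build_client(ai):
    # `ai` is assumed to be a core.models.AI row (api_key, base_url, model).
    cast = {"api_key": ai.api_key}
    if ai.base_url is not None:
        # The old code assigned to cast["api_key"] here, clobbering the key.
        cast["base_url"] = ai.base_url
    return AsyncOpenAI(**cast)


async def run_prompt_sketch(prompt: list[dict], ai) -> str:
    client = build_client(ai)
    response = await client.chat.completions.create(model=ai.model, messages=prompt)
    return response.choices[0].message.content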
@@ -18,7 +21,6 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history system_message = ( "You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n" - "### Persona Profile ###\n" f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n" f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n" @@ -29,7 +31,6 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history f"- **Response Tactics:** {persona.response_tactics}\n" f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n" f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n" - "### Contact Information ###\n" f"- **Summary:** {person.summary or 'N/A'}\n" f"- **Profile:** {person.profile or 'N/A'}\n" @@ -38,10 +39,8 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history f"- **Timezone:** {person.timezone or 'N/A'}\n" f"- **Last Interaction:** {person.last_interaction or 'Never'}\n" f"- **Current Date/Time:** {now}\n\n" - "### Conversation Context ###\n" f"{chat_history if chat_history else 'No prior chat history.'}\n\n" - "### Response Guidelines ###\n" "- **Engagement**: Keep responses engaging, with a balance of wit, depth, and confidence.\n" "- **Flirting**: Be direct, playful, and, when appropriate, subtly provocative—without hesitation.\n" @@ -56,10 +55,11 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history {"role": "user", "content": user_message}, ] + async def run_context_prompt( - prompt: list[str], - ai: AI, - ): + prompt: list[str], + ai: AI, +): cast = {"api_key": ai.api_key} if ai.base_url is not None: cast["api_key"] = ai.base_url @@ -70,4 +70,4 @@ async def run_context_prompt( ) content = response.choices[0].message.content - return content \ No newline at end of file + return content diff --git a/core/messaging/history.py b/core/messaging/history.py index e365cf1..e6f93ed 100644 --- a/core/messaging/history.py +++ b/core/messaging/history.py @@ -1,19 +1,136 @@ -from core.util import logs -from core.models import Message, ChatSession, QueuedMessage from asgiref.sync import sync_to_async +from django.conf import settings + from core.messaging.utils import messages_to_string +from core.models import ChatSession, Message, QueuedMessage +from core.util import logs log = logs.get_logger("history") -async def get_chat_history(session): - stored_messages = await sync_to_async(list)( - Message.objects.filter(session=session, user=session.user).order_by("ts") +# Prompt-window controls: +# - Full message history is always persisted in the database. +# - Only the prompt input window is reduced. +# - Max values are hard safety rails; runtime chooses a smaller adaptive subset. +# - Min value prevents overly aggressive clipping on very long average messages. +DEFAULT_PROMPT_HISTORY_MAX_MESSAGES = getattr( + settings, "PROMPT_HISTORY_MAX_MESSAGES", 120 +) +DEFAULT_PROMPT_HISTORY_MAX_CHARS = getattr(settings, "PROMPT_HISTORY_MAX_CHARS", 24000) +DEFAULT_PROMPT_HISTORY_MIN_MESSAGES = getattr( + settings, "PROMPT_HISTORY_MIN_MESSAGES", 24 +) + + +def _build_recent_history(messages, max_chars): + """ + Build the final prompt transcript under a strict character budget. + + Method: + 1. Iterate messages from newest to oldest so recency is prioritized. + 2. For each message, estimate the rendered line length exactly as it will + appear in the prompt transcript. + 3. 
Stop once adding another line would exceed `max_chars`, while still + guaranteeing at least one message can be included. + 4. Reverse back to chronological order for readability in prompts. + """ + if not messages: + return "" + + selected = [] + total_chars = 0 + # Recency-first packing, then reorder to chronological output later. + for msg in reversed(messages): + line = f"[{msg.ts}] <{msg.custom_author if msg.custom_author else msg.session.identifier.person.name}> {msg.text}" + line_len = len(line) + 1 + # Keep at least one line even if it alone exceeds max_chars. + if selected and (total_chars + line_len) > max_chars: + break + selected.append(msg) + total_chars += line_len + + selected.reverse() + return messages_to_string(selected) + + +def _compute_adaptive_message_limit(messages, max_messages, max_chars): + """ + Derive how many messages to include before final char-budget packing. + + This function intentionally avoids hand-picked threshold buckets. + Instead, it computes a budget-derived estimate: + - Build a recent sample (up to 80 messages) representing current chat style. + - Measure *rendered* line lengths (timestamp + author + text), not raw text. + - Estimate average line length from that sample. + - Convert char budget into message budget: floor(max_chars / avg_line_len). + - Clamp to configured min/max rails. + + Why two stages: + - Stage A (this function): estimate count from current message density. + - Stage B (`_build_recent_history`): enforce exact char ceiling. + This keeps behavior stable while guaranteeing hard prompt budget compliance. + """ + if not messages: + return DEFAULT_PROMPT_HISTORY_MIN_MESSAGES + + sample = messages[-min(len(messages), 80) :] + rendered_lengths = [] + for msg in sample: + author = ( + msg.custom_author + if msg.custom_author + else msg.session.identifier.person.name + ) + text = msg.text or "" + # Match the line shape used in _build_recent_history/messages_to_string. + rendered_lengths.append(len(f"[{msg.ts}] <{author}> {text}") + 1) + + # Defensive denominator: never divide by zero. + avg_line_len = ( + (sum(rendered_lengths) / len(rendered_lengths)) if rendered_lengths else 1.0 ) - recent_chat_history = messages_to_string(stored_messages) - chat_history = f"Chat Summary:\n{session.summary}\n\nRecent Messages:\n{recent_chat_history}" if session.summary else f"Recent Messages:\n{recent_chat_history}" + avg_line_len = max(avg_line_len, 1.0) + + budget_based = int(max_chars / avg_line_len) + adaptive = max(DEFAULT_PROMPT_HISTORY_MIN_MESSAGES, budget_based) + adaptive = min(max_messages, adaptive) + return max(1, adaptive) + + +async def get_chat_history( + session, + max_messages=DEFAULT_PROMPT_HISTORY_MAX_MESSAGES, + max_chars=DEFAULT_PROMPT_HISTORY_MAX_CHARS, +): + """ + Return prompt-ready chat history with adaptive windowing and hard budget limits. + + Pipeline: + 1. Fetch a bounded recent slice from DB (performance guard). + 2. Estimate adaptive message count from observed rendered message density. + 3. Keep only the newest `adaptive_limit` messages. + 4. Pack those lines under `max_chars` exactly. + """ + # Storage remains complete; only prompt context is reduced. 
+ fetch_limit = max(max_messages * 3, 200) + fetch_limit = min(fetch_limit, 1000) + stored_messages = await sync_to_async(list)( + Message.objects.filter(session=session, user=session.user).order_by("-ts")[ + :fetch_limit + ] + ) + stored_messages.reverse() + adaptive_limit = _compute_adaptive_message_limit( + stored_messages, + max_messages=max_messages, + max_chars=max_chars, + ) + selected_messages = stored_messages[-adaptive_limit:] + recent_chat_history = _build_recent_history(selected_messages, max_chars=max_chars) + chat_history = f"Recent Messages:\n{recent_chat_history}" return chat_history + async def get_chat_session(user, identifier): chat_session, _ = await sync_to_async(ChatSession.objects.get_or_create)( identifier=identifier, @@ -21,6 +138,7 @@ async def get_chat_session(user, identifier): ) return chat_session + async def store_message(session, sender, text, ts, outgoing=False): log.info(f"STORE MESSAGE {text}") msg = await sync_to_async(Message.objects.create)( @@ -29,11 +147,12 @@ async def store_message(session, sender, text, ts, outgoing=False): sender_uuid=sender, text=text, ts=ts, - custom_author="USER" if outgoing else None + custom_author="USER" if outgoing else None, ) return msg + async def store_own_message(session, text, ts, manip=None, queue=False): log.info(f"STORE OWN MESSAGE {text}") cast = { @@ -53,4 +172,8 @@ async def store_own_message(session, text, ts, manip=None, queue=False): **cast, ) - return msg \ No newline at end of file + return msg + + +async def delete_queryset(queryset): + await sync_to_async(queryset.delete, thread_sensitive=True)() diff --git a/core/messaging/natural.py b/core/messaging/natural.py index 138a769..4f2ac5a 100644 --- a/core/messaging/natural.py +++ b/core/messaging/natural.py @@ -1,12 +1,10 @@ import asyncio import random -async def natural_send_message(text, - send, - start_typing, - stop_typing, - skip_thinking=False - ): + +async def natural_send_message( + text, send, start_typing, stop_typing, skip_thinking=False +): """ Parses and sends messages with natural delays based on message length. 
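A worked example makes the two-stage prompt windowing in core/messaging/history.py concrete: Stage A converts the character budget into a message count from the observed average rendered line length, and Stage B packs the newest messages under the hard character ceiling. The standalone sketch below uses the default rails (24/120 messages, 24000 chars); adaptive_limit is an illustrative helper, not part of the patch.

# Stage A arithmetic from _compute_adaptive_message_limit, with default rails.
MAX_MESSAGES, MAX_CHARS, MIN_MESSAGES = 120, 24000, 24


def adaptive_limit(avg_rendered_line_len: float) -> int:
    # floor(char budget / avg rendered line length), clamped to the min/max rails.
    budget_based = int(MAX_CHARS / max(avg_rendered_line_len, 1.0))
    return max(1, min(MAX_MESSAGES, max(MIN_MESSAGES, budget_based)))


print(adaptive_limit(90))    # short chatty lines -> 120 (capped by the max rail)
print(adaptive_limit(400))   # mid-length lines   -> 60 (pure budget division)
print(adaptive_limit(2000))  # very long lines    -> 24 (min rail; Stage B still
                             # enforces the exact 24000-char ceiling afterwards)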
@@ -41,7 +39,9 @@ async def natural_send_message(text, # Decide when to start thinking *before* typing if not skip_thinking: if natural_delay > 3.5: # Only delay if response is long - await asyncio.sleep(natural_delay - 3.5) # "Thinking" pause before typing + await asyncio.sleep( + natural_delay - 3.5 + ) # "Thinking" pause before typing # Start typing await start_typing() @@ -55,4 +55,4 @@ async def natural_send_message(text, # Optional: Small buffer between messages to prevent rapid-fire responses await asyncio.sleep(0.5) - return ids \ No newline at end of file + return ids diff --git a/core/messaging/replies.py b/core/messaging/replies.py index 8261a9e..97929d3 100644 --- a/core/messaging/replies.py +++ b/core/messaging/replies.py @@ -1,18 +1,21 @@ -from core.lib.prompts import bases -from asgiref.sync import sync_to_async -from core.models import Message, ChatSession, AI, Person, Manipulation -from core.util import logs -import json import asyncio -from django.utils import timezone +import json import random +from asgiref.sync import sync_to_async +from django.utils import timezone + +from core.lib.prompts import bases +from core.models import AI, ChatSession, Manipulation, Message, Person +from core.util import logs + log = logs.get_logger("replies") + def should_reply( - reply_to_self, - reply_to_others, - is_outgoing_message, + reply_to_self, + reply_to_others, + is_outgoing_message, ): reply = False if reply_to_self: @@ -26,7 +29,14 @@ def should_reply( return reply -def generate_mutate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str, mutate: bool = False): + +def generate_mutate_reply_prompt( + msg: dict, + person: Person, + manip: Manipulation, + chat_history: str, + mutate: bool = False, +): """ Strictly rewrites the message in the persona’s tone and style while keeping the original meaning. No added explanations. @@ -66,16 +76,12 @@ def generate_mutate_reply_prompt(msg: dict, person: Person, manip: Manipulation, f"- **Response Tactics:** {persona.response_tactics}\n" f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n" f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n" - "### STRICT RULES ###\n" f"{strict_rules}\n\n" - "### TRANSFORMATION GUIDELINES ###\n" f"{transformation_guidelines}\n\n" - "### Original Message ###\n" f"{msg}\n\n" - "### Rewritten Message ###\n" "(DO NOT include anything except the rewritten text. NO extra comments or formatting.)" ) @@ -83,8 +89,13 @@ def generate_mutate_reply_prompt(msg: dict, person: Person, manip: Manipulation, return [{"role": "system", "content": system_message}] - -def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str, mutate: bool = False): +def generate_reply_prompt( + msg: dict, + person: Person, + manip: Manipulation, + chat_history: str, + mutate: bool = False, +): """ Generate a structured prompt using the attributes of the provided Person and Manipulation models. 
""" @@ -108,7 +119,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h "You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n" "You must strictly apply the following persona-based filtering rules when modifying the message:\n\n" f"{filter_rules}\n\n" - "### Persona Profile ###\n" f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n" f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n" @@ -119,7 +129,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h f"- **Response Tactics:** {persona.response_tactics}\n" f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n" f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n" - "### Contact Information ###\n" f"- **Summary:** {person.summary or 'N/A'}\n" f"- **Profile:** {person.profile or 'N/A'}\n" @@ -128,7 +137,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h f"- **Timezone:** {person.timezone or 'N/A'}\n" f"- **Last Interaction:** {person.last_interaction or 'Never'}\n" f"- **Current Date/Time:** {now}\n\n" - "### Conversation Context ###\n" f"{chat_history if chat_history else 'No prior chat history.'}\n\n" ) diff --git a/core/messaging/utils.py b/core/messaging/utils.py index dc652cb..134d823 100644 --- a/core/messaging/utils.py +++ b/core/messaging/utils.py @@ -12,9 +12,10 @@ def messages_to_string(messages: list): ] return "\n".join(message_texts) + async def update_last_interaction(session): now = timezone.now() session.identifier.person.last_interaction = now session.last_interaction = now await sync_to_async(session.identifier.person.save)() - await sync_to_async(session.save)() \ No newline at end of file + await sync_to_async(session.save)() diff --git a/core/migrations/0005_ai_person_group_persona_manipulation_and_more.py b/core/migrations/0005_ai_person_group_persona_manipulation_and_more.py index c0d8ea7..597b452 100644 --- a/core/migrations/0005_ai_person_group_persona_manipulation_and_more.py +++ b/core/migrations/0005_ai_person_group_persona_manipulation_and_more.py @@ -1,7 +1,8 @@ # Generated by Django 5.1.5 on 2025-02-06 21:57 -import django.db.models.deletion import uuid + +import django.db.models.deletion from django.conf import settings from django.db import migrations, models diff --git a/core/migrations/0008_alter_ai_user_alter_group_user_and_more.py b/core/migrations/0008_alter_ai_user_alter_group_user_and_more.py index 25e4be1..27ed30a 100644 --- a/core/migrations/0008_alter_ai_user_alter_group_user_and_more.py +++ b/core/migrations/0008_alter_ai_user_alter_group_user_and_more.py @@ -1,7 +1,8 @@ # Generated by Django 5.1.5 on 2025-02-07 12:05 -import django.db.models.deletion import uuid + +import django.db.models.deletion from django.conf import settings from django.db import migrations, models diff --git a/core/migrations/0009_alter_chatsession_identifier_alter_manipulation_ai_and_more.py b/core/migrations/0009_alter_chatsession_identifier_alter_manipulation_ai_and_more.py index 0ba521c..8cd3fc7 100644 --- a/core/migrations/0009_alter_chatsession_identifier_alter_manipulation_ai_and_more.py +++ b/core/migrations/0009_alter_chatsession_identifier_alter_manipulation_ai_and_more.py @@ -1,7 +1,8 @@ # Generated by Django 5.1.5 on 2025-02-07 13:56 -import django.db.models.deletion import uuid + +import django.db.models.deletion from django.db import migrations, models diff --git 
a/core/migrations/0014_queuedmessage.py b/core/migrations/0014_queuedmessage.py index 9c9323b..1539282 100644 --- a/core/migrations/0014_queuedmessage.py +++ b/core/migrations/0014_queuedmessage.py @@ -1,7 +1,8 @@ # Generated by Django 5.1.5 on 2025-02-08 16:07 -import django.db.models.deletion import uuid + +import django.db.models.deletion from django.conf import settings from django.db import migrations, models diff --git a/core/migrations/0016_airequest_airesult_workspaceconversation_and_more.py b/core/migrations/0016_airequest_airesult_workspaceconversation_and_more.py new file mode 100644 index 0000000..0e3cdff --- /dev/null +++ b/core/migrations/0016_airequest_airesult_workspaceconversation_and_more.py @@ -0,0 +1,95 @@ +# Generated by Django 5.2.11 on 2026-02-14 22:52 + +import django.db.models.deletion +import uuid +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0015_manipulation_filter_enabled_alter_manipulation_mode'), + ] + + operations = [ + migrations.CreateModel( + name='AIRequest', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), + ('window_spec', models.JSONField(default=dict)), + ('message_ids', models.JSONField(blank=True, default=list)), + ('user_notes', models.TextField(blank=True, default='')), + ('operation', models.CharField(choices=[('summarise', 'Summarise'), ('draft_reply', 'Draft Reply'), ('critique', 'Critique'), ('repair', 'Repair'), ('extract_patterns', 'Extract Patterns'), ('memory_propose', 'Memory Propose')], max_length=32)), + ('policy_snapshot', models.JSONField(blank=True, default=dict)), + ('status', models.CharField(choices=[('queued', 'Queued'), ('running', 'Running'), ('done', 'Done'), ('failed', 'Failed')], default='queued', max_length=16)), + ('error', models.TextField(blank=True, default='')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('started_at', models.DateTimeField(blank=True, null=True)), + ('finished_at', models.DateTimeField(blank=True, null=True)), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ], + ), + migrations.CreateModel( + name='AIResult', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), + ('summary_m3', models.TextField(blank=True, default='')), + ('draft_replies', models.JSONField(blank=True, default=list)), + ('risk_flags', models.JSONField(blank=True, default=list)), + ('memory_proposals', models.JSONField(blank=True, default=list)), + ('citations', models.JSONField(blank=True, default=list)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('ai_request', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='result', to='core.airequest')), + ], + ), + migrations.CreateModel( + name='WorkspaceConversation', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), + ('title', models.CharField(blank=True, default='', max_length=255)), + ('platform_type', models.CharField(choices=[('signal', 'Signal'), ('instagram', 'Instagram')], default='signal', max_length=255)), + ('platform_thread_id', models.CharField(blank=True, default='', max_length=255)), + ('last_event_ts', models.BigIntegerField(blank=True, null=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('participants', models.ManyToManyField(blank=True, 
related_name='workspace_conversations', to='core.person')), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='workspace_conversations', to=settings.AUTH_USER_MODEL)), + ], + ), + migrations.CreateModel( + name='MessageEvent', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), + ('ts', models.BigIntegerField(db_index=True)), + ('direction', models.CharField(choices=[('in', 'Inbound'), ('out', 'Outbound')], max_length=8)), + ('sender_uuid', models.CharField(blank=True, db_index=True, default='', max_length=255)), + ('text', models.TextField(blank=True, default='')), + ('attachments', models.JSONField(blank=True, default=list)), + ('raw_payload_ref', models.JSONField(blank=True, default=dict)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('conversation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='core.workspaceconversation')), + ], + options={ + 'ordering': ['ts'], + }, + ), + migrations.CreateModel( + name='MemoryItem', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), + ('type', models.CharField(choices=[('M1', 'Durable Fact/Preference'), ('M2', 'Relationship State'), ('M3', 'Conversation Working Summary')], max_length=2)), + ('status', models.CharField(choices=[('proposed', 'Proposed'), ('active', 'Active'), ('deprecated', 'Deprecated')], default='proposed', max_length=16)), + ('content', models.JSONField(blank=True, default=dict)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('source_request', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.airequest')), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ('conversation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memory_items', to='core.workspaceconversation')), + ], + ), + migrations.AddField( + model_name='airequest', + name='conversation', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ai_requests', to='core.workspaceconversation'), + ), + ] diff --git a/core/migrations/0017_remove_airesult_risk_flags_and_more.py b/core/migrations/0017_remove_airesult_risk_flags_and_more.py new file mode 100644 index 0000000..f25b28f --- /dev/null +++ b/core/migrations/0017_remove_airesult_risk_flags_and_more.py @@ -0,0 +1,359 @@ +# Generated by Django 5.2.11 on 2026-02-15 00:14 + +import core.models +import django.db.models.deletion +import uuid +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0016_airequest_airesult_workspaceconversation_and_more'), + ] + + operations = [ + migrations.RemoveField( + model_name='airesult', + name='risk_flags', + ), + migrations.RemoveField( + model_name='airesult', + name='summary_m3', + ), + migrations.RemoveField( + model_name='memoryitem', + name='type', + ), + migrations.AddField( + model_name='airesult', + name='interaction_signals', + field=models.JSONField(blank=True, default=list, help_text="Structured positive/neutral/risk signals inferred for this run. 
Example item: {'label':'repair_attempt','valence':'positive','message_event_ids':[...]}."), + ), + migrations.AddField( + model_name='airesult', + name='user', + field=models.ForeignKey(default=core.models.get_default_workspace_user_pk, help_text='Owner of this AI result row (required for restricted CRUD filtering).', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_ai_results', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='airesult', + name='working_summary', + field=models.TextField(blank=True, default='', help_text='Conversation working summary generated for this run.'), + ), + migrations.AddField( + model_name='memoryitem', + name='memory_kind', + field=models.CharField(choices=[('fact', 'Durable Fact/Preference'), ('state', 'Relationship State'), ('summary', 'Conversation Working Summary')], default=1, help_text='Memory kind: fact/state/summary.', max_length=16), + preserve_default=False, + ), + migrations.AddField( + model_name='messageevent', + name='source_system', + field=models.CharField(choices=[('signal', 'Signal'), ('xmpp', 'XMPP'), ('workspace', 'Workspace'), ('ai', 'AI')], default='signal', help_text='System that produced this event record.', max_length=32), + ), + migrations.AddField( + model_name='messageevent', + name='user', + field=models.ForeignKey(default=core.models.get_default_workspace_user_pk, help_text='Owner of this message event row (required for restricted CRUD filtering).', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_message_events', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='workspaceconversation', + name='commitment_confidence', + field=models.FloatField(default=0.0, help_text='Confidence in commitment scores (0.0-1.0).'), + ), + migrations.AddField( + model_name='workspaceconversation', + name='commitment_inbound_score', + field=models.FloatField(blank=True, help_text='Estimated commitment score for counterpart -> user direction (0-100). Null while calibrating.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='commitment_last_computed_at', + field=models.DateTimeField(blank=True, help_text='Timestamp of the latest commitment computation.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='commitment_outbound_score', + field=models.FloatField(blank=True, help_text='Estimated commitment score for user -> counterpart direction (0-100). Null while calibrating.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='last_ai_run_at', + field=models.DateTimeField(blank=True, help_text='Last time any AIRequest finished for this conversation.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='participant_feedback', + field=models.JSONField(blank=True, default=dict, help_text="Per-person interaction feedback map keyed by person UUID. 
Example: {'<person_uuid>': {'state': 'withdrawing', 'note': 'short replies'}}."), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_confidence', + field=models.FloatField(default=0.0, help_text='Confidence in stability_score (0.0-1.0).'), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_last_computed_at', + field=models.DateTimeField(blank=True, help_text='Timestamp of the latest stability computation.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_sample_days', + field=models.PositiveIntegerField(default=0, help_text='How many calendar days of data were used for stability.'), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_sample_messages', + field=models.PositiveIntegerField(default=0, help_text='How many messages were used to compute stability.'), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_score', + field=models.FloatField(blank=True, help_text='Relationship stability score (0-100). Null while calibrating.', null=True), + ), + migrations.AddField( + model_name='workspaceconversation', + name='stability_state', + field=models.CharField(choices=[('calibrating', 'Calibrating'), ('stable', 'Stable'), ('watch', 'Watch'), ('fragile', 'Fragile')], default='calibrating', help_text='UI label for relationship stability, baseline-aware.', max_length=32), + ), + migrations.AlterField( + model_name='airequest', + name='conversation', + field=models.ForeignKey(help_text='Conversation analyzed by this request.', on_delete=django.db.models.deletion.CASCADE, related_name='ai_requests', to='core.workspaceconversation'), + ), + migrations.AlterField( + model_name='airequest', + name='created_at', + field=models.DateTimeField(auto_now_add=True, help_text='Request creation timestamp.'), + ), + migrations.AlterField( + model_name='airequest', + name='error', + field=models.TextField(blank=True, default='', help_text="Error details when status='failed'."), + ), + migrations.AlterField( + model_name='airequest', + name='finished_at', + field=models.DateTimeField(blank=True, help_text='Worker completion timestamp.', null=True), + ), + migrations.AlterField( + model_name='airequest', + name='id', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this AI request.', primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='airequest', + name='message_ids', + field=models.JSONField(blank=True, default=list, help_text='Resolved ordered MessageEvent IDs included in this run.'), + ), + migrations.AlterField( + model_name='airequest', + name='operation', + field=models.CharField(choices=[('summarise', 'Summarise'), ('draft_reply', 'Draft Reply'), ('critique', 'Critique'), ('repair', 'Repair'), ('extract_patterns', 'Extract Patterns'), ('memory_propose', 'Memory Propose'), ('state_detect', 'State Detect'), ('rewrite_style', 'Rewrite Style'), ('send_readiness', 'Send Readiness'), ('timeline_brief', 'Timeline Brief')], help_text='Requested AI operation type.', max_length=32), + ), + migrations.AlterField( + model_name='airequest', + name='policy_snapshot', + field=models.JSONField(blank=True, default=dict, help_text='Effective manipulation/policy values captured at request time, so results remain auditable even if policies change later.'), + ), + migrations.AlterField( + model_name='airequest', + name='started_at', + field=models.DateTimeField(blank=True, 
help_text='Worker start timestamp.', null=True), + ), + migrations.AlterField( + model_name='airequest', + name='status', + field=models.CharField(choices=[('queued', 'Queued'), ('running', 'Running'), ('done', 'Done'), ('failed', 'Failed')], default='queued', help_text='Worker lifecycle state for this request.', max_length=16), + ), + migrations.AlterField( + model_name='airequest', + name='user', + field=models.ForeignKey(help_text='User who initiated this request.', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_ai_requests', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='airequest', + name='user_notes', + field=models.TextField(blank=True, default='', help_text='Optional user intent/context notes injected into the prompt.'), + ), + migrations.AlterField( + model_name='airequest', + name='window_spec', + field=models.JSONField(default=dict, help_text='Selection spec (last_n/since_ts/between_ts/include_attachments/etc). Should be dynamically resolved by available context/token budget.'), + ), + migrations.AlterField( + model_name='airesult', + name='ai_request', + field=models.OneToOneField(help_text='Owning AI request for this result.', on_delete=django.db.models.deletion.CASCADE, related_name='result', to='core.airequest'), + ), + migrations.AlterField( + model_name='airesult', + name='citations', + field=models.JSONField(blank=True, default=list, help_text='Referenced MessageEvent IDs supporting generated claims.'), + ), + migrations.AlterField( + model_name='airesult', + name='created_at', + field=models.DateTimeField(auto_now_add=True, help_text='Result creation timestamp.'), + ), + migrations.AlterField( + model_name='airesult', + name='draft_replies', + field=models.JSONField(blank=True, default=list, help_text='Draft reply candidates, typically with tone and rationale.'), + ), + migrations.AlterField( + model_name='airesult', + name='id', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this AI result payload.', primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='airesult', + name='memory_proposals', + field=models.JSONField(blank=True, default=list, help_text='Proposed memory entries, typically requiring user approval.'), + ), + migrations.AlterField( + model_name='memoryitem', + name='content', + field=models.JSONField(blank=True, default=dict, help_text='Structured memory payload (schema can evolve by type).'), + ), + migrations.AlterField( + model_name='memoryitem', + name='conversation', + field=models.ForeignKey(help_text='Conversation scope this memory item belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='memory_items', to='core.workspaceconversation'), + ), + migrations.AlterField( + model_name='memoryitem', + name='created_at', + field=models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.'), + ), + migrations.AlterField( + model_name='memoryitem', + name='id', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this memory item.', primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='memoryitem', + name='source_request', + field=models.ForeignKey(blank=True, help_text='AIRequest that originated this memory, if any.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.airequest'), + ), + migrations.AlterField( + model_name='memoryitem', + name='status', + field=models.CharField(choices=[('proposed', 'Proposed'), ('active', 
'Active'), ('deprecated', 'Deprecated')], default='proposed', help_text='Lifecycle state, especially for approval-gated memories.', max_length=16), + ), + migrations.AlterField( + model_name='memoryitem', + name='updated_at', + field=models.DateTimeField(auto_now=True, help_text='Last update timestamp.'), + ), + migrations.AlterField( + model_name='memoryitem', + name='user', + field=models.ForeignKey(help_text='Owner of the memory item.', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_memory_items', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='messageevent', + name='attachments', + field=models.JSONField(blank=True, default=list, help_text='Attachment metadata list associated with this message.'), + ), + migrations.AlterField( + model_name='messageevent', + name='conversation', + field=models.ForeignKey(help_text='AI workspace conversation this message belongs to. This is not the transport-native thread object.', on_delete=django.db.models.deletion.CASCADE, related_name='events', to='core.workspaceconversation'), + ), + migrations.AlterField( + model_name='messageevent', + name='created_at', + field=models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.'), + ), + migrations.AlterField( + model_name='messageevent', + name='direction', + field=models.CharField(choices=[('in', 'Inbound'), ('out', 'Outbound')], help_text="Direction relative to workspace owner: 'in' from counterpart(s), 'out' from user/bot side.", max_length=8), + ), + migrations.AlterField( + model_name='messageevent', + name='id', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this message event.', primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='messageevent', + name='raw_payload_ref', + field=models.JSONField(blank=True, default=dict, help_text='Raw source payload or reference pointer for audit/debug.'), + ), + migrations.AlterField( + model_name='messageevent', + name='sender_uuid', + field=models.CharField(blank=True, db_index=True, default='', help_text='Source sender UUID/identifier for correlation.', max_length=255), + ), + migrations.AlterField( + model_name='messageevent', + name='text', + field=models.TextField(blank=True, default='', help_text='Normalized message text body.'), + ), + migrations.AlterField( + model_name='messageevent', + name='ts', + field=models.BigIntegerField(db_index=True, help_text='Event timestamp (unix ms) as reported by source_system.'), + ), + migrations.AlterField( + model_name='personidentifier', + name='service', + field=models.CharField(choices=[('signal', 'Signal'), ('xmpp', 'XMPP'), ('instagram', 'Instagram')], max_length=255), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='created_at', + field=models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.'), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='id', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this workspace conversation.', primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='last_event_ts', + field=models.BigIntegerField(blank=True, help_text='Latest message timestamp (unix ms) currently known.', null=True), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='participants', + field=models.ManyToManyField(blank=True, help_text='Resolved people participating in this 
conversation.', related_name='workspace_conversations', to='core.person'), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='platform_thread_id', + field=models.CharField(blank=True, default='', help_text='Platform-native thread/group identifier when available.', max_length=255), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='platform_type', + field=models.CharField(choices=[('signal', 'Signal'), ('xmpp', 'XMPP'), ('instagram', 'Instagram')], default='signal', help_text='Primary transport for this conversation (reuses SERVICE_CHOICES).', max_length=255), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='title', + field=models.CharField(blank=True, default='', help_text='Human-friendly label shown in the workspace sidebar.', max_length=255), + ), + migrations.AlterField( + model_name='workspaceconversation', + name='user', + field=models.ForeignKey(help_text='Owner of this conversation workspace.', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_conversations', to=settings.AUTH_USER_MODEL), + ), + migrations.CreateModel( + name='AIResultSignal', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this result signal.', primary_key=True, serialize=False)), + ('label', models.CharField(help_text="Short signal label, e.g. 'withdrawing', 'repair_attempt'.", max_length=128)), + ('valence', models.CharField(choices=[('positive', 'Positive'), ('neutral', 'Neutral'), ('risk', 'Risk')], default='neutral', help_text='Signal polarity: positive, neutral, or risk.', max_length=16)), + ('score', models.FloatField(blank=True, help_text='Optional model confidence/strength (0.0-1.0).', null=True)), + ('rationale', models.TextField(blank=True, default='', help_text='Human-readable explanation for this signal.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('ai_result', models.ForeignKey(help_text='AI result that produced this signal.', on_delete=django.db.models.deletion.CASCADE, related_name='signals', to='core.airesult')), + ('message_event', models.ForeignKey(blank=True, help_text='Optional specific message event referenced by this signal.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ai_signals', to='core.messageevent')), + ('user', models.ForeignKey(help_text='Owner of this signal row (required for restricted CRUD filtering).', on_delete=django.db.models.deletion.CASCADE, related_name='workspace_ai_result_signals', to=settings.AUTH_USER_MODEL)), + ], + ), + ] diff --git a/core/migrations/0018_patternmitigationplan_patternmitigationmessage_and_more.py b/core/migrations/0018_patternmitigationplan_patternmitigationmessage_and_more.py new file mode 100644 index 0000000..6280132 --- /dev/null +++ b/core/migrations/0018_patternmitigationplan_patternmitigationmessage_and_more.py @@ -0,0 +1,84 @@ +# Generated by Django 5.2.11 on 2026-02-15 00:58 + +import django.db.models.deletion +import uuid +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0017_remove_airesult_risk_flags_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='PatternMitigationPlan', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this mitigation plan.', primary_key=True, serialize=False)), + ('title', models.CharField(blank=True, 
default='', help_text='Display title for this plan.', max_length=255)), + ('objective', models.TextField(blank=True, default='', help_text='High-level objective this plan is meant to achieve.')), + ('fundamental_items', models.JSONField(blank=True, default=list, help_text='Foundational agreed items/principles for this plan.')), + ('creation_mode', models.CharField(choices=[('auto', 'Auto'), ('guided', 'Guided')], default='auto', help_text='Whether plan artifacts were generated automatically or user-guided.', max_length=16)), + ('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('archived', 'Archived')], default='draft', help_text='Lifecycle status of the plan.', max_length=16)), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('updated_at', models.DateTimeField(auto_now=True, help_text='Last update timestamp.')), + ('conversation', models.ForeignKey(help_text='Workspace conversation this plan belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='mitigation_plans', to='core.workspaceconversation')), + ('source_ai_result', models.ForeignKey(blank=True, help_text='AI result that initiated this plan, if any.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mitigation_plans', to='core.airesult')), + ('user', models.ForeignKey(help_text='Owner of this plan.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_plans', to=settings.AUTH_USER_MODEL)), + ], + ), + migrations.CreateModel( + name='PatternMitigationMessage', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this mitigation message.', primary_key=True, serialize=False)), + ('role', models.CharField(choices=[('user', 'User'), ('assistant', 'Assistant'), ('system', 'System')], help_text='Message speaker role.', max_length=16)), + ('text', models.TextField(help_text='Message content.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('user', models.ForeignKey(help_text='Owner of this message.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_messages', to=settings.AUTH_USER_MODEL)), + ('plan', models.ForeignKey(help_text='Parent mitigation plan.', on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='core.patternmitigationplan')), + ], + options={ + 'ordering': ['created_at'], + }, + ), + migrations.CreateModel( + name='PatternMitigationGame', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this game.', primary_key=True, serialize=False)), + ('title', models.CharField(help_text='Game title.', max_length=255)), + ('instructions', models.TextField(blank=True, default='', help_text='Gameplay/instruction text.')), + ('enabled', models.BooleanField(default=True, help_text='Whether this game is currently enabled.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('user', models.ForeignKey(help_text='Owner of this game.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_games', to=settings.AUTH_USER_MODEL)), + ('plan', models.ForeignKey(help_text='Parent mitigation plan.', on_delete=django.db.models.deletion.CASCADE, related_name='games', to='core.patternmitigationplan')), + ], + ), + migrations.CreateModel( + name='PatternArtifactExport', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, 
help_text='Stable identifier for this export artifact.', primary_key=True, serialize=False)), + ('artifact_type', models.CharField(choices=[('rulebook', 'Rulebook'), ('rules', 'Rules'), ('games', 'Games')], help_text='Artifact category being exported.', max_length=32)), + ('export_format', models.CharField(choices=[('markdown', 'Markdown'), ('json', 'JSON'), ('text', 'Text')], default='markdown', help_text='Serialized output format.', max_length=16)), + ('protocol_version', models.CharField(default='artifact-v1', help_text='Artifact export protocol version.', max_length=32)), + ('payload', models.TextField(blank=True, default='', help_text='Serialized artifact body/content.')), + ('meta', models.JSONField(blank=True, default=dict, help_text='Additional export metadata (counts, hints, source IDs).')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('user', models.ForeignKey(help_text='Owner of this export artifact.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_artifact_exports', to=settings.AUTH_USER_MODEL)), + ('plan', models.ForeignKey(help_text='Source mitigation plan.', on_delete=django.db.models.deletion.CASCADE, related_name='exports', to='core.patternmitigationplan')), + ], + ), + migrations.CreateModel( + name='PatternMitigationRule', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this rule.', primary_key=True, serialize=False)), + ('title', models.CharField(help_text='Rule title.', max_length=255)), + ('content', models.TextField(blank=True, default='', help_text='Rule definition/details.')), + ('enabled', models.BooleanField(default=True, help_text='Whether this rule is currently enabled.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('plan', models.ForeignKey(help_text='Parent mitigation plan.', on_delete=django.db.models.deletion.CASCADE, related_name='rules', to='core.patternmitigationplan')), + ('user', models.ForeignKey(help_text='Owner of this rule.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_rules', to=settings.AUTH_USER_MODEL)), + ], + ), + ] diff --git a/core/migrations/0019_patternmitigationcorrection.py b/core/migrations/0019_patternmitigationcorrection.py new file mode 100644 index 0000000..8ca5057 --- /dev/null +++ b/core/migrations/0019_patternmitigationcorrection.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.11 on 2026-02-15 01:13 + +import django.db.models.deletion +import uuid +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0018_patternmitigationplan_patternmitigationmessage_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='PatternMitigationCorrection', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this correction item.', primary_key=True, serialize=False)), + ('title', models.CharField(help_text='Correction title.', max_length=255)), + ('clarification', models.TextField(blank=True, default='', help_text='Joint clarification text intended to reduce interpretation drift.')), + ('enabled', models.BooleanField(default=True, help_text='Whether this correction item is currently enabled.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('plan', models.ForeignKey(help_text='Parent mitigation plan.', 
on_delete=django.db.models.deletion.CASCADE, related_name='corrections', to='core.patternmitigationplan')), + ('user', models.ForeignKey(help_text='Owner of this correction item.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_corrections', to=settings.AUTH_USER_MODEL)), + ], + ), + ] diff --git a/core/migrations/0020_patternmitigationcorrection_language_style_and_more.py b/core/migrations/0020_patternmitigationcorrection_language_style_and_more.py new file mode 100644 index 0000000..eba2c3f --- /dev/null +++ b/core/migrations/0020_patternmitigationcorrection_language_style_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.11 on 2026-02-15 01:38 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0019_patternmitigationcorrection'), + ] + + operations = [ + migrations.AddField( + model_name='patternmitigationcorrection', + name='language_style', + field=models.CharField(choices=[('same', 'Same Language'), ('adapted', 'Adapted Language')], default='adapted', help_text='Whether to keep wording identical or adapt it per recipient.', max_length=16), + ), + migrations.AddField( + model_name='patternmitigationcorrection', + name='perspective', + field=models.CharField(choices=[('third_person', 'Third Person'), ('first_person', 'First Person')], default='third_person', help_text='Narrative perspective used when framing this correction.', max_length=32), + ), + migrations.AddField( + model_name='patternmitigationcorrection', + name='share_target', + field=models.CharField(choices=[('self', 'Self'), ('other', 'Other Party'), ('both', 'Both Parties')], default='both', help_text='Who this insight is intended to be shared with.', max_length=16), + ), + migrations.AddField( + model_name='patternmitigationcorrection', + name='source_phrase', + field=models.TextField(blank=True, default='', help_text="Situation/message fragment this correction responds to, e.g. 'she says ...'."), + ), + migrations.AlterField( + model_name='patternartifactexport', + name='artifact_type', + field=models.CharField(choices=[('rulebook', 'Rulebook'), ('rules', 'Rules'), ('games', 'Games'), ('corrections', 'Corrections')], help_text='Artifact category being exported.', max_length=32), + ), + ] diff --git a/core/migrations/0021_alter_patternmitigationcorrection_clarification_and_more.py b/core/migrations/0021_alter_patternmitigationcorrection_clarification_and_more.py new file mode 100644 index 0000000..3f8dbc7 --- /dev/null +++ b/core/migrations/0021_alter_patternmitigationcorrection_clarification_and_more.py @@ -0,0 +1,43 @@ +# Generated by Django 5.2.11 on 2026-02-15 02:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0020_patternmitigationcorrection_language_style_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='patternmitigationcorrection', + name='clarification', + field=models.TextField(blank=True, default='', help_text='Joint clarification text intended to reduce interpretation drift. Example: \'When you say "you ignore me", I hear fear of disconnection, not blame.\''), + ), + migrations.AlterField( + model_name='patternmitigationcorrection', + name='language_style', + field=models.CharField(choices=[('same', 'Same Language'), ('adapted', 'Adapted Language')], default='adapted', help_text='Whether to keep wording identical or adapt it per recipient. 
Example: same text for both parties, or softened/adapted wording for recipient.', max_length=16), + ), + migrations.AlterField( + model_name='patternmitigationcorrection', + name='perspective', + field=models.CharField(choices=[('third_person', 'Third Person'), ('second_person', 'Second Person'), ('first_person', 'First Person')], default='third_person', help_text="Narrative perspective used when framing this correction. Examples: third person ('she says'), second person ('you say'), first person ('I say').", max_length=32), + ), + migrations.AlterField( + model_name='patternmitigationcorrection', + name='share_target', + field=models.CharField(choices=[('self', 'Self'), ('other', 'Other Party'), ('both', 'Both Parties')], default='both', help_text='Who this insight is intended to be shared with. Example: self, other, or both.', max_length=16), + ), + migrations.AlterField( + model_name='patternmitigationcorrection', + name='source_phrase', + field=models.TextField(blank=True, default='', help_text='Situation/message fragment this correction responds to. Example: \'she says: "you never listen"\' or \'you say: "you are dismissing me"\'.'), + ), + migrations.AlterField( + model_name='patternmitigationcorrection', + name='title', + field=models.CharField(help_text="Correction title. Example: 'Assumption vs intent mismatch'.", max_length=255), + ), + ] diff --git a/core/migrations/0022_patternmitigationautosettings.py b/core/migrations/0022_patternmitigationautosettings.py new file mode 100644 index 0000000..6807f5d --- /dev/null +++ b/core/migrations/0022_patternmitigationautosettings.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.11 on 2026-02-15 02:38 + +import django.db.models.deletion +import uuid +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0021_alter_patternmitigationcorrection_clarification_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='PatternMitigationAutoSettings', + fields=[ + ('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Stable identifier for this automation settings row.', primary_key=True, serialize=False)), + ('enabled', models.BooleanField(default=False, help_text='Master toggle for mitigation automation in this conversation.')), + ('auto_pattern_recognition', models.BooleanField(default=True, help_text='Run pattern/violation recognition automatically when triggered.')), + ('auto_create_mitigation', models.BooleanField(default=False, help_text='Create a baseline mitigation plan automatically when missing.')), + ('auto_create_corrections', models.BooleanField(default=False, help_text='Create correction items automatically from detected violations.')), + ('auto_notify_enabled', models.BooleanField(default=False, help_text='Send NTFY notifications when new violations are detected.')), + ('ntfy_topic_override', models.CharField(blank=True, help_text='Optional NTFY topic override for automation notifications.', max_length=255, null=True)), + ('ntfy_url_override', models.CharField(blank=True, help_text='Optional NTFY server URL override for automation notifications.', max_length=255, null=True)), + ('sample_message_window', models.PositiveIntegerField(default=40, help_text='How many recent messages to include in each automation check.')), + ('check_cooldown_seconds', models.PositiveIntegerField(default=300, help_text='Minimum seconds between automatic checks for this conversation.')), + ('last_checked_event_ts', 
models.BigIntegerField(blank=True, help_text='Latest source message timestamp included in automation checks.', null=True)), + ('last_run_at', models.DateTimeField(blank=True, help_text='Timestamp when automation last ran.', null=True)), + ('last_result_summary', models.TextField(blank=True, default='', help_text='Human-readable summary from the last automation run.')), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Row creation timestamp.')), + ('updated_at', models.DateTimeField(auto_now=True, help_text='Last update timestamp.')), + ('conversation', models.OneToOneField(help_text='Conversation scope this automation config applies to.', on_delete=django.db.models.deletion.CASCADE, related_name='mitigation_auto_settings', to='core.workspaceconversation')), + ('user', models.ForeignKey(help_text='Owner of this automation settings row.', on_delete=django.db.models.deletion.CASCADE, related_name='pattern_mitigation_auto_settings', to=settings.AUTH_USER_MODEL)), + ], + ), + ] diff --git a/core/models.py b/core/models.py index 59d3cfd..fe84462 100644 --- a/core/models.py +++ b/core/models.py @@ -1,16 +1,20 @@ import logging +import hashlib import uuid from django.conf import settings +from django.contrib.auth import get_user_model from django.contrib.auth.models import AbstractUser from django.db import models -from core.lib.notify import raw_sendmsg + from core.clients import signalapi +from core.lib.notify import raw_sendmsg logger = logging.getLogger(__name__) SERVICE_CHOICES = ( ("signal", "Signal"), + ("xmpp", "XMPP"), ("instagram", "Instagram"), ) MBTI_CHOICES = ( @@ -37,6 +41,39 @@ MODEL_CHOICES = ( ("gpt-4o", "GPT 4o"), ) + +def _attribute_display_id(kind, *parts): + """ + Build a deterministic short display id from object attributes. + + Format: + - 3 lowercase letters + - 4 fixed digits + Example: `kqa1042` + """ + raw = "|".join([kind, *[str(part or "") for part in parts]]) + digest = hashlib.sha1(raw.encode("utf-8")).hexdigest() + + n_letters = int(digest[:8], 16) + letters = [] + for _ in range(3): + letters.append(chr(ord("a") + (n_letters % 26))) + n_letters //= 26 + + digits = int(digest[8:16], 16) % 10000 + return f"{''.join(letters)}{digits:04d}" + + +def get_default_workspace_user_pk(): + """ + Fallback owner used when adding non-null `user` FKs to existing rows. 
+ """ + user_pk = ( + get_user_model().objects.order_by("id").values_list("id", flat=True).first() + ) + return user_pk or 1 + + class User(AbstractUser): # Stripe customer ID stripe_id = models.CharField(max_length=255, null=True, blank=True) @@ -63,8 +100,6 @@ class User(AbstractUser): raw_sendmsg(*args, **kwargs, url=notification_settings.ntfy_url, topic=topic) - - class NotificationSettings(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) ntfy_topic = models.CharField(max_length=255, null=True, blank=True) @@ -73,12 +108,14 @@ class NotificationSettings(models.Model): def __str__(self): return f"Notification settings for {self.user}" + class Chat(models.Model): source_number = models.CharField(max_length=32, null=True, blank=True) source_uuid = models.CharField(max_length=255, null=True, blank=True) source_name = models.CharField(max_length=255, null=True, blank=True) account = models.CharField(max_length=32, null=True, blank=True) + class AI(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) base_url = models.CharField(max_length=255, null=True, blank=True) @@ -110,6 +147,7 @@ class Person(models.Model): def __str__(self): return self.name + class PersonIdentifier(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) identifier = models.CharField(max_length=255) @@ -136,7 +174,8 @@ class PersonIdentifier(models.Model): class ChatSession(models.Model): - """Represents an ongoing chat session, stores summarized history.""" + """Represents an ongoing chat session for persisted message history.""" + id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(User, on_delete=models.CASCADE) identifier = models.ForeignKey(PersonIdentifier, on_delete=models.CASCADE) @@ -146,8 +185,10 @@ class ChatSession(models.Model): def __str__(self): return f"{self.identifier.person.name} ({self.identifier.service})" + class QueuedMessage(models.Model): """Stores individual messages linked to a ChatSession.""" + user = models.ForeignKey(User, on_delete=models.CASCADE) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) session = models.ForeignKey(ChatSession, on_delete=models.CASCADE) @@ -161,8 +202,10 @@ class QueuedMessage(models.Model): class Meta: ordering = ["ts"] + class Message(models.Model): """Stores individual messages linked to a ChatSession.""" + user = models.ForeignKey(User, on_delete=models.CASCADE) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) session = models.ForeignKey(ChatSession, on_delete=models.CASCADE) @@ -175,6 +218,7 @@ class Message(models.Model): class Meta: ordering = ["ts"] + class Group(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(User, on_delete=models.CASCADE) @@ -184,22 +228,33 @@ class Group(models.Model): def __str__(self): return self.name + class Persona(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(User, on_delete=models.CASCADE) # Core Identity # id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True) - alias = models.CharField(max_length=255, blank=True, null=True) # Preferred name or persona alias + alias = models.CharField( + max_length=255, blank=True, null=True + ) # Preferred name or persona alias mbti = models.CharField(max_length=255, choices=MBTI_CHOICES, blank=True, null=True) # -1: assertive, +1: assertive mbti_identity = 
models.FloatField(default=0.0) # Key Behavioral Traits for Chat Responses - inner_story = models.TextField(blank=True, null=True) # Internal philosophy & worldview - core_values = models.TextField(blank=True, null=True) # What drives their decisions & interactions - communication_style = models.TextField(blank=True, null=True) # How they speak & interact - flirting_style = models.TextField(blank=True, null=True) # How they express attraction + inner_story = models.TextField( + blank=True, null=True + ) # Internal philosophy & worldview + core_values = models.TextField( + blank=True, null=True + ) # What drives their decisions & interactions + communication_style = models.TextField( + blank=True, null=True + ) # How they speak & interact + flirting_style = models.TextField( + blank=True, null=True + ) # How they express attraction humor_style = models.CharField( max_length=50, choices=[ @@ -210,14 +265,15 @@ class Persona(models.Model): ("sarcastic", "Sarcastic"), ("intellectual", "Intellectual"), ], - blank=True, null=True + blank=True, + null=True, ) # Defines their approach to humor # Conversational Preferences likes = models.TextField(blank=True, null=True) # Topics they enjoy discussing dislikes = models.TextField(blank=True, null=True) # Topics or behaviors they avoid tone = models.CharField( - max_length=50, + max_length=50, choices=[ ("formal", "Formal"), ("casual", "Casual"), @@ -225,27 +281,39 @@ class Persona(models.Model): ("serious", "Serious"), ("warm", "Warm"), ("detached", "Detached"), - ], - blank=True, null=True + ], + blank=True, + null=True, ) # Defines preferred conversational tone # Emotional & Strategic Interaction - response_tactics = models.TextField(blank=True, null=True) # How they handle gaslighting, guilt-tripping, etc. - persuasion_tactics = models.TextField(blank=True, null=True) # How they convince others - boundaries = models.TextField(blank=True, null=True) # What they refuse to tolerate in conversations + response_tactics = models.TextField( + blank=True, null=True + ) # How they handle gaslighting, guilt-tripping, etc. 
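
A minimal standalone sketch of the digest scheme behind the `_attribute_display_id` helper added above (SHA-1 over the joined attributes, first 8 hex chars mapped to three base-26 letters, next 8 to a zero-padded 4-digit number); the sample attribute values here are made up for illustration only:

    import hashlib

    def attribute_display_id(kind, *parts):
        # Same scheme as _attribute_display_id above: SHA-1 over "kind|part1|...",
        # first 8 hex chars -> three lowercase letters, next 8 -> 4-digit suffix.
        raw = "|".join([kind, *[str(part or "") for part in parts]])
        digest = hashlib.sha1(raw.encode("utf-8")).hexdigest()
        n = int(digest[:8], 16)
        letters = []
        for _ in range(3):
            letters.append(chr(ord("a") + (n % 26)))
            n //= 26
        return f"{''.join(letters)}{int(digest[8:16], 16) % 10000:04d}"

    # Same attributes give the same id; changing any attribute almost certainly
    # gives a different one, which is what the display_id properties later in
    # this patch rely on (they pass plan_id, title, body text, enabled).
    a = attribute_display_id("rule", "plan-1", "Name the feeling first", "State the need, not the accusation.", True)
    b = attribute_display_id("rule", "plan-1", "Name the feeling first", "State the need, not the accusation.", True)
    c = attribute_display_id("rule", "plan-1", "Name the feeling first", "State the need, not the accusation.", False)
    assert a == b and len(a) == 7
    print(a, c)  # two 7-character ids: 3 lowercase letters + 4 digits
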
+ persuasion_tactics = models.TextField( + blank=True, null=True + ) # How they convince others + boundaries = models.TextField( + blank=True, null=True + ) # What they refuse to tolerate in conversations - trust = models.IntegerField(default=50) # Percentage of initial trust given in interactions - adaptability = models.IntegerField(default=70) # How easily they shift tones or styles + trust = models.IntegerField( + default=50 + ) # Percentage of initial trust given in interactions + adaptability = models.IntegerField( + default=70 + ) # How easily they shift tones or styles def __str__(self): return f"{self.alias} ({self.mbti}) [{self.tone} {self.humor_style}]" + class Manipulation(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(User, on_delete=models.CASCADE) name = models.CharField(max_length=255) group = models.ForeignKey(Group, on_delete=models.CASCADE) - #self = models.ForeignKey(Group, on_delete=models.CASCADE) + # self = models.ForeignKey(Group, on_delete=models.CASCADE) ai = models.ForeignKey(AI, on_delete=models.CASCADE) persona = models.ForeignKey(Persona, on_delete=models.CASCADE) enabled = models.BooleanField(default=False) @@ -260,13 +328,1034 @@ class Manipulation(models.Model): ("mutate", "Change messages sent on XMPP using the persona"), ("silent", "Do not generate or send replies"), ], - blank=True, null=True + blank=True, + null=True, ) def __str__(self): return f"{self.name} [{self.group}]" +class WorkspaceConversation(models.Model): + """ + Canonical conversation workspace used by the UI. + + This is intentionally distinct from ChatSession: + - ChatSession is operational history for existing manip/reply flows. + - WorkspaceConversation is analysis/workspace context for AI tooling. + + TODO: + Identify conversation "active periods" dynamically using observed mutual + response cadence (global + per-person baseline), rather than fixed windows. + """ + + class StabilityState(models.TextChoices): + CALIBRATING = "calibrating", "Calibrating" + STABLE = "stable", "Stable" + WATCH = "watch", "Watch" + FRAGILE = "fragile", "Fragile" + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this workspace conversation.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_conversations", + help_text="Owner of this conversation workspace.", + ) + title = models.CharField( + max_length=255, + blank=True, + default="", + help_text="Human-friendly label shown in the workspace sidebar.", + ) + platform_type = models.CharField( + max_length=255, + choices=SERVICE_CHOICES, + default="signal", + help_text="Primary transport for this conversation (reuses SERVICE_CHOICES).", + ) + platform_thread_id = models.CharField( + max_length=255, + blank=True, + default="", + help_text="Platform-native thread/group identifier when available.", + ) + participants = models.ManyToManyField( + Person, + related_name="workspace_conversations", + blank=True, + help_text="Resolved people participating in this conversation.", + ) + participant_feedback = models.JSONField( + default=dict, + blank=True, + help_text=( + "Per-person interaction feedback map keyed by person UUID. " + "Example: {'<person_uuid>': {'state': 'withdrawing', 'note': 'short replies'}}." 
+ ), + ) + last_event_ts = models.BigIntegerField( + null=True, + blank=True, + help_text="Latest message timestamp (unix ms) currently known.", + ) + last_ai_run_at = models.DateTimeField( + null=True, + blank=True, + help_text="Last time any AIRequest finished for this conversation.", + ) + stability_state = models.CharField( + max_length=32, + choices=StabilityState.choices, + default=StabilityState.CALIBRATING, + help_text="UI label for relationship stability, baseline-aware.", + ) + stability_score = models.FloatField( + null=True, + blank=True, + help_text="Relationship stability score (0-100). Null while calibrating.", + ) + stability_confidence = models.FloatField( + default=0.0, + help_text="Confidence in stability_score (0.0-1.0).", + ) + stability_sample_messages = models.PositiveIntegerField( + default=0, + help_text="How many messages were used to compute stability.", + ) + stability_sample_days = models.PositiveIntegerField( + default=0, + help_text="How many calendar days of data were used for stability.", + ) + stability_last_computed_at = models.DateTimeField( + null=True, + blank=True, + help_text="Timestamp of the latest stability computation.", + ) + commitment_outbound_score = models.FloatField( + null=True, + blank=True, + help_text=( + "Estimated commitment score for user -> counterpart direction (0-100). " + "Null while calibrating." + ), + ) + commitment_inbound_score = models.FloatField( + null=True, + blank=True, + help_text=( + "Estimated commitment score for counterpart -> user direction (0-100). " + "Null while calibrating." + ), + ) + commitment_confidence = models.FloatField( + default=0.0, + help_text="Confidence in commitment scores (0.0-1.0).", + ) + commitment_last_computed_at = models.DateTimeField( + null=True, + blank=True, + help_text="Timestamp of the latest commitment computation.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + def __str__(self): + return self.title or f"{self.platform_type}:{self.id}" + + +class MessageEvent(models.Model): + """ + Normalized message event used by workspace timeline and AI selection windows. + """ + + SOURCE_SYSTEM_CHOICES = ( + ("signal", "Signal"), + ("xmpp", "XMPP"), + ("workspace", "Workspace"), + ("ai", "AI"), + ) + + DIRECTION_CHOICES = ( + ("in", "Inbound"), + ("out", "Outbound"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this message event.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_message_events", + default=get_default_workspace_user_pk, + help_text="Owner of this message event row (required for restricted CRUD filtering).", + ) + conversation = models.ForeignKey( + WorkspaceConversation, + on_delete=models.CASCADE, + related_name="events", + help_text=( + "AI workspace conversation this message belongs to. " + "This is not the transport-native thread object." + ), + ) + source_system = models.CharField( + max_length=32, + choices=SOURCE_SYSTEM_CHOICES, + default="signal", + help_text="System that produced this event record.", + ) + ts = models.BigIntegerField( + db_index=True, + help_text="Event timestamp (unix ms) as reported by source_system.", + ) + direction = models.CharField( + max_length=8, + choices=DIRECTION_CHOICES, + help_text=( + "Direction relative to workspace owner: " + "'in' from counterpart(s), 'out' from user/bot side." 
+ ), + ) + sender_uuid = models.CharField( + max_length=255, + blank=True, + default="", + db_index=True, + help_text="Source sender UUID/identifier for correlation.", + ) + text = models.TextField( + blank=True, + default="", + help_text="Normalized message text body.", + ) + attachments = models.JSONField( + default=list, + blank=True, + help_text="Attachment metadata list associated with this message.", + ) + raw_payload_ref = models.JSONField( + default=dict, + blank=True, + help_text="Raw source payload or reference pointer for audit/debug.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + class Meta: + ordering = ["ts"] + + +class AIRequest(models.Model): + """ + User-initiated AI run against a selected message window. + + TODO: + Resolve message window dynamically based on available model context budget + and content size (chunk-aware), not fixed hard caps. + """ + + STATUS_CHOICES = ( + ("queued", "Queued"), + ("running", "Running"), + ("done", "Done"), + ("failed", "Failed"), + ) + + OPERATION_CHOICES = ( + ("summarise", "Summarise"), + ("draft_reply", "Draft Reply"), + ("critique", "Critique"), + ("repair", "Repair"), + ("extract_patterns", "Extract Patterns"), + ("memory_propose", "Memory Propose"), + ("state_detect", "State Detect"), + ("rewrite_style", "Rewrite Style"), + ("send_readiness", "Send Readiness"), + ("timeline_brief", "Timeline Brief"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this AI request.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_ai_requests", + help_text="User who initiated this request.", + ) + conversation = models.ForeignKey( + WorkspaceConversation, + on_delete=models.CASCADE, + related_name="ai_requests", + help_text="Conversation analyzed by this request.", + ) + window_spec = models.JSONField( + default=dict, + help_text=( + "Selection spec (last_n/since_ts/between_ts/include_attachments/etc). " + "Should be dynamically resolved by available context/token budget." + ), + ) + message_ids = models.JSONField( + default=list, + blank=True, + help_text="Resolved ordered MessageEvent IDs included in this run.", + ) + user_notes = models.TextField( + blank=True, + default="", + help_text="Optional user intent/context notes injected into the prompt.", + ) + operation = models.CharField( + max_length=32, + choices=OPERATION_CHOICES, + help_text="Requested AI operation type.", + ) + policy_snapshot = models.JSONField( + default=dict, + blank=True, + help_text=( + "Effective manipulation/policy values captured at request time, " + "so results remain auditable even if policies change later." + ), + ) + status = models.CharField( + max_length=16, + choices=STATUS_CHOICES, + default="queued", + help_text="Worker lifecycle state for this request.", + ) + error = models.TextField( + blank=True, + default="", + help_text="Error details when status='failed'.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Request creation timestamp.", + ) + started_at = models.DateTimeField( + null=True, + blank=True, + help_text="Worker start timestamp.", + ) + finished_at = models.DateTimeField( + null=True, + blank=True, + help_text="Worker completion timestamp.", + ) + + +class AIResult(models.Model): + """ + Persisted output payload for a completed AIRequest. 
+ """ + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this AI result payload.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_ai_results", + default=get_default_workspace_user_pk, + help_text="Owner of this AI result row (required for restricted CRUD filtering).", + ) + ai_request = models.OneToOneField( + AIRequest, + on_delete=models.CASCADE, + related_name="result", + help_text="Owning AI request for this result.", + ) + working_summary = models.TextField( + blank=True, + default="", + help_text="Conversation working summary generated for this run.", + ) + draft_replies = models.JSONField( + default=list, + blank=True, + help_text="Draft reply candidates, typically with tone and rationale.", + ) + interaction_signals = models.JSONField( + default=list, + blank=True, + help_text=( + "Structured positive/neutral/risk signals inferred for this run. " + "Example item: {'label':'repair_attempt','valence':'positive','message_event_ids':[...]}." + ), + ) + memory_proposals = models.JSONField( + default=list, + blank=True, + help_text="Proposed memory entries, typically requiring user approval.", + ) + citations = models.JSONField( + default=list, + blank=True, + help_text="Referenced MessageEvent IDs supporting generated claims.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Result creation timestamp.", + ) + + +class MemoryItem(models.Model): + """ + Durable/semi-durable memory used to provide continuity across AI runs. + """ + + MEMORY_KIND_CHOICES = ( + ("fact", "Durable Fact/Preference"), + ("state", "Relationship State"), + ("summary", "Conversation Working Summary"), + ) + + STATUS_CHOICES = ( + ("proposed", "Proposed"), + ("active", "Active"), + ("deprecated", "Deprecated"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this memory item.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_memory_items", + help_text="Owner of the memory item.", + ) + conversation = models.ForeignKey( + WorkspaceConversation, + on_delete=models.CASCADE, + related_name="memory_items", + help_text="Conversation scope this memory item belongs to.", + ) + memory_kind = models.CharField( + max_length=16, + choices=MEMORY_KIND_CHOICES, + help_text="Memory kind: fact/state/summary.", + ) + status = models.CharField( + max_length=16, + choices=STATUS_CHOICES, + default="proposed", + help_text="Lifecycle state, especially for approval-gated memories.", + ) + content = models.JSONField( + default=dict, + blank=True, + help_text="Structured memory payload (schema can evolve by type).", + ) + source_request = models.ForeignKey( + AIRequest, + on_delete=models.SET_NULL, + null=True, + blank=True, + help_text="AIRequest that originated this memory, if any.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Last update timestamp.", + ) + + +class AIResultSignal(models.Model): + """ + Message-linked evidence signal produced by an AIResult. + + This lets the UI point to concrete messages (good or bad) rather than + generic flags. 
+ """ + + VALENCE_CHOICES = ( + ("positive", "Positive"), + ("neutral", "Neutral"), + ("risk", "Risk"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this result signal.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="workspace_ai_result_signals", + help_text="Owner of this signal row (required for restricted CRUD filtering).", + ) + ai_result = models.ForeignKey( + AIResult, + on_delete=models.CASCADE, + related_name="signals", + help_text="AI result that produced this signal.", + ) + message_event = models.ForeignKey( + MessageEvent, + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="ai_signals", + help_text="Optional specific message event referenced by this signal.", + ) + label = models.CharField( + max_length=128, + help_text="Short signal label, e.g. 'withdrawing', 'repair_attempt'.", + ) + valence = models.CharField( + max_length=16, + choices=VALENCE_CHOICES, + default="neutral", + help_text="Signal polarity: positive, neutral, or risk.", + ) + score = models.FloatField( + null=True, + blank=True, + help_text="Optional model confidence/strength (0.0-1.0).", + ) + rationale = models.TextField( + blank=True, + default="", + help_text="Human-readable explanation for this signal.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + +class PatternMitigationPlan(models.Model): + """ + Stores a mitigation plan generated from extracted interaction patterns. + + The plan is the parent container for rules, games, mitigation chat, + and artifact exports. + """ + + STATUS_CHOICES = ( + ("draft", "Draft"), + ("active", "Active"), + ("archived", "Archived"), + ) + + CREATION_MODE_CHOICES = ( + ("auto", "Auto"), + ("guided", "Guided"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this mitigation plan.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_plans", + help_text="Owner of this plan.", + ) + conversation = models.ForeignKey( + WorkspaceConversation, + on_delete=models.CASCADE, + related_name="mitigation_plans", + help_text="Workspace conversation this plan belongs to.", + ) + source_ai_result = models.ForeignKey( + AIResult, + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="mitigation_plans", + help_text="AI result that initiated this plan, if any.", + ) + title = models.CharField( + max_length=255, + blank=True, + default="", + help_text="Display title for this plan.", + ) + objective = models.TextField( + blank=True, + default="", + help_text="High-level objective this plan is meant to achieve.", + ) + fundamental_items = models.JSONField( + default=list, + blank=True, + help_text="Foundational agreed items/principles for this plan.", + ) + creation_mode = models.CharField( + max_length=16, + choices=CREATION_MODE_CHOICES, + default="auto", + help_text="Whether plan artifacts were generated automatically or user-guided.", + ) + status = models.CharField( + max_length=16, + choices=STATUS_CHOICES, + default="draft", + help_text="Lifecycle status of the plan.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Last update timestamp.", + ) + + +class PatternMitigationRule(models.Model): + """ + Rule artifact attached to a 
mitigation plan. + """ + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this rule.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_rules", + help_text="Owner of this rule.", + ) + plan = models.ForeignKey( + PatternMitigationPlan, + on_delete=models.CASCADE, + related_name="rules", + help_text="Parent mitigation plan.", + ) + title = models.CharField( + max_length=255, + help_text="Rule title.", + ) + content = models.TextField( + blank=True, + default="", + help_text="Rule definition/details.", + ) + enabled = models.BooleanField( + default=True, + help_text="Whether this rule is currently enabled.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + @property + def display_id(self): + return _attribute_display_id( + "rule", + self.plan_id, + self.title, + self.content, + self.enabled, + ) + + +class PatternMitigationGame(models.Model): + """ + Game artifact attached to a mitigation plan. + """ + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this game.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_games", + help_text="Owner of this game.", + ) + plan = models.ForeignKey( + PatternMitigationPlan, + on_delete=models.CASCADE, + related_name="games", + help_text="Parent mitigation plan.", + ) + title = models.CharField( + max_length=255, + help_text="Game title.", + ) + instructions = models.TextField( + blank=True, + default="", + help_text="Gameplay/instruction text.", + ) + enabled = models.BooleanField( + default=True, + help_text="Whether this game is currently enabled.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + @property + def display_id(self): + return _attribute_display_id( + "game", + self.plan_id, + self.title, + self.instructions, + self.enabled, + ) + + +class PatternMitigationCorrection(models.Model): + """ + Shared clarification artifact used to prevent circular misunderstandings. + """ + + PERSPECTIVE_CHOICES = ( + ("third_person", "Third Person"), + ("second_person", "Second Person"), + ("first_person", "First Person"), + ) + SHARE_TARGET_CHOICES = ( + ("self", "Self"), + ("other", "Other Party"), + ("both", "Both Parties"), + ) + LANGUAGE_STYLE_CHOICES = ( + ("same", "Same Language"), + ("adapted", "Adapted Language"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this correction item.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_corrections", + help_text="Owner of this correction item.", + ) + plan = models.ForeignKey( + PatternMitigationPlan, + on_delete=models.CASCADE, + related_name="corrections", + help_text="Parent mitigation plan.", + ) + title = models.CharField( + max_length=255, + help_text="Correction title. Example: 'Assumption vs intent mismatch'.", + ) + clarification = models.TextField( + blank=True, + default="", + help_text=( + "Joint clarification text intended to reduce interpretation drift. " + "Example: 'When you say \"you ignore me\", I hear fear of disconnection, not blame.'" + ), + ) + source_phrase = models.TextField( + blank=True, + default="", + help_text=( + "Situation/message fragment this correction responds to. 
" + "Example: 'she says: \"you never listen\"' or 'you say: \"you are dismissing me\"'." + ), + ) + perspective = models.CharField( + max_length=32, + choices=PERSPECTIVE_CHOICES, + default="third_person", + help_text=( + "Narrative perspective used when framing this correction. " + "Examples: third person ('she says'), second person ('you say'), first person ('I say')." + ), + ) + share_target = models.CharField( + max_length=16, + choices=SHARE_TARGET_CHOICES, + default="both", + help_text="Who this insight is intended to be shared with. Example: self, other, or both.", + ) + language_style = models.CharField( + max_length=16, + choices=LANGUAGE_STYLE_CHOICES, + default="adapted", + help_text=( + "Whether to keep wording identical or adapt it per recipient. " + "Example: same text for both parties, or softened/adapted wording for recipient." + ), + ) + enabled = models.BooleanField( + default=True, + help_text="Whether this correction item is currently enabled.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + @property + def display_id(self): + return _attribute_display_id( + "correction", + self.plan_id, + self.title, + self.source_phrase, + self.clarification, + self.perspective, + self.share_target, + self.language_style, + self.enabled, + ) + + +class PatternMitigationMessage(models.Model): + """ + Conversation log between user and AI within a mitigation plan. + """ + + ROLE_CHOICES = ( + ("user", "User"), + ("assistant", "Assistant"), + ("system", "System"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this mitigation message.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_messages", + help_text="Owner of this message.", + ) + plan = models.ForeignKey( + PatternMitigationPlan, + on_delete=models.CASCADE, + related_name="messages", + help_text="Parent mitigation plan.", + ) + role = models.CharField( + max_length=16, + choices=ROLE_CHOICES, + help_text="Message speaker role.", + ) + text = models.TextField( + help_text="Message content.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + class Meta: + ordering = ["created_at"] + + +class PatternMitigationAutoSettings(models.Model): + """ + Automation controls for mitigation analysis in a workspace conversation. + + These settings let the user enable periodic/triggered checks for pattern + violations, optional correction creation, and optional notifications. 
+ """ + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this automation settings row.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_mitigation_auto_settings", + help_text="Owner of this automation settings row.", + ) + conversation = models.OneToOneField( + WorkspaceConversation, + on_delete=models.CASCADE, + related_name="mitigation_auto_settings", + help_text="Conversation scope this automation config applies to.", + ) + enabled = models.BooleanField( + default=False, + help_text="Master toggle for mitigation automation in this conversation.", + ) + auto_pattern_recognition = models.BooleanField( + default=True, + help_text="Run pattern/violation recognition automatically when triggered.", + ) + auto_create_mitigation = models.BooleanField( + default=False, + help_text="Create a baseline mitigation plan automatically when missing.", + ) + auto_create_corrections = models.BooleanField( + default=False, + help_text="Create correction items automatically from detected violations.", + ) + auto_notify_enabled = models.BooleanField( + default=False, + help_text="Send NTFY notifications when new violations are detected.", + ) + ntfy_topic_override = models.CharField( + max_length=255, + null=True, + blank=True, + help_text="Optional NTFY topic override for automation notifications.", + ) + ntfy_url_override = models.CharField( + max_length=255, + null=True, + blank=True, + help_text="Optional NTFY server URL override for automation notifications.", + ) + sample_message_window = models.PositiveIntegerField( + default=40, + help_text="How many recent messages to include in each automation check.", + ) + check_cooldown_seconds = models.PositiveIntegerField( + default=300, + help_text="Minimum seconds between automatic checks for this conversation.", + ) + last_checked_event_ts = models.BigIntegerField( + null=True, + blank=True, + help_text="Latest source message timestamp included in automation checks.", + ) + last_run_at = models.DateTimeField( + null=True, + blank=True, + help_text="Timestamp when automation last ran.", + ) + last_result_summary = models.TextField( + blank=True, + default="", + help_text="Human-readable summary from the last automation run.", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + updated_at = models.DateTimeField( + auto_now=True, + help_text="Last update timestamp.", + ) + + def __str__(self): + return f"Auto settings for {self.conversation_id}" + + +class PatternArtifactExport(models.Model): + """ + Export protocol record for rules/games/rulebooks generated from a plan. 
+ """ + + ARTIFACT_TYPE_CHOICES = ( + ("rulebook", "Rulebook"), + ("rules", "Rules"), + ("games", "Games"), + ("corrections", "Corrections"), + ) + + FORMAT_CHOICES = ( + ("markdown", "Markdown"), + ("json", "JSON"), + ("text", "Text"), + ) + + id = models.UUIDField( + primary_key=True, + default=uuid.uuid4, + editable=False, + help_text="Stable identifier for this export artifact.", + ) + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + related_name="pattern_artifact_exports", + help_text="Owner of this export artifact.", + ) + plan = models.ForeignKey( + PatternMitigationPlan, + on_delete=models.CASCADE, + related_name="exports", + help_text="Source mitigation plan.", + ) + artifact_type = models.CharField( + max_length=32, + choices=ARTIFACT_TYPE_CHOICES, + help_text="Artifact category being exported.", + ) + export_format = models.CharField( + max_length=16, + choices=FORMAT_CHOICES, + default="markdown", + help_text="Serialized output format.", + ) + protocol_version = models.CharField( + max_length=32, + default="artifact-v1", + help_text="Artifact export protocol version.", + ) + payload = models.TextField( + blank=True, + default="", + help_text="Serialized artifact body/content.", + ) + meta = models.JSONField( + default=dict, + blank=True, + help_text="Additional export metadata (counts, hints, source IDs).", + ) + created_at = models.DateTimeField( + auto_now_add=True, + help_text="Row creation timestamp.", + ) + + # class Perms(models.Model): # class Meta: # permissions = ( diff --git a/core/modules/router.py b/core/modules/router.py index a386270..4b266d3 100644 --- a/core/modules/router.py +++ b/core/modules/router.py @@ -1,7 +1,7 @@ -from core.util import logs - from core.clients.signal import SignalClient from core.clients.xmpp import XMPPClient +from core.util import logs + class UnifiedRouter(object): """ @@ -22,10 +22,9 @@ class UnifiedRouter(object): self.xmpp.start() self.signal.start() - def run(self): try: - #self.xmpp.client.client.process() + # self.xmpp.client.client.process() # self.xmpp.start() print("IN RUN BEFORE START") self._start() diff --git a/core/templates/base.html b/core/templates/base.html index 92d5489..4cb1137 100644 --- a/core/templates/base.html +++ b/core/templates/base.html @@ -234,7 +234,6 @@ <a class="navbar-item" href="{% url 'home' %}"> Home </a> - {% if user.is_authenticated %} <div class="navbar-item has-dropdown is-hoverable"> @@ -284,9 +283,6 @@ <a class="navbar-item" href="{% url 'sessions' type='page' %}"> Sessions </a> - <a class="navbar-item" href="{% url 'queues' type='page' %}"> - Queued Messages - </a> </div> </div> @@ -311,6 +307,14 @@ </div> <div class="navbar-end"> + {% if user.is_authenticated %} + <a class="navbar-item" href="{% url 'ai_workspace' %}"> + AI + </a> + <a class="navbar-item" href="{% url 'queues' type='page' %}"> + Queue + </a> + {% endif %} <div class="navbar-item"> <div class="buttons"> {% if not user.is_authenticated %} diff --git a/core/templates/index.html b/core/templates/index.html index 3ea846e..28dae25 100644 --- a/core/templates/index.html +++ b/core/templates/index.html @@ -9,7 +9,7 @@ <script> var grid = GridStack.init({ cellHeight: 20, - cellWidth: 50, + cellWidth: 45, cellHeightUnit: 'px', auto: true, float: true, @@ -78,9 +78,9 @@ // } grid.compact(); }); - </script> - <div> - {% block load_widgets %} + </script> + <div> + {% block load_widgets %} <!-- <div hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' hx-get="#" @@ -88,7 +88,7 @@ hx-trigger="load" hx-swap="afterend" 
style="display: none;"></div> --> - {% endblock %} - </div> + {% endblock %} + </div> {% endblock %} diff --git a/core/templates/pages/ai-workspace.html b/core/templates/pages/ai-workspace.html new file mode 100644 index 0000000..1768a30 --- /dev/null +++ b/core/templates/pages/ai-workspace.html @@ -0,0 +1,11 @@ +{% extends "index.html" %} + +{% block load_widgets %} + <div + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-get="{% url 'ai_workspace_contacts' type='widget' %}" + hx-target="#widgets-here" + hx-trigger="load" + hx-swap="afterend" + style="display: none;"></div> +{% endblock %} diff --git a/core/templates/pages/signal.html b/core/templates/pages/signal.html index a346939..afe0a02 100644 --- a/core/templates/pages/signal.html +++ b/core/templates/pages/signal.html @@ -1,7 +1,7 @@ {% extends "index.html" %} {% block load_widgets %} - <div + <div hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' hx-get="{% url 'signal_accounts' type='widget' %}" hx-target="#widgets-here" diff --git a/core/templates/partials/ai-workspace-ai-result.html b/core/templates/partials/ai-workspace-ai-result.html new file mode 100644 index 0000000..61af2ea --- /dev/null +++ b/core/templates/partials/ai-workspace-ai-result.html @@ -0,0 +1,213 @@ +<div style="margin-bottom: 0.5rem;"> + <div class="tags has-addons" style="display: inline-flex; margin-bottom: 0.4rem;"> + <span class="tag is-dark"> + <i class="fa-solid fa-wand-magic-sparkles" aria-hidden="true"></i> + </span> + <span class="tag is-white" style="border: 1px solid rgba(0, 0, 0, 0.2);"> + AI {{ operation_label }} + </span> + </div> + + {% if error %} + <div class="notification is-danger is-light" style="padding: 0.6rem;"> + {{ result_text }} + </div> + {% else %} + {% if operation == "artifacts" %} + {% if latest_plan %} + {% include "partials/ai-workspace-mitigation-panel.html" with person=person plan=latest_plan rules=latest_plan_rules games=latest_plan_games corrections=latest_plan_corrections fundamentals_text=latest_plan.fundamental_items|join:"\n" mitigation_messages=latest_plan_messages latest_export=latest_plan_export notice_message=mitigation_notice_message notice_level=mitigation_notice_level auto_settings=latest_auto_settings active_tab="plan_board" %} + {% else %} + <div id="mitigation-shell-{{ person.id }}" class="box" style="padding: 0.65rem; margin-top: 0.2rem; border: 1px dashed rgba(0, 0, 0, 0.25); box-shadow: none;"> + <p class="is-size-7 has-text-grey">No mitigation plan yet. 
Use the Patterns tab to generate one.</p> + </div> + {% endif %} + {% elif operation == "draft_reply" and draft_replies %} + <div id="draft-host-{{ person.id }}-{{ operation }}" data-selected="0"> + <div class="columns is-multiline" style="margin: 0 -0.35rem;"> + {% for option in draft_replies %} + <div class="column is-12-mobile is-4-tablet" style="padding: 0.35rem;"> + <article + class="draft-option-card {% if forloop.first %}is-selected{% endif %}" + data-index="{{ forloop.counter0 }}" + onclick="giaWorkspaceUseDraft('{{ person.id }}', '{{ operation }}', {{ forloop.counter0 }}); return false;" + style="height: 100%; padding: 0.6rem; border-radius: 9px; border: 1px solid rgba(0, 0, 0, 0.16); background: #fff; cursor: pointer; transition: border-color 120ms ease, box-shadow 120ms ease, background-color 120ms ease;"> + <p class="is-size-7 has-text-weight-semibold is-flex is-align-items-center" style="margin-bottom: 0.35rem; gap: 0.35rem;"> + {% with tone=option.label|default:""|lower %} + {% if tone == "soft" %} + <span class="icon is-small has-text-success"><i class="fa-solid fa-leaf"></i></span> + {% elif tone == "neutral" %} + <span class="icon is-small has-text-info"><i class="fa-solid fa-scale-balanced"></i></span> + {% elif tone == "firm" %} + <span class="icon is-small has-text-danger"><i class="fa-solid fa-shield-heart"></i></span> + {% else %} + <span class="icon is-small has-text-grey"><i class="fa-solid fa-comment-dots"></i></span> + {% endif %} + {% endwith %} + <span>{{ option.label|default:"Option" }}</span> + </p> + <p class="draft-text" style="white-space: pre-wrap; margin-bottom: 0;">{{ option.text }}</p> + </article> + </div> + {% endfor %} + </div> + </div> + + <div id="draft-send-shell-{{ person.id }}-{{ operation }}" style="margin-top: 0.5rem; padding: 0.6rem; border: 1px solid rgba(0, 0, 0, 0.16); border-radius: 8px;"> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_send' type='widget' person_id=person.id %}" + hx-target="#draft-send-status-{{ person.id }}-{{ operation }}" + hx-swap="innerHTML"> + <input type="hidden" id="draft-send-input-{{ person.id }}-{{ operation }}" name="draft_text" value=""> + <input type="hidden" id="draft-send-force-{{ person.id }}-{{ operation }}" name="force_send" value="0"> + <div class="field"> + <label class="label is-small">Draft Preview</label> + <div class="control"> + <textarea id="draft-send-preview-{{ person.id }}-{{ operation }}" class="textarea is-small" rows="4" readonly></textarea> + </div> + </div> + <div class="field" style="margin-bottom: 0.4rem;"> + <div class="control buttons are-small" style="margin: 0; gap: 0.35rem;"> + <button id="draft-send-btn-{{ person.id }}-{{ operation }}" class="button is-small is-link" {% if not send_state.can_send %}disabled{% endif %}> + Send Draft + </button> + <button + type="button" + class="button is-small is-info is-light" + onclick="giaWorkspaceQueueSelectedDraft('{{ person.id }}'); return false;"> + <span class="icon is-small"><i class="fa-solid fa-inbox-in"></i></span> + <span>Add To Queue</span> + </button> + </div> + </div> + <div id="draft-send-status-{{ person.id }}-{{ operation }}"></div> + </form> + </div> + {% else %} + {% if operation == "extract_patterns" %} + <div class="columns is-multiline" style="margin: 0 -0.35rem;"> + {% for section in result_sections %} + <div class="column is-12-mobile is-6-tablet" style="padding: 0.35rem;"> + <article class="box ai-section-box" style="height: 100%; padding: 0.65rem; margin-bottom: 0; border: 
1px solid rgba(0, 0, 0, 0.14); box-shadow: none;"> + {% if section.level <= 2 %} + <h3 class="title is-6" style="margin-bottom: 0.45rem;">{{ section.title }}</h3> + {% elif section.level == 3 %} + <h4 class="title is-6" style="margin-bottom: 0.45rem;">{{ section.title }}</h4> + {% else %} + <h5 class="subtitle is-7 has-text-weight-semibold" style="margin-bottom: 0.45rem;">{{ section.title }}</h5> + {% endif %} + + {% for block in section.blocks %} + {% if block.type == "ul" %} + <ul style="margin: 0 0 0.45rem 1.15rem;"> + {% for item in block.items %} + <li style="margin-bottom: 0.25rem;">{{ item }}</li> + {% endfor %} + </ul> + {% else %} + {% for item in block.items %} + <p style="margin-bottom: 0.45rem; white-space: pre-wrap;">{{ item }}</p> + {% endfor %} + {% endif %} + {% endfor %} + </article> + </div> + {% endfor %} + </div> + {% else %} + <div class="ai-section-stack"> + {% for section in result_sections %} + <article class="box ai-section-box" style="padding: 0.65rem; margin-bottom: 0.5rem; border: 1px solid rgba(0, 0, 0, 0.14); box-shadow: none;"> + {% if section.level <= 2 %} + <h3 class="title is-6" style="margin-bottom: 0.45rem;">{{ section.title }}</h3> + {% elif section.level == 3 %} + <h4 class="title is-6" style="margin-bottom: 0.45rem;">{{ section.title }}</h4> + {% else %} + <h5 class="subtitle is-7 has-text-weight-semibold" style="margin-bottom: 0.45rem;">{{ section.title }}</h5> + {% endif %} + + {% for block in section.blocks %} + {% if block.type == "ul" %} + <ul style="margin: 0 0 0.45rem 1.15rem;"> + {% for item in block.items %} + <li style="margin-bottom: 0.25rem;">{{ item }}</li> + {% endfor %} + </ul> + {% else %} + {% for item in block.items %} + <p style="margin-bottom: 0.45rem; white-space: pre-wrap;">{{ item }}</p> + {% endfor %} + {% endif %} + {% endfor %} + </article> + {% endfor %} + </div> + {% endif %} + {% endif %} + + {% if operation == "extract_patterns" %} + <article class="box" style="padding: 0.7rem; margin-top: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.14); box-shadow: none;"> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.4rem;">Create Framework / Rules / Games</p> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_create' type='widget' person_id=person.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML" + hx-indicator="#mitigation-create-loading-{{ person.id }}" + style="margin-bottom: 0;"> + <input type="hidden" name="ai_result_id" value="{{ ai_result_id|default:'' }}"> + <textarea name="source_text" style="display: none;">{{ result_text }}</textarea> + + <div class="columns is-multiline" style="margin: 0 -0.3rem;"> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Output</label> + <div class="select is-fullwidth is-small"> + <select name="output_profile" required> + <option value="" selected disabled>Choose one</option> + <option value="framework">Framework (balanced)</option> + <option value="rule">Rule (minimal + strict)</option> + <option value="game">Game (engaging)</option> + </select> + </div> + </div> + + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Context (single freeform input)</label> + <textarea class="textarea is-small" rows="2" name="user_context" placeholder="Optional context or constraints"></textarea> + </div> + </div> + + <button class="button 
is-small is-primary is-light"> + <span class="icon is-small"><i class="fa-solid fa-chess-board"></i></span> + <span>Create Mitigation Plan</span> + </button> + <span id="mitigation-create-loading-{{ person.id }}" class="tag is-info is-light htmx-indicator" style="margin-left: 0.45rem;"> + <span class="icon is-small"><i class="fa-solid fa-spinner fa-spin"></i></span> + <span>Building mitigation plan...</span> + </span> + </form> + </article> + + <div id="mitigation-shell-{{ person.id }}" class="box" style="padding: 0.65rem; margin-top: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.25); box-shadow: none;"> + <p class="is-size-7 has-text-grey"> + Plan editing is consolidated in the <strong>Plan</strong> tab. + </p> + </div> + {% endif %} + {% endif %} +</div> + +<style> + .draft-option-card.is-selected { + border-color: rgba(54, 54, 54, 0.85) !important; + border-width: 2px !important; + box-shadow: inset 0 0 0 1px rgba(54, 54, 54, 0.18); + background-color: rgba(54, 54, 54, 0.06) !important; + } + .htmx-indicator { + display: none; + } + .htmx-request.htmx-indicator { + display: inline-flex; + } +</style> diff --git a/core/templates/partials/ai-workspace-mitigation-panel.html b/core/templates/partials/ai-workspace-mitigation-panel.html new file mode 100644 index 0000000..5560640 --- /dev/null +++ b/core/templates/partials/ai-workspace-mitigation-panel.html @@ -0,0 +1,660 @@ +<div id="mitigation-shell-{{ person.id }}" style="margin-top: 0.7rem;"> + <div class="is-flex is-justify-content-space-between is-align-items-start" style="gap: 0.5rem; margin-bottom: 0.5rem;"> + <div> + <p class="is-size-7 has-text-weight-semibold">Pattern Mitigation</p> + <h4 class="title is-6" style="margin-bottom: 0.2rem;">{{ plan.title|default:"Mitigation Plan" }}</h4> + {% if plan.objective %} + <p class="is-size-7">{{ plan.objective }}</p> + {% endif %} + </div> + <span class="tag is-light">{{ plan.creation_mode|title }}</span> + </div> + + {% if notice_message %} + <div class="notification is-{{ notice_level|default:'info' }} is-light" style="padding: 0.5rem 0.65rem; margin-bottom: 0.55rem;"> + {{ notice_message }} + </div> + {% endif %} + + <div class="tabs is-small is-toggle is-toggle-rounded" style="margin-bottom: 0.55rem;"> + <ul> + <li id="mitigation-tab-btn-{{ person.id }}-plan_board" class="is-active"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'plan_board'); return false;">Rules & Games</a> + </li> + <li id="mitigation-tab-btn-{{ person.id }}-corrections"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'corrections'); return false;">Corrections</a> + </li> + <li id="mitigation-tab-btn-{{ person.id }}-engage"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'engage'); return false;">Engage</a> + </li> + <li id="mitigation-tab-btn-{{ person.id }}-fundamentals"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'fundamentals'); return false;">Fundamentals</a> + </li> + <li id="mitigation-tab-btn-{{ person.id }}-auto"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'auto'); return false;">Auto</a> + </li> + <li id="mitigation-tab-btn-{{ person.id }}-ask_ai"> + <a onclick="giaMitigationShowTab('{{ person.id }}', 'ask_ai'); return false;">Ask AI</a> + </li> + </ul> + </div> + + <div id="mitigation-tab-{{ person.id }}-plan_board" class="mitigation-tab-pane"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.5rem; margin-bottom: 0.45rem; flex-wrap: wrap;"> + <p class="is-size-7">Two lanes by type: rules on the left, games on the right.</p> 
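
The two lanes in this panel are fed by the related managers declared on the models earlier in this patch (`related_name='rules'`, `'games'`, and `'corrections'` on PatternMitigationPlan). A minimal sketch, assuming a hypothetical `mitigation_panel_context` helper, of how that context could be assembled; the actual view wiring may differ and pass additional keys (messages, exports, auto settings):

    from django.shortcuts import get_object_or_404

    from core.models import PatternMitigationPlan


    def mitigation_panel_context(request, person, plan_id, active_tab="plan_board"):
        # Hypothetical helper: scope the plan to its owner, then pull each
        # artifact lane through the related managers defined in core/models.py.
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)
        return {
            "person": person,
            "plan": plan,
            "rules": plan.rules.order_by("created_at"),        # left lane
            "games": plan.games.order_by("created_at"),        # right lane
            "corrections": plan.corrections.order_by("created_at"),
            "active_tab": active_tab,
        }

    # e.g. render(request, "partials/ai-workspace-mitigation-panel.html",
    #             mitigation_panel_context(request, person, plan_id))
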
+ <div class="buttons are-small" style="margin: 0;"> + <button + class="button is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_create' type='widget' person_id=person.id plan_id=plan.id kind='rule' %}" + hx-vals='{"active_tab":"plan_board"}' + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <span class="icon is-small"><i class="fa-solid fa-plus"></i></span> + <span>Rule</span> + </button> + <button + class="button is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_create' type='widget' person_id=person.id plan_id=plan.id kind='game' %}" + hx-vals='{"active_tab":"plan_board"}' + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <span class="icon is-small"><i class="fa-solid fa-plus"></i></span> + <span>Game</span> + </button> + </div> + </div> + + <div class="columns is-multiline" style="margin: 0 -0.35rem;"> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.35rem;"> + <article class="box" style="min-height: 14rem; border: 1px solid rgba(0, 0, 0, 0.15); box-shadow: none;"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.4rem; margin-bottom: 0.45rem;"> + <p class="is-size-7 has-text-weight-bold" style="letter-spacing: 0.04em; margin: 0;">RULES</p> + <button + type="button" + class="button is-small is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete_all' type='widget' person_id=person.id plan_id=plan.id kind='rule' %}" + hx-vals='{"active_tab":"plan_board"}' + hx-confirm="Delete all rules?" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete All</button> + </div> + {% for rule in rules %} + <article class="box" style="padding: 0.55rem; margin-bottom: 0.45rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none;"> + <span class="tag is-light is-small" style="margin-bottom: 0.3rem;">Rule</span> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_save' type='widget' person_id=person.id plan_id=plan.id kind='rule' artifact_id=rule.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <div class="field" style="margin-bottom: 0.35rem;"> + <input class="input is-small" type="text" name="title" value="{{ rule.title }}" data-editable="1" readonly> + </div> + <div class="field" style="margin-bottom: 0.35rem;"> + <textarea class="textarea is-small" rows="3" name="body" data-editable="1" readonly>{{ rule.content }}</textarea> + </div> + <input type="hidden" name="enabled" value="1"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'plan_board' }}"> + <div class="buttons are-small" style="margin: 0;"> + <button type="button" class="button is-link is-light" data-edit-state="view" onclick="giaMitigationToggleEdit(this); return false;">Edit</button> + <button + type="button" + class="button is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete' type='widget' person_id=person.id plan_id=plan.id kind='rule' artifact_id=rule.id %}" + hx-vals='{"active_tab":"plan_board"}' + hx-confirm="Delete this rule?" 
+ hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete</button> + </div> + </form> + </article> + {% empty %} + <article class="box" style="padding: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.2); box-shadow: none;"> + <p class="is-size-7 has-text-grey">No rules yet.</p> + </article> + {% endfor %} + </article> + </div> + + <div class="column is-12-mobile is-6-tablet" style="padding: 0.35rem;"> + <article class="box" style="min-height: 14rem; border: 1px solid rgba(0, 0, 0, 0.15); box-shadow: none;"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.4rem; margin-bottom: 0.45rem;"> + <p class="is-size-7 has-text-weight-bold" style="letter-spacing: 0.04em; margin: 0;">GAMES</p> + <button + type="button" + class="button is-small is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete_all' type='widget' person_id=person.id plan_id=plan.id kind='game' %}" + hx-vals='{"active_tab":"plan_board"}' + hx-confirm="Delete all games?" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete All</button> + </div> + {% for game in games %} + <article class="box" style="padding: 0.55rem; margin-bottom: 0.45rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none;"> + <span class="tag is-light is-small" style="margin-bottom: 0.3rem;">Game</span> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_save' type='widget' person_id=person.id plan_id=plan.id kind='game' artifact_id=game.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <div class="field" style="margin-bottom: 0.35rem;"> + <input class="input is-small" type="text" name="title" value="{{ game.title }}" data-editable="1" readonly> + </div> + <div class="field" style="margin-bottom: 0.35rem;"> + <textarea class="textarea is-small" rows="3" name="body" data-editable="1" readonly>{{ game.instructions }}</textarea> + </div> + <input type="hidden" name="enabled" value="1"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'plan_board' }}"> + <div class="buttons are-small" style="margin: 0;"> + <button type="button" class="button is-link is-light" data-edit-state="view" onclick="giaMitigationToggleEdit(this); return false;">Edit</button> + <button + type="button" + class="button is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete' type='widget' person_id=person.id plan_id=plan.id kind='game' artifact_id=game.id %}" + hx-vals='{"active_tab":"plan_board"}' + hx-confirm="Delete this game?" 
+ hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete</button> + </div> + </form> + </article> + {% empty %} + <article class="box" style="padding: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.2); box-shadow: none;"> + <p class="is-size-7 has-text-grey">No games yet.</p> + </article> + {% endfor %} + </article> + </div> + </div> + </div> + + <div id="mitigation-tab-{{ person.id }}-corrections" class="mitigation-tab-pane" style="display: none;"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.5rem; margin-bottom: 0.45rem; flex-wrap: wrap;"> + <p class="is-size-7">Corrections capture situation-specific clarification points.</p> + <div class="buttons are-small" style="margin: 0;"> + <button + class="button is-small is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_create' type='widget' person_id=person.id plan_id=plan.id kind='correction' %}" + hx-vals='{"active_tab":"corrections"}' + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <span class="icon is-small"><i class="fa-solid fa-plus"></i></span> + <span>Correction</span> + </button> + <button + type="button" + class="button is-small is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete_all' type='widget' person_id=person.id plan_id=plan.id kind='correction' %}" + hx-vals='{"active_tab":"corrections"}' + hx-confirm="Delete all corrections?" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete All</button> + </div> + </div> + + {% if corrections %} + {% for correction in corrections %} + <article class="box" style="padding: 0.55rem; margin-bottom: 0.5rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none;"> + <span class="tag is-light is-small" style="margin-bottom: 0.3rem;">Correction</span> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_save' type='widget' person_id=person.id plan_id=plan.id kind='correction' artifact_id=correction.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <div class="columns is-multiline" style="margin: 0 -0.3rem;"> + <div class="column is-12" style="padding: 0.3rem;"> + <input class="input is-small" type="text" name="title" value="{{ correction.title }}"> + </div> + <div class="column is-12" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.2rem;">Message Context</label> + <textarea class="textarea is-small" rows="2" name="source_phrase">{{ correction.source_phrase }}</textarea> + </div> + <div class="column is-12" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.2rem;">Insight</label> + <textarea class="textarea is-small" rows="2" name="body">{{ correction.clarification }}</textarea> + </div> + </div> + <input type="hidden" name="enabled" value="1"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'corrections' }}"> + <div class="buttons are-small" style="margin: 0;"> + <button class="button is-small is-link is-light">Save Correction</button> + <button + type="button" + class="button is-small is-danger is-light" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_artifact_delete' type='widget' person_id=person.id plan_id=plan.id kind='correction' artifact_id=correction.id %}" + hx-vals='{"active_tab":"corrections"}' + hx-confirm="Delete this 
correction?" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML">Delete</button> + </div> + </form> + </article> + {% endfor %} + {% else %} + <article class="box" style="padding: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.2); box-shadow: none;"> + <p class="is-size-7 has-text-grey">No corrections yet.</p> + </article> + {% endif %} + </div> + + <div id="mitigation-tab-{{ person.id }}-engage" class="mitigation-tab-pane" style="display: none;"> + <article class="box" style="padding: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none; margin-bottom: 0.55rem;"> + <p class="is-size-7" style="margin-bottom: 0.45rem;"> + Build a share-ready message from a rule, game, or correction. Voice framing now lives here. + </p> + <p class="is-size-7" style="margin-bottom: 0;"><strong>Send:</strong> {{ send_state.text }}</p> + </article> + + <form + id="engage-form-{{ person.id }}" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_engage_share' type='widget' person_id=person.id plan_id=plan.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'engage' }}"> + <input type="hidden" id="engage-action-input-{{ person.id }}" name="action" value="preview"> + <input type="hidden" id="engage-force-send-{{ person.id }}" name="force_send" value="0"> + <div class="columns is-multiline" style="margin: 0 -0.3rem;"> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Source</label> + <div class="select is-small is-fullwidth"> + <select name="source_ref" required onchange="giaEngageAutoPreview('{{ person.id }}');"> + {% if engage_options %} + {% for option in engage_options %} + <option value="{{ option.value }}" {% if option.value == engage_form.source_ref %}selected{% endif %}>{{ option.label }}</option> + {% endfor %} + {% else %} + {% for rule in rules %} + <option value="rule:{{ rule.id }}">Rule: {{ rule.title }}</option> + {% endfor %} + {% for game in games %} + <option value="game:{{ game.id }}">Game: {{ game.title }}</option> + {% endfor %} + {% for correction in corrections %} + <option value="correction:{{ correction.id }}">Correction: {{ correction.title }}</option> + {% endfor %} + {% endif %} + </select> + </div> + </div> + + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Framing</label> + <input type="hidden" id="engage-framing-input-{{ person.id }}" name="framing" value="{{ engage_form.framing|default:'dont_change' }}"> + <div id="engage-framing-tabs-{{ person.id }}" class="tabs is-small is-toggle is-toggle-rounded" style="margin-bottom: 0;"> + <ul> + <li class="{% if engage_form.framing == 'dont_change' or engage_form.framing == 'neutral' or engage_form.framing == 'named' or not engage_form.framing %}is-active{% endif %}"> + <a onclick="giaEngageSelect('{{ person.id }}', 'framing', 'dont_change', this); return false;">Don't Change</a> + </li> + <li class="{% if engage_form.framing == 'shared' %}is-active{% endif %}"> + <a onclick="giaEngageSelect('{{ person.id }}', 'framing', 'shared', this); return false;">Shared (We/Us)</a> + </li> + </ul> + </div> + </div> + + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Share With</label> + <input type="hidden" id="engage-share-input-{{ 
person.id }}" name="share_target" value="{{ engage_form.share_target|default:'self' }}"> + <div id="engage-share-tabs-{{ person.id }}" class="tabs is-small is-toggle is-toggle-rounded" style="margin-bottom: 0;"> + <ul> + <li class="{% if engage_form.share_target == 'self' or not engage_form.share_target %}is-active{% endif %}"> + <a onclick="giaEngageSelect('{{ person.id }}', 'share', 'self', this); return false;">Me</a> + </li> + <li class="{% if engage_form.share_target == 'other' %}is-active{% endif %}"> + <a onclick="giaEngageSelect('{{ person.id }}', 'share', 'other', this); return false;">Other Party</a> + </li> + <li class="{% if engage_form.share_target == 'both' %}is-active{% endif %}"> + <a onclick="giaEngageSelect('{{ person.id }}', 'share', 'both', this); return false;">Both Parties</a> + </li> + </ul> + </div> + </div> + + <div class="column is-12" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Context (optional)</label> + <textarea class="textarea is-small" rows="2" name="context_note" placeholder="One additional note for this share.">{{ engage_form.context_note }}</textarea> + </div> + + </div> + + <div class="buttons are-small" style="margin-top: 0.15rem;"> + <button id="engage-send-btn-{{ person.id }}" type="submit" class="button is-link is-light" onclick="giaEngageSetAction('{{ person.id }}', 'send');" {% if not send_state.can_send %}disabled{% endif %}> + <span class="icon is-small"><i class="fa-solid fa-paper-plane"></i></span> + <span>Send</span> + </button> + <button type="submit" class="button is-info is-light" onclick="giaEngageSetAction('{{ person.id }}', 'queue');"> + <span class="icon is-small"><i class="fa-solid fa-inbox-in"></i></span> + <span>Add To Queue</span> + </button> + </div> + </form> + + {% if engage_preview %} + <article class="box {% if engage_preview_flash %}engage-preview-flash{% endif %}" style="margin-top: 0.6rem; padding: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.14); box-shadow: none;"> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.35rem;">Preview</p> + <pre style="margin: 0; white-space: pre-wrap; font-size: 0.78rem; line-height: 1.36;">{{ engage_preview }}</pre> + </article> + {% else %} + <article class="box" style="margin-top: 0.6rem; padding: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.2); box-shadow: none;"> + <p class="is-size-7 has-text-grey">No preview yet.</p> + </article> + {% endif %} + </div> + + <div id="mitigation-tab-{{ person.id }}-fundamentals" class="mitigation-tab-pane" style="display: none;"> + <div class="columns is-multiline" style="margin: 0 -0.35rem;"> + <div class="column is-12-mobile is-5-tablet" style="padding: 0.35rem;"> + <article class="box" style="padding: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none; height: 100%;"> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.4rem;">Current Fundamentals</p> + {% if plan.fundamental_items %} + <div class="content" style="margin-bottom: 0;"> + <ul style="margin-top: 0;"> + {% for item in plan.fundamental_items %} + <li>{{ item }}</li> + {% endfor %} + </ul> + </div> + {% else %} + <p class="is-size-7 has-text-grey">No fundamentals yet.</p> + {% endif %} + </article> + </div> + <div class="column is-12-mobile is-7-tablet" style="padding: 0.35rem;"> + <article class="box" style="padding: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none;"> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 
'ai_workspace_mitigation_fundamentals_save' type='widget' person_id=person.id plan_id=plan.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'fundamentals' }}"> + <div class="field" style="margin-bottom: 0.45rem;"> + <label class="label is-small">Edit Fundamentals (one per line)</label> + <textarea class="textarea is-small" rows="10" name="fundamentals_text">{{ fundamentals_text }}</textarea> + </div> + <button class="button is-small is-link is-light">Save Fundamentals</button> + </form> + </article> + </div> + </div> + </div> + + <div id="mitigation-tab-{{ person.id }}-auto" class="mitigation-tab-pane" style="display: none;"> + <article class="box" style="padding: 0.65rem; border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none; margin-bottom: 0.55rem;"> + <p class="is-size-7" style="margin-bottom: 0.35rem;"> + Auto checks read recent message rows and can write linked mitigation objects for this workspace conversation. + </p> + <p class="is-size-7" style="margin-bottom: 0;"> + Last run: {% if auto_settings.last_run_at %}{{ auto_settings.last_run_at }}{% else %}Never{% endif %} + </p> + {% if auto_settings.last_result_summary %} + <p class="is-size-7" style="margin-top: 0.35rem; margin-bottom: 0;">{{ auto_settings.last_result_summary }}</p> + {% endif %} + </article> + + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_auto' type='widget' person_id=person.id plan_id=plan.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <input type="hidden" name="active_tab" value="auto"> + <div class="columns is-multiline" style="margin: 0 -0.3rem;"> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="checkbox is-size-7"><input type="checkbox" name="enabled" value="1" {% if auto_settings.enabled %}checked{% endif %}> Enable auto checks for this Conversation</label> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="checkbox is-size-7"><input type="checkbox" name="auto_pattern_recognition" value="1" {% if auto_settings.auto_pattern_recognition %}checked{% endif %}> Detect pattern signals from Message rows</label> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="checkbox is-size-7"><input type="checkbox" name="auto_create_mitigation" value="1" {% if auto_settings.auto_create_mitigation %}checked{% endif %}> Create a Plan when the Conversation has none</label> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="checkbox is-size-7"><input type="checkbox" name="auto_create_corrections" value="1" {% if auto_settings.auto_create_corrections %}checked{% endif %}> Create Correction rows linked to the Plan</label> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="checkbox is-size-7"><input type="checkbox" name="auto_notify_enabled" value="1" {% if auto_settings.auto_notify_enabled %}checked{% endif %}> Notify when auto writes new Correction rows</label> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Message rows per check</label> + <input class="input is-small" type="number" min="10" max="200" name="sample_message_window" value="{{ auto_settings.sample_message_window }}"> + </div> + <div class="column is-12-mobile 
is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Cooldown seconds between checks</label> + <input class="input is-small" type="number" min="0" max="86400" name="check_cooldown_seconds" value="{{ auto_settings.check_cooldown_seconds }}"> + </div> + <div class="column is-12-mobile is-6-tablet" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">NTFY topic override for auto</label> + <input class="input is-small" type="text" name="ntfy_topic_override" value="{{ auto_settings.ntfy_topic_override|default:'' }}" placeholder="Optional topic override"> + </div> + <div class="column is-12" style="padding: 0.3rem;"> + <label class="label is-small" style="margin-bottom: 0.25rem;">NTFY URL override for auto</label> + <input class="input is-small" type="text" name="ntfy_url_override" value="{{ auto_settings.ntfy_url_override|default:'' }}" placeholder="Optional NTFY URL override"> + </div> + <div class="column is-12" style="padding: 0.3rem;"> + <p class="is-size-7 has-text-grey">If overrides are empty, notifications fall back to Notification Settings topic/url.</p> + </div> + </div> + + <div class="buttons are-small" style="margin-top: 0.2rem;"> + <button class="button is-link is-light" name="action" value="save">Save Auto Controls</button> + <button class="button is-primary is-light" name="action" value="run_now">Run Check Now</button> + </div> + </form> + </div> + + <div id="mitigation-tab-{{ person.id }}-ask_ai" class="mitigation-tab-pane" style="display: none;"> + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_export' type='widget' person_id=person.id plan_id=plan.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML" + style="margin-bottom: 0.55rem;"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'ask_ai' }}"> + <div class="field is-grouped is-grouped-multiline is-align-items-flex-end" style="margin-bottom: 0; gap: 0.35rem;"> + <div class="control"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Bundle</label> + <div class="select is-small"> + <select name="artifact_type"> + <option value="rulebook">Rulebook</option> + <option value="rules">Rules</option> + <option value="games">Games</option> + <option value="corrections">Corrections</option> + </select> + </div> + </div> + <div class="control"> + <label class="label is-small" style="margin-bottom: 0.25rem;">Format</label> + <div class="select is-small"> + <select name="export_format"> + <option value="markdown">Markdown</option> + <option value="json">JSON</option> + <option value="text">Text</option> + </select> + </div> + </div> + <div class="control"> + <button class="button is-small is-link is-light" style="margin-top: 1.35rem;"> + <span class="icon is-small"><i class="fa-solid fa-file-export"></i></span> + <span>Export</span> + </button> + </div> + </div> + </form> + + {% if latest_export %} + <article class="box" style="padding: 0.55rem; margin-bottom: 0.6rem; border: 1px dashed rgba(0, 0, 0, 0.25); box-shadow: none;"> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.3rem;"> + Last Export: {{ latest_export.artifact_type|title }} ({{ latest_export.export_format|upper }}) + </p> + <pre style="max-height: 14rem; overflow: auto; margin: 0; white-space: pre-wrap; font-size: 0.72rem; line-height: 1.28;">{{ latest_export.payload }}</pre> + </article> + {% endif %} + + <article class="box" style="padding: 0.65rem; 
border: 1px solid rgba(0, 0, 0, 0.12); box-shadow: none;"> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.4rem;">Ask AI</p> + + <div style="max-height: 12rem; overflow-y: auto; margin-bottom: 0.55rem; padding-right: 0.2rem;"> + {% for message in mitigation_messages %} + <div style="margin-bottom: 0.45rem;"> + <span class="tag is-light is-small">{{ message.role }}</span> + <div style="margin-top: 0.15rem; white-space: pre-wrap;">{{ message.text }}</div> + </div> + {% empty %} + <p class="is-size-7 has-text-grey">No messages yet.</p> + {% endfor %} + </div> + + <form + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'ai_workspace_mitigation_chat' type='widget' person_id=person.id plan_id=plan.id %}" + hx-target="#mitigation-shell-{{ person.id }}" + hx-swap="outerHTML"> + <input type="hidden" name="active_tab" value="{{ active_tab|default:'ask_ai' }}"> + <div class="field" style="margin-bottom: 0.5rem;"> + <div class="control"> + <textarea name="message" class="textarea is-small" rows="2" placeholder="Refine the plan or request a new lens..."></textarea> + </div> + </div> + <button class="button is-small is-primary is-light"> + <span class="icon is-small"><i class="fa-solid fa-comments"></i></span> + <span>Ask AI</span> + </button> + </form> + </article> + </div> +</div> + +<style> + @keyframes engagePreviewPulse { + 0% { background-color: rgba(255, 255, 255, 1); } + 45% { background-color: rgba(236, 246, 255, 1); } + 100% { background-color: rgba(255, 255, 255, 1); } + } + #mitigation-shell-{{ person.id }} .engage-preview-flash { + animation: engagePreviewPulse 850ms ease-in-out 1; + } +</style> + +<script> + (function() { + const personId = "{{ person.id }}"; + const canSend = "{{ send_state.can_send|yesno:'1,0' }}" === "1"; + function resizeEditableTextareas(root) { + if (!root) return; + root.querySelectorAll('textarea[data-editable="1"]').forEach(function(area) { + area.style.height = "auto"; + area.style.height = Math.max(area.scrollHeight, 72) + "px"; + }); + } + + window.giaEngageSyncSendOverride = function(pid) { + if (pid !== personId) return; + const forceInput = document.getElementById("engage-force-send-" + pid); + const sendBtn = document.getElementById("engage-send-btn-" + pid); + const force = + !!(window.giaWorkspaceState + && window.giaWorkspaceState[pid] + && window.giaWorkspaceState[pid].forceSend); + if (forceInput) { + forceInput.value = force ? "1" : "0"; + } + if (sendBtn) { + sendBtn.disabled = !canSend && !force; + } + }; + + function setActiveTabHiddenFields(tabName) { + const root = document.getElementById("mitigation-shell-" + personId); + if (!root) return; + root.querySelectorAll('input[name="active_tab"]').forEach(function(input) { + input.value = tabName; + }); + resizeEditableTextareas(root); + } + + window.giaMitigationShowTab = function(pid, tabName) { + if (pid !== personId) return; + ["plan_board", "corrections", "engage", "fundamentals", "auto", "ask_ai"].forEach(function(name) { + const pane = document.getElementById("mitigation-tab-" + personId + "-" + name); + const tab = document.getElementById("mitigation-tab-btn-" + personId + "-" + name); + if (!pane || !tab) return; + const active = name === tabName; + pane.style.display = active ? 
"block" : "none"; + tab.classList.toggle("is-active", active); + }); + setActiveTabHiddenFields(tabName); + }; + + window.giaMitigationToggleEdit = function(button) { + const form = button.closest("form"); + if (!form) return; + const editing = button.dataset.editState === "edit"; + const fields = form.querySelectorAll('[data-editable="1"]'); + if (!editing) { + fields.forEach(function(field) { + field.removeAttribute("readonly"); + }); + button.dataset.editState = "edit"; + button.textContent = "Save"; + button.classList.remove("is-light"); + resizeEditableTextareas(form); + } else { + form.requestSubmit(); + } + }; + + window.giaEngageSetAction = function(pid, action) { + if (pid !== personId) return; + const actionInput = document.getElementById("engage-action-input-" + pid); + if (actionInput) { + actionInput.value = action; + } + if (action === "send") { + window.giaEngageSyncSendOverride(pid); + } + }; + + window.giaEngageAutoPreview = function(pid) { + if (pid !== personId) return; + const form = document.getElementById("engage-form-" + pid); + if (!form) return; + window.giaEngageSetAction(pid, "preview"); + form.requestSubmit(); + }; + + window.giaEngageSelect = function(pid, kind, value, node) { + if (pid !== personId) return; + let inputId = ""; + if (kind === "share") { + inputId = "engage-share-input-" + pid; + } else if (kind === "framing") { + inputId = "engage-framing-input-" + pid; + } + const input = inputId ? document.getElementById(inputId) : null; + if (input) { + input.value = value; + } + const li = node && node.closest ? node.closest("li") : null; + if (!li) return; + const ul = li.parentElement; + if (!ul) return; + Array.from(ul.children).forEach(function(child) { + child.classList.remove("is-active"); + }); + li.classList.add("is-active"); + window.giaEngageAutoPreview(pid); + }; + + window.giaMitigationShowTab(personId, "{{ active_tab|default:'plan_board' }}"); + resizeEditableTextareas(document.getElementById("mitigation-shell-" + personId)); + window.giaEngageSyncSendOverride(personId); + })(); +</script> diff --git a/core/templates/partials/ai-workspace-mitigation-status.html b/core/templates/partials/ai-workspace-mitigation-status.html new file mode 100644 index 0000000..5bb2308 --- /dev/null +++ b/core/templates/partials/ai-workspace-mitigation-status.html @@ -0,0 +1,5 @@ +<div id="mitigation-shell-{{ person.id }}" class="box" style="padding: 0.65rem; margin-top: 0.65rem; border: 1px dashed rgba(0, 0, 0, 0.25); box-shadow: none;"> + <div class="notification is-{{ level|default:'info' }} is-light" style="padding: 0.55rem 0.7rem; margin: 0;"> + {{ message }} + </div> +</div> diff --git a/core/templates/partials/ai-workspace-person-widget.html b/core/templates/partials/ai-workspace-person-widget.html new file mode 100644 index 0000000..d9c92d4 --- /dev/null +++ b/core/templates/partials/ai-workspace-person-widget.html @@ -0,0 +1,698 @@ +<div + class="ai-person-widget" + id="ai-person-widget-{{ person.id }}" + data-run-url-template="{% url 'ai_workspace_run' type='widget' person_id=person.id operation='summarise' %}" + data-send-url="{% url 'ai_workspace_send' type='widget' person_id=person.id %}" + data-queue-url="{% url 'ai_workspace_queue' type='widget' person_id=person.id %}" + data-limit="{{ limit }}" + data-can-send="{{ send_state.can_send|yesno:'1,0' }}"> + <div style="margin-bottom: 0.75rem; padding: 0.5rem 0.25rem; border-bottom: 1px solid rgba(0, 0, 0, 0.12);"> + <p class="is-size-7 has-text-weight-semibold">Selected Person</p> + <h3 class="title 
is-5" style="margin-bottom: 0.25rem;">{{ person.name }}</h3> + <p class="is-size-7">Showing last {{ limit }} messages.</p> + </div> + + <div class="notification is-{{ send_state.level }} is-light" style="padding: 0.5rem 0.75rem;"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.4rem; flex-wrap: wrap;"> + <div><strong>Send:</strong> {{ send_state.text }}</div> + <div class="buttons are-small" style="margin: 0;"> + {% if not send_state.can_send %} + <button + type="button" + id="draft-override-top-btn-{{ person.id }}" + class="button is-warning is-light" + onclick="giaWorkspaceEnableSendOverride('{{ person.id }}', 'draft_reply'); return false;"> + <span class="icon is-small"><i class="fa-solid fa-triangle-exclamation"></i></span> + <span>Allow Send In Pane</span> + </button> + {% endif %} + </div> + </div> + <div id="draft-top-status-{{ person.id }}" style="margin-top: 0.5rem;"></div> + </div> + + <form id="ai-op-form-{{ person.id }}" style="margin-bottom: 0.75rem;"> + <input type="hidden" name="limit" value="{{ limit }}"> + <div class="field"> + <label class="label is-small">Notes</label> + <div class="control"> + <textarea class="textarea is-small" name="user_notes" rows="2" placeholder="Optional intent/context"></textarea> + </div> + </div> + </form> + + <div id="ai-response-shell-{{ person.id }}" style="display: block; margin-bottom: 0.9rem;"> + <div class="ai-response-capsule" style="margin-bottom: 0.5rem; border: 1px solid rgba(0, 0, 0, 0.16); border-radius: 8px; padding: 0.5rem 0.6rem;"> + <div class="is-flex is-justify-content-space-between is-align-items-center" style="margin-bottom: 0.4rem;"> + <div class="tabs is-small is-toggle is-toggle-rounded" style="margin-bottom: 0;"> + <ul> + <li id="ai-tab-{{ person.id }}-artifacts"> + <a onclick="giaWorkspaceRun('{{ person.id }}', 'artifacts', false); return false;">Plan</a> + </li> + <li id="ai-tab-{{ person.id }}-summarise"> + <a onclick="giaWorkspaceRun('{{ person.id }}', 'summarise', false); return false;">Summary</a> + </li> + <li id="ai-tab-{{ person.id }}-draft_reply" class="is-active"> + <a onclick="giaWorkspaceRun('{{ person.id }}', 'draft_reply', false); return false;">Draft</a> + </li> + <li id="ai-tab-{{ person.id }}-extract_patterns"> + <a onclick="giaWorkspaceRun('{{ person.id }}', 'extract_patterns', false); return false;">Patterns</a> + </li> + </ul> + </div> + <div class="is-flex is-align-items-center" style="gap: 0.35rem;"> + <span id="ai-cache-indicator-{{ person.id }}" class="tag is-warning is-light is-small" style="display: none;"> + Cached + </span> + <button + type="button" + class="button is-small is-ghost" + title="Refresh current tab" + onclick="giaWorkspaceRefresh('{{ person.id }}'); return false;"> + <span class="icon is-small"><i class="fa-solid fa-rotate-right"></i></span> + </button> + </div> + </div> + + <div id="ai-stage-{{ person.id }}" style="min-height: 7rem;"> + <div id="ai-pane-{{ person.id }}-artifacts" class="ai-pane" style="display: none;"> + <button + type="button" + class="button is-warning is-light is-small is-rounded" + onclick="giaWorkspaceRun('{{ person.id }}', 'artifacts', false); return false;"> + <span class="icon is-small"><i class="fa-solid fa-table-columns"></i></span> + <span>Plan</span> + </button> + </div> + <div id="ai-pane-{{ person.id }}-summarise" class="ai-pane" style="display: none;"> + <button + type="button" + class="button is-link is-light is-small is-rounded" + onclick="giaWorkspaceRun('{{ person.id }}', 'summarise', false); 
return false;"> + <span class="icon is-small"><i class="fa-solid fa-list-check"></i></span> + <span>Summary</span> + </button> + </div> + <div id="ai-pane-{{ person.id }}-draft_reply" class="ai-pane"> + <button + type="button" + class="button is-primary is-light is-small is-rounded" + onclick="giaWorkspaceRun('{{ person.id }}', 'draft_reply', false); return false;"> + <span class="icon is-small"><i class="fa-solid fa-pen"></i></span> + <span>Draft</span> + </button> + </div> + <div id="ai-pane-{{ person.id }}-extract_patterns" class="ai-pane" style="display: none;"> + <button + type="button" + class="button is-info is-light is-small is-rounded" + onclick="giaWorkspaceRun('{{ person.id }}', 'extract_patterns', false); return false;"> + <span class="icon is-small"><i class="fa-solid fa-wave-square"></i></span> + <span>Patterns</span> + </button> + </div> + </div> + </div> + </div> + + <div id="ai-message-list-{{ person.id }}" style="max-height: 65vh; overflow-y: auto; padding-right: 0.25rem;"> + {% if message_rows %} + {% for row in message_rows %} + <article class="media ai-message-row" data-ts="{{ row.message.ts }}" style="margin-bottom: 0.75rem;"> + <div class="media-content"> + <div + class="content" + style="margin-left: {% if row.direction == 'out' %}15%{% else %}0{% endif %}; margin-right: {% if row.direction == 'in' %}15%{% else %}0{% endif %};"> + <div + style="margin-bottom: 0.25rem; padding: 0.6rem; border-radius: 6px; border: 1px solid rgba(0, 0, 0, 0.15); background: {% if row.direction == 'out' %}#f0f7ff{% else %}transparent{% endif %}; box-shadow: none;"> + <p style="white-space: pre-wrap; margin-bottom: 0.35rem;">{{ row.message.text|default:"(no text)" }}</p> + <p class="is-size-7"> + {{ row.ts_label }} + {% if row.message.custom_author %} + | {{ row.message.custom_author }} + {% endif %} + </p> + </div> + </div> + </div> + </article> + {% endfor %} + {% else %} + <p class="has-text-grey">No messages found for this contact.</p> + {% endif %} + </div> +</div> + +<style> + @keyframes aiFadeInUp { + from { opacity: 0; transform: translateY(6px); } + to { opacity: 1; transform: translateY(0); } + } + .ai-animate-in { + animation: aiFadeInUp 180ms ease-out; + } + .ai-response-capsule { + transition: all 180ms ease-out; + } +</style> + +<script> + (function() { + const personId = "{{ person.id }}"; + const canSend = (document.getElementById("ai-person-widget-" + personId)?.dataset.canSend || "0") === "1"; + const CACHE_TTL_MS = 15 * 60 * 1000; // 15 minutes + const widget = document.getElementById("ai-person-widget-" + personId); + if (!widget) { + return; + } + window.giaWorkspaceState = window.giaWorkspaceState || {}; + window.giaWorkspaceCache = window.giaWorkspaceCache || (function() { + try { + // One-time migration flush to avoid stale cached pane HTML from earlier UI schema. + localStorage.removeItem("gia_workspace_cache_v1"); + localStorage.removeItem("gia_workspace_cache_v2"); + return JSON.parse(localStorage.getItem("gia_workspace_cache_v3") || "{}"); + } catch (e) { + return {}; + } + })(); + + function persistCache() { + try { + localStorage.setItem("gia_workspace_cache_v3", JSON.stringify(window.giaWorkspaceCache)); + } catch (e) { + // Ignore storage write issues. 
+ } + } + + function runUrl(operation) { + const template = widget.dataset.runUrlTemplate || ""; + if (template.indexOf("/summarise/") >= 0) { + return template.replace("/summarise/", "/" + operation + "/"); + } + return template.replace("summarise", operation); + } + + function formData() { + const form = document.getElementById("ai-op-form-" + personId); + const params = new URLSearchParams(new FormData(form)); + return params; + } + + function cacheKey(operation) { + return personId + "|" + operation + "|" + formData().toString(); + } + + function applyForceSendState(operation) { + const force = !!(window.giaWorkspaceState[personId] && window.giaWorkspaceState[personId].forceSend); + const forceInput = document.getElementById("draft-send-force-" + personId + "-" + operation); + const sendBtn = document.getElementById("draft-send-btn-" + personId + "-" + operation); + if (forceInput) { + forceInput.value = force ? "1" : "0"; + } + if (sendBtn && !canSend) { + sendBtn.disabled = !force; + } + } + + function formatUtcLabel(tsMs) { + const ts = Number(tsMs || 0); + if (!ts) { + return ""; + } + const dt = new Date(ts); + function pad(value) { + return String(value).padStart(2, "0"); + } + return ( + dt.getUTCFullYear() + + "-" + pad(dt.getUTCMonth() + 1) + + "-" + pad(dt.getUTCDate()) + + " " + pad(dt.getUTCHours()) + + ":" + pad(dt.getUTCMinutes()) + + " UTC" + ); + } + + function appendOutgoingMessage(tsMs, text, author) { + const host = document.getElementById("ai-message-list-" + personId); + if (!host) { + return; + } + const noMessages = host.querySelector("p.has-text-grey"); + if (noMessages) { + noMessages.remove(); + } + + const article = document.createElement("article"); + article.className = "media ai-message-row"; + article.dataset.ts = String(Number(tsMs || Date.now())); + article.style.marginBottom = "0.75rem"; + + const mediaContent = document.createElement("div"); + mediaContent.className = "media-content"; + + const contentWrap = document.createElement("div"); + contentWrap.className = "content"; + contentWrap.style.marginLeft = "15%"; + contentWrap.style.marginRight = "0"; + + const bubble = document.createElement("div"); + bubble.style.marginBottom = "0.25rem"; + bubble.style.padding = "0.6rem"; + bubble.style.borderRadius = "6px"; + bubble.style.border = "1px solid rgba(0, 0, 0, 0.15)"; + bubble.style.background = "#f0f7ff"; + bubble.style.boxShadow = "none"; + + const bodyP = document.createElement("p"); + bodyP.style.whiteSpace = "pre-wrap"; + bodyP.style.marginBottom = "0.35rem"; + bodyP.textContent = text || "(no text)"; + + const metaP = document.createElement("p"); + metaP.className = "is-size-7"; + metaP.textContent = formatUtcLabel(tsMs); + if (author) { + metaP.textContent += " | " + author; + } + + bubble.appendChild(bodyP); + bubble.appendChild(metaP); + contentWrap.appendChild(bubble); + mediaContent.appendChild(contentWrap); + article.appendChild(mediaContent); + host.appendChild(article); + + const maxRows = Math.max(5, Math.min(parseInt(widget.dataset.limit || "20", 10) || 20, 200)); + const rows = host.querySelectorAll(".ai-message-row"); + if (rows.length > maxRows) { + const removeCount = rows.length - maxRows; + for (let i = 0; i < removeCount; i += 1) { + if (rows[i] && rows[i].parentNode) { + rows[i].parentNode.removeChild(rows[i]); + } + } + } + host.scrollTop = host.scrollHeight; + } + + function getCacheEntry(operation) { + const key = cacheKey(operation); + const raw = window.giaWorkspaceCache[key]; + if (!raw) { + return null; + } + function 
evict() { + delete window.giaWorkspaceCache[key]; + persistCache(); + } + if (typeof raw === "string") { + // Backward compatibility: old format has no timestamp; treat as expired. + evict(); + return null; + } + if (raw && typeof raw === "object" && typeof raw.html === "string") { + const ts = typeof raw.ts === "number" ? raw.ts : null; + if (!ts) { + evict(); + return null; + } + if ((Date.now() - ts) > CACHE_TTL_MS) { + evict(); + return null; + } + return { html: raw.html, ts: ts }; + } + evict(); + return null; + } + + function formatCacheAge(ts) { + if (!ts) { + return "Cached"; + } + const deltaSec = Math.max(0, Math.floor((Date.now() - ts) / 1000)); + if (deltaSec < 5) return "Cached just now"; + if (deltaSec < 60) return "Cached " + deltaSec + "s ago"; + if (deltaSec < 3600) return "Cached " + Math.floor(deltaSec / 60) + "m ago"; + if (deltaSec < 86400) return "Cached " + Math.floor(deltaSec / 3600) + "h ago"; + return "Cached " + Math.floor(deltaSec / 86400) + "d ago"; + } + + function executeInlineScripts(container) { + if (!container) { + return; + } + const scripts = container.querySelectorAll("script"); + scripts.forEach(function(oldScript) { + const newScript = document.createElement("script"); + if (oldScript.src) { + newScript.src = oldScript.src; + } else { + newScript.textContent = oldScript.textContent || ""; + } + Array.from(oldScript.attributes || []).forEach(function(attr) { + if (attr.name !== "src") { + newScript.setAttribute(attr.name, attr.value); + } + }); + oldScript.parentNode.replaceChild(newScript, oldScript); + }); + } + + function setCachedIndicator(show, ts) { + const indicator = document.getElementById("ai-cache-indicator-" + personId); + if (!indicator) { + return; + } + if (show) { + indicator.textContent = formatCacheAge(ts); + } + indicator.style.display = show ? "inline-flex" : "none"; + } + + function hydrateCachedIfAvailable(operation) { + if (operation === "artifacts") { + return false; + } + const entry = getCacheEntry(operation); + const pane = document.getElementById("ai-pane-" + personId + "-" + operation); + if (!pane) { + return false; + } + if (entry && !pane.dataset.loaded) { + pane.innerHTML = entry.html; + pane.dataset.loaded = "1"; + executeInlineScripts(pane); + if (window.htmx) { + window.htmx.process(pane); + } + return true; + } + return false; + } + + window.giaWorkspaceShowTab = function(pid, operation) { + if (pid !== personId) { + return; + } + ["artifacts", "summarise", "draft_reply", "extract_patterns"].forEach(function(op) { + const tab = document.getElementById("ai-tab-" + personId + "-" + op); + const pane = document.getElementById("ai-pane-" + personId + "-" + op); + if (!tab || !pane) { + return; + } + if (op === operation) { + tab.classList.add("is-active"); + pane.style.display = "block"; + } else { + tab.classList.remove("is-active"); + pane.style.display = "none"; + } + }); + const hydrated = hydrateCachedIfAvailable(operation); + const entry = operation === "artifacts" ? null : getCacheEntry(operation); + setCachedIndicator(hydrated || !!entry, entry ? 
entry.ts : null); + window.giaWorkspaceState[personId] = window.giaWorkspaceState[personId] || {}; + window.giaWorkspaceState[personId].current = operation; + }; + + window.giaWorkspaceRun = function(pid, operation, forceRefresh) { + if (pid !== personId) { + return; + } + const cacheAllowed = operation !== "artifacts"; + const shell = document.getElementById("ai-response-shell-" + personId); + const pane = document.getElementById("ai-pane-" + personId + "-" + operation); + if (!shell || !pane) { + return; + } + const currentState = window.giaWorkspaceState[personId] || {}; + if (!forceRefresh && currentState.current === operation && pane.dataset.loaded === "1") { + window.giaWorkspaceShowTab(personId, operation); + return; + } + window.giaWorkspaceShowTab(personId, operation); + + const key = cacheKey(operation); + const entry = getCacheEntry(operation); + if (cacheAllowed && !forceRefresh && entry) { + pane.innerHTML = entry.html; + pane.dataset.loaded = "1"; + pane.classList.remove("ai-animate-in"); + void pane.offsetWidth; + pane.classList.add("ai-animate-in"); + setCachedIndicator(true, entry.ts); + if (window.htmx) { + window.htmx.process(pane); + } + if (operation === "draft_reply" && typeof window.giaWorkspaceUseDraft === "function") { + window.giaWorkspaceUseDraft(personId, operation, 0); + } + return; + } + + setCachedIndicator(false, null); + pane.innerHTML = '<div class="notification is-light ai-animate-in">Loading...</div>'; + const url = runUrl(operation) + "?" + formData().toString(); + fetch(url, { method: "GET" }) + .then(function(resp) { return resp.text(); }) + .then(function(html) { + pane.innerHTML = html; + pane.dataset.loaded = "1"; + executeInlineScripts(pane); + pane.classList.remove("ai-animate-in"); + void pane.offsetWidth; + pane.classList.add("ai-animate-in"); + if (cacheAllowed) { + window.giaWorkspaceCache[key] = { + html: html, + ts: Date.now(), + }; + persistCache(); + setCachedIndicator(true, window.giaWorkspaceCache[key].ts); + } else { + setCachedIndicator(false, null); + } + if (window.htmx) { + window.htmx.process(pane); + } + if (operation === "draft_reply" && typeof window.giaWorkspaceUseDraft === "function") { + window.giaWorkspaceUseDraft(personId, operation, 0); + } + }) + .catch(function() { + pane.innerHTML = '<div class="notification is-danger is-light ai-animate-in">Failed to load AI response.</div>'; + }); + }; + + window.giaWorkspaceRefresh = function(pid) { + if (pid !== personId) { + return; + } + const current = (window.giaWorkspaceState[personId] && window.giaWorkspaceState[personId].current) || "summarise"; + window.giaWorkspaceRun(personId, current, true); + }; + + window.giaWorkspaceUseDraft = function(pid, operation, index) { + if (pid !== personId) { + return; + } + const host = document.getElementById("draft-host-" + personId + "-" + operation); + const optionCard = host ? host.querySelector('.draft-option-card[data-index="' + index + '"]') : null; + const option = optionCard ? optionCard.querySelector(".draft-text") : null; + if (!option) { + return; + } + const cards = host ? 
host.querySelectorAll(".draft-option-card") : []; + cards.forEach(function(el) { el.classList.remove("is-selected"); }); + if (optionCard) { + optionCard.classList.add("is-selected"); + } + host.dataset.selected = String(index); + const sendShell = document.getElementById("draft-send-shell-" + personId + "-" + operation); + const hiddenInput = document.getElementById("draft-send-input-" + personId + "-" + operation); + const preview = document.getElementById("draft-send-preview-" + personId + "-" + operation); + if (!sendShell || !hiddenInput || !preview) { + return; + } + hiddenInput.value = option.textContent.trim(); + preview.value = option.textContent.trim(); + applyForceSendState(operation); + sendShell.classList.remove("ai-animate-in"); + void sendShell.offsetWidth; + sendShell.classList.add("ai-animate-in"); + }; + + window.giaWorkspaceEnableSendOverride = function(pid, operation) { + if (pid !== personId) { + return; + } + window.giaWorkspaceState[personId] = window.giaWorkspaceState[personId] || {}; + window.giaWorkspaceState[personId].forceSend = true; + applyForceSendState(operation); + if (typeof window.giaEngageSyncSendOverride === "function") { + window.giaEngageSyncSendOverride(personId); + } + const overrideBtn = document.getElementById("draft-override-top-btn-" + personId); + if (overrideBtn) { + overrideBtn.classList.remove("is-warning"); + overrideBtn.classList.add("is-success"); + const labelNode = overrideBtn.querySelector("span:last-child"); + if (labelNode) { + labelNode.textContent = "Override Enabled"; + } + } + const statusHost = document.getElementById("draft-top-status-" + personId); + if (statusHost) { + statusHost.innerHTML = '<div class="notification is-success is-light" style="padding: 0.45rem 0.6rem;">Send override enabled for this pane.</div>'; + } + }; + + window.giaWorkspaceQueueSelectedDraft = function(pid) { + if (pid !== personId) { + return; + } + const queueUrl = widget.dataset.queueUrl; + const preview = document.getElementById("draft-send-preview-" + personId + "-draft_reply"); + const statusHost = document.getElementById("draft-top-status-" + personId); + const text = preview ? preview.value.trim() : ""; + if (!text) { + if (statusHost) { + statusHost.innerHTML = '<div class="notification is-warning is-light" style="padding: 0.45rem 0.6rem;">Select a draft first, then queue it.</div>'; + } + return; + } + const payload = new URLSearchParams(); + payload.append("draft_text", text); + fetch(queueUrl, { + method: "POST", + headers: { + "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", + "X-CSRFToken": "{{ csrf_token }}", + }, + body: payload.toString(), + }) + .then(function(resp) { return resp.text(); }) + .then(function(html) { + if (statusHost) { + statusHost.innerHTML = html; + } + }) + .catch(function() { + if (statusHost) { + statusHost.innerHTML = '<div class="notification is-danger is-light" style="padding: 0.45rem 0.6rem;">Failed to queue draft.</div>'; + } + }); + }; + + if (typeof window.giaMitigationShowTab !== "function") { + window.giaMitigationShowTab = function(pid, tabName) { + const names = ["plan_board", "corrections", "engage", "fundamentals", "auto", "ask_ai"]; + names.forEach(function(name) { + const pane = document.getElementById("mitigation-tab-" + pid + "-" + name); + const tab = document.getElementById("mitigation-tab-btn-" + pid + "-" + name); + if (!pane || !tab) { + return; + } + const active = (name === tabName); + pane.style.display = active ? 
"block" : "none"; + tab.classList.toggle("is-active", active); + }); + const shell = document.getElementById("mitigation-shell-" + pid); + if (!shell) { + return; + } + shell.querySelectorAll('input[name="active_tab"]').forEach(function(input) { + input.value = tabName; + }); + }; + } + + if (typeof window.giaMitigationToggleEdit !== "function") { + window.giaMitigationToggleEdit = function(button) { + const form = button ? button.closest("form") : null; + if (!form) { + return; + } + const editing = button.dataset.editState === "edit"; + const fields = form.querySelectorAll('[data-editable="1"]'); + if (!editing) { + fields.forEach(function(field) { + field.removeAttribute("readonly"); + }); + button.dataset.editState = "edit"; + button.textContent = "Save"; + button.classList.remove("is-light"); + } else { + form.requestSubmit(); + } + }; + } + + if (typeof window.giaEngageSetAction !== "function") { + window.giaEngageSetAction = function(pid, action) { + const actionInput = document.getElementById("engage-action-input-" + pid); + if (actionInput) { + actionInput.value = action; + } + }; + } + + if (typeof window.giaEngageAutoPreview !== "function") { + window.giaEngageAutoPreview = function(pid) { + const form = document.getElementById("engage-form-" + pid); + if (!form) { + return; + } + window.giaEngageSetAction(pid, "preview"); + form.requestSubmit(); + }; + } + + if (typeof window.giaEngageSelect !== "function") { + window.giaEngageSelect = function(pid, kind, value, node) { + let inputId = ""; + if (kind === "share") { + inputId = "engage-share-input-" + pid; + } else if (kind === "framing") { + inputId = "engage-framing-input-" + pid; + } + const input = inputId ? document.getElementById(inputId) : null; + if (input) { + input.value = value; + } + const li = node && node.closest ? node.closest("li") : null; + if (li && li.parentElement) { + Array.from(li.parentElement.children).forEach(function(child) { + child.classList.remove("is-active"); + }); + li.classList.add("is-active"); + } + window.giaEngageAutoPreview(pid); + }; + } + + window.giaWorkspaceMessageListeners = window.giaWorkspaceMessageListeners || {}; + const existingListener = window.giaWorkspaceMessageListeners[personId]; + if (existingListener) { + document.body.removeEventListener("gia-message-sent", existingListener); + } + const messageSentListener = function(evt) { + const detail = (evt && evt.detail) ? 
evt.detail : {}; + if (!detail || String(detail.person_id || "") !== personId) { + return; + } + appendOutgoingMessage( + Number(detail.ts || Date.now()), + String(detail.text || ""), + String(detail.author || "BOT") + ); + }; + document.body.addEventListener("gia-message-sent", messageSentListener); + window.giaWorkspaceMessageListeners[personId] = messageSentListener; + + window.giaWorkspaceRun(personId, "artifacts", false); + })(); +</script> diff --git a/core/templates/partials/ai-workspace-send-status.html b/core/templates/partials/ai-workspace-send-status.html new file mode 100644 index 0000000..3da8058 --- /dev/null +++ b/core/templates/partials/ai-workspace-send-status.html @@ -0,0 +1,3 @@ +<div class="notification is-{{ level }} is-light" style="padding: 0.55rem 0.75rem;"> + {{ message }} +</div> diff --git a/core/templates/partials/ai-workspace-widget.html b/core/templates/partials/ai-workspace-widget.html new file mode 100644 index 0000000..6746c80 --- /dev/null +++ b/core/templates/partials/ai-workspace-widget.html @@ -0,0 +1,54 @@ +<div class="ai-workspace-widget"> + <div class="columns is-mobile is-gapless"> + <div class="column is-12-mobile is-12-tablet"> + <div style="margin-bottom: 0.75rem; padding: 0.5rem 0.25rem; border-bottom: 1px solid rgba(0, 0, 0, 0.12);"> + <p class="is-size-7 has-text-weight-semibold">AI Workspace</p> + <h3 class="title is-6" style="margin-bottom: 0.5rem;">Choose A Contact</h3> + <p class="is-size-7"> + Pick a person to open their message timeline in a fresh pane. + </p> + </div> + + <form id="ai-window-form" style="margin-bottom: 0.75rem; padding: 0.5rem 0.25rem; border-bottom: 1px solid rgba(0, 0, 0, 0.12);"> + <label class="label is-small" for="id_limit">Window</label> + <div class="select is-fullwidth is-small"> + {{ window_form.limit }} + </div> + <p class="help">{{ window_form.limit.help_text }}</p> + </form> + + <div> + {% if contact_rows %} + <div class="buttons are-small" style="display: grid; gap: 0.5rem;"> + {% for row in contact_rows %} + <button + class="button is-fullwidth" + style="border-radius: 8px; border: 0; background: transparent; box-shadow: none; padding: 0;" + hx-get="{% url 'ai_workspace_person' type='widget' person_id=row.person.id %}" + hx-include="#ai-window-form" + hx-target="#widgets-here" + hx-swap="afterend"> + <span class="tags has-addons" style="display: inline-flex; width: 100%; margin: 0; white-space: nowrap;"> + <span class="tag is-dark" style="min-width: 2.5rem; justify-content: center;"> + <i class="fa-solid fa-comment-dots" aria-hidden="true"></i> + </span> + <span class="tag is-white" style="flex: 1; display: inline-flex; align-items: center; justify-content: space-between; gap: 0.75rem; padding-left: 0.7rem; padding-right: 0.7rem; border-top: 1px solid rgba(0, 0, 0, 0.2); border-bottom: 1px solid rgba(0, 0, 0, 0.2);"> + <span style="display: inline-flex; align-items: baseline; gap: 0.35rem; min-width: 0;"> + <strong>{{ row.person.name }}</strong> + </span> + {% if row.last_ts_label %} + <small style="padding-left: 0.5rem;">{{ row.last_ts_label }}</small> + {% endif %} + </span> + <span class="tag is-dark" style="min-width: 3.25rem; justify-content: center;">{{ row.message_count }}</span> + </span> + </button> + {% endfor %} + </div> + {% else %} + <p class="has-text-grey">No contacts available yet.</p> + {% endif %} + </div> + </div> + </div> +</div> diff --git a/core/templates/partials/group-list.html b/core/templates/partials/group-list.html index 68575d0..48819d4 100644 --- 
a/core/templates/partials/group-list.html +++ b/core/templates/partials/group-list.html @@ -21,11 +21,11 @@ <tr> <td> <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> - <span class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> + class="has-text-grey button nowrap-child" + onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> + <span class="icon" data-tooltip="Copy to clipboard"> + <i class="fa-solid fa-copy" aria-hidden="true"></i> + </span> </a> </td> <td>{{ item.user }}</td> diff --git a/core/templates/partials/manipulation-list.html b/core/templates/partials/manipulation-list.html index 2184daf..752287a 100644 --- a/core/templates/partials/manipulation-list.html +++ b/core/templates/partials/manipulation-list.html @@ -25,11 +25,11 @@ <tr> <td> <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> - <span class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> + class="has-text-grey button nowrap-child" + onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> + <span class="icon" data-tooltip="Copy to clipboard"> + <i class="fa-solid fa-copy" aria-hidden="true"></i> + </span> </a> </td> <td>{{ item.name }}</td> diff --git a/core/templates/partials/message-list.html b/core/templates/partials/message-list.html index 357e59c..7faf688 100644 --- a/core/templates/partials/message-list.html +++ b/core/templates/partials/message-list.html @@ -22,24 +22,24 @@ {% for item in object_list %} <tr> <td> - <a + <a class="has-text-grey button nowrap-child" onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> <span class="icon" data-tooltip="Copy to clipboard"> <i class="fa-solid fa-copy" aria-hidden="true"></i> </span> - </a> + </a> </td> <td>{{ item.session }}</td> <td>{{ item.ts }}</td> <td> - <a + <a class="has-text-grey button nowrap-child" onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.sender_uuid }}');"> <span class="icon" data-tooltip="Copy to clipboard"> <i class="fa-solid fa-copy" aria-hidden="true"></i> </span> - </a> + </a> </td> <td>{{ item.text }}</td> <td>{{ item.custom_author }}</td> diff --git a/core/templates/partials/person-list.html b/core/templates/partials/person-list.html index 28e9156..70792bb 100644 --- a/core/templates/partials/person-list.html +++ b/core/templates/partials/person-list.html @@ -22,11 +22,11 @@ <tr> <td> <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> - <span class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> + class="has-text-grey button nowrap-child" + onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> + <span class="icon" data-tooltip="Copy to clipboard"> + <i class="fa-solid fa-copy" aria-hidden="true"></i> + </span> </a> </td> <td>{{ item.name }}</td> diff --git a/core/templates/partials/persona-list.html b/core/templates/partials/persona-list.html index dd0e3b9..272221c 100644 --- a/core/templates/partials/persona-list.html +++ b/core/templates/partials/persona-list.html @@ -25,11 +25,11 @@ <tr> <td> <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> - <span 
class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> + class="has-text-grey button nowrap-child" + onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> + <span class="icon" data-tooltip="Copy to clipboard"> + <i class="fa-solid fa-copy" aria-hidden="true"></i> + </span> </a> </td> <td>{{ item.alias }}</td> diff --git a/core/templates/partials/queue-list.html b/core/templates/partials/queue-list.html index 48f99f0..28a2692 100644 --- a/core/templates/partials/queue-list.html +++ b/core/templates/partials/queue-list.html @@ -3,69 +3,93 @@ {% get_last_invalidation 'core.QueuedMessage' as last %} {% include 'mixins/partials/notify.html' %} {% cache 600 objects_queue request.user.id object_list type last %} - <table - class="table is-fullwidth is-hoverable" - hx-target="#{{ context_object_name }}-table" + <div id="{{ context_object_name }}-table" - hx-swap="outerHTML" hx-trigger="{{ context_object_name_singular }}Event from:body" - hx-get="{{ list_url }}"> - <thead> - <th>id</th> - <th>session</th> - <th>manipulation</th> - <th>ts</th> - <th>text</th> - <th>actions</th> - </thead> - {% for item in object_list %} - <tr> - <td> - <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> - <span class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> - </a> - </td> - <td>{{ item.session }}</td> - <td>{{ item.manipulation }}</td> - <td>{{ item.ts }}</td> - <td>{{ item.text.length }}</td> - <td> - <div class="buttons"> - <button - hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' - hx-get="{% url 'queue_update' type=type pk=item.id %}" - hx-trigger="click" - hx-target="#{{ type }}s-here" - hx-swap="innerHTML" - class="button"> - <span class="icon-text"> - <span class="icon"> - <i class="fa-solid fa-pencil"></i> - </span> - </span> - </button> - <button - hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' - hx-delete="{% url 'queue_delete' type=type pk=item.id %}" - hx-trigger="click" - hx-target="#modals-here" - hx-swap="innerHTML" - hx-confirm="Are you sure you wish to delete {{ item.id }}?" 
- class="button"> - <span class="icon-text"> - <span class="icon"> - <i class="fa-solid fa-xmark"></i> - </span> - </span> - </button> - </div> - </td> - </tr> - {% endfor %} + hx-get="{{ list_url }}" + hx-target="#{{ context_object_name }}-table" + hx-swap="outerHTML"> - </table> -{% endcache %} \ No newline at end of file + <div class="is-flex is-justify-content-space-between is-align-items-center" style="margin-bottom: 0.75rem; gap: 0.5rem; flex-wrap: wrap;"> + <div> + <h3 class="title is-6" style="margin-bottom: 0.15rem;">Outgoing Queue</h3> + <p class="is-size-7">Review queued drafts and approve or reject each message.</p> + </div> + <span class="tag is-dark is-medium">{{ object_list|length }} pending</span> + </div> + + {% if object_list %} + <div class="columns is-multiline" style="margin: 0 -0.35rem;"> + {% for item in object_list %} + <div class="column is-12" style="padding: 0.35rem;" id="queue-card-{{ item.id }}"> + <article class="box" style="padding: 0.75rem; border: 1px solid rgba(0, 0, 0, 0.14); box-shadow: none;"> + <div class="is-flex is-justify-content-space-between is-align-items-start" style="gap: 0.75rem; flex-wrap: wrap; margin-bottom: 0.5rem;"> + <div> + <p class="is-size-7 has-text-weight-semibold" style="margin-bottom: 0.2rem;">{{ item.session.identifier.person.name }}</p> + <div class="tags" style="margin-bottom: 0.2rem;"> + <span class="tag is-light">{{ item.session.identifier.service|title }}</span> + <span class="tag is-light">{{ item.manipulation.name }}</span> + <span class="tag is-light">{{ item.ts }}</span> + </div> + </div> + <div class="buttons are-small" style="margin: 0;"> + <button + class="button is-success is-light" + hx-get="{% url 'message_accept_api' message_id=item.id %}" + hx-swap="none" + _="on htmx:afterRequest if event.detail.successful remove #queue-card-{{ item.id }} then trigger {{ context_object_name_singular }}Event on body end"> + <span class="icon is-small"><i class="fa-solid fa-check"></i></span> + <span>Approve</span> + </button> + <button + class="button is-danger is-light" + hx-get="{% url 'message_reject_api' message_id=item.id %}" + hx-swap="none" + _="on htmx:afterRequest if event.detail.successful remove #queue-card-{{ item.id }} then trigger {{ context_object_name_singular }}Event on body end"> + <span class="icon is-small"><i class="fa-solid fa-xmark"></i></span> + <span>Reject</span> + </button> + </div> + </div> + + <div style="padding: 0.6rem; border-radius: 8px; border: 1px solid rgba(0, 0, 0, 0.12); background: rgba(255, 255, 255, 0.45); margin-bottom: 0.5rem;"> + <p style="white-space: pre-wrap; margin: 0;">{{ item.text|default:"(empty draft)" }}</p> + </div> + + <div class="is-flex is-justify-content-space-between is-align-items-center" style="gap: 0.5rem; flex-wrap: wrap;"> + <small class="has-text-grey">Queue ID: {{ item.id }}</small> + <div class="buttons are-small" style="margin: 0;"> + <button + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-get="{% url 'queue_update' type=type pk=item.id %}" + hx-trigger="click" + hx-target="#{{ type }}s-here" + hx-swap="innerHTML" + class="button is-light"> + <span class="icon is-small"><i class="fa-solid fa-pen"></i></span> + <span>Edit</span> + </button> + <button + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-delete="{% url 'queue_delete' type=type pk=item.id %}" + hx-trigger="click" + hx-target="#modals-here" + hx-swap="innerHTML" + hx-confirm="Delete queued message {{ item.id }}?" 
+ class="button is-light"> + <span class="icon is-small"><i class="fa-solid fa-trash"></i></span> + <span>Delete</span> + </button> + </div> + </div> + </article> + </div> + {% endfor %} + </div> + {% else %} + <article class="box" style="padding: 0.8rem; border: 1px dashed rgba(0, 0, 0, 0.25); box-shadow: none;"> + <p class="is-size-7 has-text-grey">Queue is empty.</p> + </article> + {% endif %} + </div> +{% endcache %} diff --git a/core/templates/partials/session-list.html b/core/templates/partials/session-list.html index ca4edcc..ca121b6 100644 --- a/core/templates/partials/session-list.html +++ b/core/templates/partials/session-list.html @@ -19,13 +19,13 @@ {% for item in object_list %} <tr> <td> - <a + <a class="has-text-grey button nowrap-child" onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.id }}');"> <span class="icon" data-tooltip="Copy to clipboard"> <i class="fa-solid fa-copy" aria-hidden="true"></i> </span> - </a> + </a> </td> <td>{{ item.identifier }}</td> <td>{{ item.last_interaction }}</td> diff --git a/core/templates/partials/signal-accounts.html b/core/templates/partials/signal-accounts.html index eb4a2b6..7214b8a 100644 --- a/core/templates/partials/signal-accounts.html +++ b/core/templates/partials/signal-accounts.html @@ -85,38 +85,38 @@ </table> <form - hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' - hx-post="{% url 'signal_account_add' type=type %}" - hx-target="#modals-here" - hx-swap="innerHTML"> - {% csrf_token %} - <div class="field has-addons"> - <div id="device" class="control is-expanded has-icons-left"> - <input - hx-post="{% url 'signal_account_add' type=type %}" - hx-target="#widgets-here" - hx-swap="innerHTML" - name="device" - class="input" - type="text" - placeholder="Account name"> - <span class="icon is-small is-left"> - <i class="fa-solid fa-plus"></i> - </span> - </div> - <div class="control"> - <div class="field"> - <button - id="search" - class="button is-fullwidth" + hx-headers='{"X-CSRFToken": "{{ csrf_token }}"}' + hx-post="{% url 'signal_account_add' type=type %}" + hx-target="#modals-here" + hx-swap="innerHTML"> + {% csrf_token %} + <div class="field has-addons"> + <div id="device" class="control is-expanded has-icons-left"> + <input hx-post="{% url 'signal_account_add' type=type %}" - hx-trigger="click" hx-target="#widgets-here" - hx-swap="innerHTML"> - Add account - </button> + hx-swap="innerHTML" + name="device" + class="input" + type="text" + placeholder="Account name"> + <span class="icon is-small is-left"> + <i class="fa-solid fa-plus"></i> + </span> + </div> + <div class="control"> + <div class="field"> + <button + id="search" + class="button is-fullwidth" + hx-post="{% url 'signal_account_add' type=type %}" + hx-trigger="click" + hx-target="#widgets-here" + hx-swap="innerHTML"> + Add account + </button> + </div> </div> </div> - </div> -</form> + </form> {% endcache %} \ No newline at end of file diff --git a/core/templates/partials/signal-chats-list.html b/core/templates/partials/signal-chats-list.html index 9ea5c0e..485d084 100644 --- a/core/templates/partials/signal-chats-list.html +++ b/core/templates/partials/signal-chats-list.html @@ -19,14 +19,14 @@ <tr> <td>{{ item.source_number }}</td> <td> - <a + <a class="has-text-grey button nowrap-child" onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.source_uuid }}');"> <span class="icon" data-tooltip="Copy to clipboard"> <i class="fa-solid fa-copy" aria-hidden="true"></i> </span> - </a> - </td> + </a> + </td> <td>{{ item.account }}</td> <td>{{ 
item.source_name }}</td> <td> diff --git a/core/templates/partials/signal-contacts-list.html b/core/templates/partials/signal-contacts-list.html index 17942f4..c33a201 100644 --- a/core/templates/partials/signal-contacts-list.html +++ b/core/templates/partials/signal-contacts-list.html @@ -7,52 +7,52 @@ {% if object_list is not None %} <table - class="table is-fullwidth is-hoverable"> - <thead> - <th>name</th> - <th>number</th> - <th>uuid</th> - <th>verified</th> - <th>blocked</th> - </thead> - {% for item in object_list.contacts %} - <tr> - <td>{{ item.name }}</td> - <td>{{ item.number }}</td> - <td> - <a - class="has-text-grey button nowrap-child" - onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.uuid }}');"> - <span class="icon" data-tooltip="Copy to clipboard"> - <i class="fa-solid fa-copy" aria-hidden="true"></i> - </span> - </a> - </td> - <td> - {% if item.identity.status == "TRUSTED_VERIFIED" %} - <span class="icon"> - <i class="fa-solid fa-check"></i> - </span> - {% else %} - <span class="icon"> - <i class="fa-solid fa-xmark"></i> - </span> - {% endif %} - </td> - <td> - {% if item.blocked %} - <span class="icon"> - <i class="fa-solid fa-check"></i> - </span> - {% else %} - <span class="icon"> - <i class="fa-solid fa-xmark"></i> - </span> - {% endif %} - </td> - </tr> - {% endfor %} + class="table is-fullwidth is-hoverable"> + <thead> + <th>name</th> + <th>number</th> + <th>uuid</th> + <th>verified</th> + <th>blocked</th> + </thead> + {% for item in object_list.contacts %} + <tr> + <td>{{ item.name }}</td> + <td>{{ item.number }}</td> + <td> + <a + class="has-text-grey button nowrap-child" + onclick="window.prompt('Copy to clipboard: Ctrl+C, Enter', '{{ item.uuid }}');"> + <span class="icon" data-tooltip="Copy to clipboard"> + <i class="fa-solid fa-copy" aria-hidden="true"></i> + </span> + </a> + </td> + <td> + {% if item.identity.status == "TRUSTED_VERIFIED" %} + <span class="icon"> + <i class="fa-solid fa-check"></i> + </span> + {% else %} + <span class="icon"> + <i class="fa-solid fa-xmark"></i> + </span> + {% endif %} + </td> + <td> + {% if item.blocked %} + <span class="icon"> + <i class="fa-solid fa-check"></i> + </span> + {% else %} + <span class="icon"> + <i class="fa-solid fa-xmark"></i> + </span> + {% endif %} + </td> + </tr> + {% endfor %} - </table> + </table> {% endif %} {% endcache %} \ No newline at end of file diff --git a/core/views/ais.py b/core/views/ais.py index fd2f57c..56f4a85 100644 --- a/core/views/ais.py +++ b/core/views/ais.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from core.forms import AIForm from core.models import AI @@ -13,11 +7,12 @@ from core.util import logs log = logs.get_logger(__name__) + class AIList(LoginRequiredMixin, ObjectList): list_template = "partials/ai-list.html" model = AI page_title = "AIs" - #page_subtitle = "Add times here in order to permit trading." + # page_subtitle = "Add times here in order to permit trading." 
list_url_name = "ais" list_url_args = ["type"] diff --git a/core/views/groups.py b/core/views/groups.py index 7d1f34b..74e4b88 100644 --- a/core/views/groups.py +++ b/core/views/groups.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from core.forms import GroupForm from core.models import Group @@ -13,6 +7,7 @@ from core.util import logs log = logs.get_logger(__name__) + class GroupList(LoginRequiredMixin, ObjectList): list_template = "partials/group-list.html" model = Group @@ -39,4 +34,4 @@ class GroupUpdate(LoginRequiredMixin, ObjectUpdate): class GroupDelete(LoginRequiredMixin, ObjectDelete): - model = Group \ No newline at end of file + model = Group diff --git a/core/views/identifiers.py b/core/views/identifiers.py index 7670ef8..41d9db4 100644 --- a/core/views/identifiers.py +++ b/core/views/identifiers.py @@ -1,12 +1,14 @@ from django.contrib.auth.mixins import LoginRequiredMixin -from mixins.views import AbortSave, ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from django.db import IntegrityError +from mixins.views import AbortSave, ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate + from core.forms import PersonIdentifierForm -from core.models import PersonIdentifier, Person +from core.models import Person, PersonIdentifier from core.util import logs log = logs.get_logger(__name__) + class IdentifierPermissionMixin: def set_extra_args(self, user): self.extra_permission_args = { @@ -14,6 +16,7 @@ class IdentifierPermissionMixin: "person__pk": self.kwargs["person"], } + class PersonIdentifierList(LoginRequiredMixin, IdentifierPermissionMixin, ObjectList): list_template = "partials/identifier-list.html" model = PersonIdentifier @@ -26,7 +29,9 @@ class PersonIdentifierList(LoginRequiredMixin, IdentifierPermissionMixin, Object submit_url_args = ["type", "person"] -class PersonIdentifierCreate(LoginRequiredMixin, IdentifierPermissionMixin, ObjectCreate): +class PersonIdentifierCreate( + LoginRequiredMixin, IdentifierPermissionMixin, ObjectCreate +): model = PersonIdentifier form_class = PersonIdentifierForm @@ -52,7 +57,10 @@ class PersonIdentifierCreate(LoginRequiredMixin, IdentifierPermissionMixin, Obje log.error(f"Person {self.kwargs['person']} does not exist") raise AbortSave("person does not exist or you don't have access") -class PersonIdentifierUpdate(LoginRequiredMixin, IdentifierPermissionMixin, ObjectUpdate): + +class PersonIdentifierUpdate( + LoginRequiredMixin, IdentifierPermissionMixin, ObjectUpdate +): model = PersonIdentifier form_class = PersonIdentifierForm @@ -60,5 +68,7 @@ class PersonIdentifierUpdate(LoginRequiredMixin, IdentifierPermissionMixin, Obje submit_url_args = ["type", "pk", "person"] -class PersonIdentifierDelete(LoginRequiredMixin, IdentifierPermissionMixin, ObjectDelete): - model = PersonIdentifier \ No newline at end of file +class PersonIdentifierDelete( + LoginRequiredMixin, IdentifierPermissionMixin, ObjectDelete +): + model = PersonIdentifier diff --git a/core/views/manipulations.py b/core/views/manipulations.py index 7b21f36..df7b9ce 100644 --- a/core/views/manipulations.py +++ b/core/views/manipulations.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, 
ObjectUpdate from core.forms import ManipulationForm from core.models import Manipulation @@ -13,6 +7,7 @@ from core.util import logs log = logs.get_logger(__name__) + class ManipulationList(LoginRequiredMixin, ObjectList): list_template = "partials/manipulation-list.html" model = Manipulation @@ -39,4 +34,4 @@ class ManipulationUpdate(LoginRequiredMixin, ObjectUpdate): class ManipulationDelete(LoginRequiredMixin, ObjectDelete): - model = Manipulation \ No newline at end of file + model = Manipulation diff --git a/core/views/messages.py b/core/views/messages.py index 8862423..31161e5 100644 --- a/core/views/messages.py +++ b/core/views/messages.py @@ -1,12 +1,14 @@ from django.contrib.auth.mixins import LoginRequiredMixin -from mixins.views import AbortSave, ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from django.db import IntegrityError +from mixins.views import AbortSave, ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate + from core.forms import MessageForm from core.models import Message from core.util import logs log = logs.get_logger(__name__) + class MessagePermissionMixin: def set_extra_args(self, user): self.extra_permission_args = { @@ -14,6 +16,7 @@ class MessagePermissionMixin: "session__pk": self.kwargs["session"], } + class MessageList(LoginRequiredMixin, MessagePermissionMixin, ObjectList): list_template = "partials/message-list.html" model = Message @@ -52,6 +55,7 @@ class MessageCreate(LoginRequiredMixin, MessagePermissionMixin, ObjectCreate): log.error(f"Session {self.kwargs['session']} does not exist") raise AbortSave("session does not exist or you don't have access") + class MessageUpdate(LoginRequiredMixin, MessagePermissionMixin, ObjectUpdate): model = Message form_class = MessageForm @@ -62,5 +66,3 @@ class MessageUpdate(LoginRequiredMixin, MessagePermissionMixin, ObjectUpdate): class MessageDelete(LoginRequiredMixin, MessagePermissionMixin, ObjectDelete): model = Message - - diff --git a/core/views/people.py b/core/views/people.py index f2f5bd7..f58ce6a 100644 --- a/core/views/people.py +++ b/core/views/people.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from core.forms import PersonForm from core.models import Person @@ -13,11 +7,12 @@ from core.util import logs log = logs.get_logger(__name__) + class PersonList(LoginRequiredMixin, ObjectList): list_template = "partials/person-list.html" model = Person page_title = "People" - #page_subtitle = "Add times here in order to permit trading." + # page_subtitle = "Add times here in order to permit trading." 
list_url_name = "people" list_url_args = ["type"] diff --git a/core/views/personas.py b/core/views/personas.py index 0809430..2de57f2 100644 --- a/core/views/personas.py +++ b/core/views/personas.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from core.forms import PersonaForm from core.models import Persona @@ -13,6 +7,7 @@ from core.util import logs log = logs.get_logger(__name__) + class PersonaList(LoginRequiredMixin, ObjectList): list_template = "partials/persona-list.html" model = Persona @@ -39,4 +34,4 @@ class PersonaUpdate(LoginRequiredMixin, ObjectUpdate): class PersonaDelete(LoginRequiredMixin, ObjectDelete): - model = Persona \ No newline at end of file + model = Persona diff --git a/core/views/queues.py b/core/views/queues.py index 444b0a8..6a34c56 100644 --- a/core/views/queues.py +++ b/core/views/queues.py @@ -1,56 +1,63 @@ -from rest_framework.views import APIView +from asgiref.sync import async_to_sync from django.contrib.auth.mixins import LoginRequiredMixin - -from rest_framework import status - +from django.db import transaction from django.http import HttpResponse -from core.models import QueuedMessage, Message +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate +from rest_framework import status +from rest_framework.views import APIView + +from core.clients import signalapi from core.forms import QueueForm +from core.models import Message, QueuedMessage +from core.util import logs -import requests -import orjson -from django.conf import settings -import redis -import msgpack - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) - -# def start_typing(uuid): -# url = f"http://signal:8080/v1/typing_indicator/{settings.SIGNAL_NUMBER}" -# data = { -# "recipient": uuid, -# } - -# response = requests.put(url, json=data) - -# def stop_typing(uuid): -# url = f"http://signal:8080/v1/typing_indicator/{settings.SIGNAL_NUMBER}" -# data = { -# "recipient": uuid, -# } - -# response = requests.delete(url, json=data) - -r = redis.from_url("unix://var/run/gia-redis.sock", db=10) +log = logs.get_logger("queue") class AcceptMessageAPI(LoginRequiredMixin, APIView): def get(self, request, message_id): - to_submit = { - "type": "def", - "method": "accept_message", - "user_id": request.user.id, - "message_id": message_id, - } - packed = msgpack.packb(to_submit, use_bin_type=True) - r.publish("processing", packed) + try: + queued = QueuedMessage.objects.select_related( + "session", + "session__identifier", + "session__user", + ).get( + user=request.user, + id=message_id, + ) + except QueuedMessage.DoesNotExist: + return HttpResponse(status=status.HTTP_404_NOT_FOUND) + + if queued.session.identifier.service != "signal": + log.warning( + "Queue accept failed: unsupported service '%s' for queued message %s", + queued.session.identifier.service, + queued.id, + ) + return HttpResponse(status=status.HTTP_400_BAD_REQUEST) + + ts = async_to_sync(signalapi.send_message_raw)( + queued.session.identifier.identifier, + queued.text or "", + [], + ) + if not ts: + log.error("Queue accept send failed for queued message %s", queued.id) + return HttpResponse(status=status.HTTP_502_BAD_GATEWAY) + + with transaction.atomic(): + Message.objects.create( + user=queued.session.user, + session=queued.session, + custom_author=queued.custom_author or 
"BOT", + text=queued.text, + ts=ts, + ) + queued.delete() + return HttpResponse(status=status.HTTP_200_OK) + class RejectMessageAPI(LoginRequiredMixin, APIView): def get(self, request, message_id): try: @@ -64,11 +71,12 @@ class RejectMessageAPI(LoginRequiredMixin, APIView): message.delete() return HttpResponse(status=status.HTTP_200_OK) - + + class QueueList(LoginRequiredMixin, ObjectList): list_template = "partials/queue-list.html" model = QueuedMessage - page_title = "Queues" + page_title = "Queue" list_url_name = "queues" list_url_args = ["type"] diff --git a/core/views/sessions.py b/core/views/sessions.py index 27afa3c..9cda79e 100644 --- a/core/views/sessions.py +++ b/core/views/sessions.py @@ -1,11 +1,5 @@ from django.contrib.auth.mixins import LoginRequiredMixin - -from mixins.views import ( - ObjectCreate, - ObjectDelete, - ObjectList, - ObjectUpdate, -) +from mixins.views import ObjectCreate, ObjectDelete, ObjectList, ObjectUpdate from core.forms import SessionForm from core.models import ChatSession @@ -13,11 +7,12 @@ from core.util import logs log = logs.get_logger(__name__) + class SessionList(LoginRequiredMixin, ObjectList): list_template = "partials/session-list.html" model = ChatSession page_title = "Chat Sessions" - #page_subtitle = "Add times here in order to permit trading." + # page_subtitle = "Add times here in order to permit trading." list_url_name = "sessions" list_url_args = ["type"] @@ -41,4 +36,3 @@ class SessionUpdate(LoginRequiredMixin, ObjectUpdate): class SessionDelete(LoginRequiredMixin, ObjectDelete): model = ChatSession - diff --git a/core/views/signal.py b/core/views/signal.py index f868000..f9c9018 100644 --- a/core/views/signal.py +++ b/core/views/signal.py @@ -1,24 +1,28 @@ -from core.views.manage.permissions import SuperUserRequiredMixin -from django.views import View -from django.shortcuts import render import base64 -from core.models import Chat -from mixins.views import ObjectRead, ObjectList -import requests import orjson +import requests +from django.shortcuts import render +from django.views import View +from mixins.views import ObjectList, ObjectRead + +from core.models import Chat +from core.views.manage.permissions import SuperUserRequiredMixin + class CustomObjectRead(ObjectRead): def post(self, request, *args, **kwargs): self.request = request return super().get(request, *args, **kwargs) + class Signal(SuperUserRequiredMixin, View): template_name = "pages/signal.html" def get(self, request): return render(request, self.template_name) + class SignalAccounts(SuperUserRequiredMixin, ObjectList): list_template = "partials/signal-accounts.html" @@ -36,6 +40,7 @@ class SignalAccounts(SuperUserRequiredMixin, ObjectList): return accounts + class SignalContactsList(SuperUserRequiredMixin, ObjectList): list_template = "partials/signal-contacts-list.html" @@ -45,7 +50,6 @@ class SignalContactsList(SuperUserRequiredMixin, ObjectList): list_url_name = "signal_contacts" list_url_args = ["type", "pk"] - def get_queryset(self, *args, **kwargs): # url = signal:8080/v1/accounts # /v1/configuration/{number}/settings @@ -67,13 +71,14 @@ class SignalContactsList(SuperUserRequiredMixin, ObjectList): contact["identity"] = identity obj = { - #"identity": identity, + # "identity": identity, "contacts": contacts, } self.extra_context = {"pretty": list(obj.keys())} return obj + class SignalChatsList(SuperUserRequiredMixin, ObjectList): list_template = "partials/signal-chats-list.html" @@ -82,15 +87,17 @@ class SignalChatsList(SuperUserRequiredMixin, ObjectList): 
list_url_name = "signal_chats" list_url_args = ["type", "pk"] - + def get_queryset(self, *args, **kwargs): pk = self.kwargs.get("pk", "") object_list = Chat.objects.filter(account=pk) return object_list + class SignalMessagesList(SuperUserRequiredMixin, ObjectList): ... + class SignalAccountAdd(SuperUserRequiredMixin, CustomObjectRead): detail_template = "partials/signal-account-add.html" @@ -107,7 +114,7 @@ class SignalAccountAdd(SuperUserRequiredMixin, CustomObjectRead): device_name = form_args["device"] url = f"http://signal:8080/v1/qrcodelink?device_name={device_name}" response = requests.get(url) - image_bytes = response.content + image_bytes = response.content base64_image = base64.b64encode(image_bytes).decode("utf-8") - return base64_image \ No newline at end of file + return base64_image diff --git a/core/views/workspace.py b/core/views/workspace.py new file mode 100644 index 0000000..3629231 --- /dev/null +++ b/core/views/workspace.py @@ -0,0 +1,2864 @@ +from datetime import datetime, timezone +import json +import re + +from asgiref.sync import async_to_sync +from django.contrib.auth.mixins import LoginRequiredMixin +from django.http import HttpResponseBadRequest +from django.shortcuts import get_object_or_404, render +from django.utils import timezone as dj_timezone +from django.views import View + +from core.forms import AIWorkspaceWindowForm +from core.lib.notify import raw_sendmsg +from core.messaging import ai as ai_runner +from core.messaging.utils import messages_to_string +from core.models import ( + AI, + AIRequest, + AIResult, + ChatSession, + Message, + MessageEvent, + Manipulation, + PatternArtifactExport, + PatternMitigationAutoSettings, + PatternMitigationCorrection, + PatternMitigationGame, + PatternMitigationMessage, + PatternMitigationPlan, + PatternMitigationRule, + Person, + PersonIdentifier, + QueuedMessage, + WorkspaceConversation, +) + +SEND_ENABLED_MODES = {"active", "instant"} +OPERATION_LABELS = { + "summarise": "Summarise", + "draft_reply": "Draft Reply", + "extract_patterns": "Extract Patterns", + "artifacts": "Plan", +} +MITIGATION_TABS = { + "plan_board", + "corrections", + "engage", + "fundamentals", + "ask_ai", + "auto", +} + + +def _format_unix_ms(ts): + if not ts: + return "" + dt = datetime.fromtimestamp(ts / 1000, tz=timezone.utc) + return dt.strftime("%Y-%m-%d %H:%M UTC") + + +def _infer_direction(message, person_identifiers): + """ + Infer message direction relative to workspace owner. + """ + sender = message.sender_uuid or "" + if sender and sender in person_identifiers: + return "in" + return "out" + + +def _get_send_state(user, person): + """ + Resolve current send capability from user's enabled manipulations for this person. 
+ """ + manipulations = ( + Manipulation.objects.filter( + user=user, + enabled=True, + group__people=person, + ) + .select_related("group") + .distinct() + ) + if not manipulations.exists(): + return { + "can_send": False, + "level": "warning", + "text": "Sending is blocked: no enabled manipulation matched this recipient.", + "manipulation_id": None, + } + + send_manip = manipulations.filter(mode__in=SEND_ENABLED_MODES).first() + if send_manip: + return { + "can_send": True, + "level": "success", + "text": f"Enabled by manipulation '{send_manip.name}' ({send_manip.mode}).", + "manipulation_id": str(send_manip.id), + } + + mode_list = ", ".join(sorted({(m.mode or "unset") for m in manipulations})) + return { + "can_send": False, + "level": "warning", + "text": f"Sending is blocked by active mode(s): {mode_list}.", + "manipulation_id": None, + } + + +def _get_queue_manipulation(user, person): + """ + Resolve a manipulation for queue entries: + prefer send-enabled, otherwise any enabled manipulation in recipient groups. + """ + matched = ( + Manipulation.objects.filter( + user=user, + enabled=True, + group__people=person, + ) + .select_related("group") + .distinct() + ) + return matched.filter(mode__in=SEND_ENABLED_MODES).first() or matched.first() + + +def _resolve_person_identifier(user, person): + """ + Resolve the best identifier for outbound share/send operations. + Prefer Signal identifier, then fallback to any identifier. + """ + return ( + PersonIdentifier.objects.filter( + user=user, + person=person, + service="signal", + ).first() + or PersonIdentifier.objects.filter(user=user, person=person).first() + ) + + +def _is_truthy(value): + return str(value or "").strip().lower() in {"1", "true", "on", "yes"} + + +def _sanitize_active_tab(value, default="plan_board"): + tab = (value or "").strip() + if tab in MITIGATION_TABS: + return tab + return default + + +def _parse_draft_options(result_text): + """ + Parse model output into labeled draft options shown simultaneously in UI. + """ + content = (result_text or "").strip() + if not content: + return [] + + def clean_option_text(value): + value = (value or "").strip(" \n\r\t*:") + # Strip surrounding quotes when the whole option is wrapped. + if len(value) >= 2 and ((value[0] == '"' and value[-1] == '"') or (value[0] == "'" and value[-1] == "'")): + value = value[1:-1].strip() + return value + + def dedupe_by_label(seq): + ordered = [] + seen = set() + for item in seq: + label = (item.get("label") or "").strip().title() + text = clean_option_text(item.get("text") or "") + if not label or not text: + continue + key = label.lower() + if key in seen: + continue + seen.add(key) + ordered.append({"label": label, "text": text}) + return ordered + + # Primary parser: line-based labeled blocks. + # Accepts: + # - Soft/Neutral/Firm + # - optional Tone/Response/Reply suffix + # - optional markdown bold markers + # - content on same line or subsequent lines + block_re = re.compile( + r"(?ims)^\s*(?:[-*]\s*)?(?:\*\*)?\s*(Soft|Neutral|Firm)\s*" + r"(?:(?:Tone|Response|Reply))?\s*:?\s*(?:\*\*)?\s*" + r"(.*?)(?=^\s*(?:[-*]\s*)?(?:\*\*)?\s*(?:Soft|Neutral|Firm)\s*" + r"(?:(?:Tone|Response|Reply))?\s*:?\s*(?:\*\*)?\s*|\Z)" + ) + options = [ + {"label": match.group(1).strip().title(), "text": match.group(2)} + for match in block_re.finditer(content) + ] + options = dedupe_by_label(options) + if options: + return options[:3] + + # Secondary parser: inline labeled segments in one paragraph. 
+ inline_re = re.compile( + r"(?is)\b(Soft|Neutral|Firm)\s*(?:(?:Tone|Response|Reply))?\s*:\s*(.*?)" + r"(?=\b(?:Soft|Neutral|Firm)\s*(?:(?:Tone|Response|Reply))?\s*:|$)" + ) + options = [ + {"label": match.group(1).strip().title(), "text": match.group(2)} + for match in inline_re.finditer(content) + ] + options = dedupe_by_label(options) + if options: + return options[:3] + + # Secondary parser: Option 1/2/3 blocks. + option_split_re = re.compile(r"(?im)^\s*Option\s+\d+\s*$") + chunks = [chunk.strip() for chunk in option_split_re.split(content) if chunk.strip()] + parsed = [] + prefix_re = re.compile(r"(?im)^(?:\*\*)?\s*(Soft|Neutral|Firm)\s*(?:Tone|Response|Reply)?\s*:?\s*(?:\*\*)?\s*") + for idx, chunk in enumerate(chunks, start=1): + label = f"Option {idx}" + prefix_match = prefix_re.match(chunk) + if prefix_match: + label = prefix_match.group(1).strip().title() + chunk = prefix_re.sub("", chunk, count=1).strip(" \n\r\t*:") + if chunk: + parsed.append({"label": label, "text": chunk}) + if parsed: + return dedupe_by_label(parsed)[:3] + + # Final fallback: use first non-empty paragraphs. + paragraphs = [para.strip() for para in re.split(r"\n\s*\n", content) if para.strip()] + return dedupe_by_label([ + {"label": f"Option {idx}", "text": para} + for idx, para in enumerate(paragraphs[:3], start=1) + ]) + + +def _extract_seed_entities_from_context(raw_context): + """ + Heuristic extractor for pasted long-form frameworks. + Returns candidate fundamentals/rules/games without model dependency. + """ + text = (raw_context or "").strip() + if not text: + return {"fundamentals": [], "rules": [], "games": []} + + lines = [line.strip() for line in text.splitlines()] + fundamentals = [] + rules = [] + games = [] + + current_rule = None + current_game = None + in_core_principle = False + in_quick_cheat = False + + def flush_rule(): + nonlocal current_rule + if not current_rule: + return + title = current_rule.get("title", "").strip() + content = " ".join(current_rule.get("body", [])).strip() + if title and content: + rules.append({"title": title, "content": content}) + elif title: + rules.append({"title": title, "content": title}) + current_rule = None + + def flush_game(): + nonlocal current_game + if not current_game: + return + title = current_game.get("title", "").strip() + instructions = " ".join(current_game.get("body", [])).strip() + if title and instructions: + games.append({"title": title, "instructions": instructions}) + elif title: + games.append({"title": title, "instructions": title}) + current_game = None + + for line in lines: + if not line: + flush_rule() + flush_game() + in_core_principle = False + in_quick_cheat = False + continue + + if re.match(r"^SECTION\s+\d+", line, re.IGNORECASE): + in_core_principle = False + in_quick_cheat = False + flush_rule() + flush_game() + continue + + if re.match(r"^CORE PRINCIPLE", line, re.IGNORECASE): + in_core_principle = True + in_quick_cheat = False + continue + + if re.match(r"^QUICK CHEAT SHEET", line, re.IGNORECASE): + in_quick_cheat = True + in_core_principle = False + continue + + rule_match = re.match(r"^Rule\s+(\d+)\s*:\s*(.+)$", line, re.IGNORECASE) + game_match = re.match(r"^Game\s+(\d+)\s*:\s*(.+)$", line, re.IGNORECASE) + mantra_match = re.match(r"^Mantra\s*:\s*(.+)$", line, re.IGNORECASE) + + if rule_match: + flush_rule() + flush_game() + title = rule_match.group(2).strip() + current_rule = {"title": title, "body": []} + if rule_match.group(1) in {"1", "11"} and title: + fundamentals.append(title) + continue + + if game_match: + 
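+            # A "Game N: <title>" heading opens a new game block; later non-blank
+            # lines accumulate into its body until a blank line or heading flushes it.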
flush_rule() + flush_game() + title = game_match.group(2).strip() + current_game = {"title": title, "body": []} + continue + + if mantra_match: + fundamentals.append(mantra_match.group(1).strip()) + continue + + if in_core_principle and len(line) <= 120 and ":" not in line: + fundamentals.append(line) + continue + + if in_quick_cheat: + quick_line = re.sub(r"^\s*(?:[-*]|\d+\.)\s*", "", line).strip() + if quick_line and len(quick_line) <= 120 and not quick_line.lower().startswith("if you want"): + fundamentals.append(quick_line) + continue + + if "Emotional safety > Accuracy > Analysis" in line: + fundamentals.append("Emotional safety > Accuracy > Analysis") + continue + + if current_rule: + current_rule["body"].append(line) + continue + if current_game: + current_game["body"].append(line) + continue + + flush_rule() + flush_game() + + # Keep order, remove duplicates. + def dedupe_strings(seq): + seen = set() + out = [] + for item in seq: + key = item.strip().lower() + if not key or key in seen: + continue + seen.add(key) + out.append(item.strip()) + return out + + def dedupe_dicts(seq): + seen = set() + out = [] + for item in seq: + title = (item.get("title") or "").strip() + key = title.lower() + if not key or key in seen: + continue + seen.add(key) + out.append(item) + return out + + return { + "fundamentals": dedupe_strings(fundamentals)[:20], + "rules": dedupe_dicts(rules)[:40], + "games": dedupe_dicts(games)[:40], + } + + +def _merge_seed_entities(artifacts, seed): + merged = dict(artifacts or {}) + seed = seed or {} + + fundamentals = list(merged.get("fundamental_items") or []) + fundamentals = list(dict.fromkeys(fundamentals + list(seed.get("fundamentals") or []))) + merged["fundamental_items"] = fundamentals + + def merge_artifact_list(existing, injected, body_key): + existing = list(existing or []) + injected = list(injected or []) + seen = {(item.get("title") or "").strip().lower() for item in existing} + for item in injected: + title = (item.get("title") or "").strip() + body = (item.get(body_key) or "").strip() + if not title or not body: + continue + key = title.lower() + if key in seen: + continue + existing.append({"title": title, body_key: body}) + seen.add(key) + return existing + + merged["rules"] = merge_artifact_list(merged.get("rules"), seed.get("rules"), "content") + merged["games"] = merge_artifact_list(merged.get("games"), seed.get("games"), "instructions") + return merged + + +def _normalize_markdown_titles(text): + """ + Minimal markdown cleanup: + - convert '**Title:**' style lines into markdown headings so Bulma headers can style them. + """ + out = [] + for line in (text or "").splitlines(): + match = re.match(r"^\s*\*\*(.+?)\*\*\s*:?\s*$", line) + if match: + out.append(f"## {match.group(1).strip()}") + else: + out.append(line) + return "\n".join(out) + + +def _clean_inline_markdown(value): + value = re.sub(r"\*\*(.*?)\*\*", r"\1", value) + value = re.sub(r"\*(.*?)\*", r"\1", value) + value = re.sub(r"`(.*?)`", r"\1", value) + return value.strip() + + +def _append_block(section, block_type, values): + if not values: + return + section["blocks"].append({"type": block_type, "items": values}) + + +def _parse_result_sections(result_text): + """ + Minimal markdown-ish parser used by UI: + - '#/##/### Title' become section headers + - bullet lines become lists + - remaining lines are grouped as paragraphs + + Returned structure is template-safe (no raw HTML). 
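+
+    Example shape (values illustrative):
+        [{"title": "Rules", "level": 2,
+          "blocks": [{"type": "ul", "items": ["Pause before replying"]}]}]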
+ """ + text = _normalize_markdown_titles(result_text or "") + lines = text.splitlines() + + sections = [] + current = {"title": "Output", "level": 3, "blocks": []} + paragraph = [] + bullets = [] + + def flush_paragraph(): + nonlocal paragraph + if paragraph: + _append_block(current, "p", [" ".join(paragraph)]) + paragraph = [] + + def flush_bullets(): + nonlocal bullets + if bullets: + _append_block(current, "ul", bullets) + bullets = [] + + def flush_section(force=False): + if force or current["blocks"]: + sections.append(current.copy()) + + for raw_line in lines: + line = raw_line.rstrip() + heading_match = re.match(r"^\s*(#{1,6})\s+(.+?)\s*$", line) + if heading_match: + flush_paragraph() + flush_bullets() + flush_section() + level = len(heading_match.group(1)) + title = _clean_inline_markdown(heading_match.group(2)) + current = {"title": title or "Section", "level": level, "blocks": []} + continue + + bullet_match = re.match(r"^\s*(?:[-*]|\d+\.)\s+(.+?)\s*$", line) + if bullet_match: + flush_paragraph() + bullets.append(_clean_inline_markdown(bullet_match.group(1))) + continue + + if not line.strip(): + flush_paragraph() + flush_bullets() + continue + + flush_bullets() + paragraph.append(_clean_inline_markdown(line)) + + flush_paragraph() + flush_bullets() + flush_section(force=True) + + cleaned = [sec for sec in sections if sec.get("blocks")] + if cleaned: + return cleaned + + fallback = _clean_inline_markdown(result_text or "") + return [{"title": "Output", "level": 3, "blocks": [{"type": "p", "items": [fallback]}]}] + + +def _extract_json_object(raw): + text = (raw or "").strip() + if not text: + return None + + try: + parsed = json.loads(text) + if isinstance(parsed, dict): + return parsed + except Exception: + pass + + start = text.find("{") + if start == -1: + return None + + depth = 0 + end = None + for index, char in enumerate(text[start:], start=start): + if char == "{": + depth += 1 + elif char == "}": + depth -= 1 + if depth == 0: + end = index + 1 + break + if end is None: + return None + + try: + parsed = json.loads(text[start:end]) + if isinstance(parsed, dict): + return parsed + except Exception: + return None + return None + + +def _section_lines(section): + lines = [] + for block in section.get("blocks", []): + lines.extend([item for item in block.get("items", []) if item]) + return lines + + +def _shape_artifacts_for_profile(rules, games, output_profile): + """ + Apply lightweight profile shaping for generated mitigation artifacts. 
+ """ + profile = (output_profile or "framework").strip().lower() + if profile in {"rules", "rule"}: + return rules[:12], games[:2] + if profile in {"games", "game"}: + return rules[:3], games[:12] + # framework: balanced + return rules[:10], games[:10] + + +def _default_artifacts_from_patterns(result_text, person, output_profile="framework"): + sections = _parse_result_sections(result_text) + rules = [] + games = [] + + for section in sections: + title = (section.get("title") or "").lower() + lines = _section_lines(section) + if not lines: + continue + + if "rule" in title or "next-step" in title or "mitigation" in title: + for idx, line in enumerate(lines, start=1): + rules.append({"title": f"Rule {idx}", "content": line}) + elif "game" in title or "protocol" in title: + for idx, line in enumerate(lines, start=1): + games.append({"title": f"Game {idx}", "instructions": line}) + + if not rules: + rules = [ + { + "title": "Safety Before Analysis", + "content": "Prioritize reducing emotional escalation before introducing analysis.", + }, + { + "title": "State Matching", + "content": "If either side is flooded, pause first and resume with a time-bound return.", + }, + ] + + if not games: + games = [ + { + "title": "Two-Turn Pause", + "instructions": "Limit conflict responses to two short turns, then pause with a clear return time.", + }, + { + "title": "Mirror Then Ask", + "instructions": "Mirror what you heard, validate emotion, then ask whether comfort or solutions are wanted.", + }, + ] + + rules, games = _shape_artifacts_for_profile(rules, games, output_profile) + + return { + "title": f"{person.name} Pattern Mitigation", + "objective": "Reduce repeated friction loops while preserving trust and clarity.", + "fundamental_items": [], + "rules": rules, + "games": games, + "corrections": [], + } + + +def _build_mitigation_artifacts(ai_obj, person, source_text, creation_mode, inspiration, fundamentals, output_profile): + fallback = _default_artifacts_from_patterns(source_text, person, output_profile) + + if not ai_obj: + if fundamentals: + fallback["fundamental_items"] = fundamentals + return fallback + + prompt = [ + { + "role": "system", + "content": ( + "You design practical relationship mitigation protocols. " + "Return strict JSON only with keys: title, objective, fundamental_items, rules, games. " + "Each rule item must have title and content. " + "Each game item must have title and instructions. " + "If mode is auto, choose strongest artifacts. If mode is guided, strongly follow inspiration. " + "Output profile controls emphasis: framework (balanced), rules (rules-first), games (games-first)." 
+ ), + }, + { + "role": "user", + "content": ( + f"Person: {person.name}\n" + f"Mode: {creation_mode}\n" + f"Output profile: {output_profile}\n" + f"User inspiration: {inspiration or 'None'}\n" + f"Fundamental items (pre-agreed): {json.dumps(fundamentals)}\n\n" + f"Pattern analysis:\n{source_text}" + ), + }, + ] + + try: + raw = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj) + except Exception: + raw = "" + + parsed = _extract_json_object(raw) or {} + + title = (parsed.get("title") or "").strip() or fallback["title"] + objective = (parsed.get("objective") or "").strip() or fallback["objective"] + + parsed_fundamentals = parsed.get("fundamental_items") + if isinstance(parsed_fundamentals, list): + merged_fundamentals = [str(item).strip() for item in parsed_fundamentals if str(item).strip()] + else: + merged_fundamentals = [] + if fundamentals: + merged_fundamentals = list(dict.fromkeys(fundamentals + merged_fundamentals)) + + raw_rules = parsed.get("rules") + rules = [] + if isinstance(raw_rules, list): + for item in raw_rules: + if not isinstance(item, dict): + continue + title_i = str(item.get("title") or "").strip() + content_i = str(item.get("content") or "").strip() + if title_i and content_i: + rules.append({"title": title_i, "content": content_i}) + + raw_games = parsed.get("games") + games = [] + if isinstance(raw_games, list): + for item in raw_games: + if not isinstance(item, dict): + continue + title_i = str(item.get("title") or "").strip() + instructions_i = str(item.get("instructions") or "").strip() + if title_i and instructions_i: + games.append({"title": title_i, "instructions": instructions_i}) + + if not rules: + rules = fallback["rules"] + if not games: + games = fallback["games"] + rules, games = _shape_artifacts_for_profile(rules, games, output_profile) + + return { + "title": title, + "objective": objective, + "fundamental_items": merged_fundamentals, + "rules": rules, + "games": games, + "corrections": [], + } + + +def _serialize_export_payload(plan, artifact_type, export_format): + rules = list(plan.rules.order_by("created_at").values("title", "content", "enabled")) + games = list(plan.games.order_by("created_at").values("title", "instructions", "enabled")) + corrections = list(plan.corrections.order_by("created_at").values("title", "clarification", "enabled")) + + body = { + "protocol_version": "artifact-v1", + "plan_id": str(plan.id), + "plan_title": plan.title, + "objective": plan.objective, + "fundamental_items": plan.fundamental_items or [], + "rules": rules, + "games": games, + "corrections": corrections, + } + + if artifact_type == "rules": + body = { + **body, + "games": [], + "corrections": [], + } + elif artifact_type == "games": + body = { + **body, + "rules": [], + "corrections": [], + } + elif artifact_type == "corrections": + body = { + **body, + "rules": [], + "games": [], + } + + if export_format == "json": + payload = json.dumps(body, indent=2) + else: + lines = [ + f"# {plan.title or 'Pattern Mitigation Artifact'}", + "", + "Protocol: artifact-v1", + f"Artifact Type: {artifact_type}", + "", + "## Objective", + plan.objective or "(none)", + "", + "## Fundamental Items", + ] + fundamentals = plan.fundamental_items or [] + if fundamentals: + lines.extend([f"- {item}" for item in fundamentals]) + else: + lines.append("- (none)") + + if artifact_type in {"rulebook", "rules"}: + lines.append("") + lines.append("## Rules") + if rules: + for idx, rule in enumerate(rules, start=1): + lines.append(f"{idx}. 
**{rule['title']}** - {rule['content']}") + else: + lines.append("- (none)") + + if artifact_type in {"rulebook", "games"}: + lines.append("") + lines.append("## Games") + if games: + for idx, game in enumerate(games, start=1): + lines.append(f"{idx}. **{game['title']}** - {game['instructions']}") + else: + lines.append("- (none)") + + if artifact_type in {"rulebook", "corrections"}: + lines.append("") + lines.append("## Corrections") + if corrections: + for idx, correction in enumerate(corrections, start=1): + lines.append(f"{idx}. **{correction['title']}** - {correction['clarification']}") + else: + lines.append("- (none)") + + payload = "\n".join(lines) + + meta = { + "rule_count": len(rules), + "game_count": len(games), + "correction_count": len(corrections), + "fundamental_count": len(plan.fundamental_items or []), + } + return payload, meta + + +def _conversation_for_person(user, person): + conversation, _ = WorkspaceConversation.objects.get_or_create( + user=user, + platform_type="signal", + title=f"{person.name} Workspace", + defaults={"platform_thread_id": str(person.id)}, + ) + conversation.participants.add(person) + return conversation + + +def _parse_fundamentals(raw_text): + lines = [] + for line in (raw_text or "").splitlines(): + cleaned = line.strip() + if cleaned: + lines.append(cleaned) + return lines + + +def _engage_source_options(plan): + options = [] + for rule in plan.rules.order_by("created_at"): + options.append( + { + "value": f"rule:{rule.id}", + "label": f"Rule: {rule.title}", + } + ) + for game in plan.games.order_by("created_at"): + options.append( + { + "value": f"game:{game.id}", + "label": f"Game: {game.title}", + } + ) + for correction in plan.corrections.order_by("created_at"): + options.append( + { + "value": f"correction:{correction.id}", + "label": f"Correction: {correction.title}", + } + ) + return options + + +def _normalize_correction_title(value, fallback="Correction"): + cleaned = re.sub(r"\s+", " ", str(value or "").strip()) + cleaned = cleaned.strip("\"'` ") + if not cleaned: + return fallback + # Capitalize each lexical token for consistent correction naming. 
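+    # e.g. "broken trust repair" -> "Broken Trust Repair" (illustrative input);
+    # short all-caps tokens such as "AI" are preserved as-is.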
+ words = [] + for token in cleaned.split(" "): + if not token: + continue + if token.isupper() and len(token) <= 4: + words.append(token) + else: + words.append(token[:1].upper() + token[1:]) + return " ".join(words) + + +def _build_engage_payload( + source_obj, + source_kind, + share_target, + framing, + context_note, + owner_name, + recipient_name, +): + share_key = (share_target or "self").strip().lower() + framing_key = (framing or "dont_change").strip().lower() + if share_key not in {"self", "other", "both"}: + share_key = "self" + if framing_key != "shared": + framing_key = "dont_change" + + artifact_type_label = { + "rule": "Rule", + "game": "Game", + "correction": "Correction", + }.get(source_kind, (source_kind or "Artifact").title()) + artifact_name_raw = (getattr(source_obj, "title", None) or f"{artifact_type_label} Item").strip() + artifact_name = ( + _normalize_correction_title(artifact_name_raw, fallback=f"{artifact_type_label} Item") + if source_kind == "correction" + else artifact_name_raw + ) + + if source_kind == "rule": + insight_text = source_obj.content.strip() or source_obj.title.strip() + elif source_kind == "game": + insight_text = source_obj.instructions.strip() or source_obj.title.strip() + else: + insight_text = source_obj.clarification.strip() or source_obj.title.strip() + + owner_label = (owner_name or "You").strip() + recipient_label = (recipient_name or "Other").strip() + + def _clean_text(value): + cleaned = re.sub(r"\s+", " ", (value or "").strip()) + cleaned = cleaned.strip("\"' ") + cleaned = re.sub(r"^\s*#{1,6}\s*", "", cleaned) + cleaned = re.sub(r"\*\*(.*?)\*\*", r"\1", cleaned) + cleaned = re.sub(r"__(.*?)__", r"\1", cleaned) + cleaned = re.sub(r"`(.*?)`", r"\1", cleaned) + cleaned = re.sub(r"^[\-*•]\s*", "", cleaned) + cleaned = re.sub(r"^\d+[.)]\s*", "", cleaned) + return cleaned.strip() + + def _split_sentences(value): + parts = [] + for line in (value or "").splitlines(): + line = _clean_text(line) + if not line: + continue + for piece in re.split(r"(?<=[.!?;])\s+", line.strip()): + piece = piece.strip() + if piece: + parts.append(piece) + return parts + + def _expand_shorthand_tokens(value): + text = value or "" + + alias_map = {} + for name in [owner_label, recipient_label]: + lowered = name.lower() + if lowered in {"you", "we", "us", "our", "i", "me", "other"}: + continue + initial = lowered[:1] + if initial and initial not in alias_map: + alias_map[initial] = name + + # Expand any known initial shorthand before modal verbs. 
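+        # e.g. with a participant named "Maya" (hypothetical), "m should pause first"
+        # becomes "Maya should pause first"; quoted markers like ('m') are handled below.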
+ for initial, name in alias_map.items(): + text = re.sub( + rf"(?i)\b{re.escape(initial)}\s+(?=(?:should|will|must|can|need to|needs to|have to|has to|am|are|is|was|were)\b)", + f"{name} ", + text, + ) + + def replace_quoted_marker(match): + marker = match.group(1) or "" + lower_marker = marker.strip().lower() + replacement = marker + if lower_marker in alias_map: + replacement = alias_map[lower_marker] + elif lower_marker in {"you", "we", "i"}: + replacement = "both parties" + return f"('{replacement}')" + + text = re.sub( + r"\(\s*['\"]([A-Za-z]{1,8})['\"]\s*\)", + replace_quoted_marker, + text, + ) + return text + + def _fix_shared_grammar(value): + text = value + replacements = [ + (r"(?i)\bwe needs to\b", "we need to"), + (r"(?i)\bwe has to\b", "we have to"), + (r"(?i)\bwe is\b", "we are"), + (r"(?i)\bwe was\b", "we were"), + (r"(?i)\bus needs to\b", "we need to"), + (r"(?i)\bus need to\b", "we need to"), + (r"(?i)\bus is\b", "we are"), + (r"(?i)\bus are\b", "we are"), + (r"(?i)\bwe does not\b", "we do not"), + (r"(?i)\bwe doesn't\b", "we don't"), + (r"(?i)\bwe says\b", "we say"), + (r"(?i)\bwe responds\b", "we respond"), + (r"(?i)\bwe follows\b", "we follow"), + ] + for pattern, replacement in replacements: + text = re.sub(pattern, replacement, text) + return re.sub(r"\s+", " ", text).strip() + + def _rewrite_shared_sentence(sentence): + text = _clean_text(sentence) + if not text: + return "" + + punctuation = "." + if text[-1] in ".!?": + punctuation = text[-1] + text = text[:-1].strip() + text = _clean_text(text) + if not text: + return "" + + # Shared-only edge-case logic. + text = _expand_shorthand_tokens(text) + shared_replacements = [ + (r"\bi['’]m\b", "we're"), + (r"\bi['’]ve\b", "we've"), + (r"\bi['’]ll\b", "we'll"), + (r"\bi['’]d\b", "we'd"), + (r"\byou['’]re\b", "we're"), + (r"\byou['’]ve\b", "we've"), + (r"\byou['’]ll\b", "we'll"), + (r"\byou['’]d\b", "we'd"), + (r"\bmy\b", "our"), + (r"\bmine\b", "ours"), + (r"\bmyself\b", "ourselves"), + (r"\byour\b", "our"), + (r"\byours\b", "ours"), + (r"\byourself\b", "ourselves"), + (r"\byourselves\b", "ourselves"), + (r"\bi\b", "we"), + (r"\bme\b", "us"), + (r"\byou\b", "we"), + (r"\bhis\b", "our"), + (r"\bher\b", "our"), + (r"\btheir\b", "our"), + (r"\bhim\b", "us"), + (r"\bthem\b", "us"), + (r"\bhe\b", "we"), + (r"\bshe\b", "we"), + (r"\bthey\b", "we"), + ] + for pattern, replacement in shared_replacements: + text = re.sub(pattern, replacement, text, flags=re.IGNORECASE) + + for name in [owner_label, recipient_label]: + name = str(name or "").strip() + if not name: + continue + if name.lower() in {"you", "we", "us", "our", "i", "me", "other"}: + continue + text = re.sub( + r"(?<!\w)" + re.escape(name) + r"'s(?!\w)", + "our", + text, + flags=re.IGNORECASE, + ) + text = re.sub( + r"(?<!\w)" + re.escape(name) + r"(?!\w)", + "we", + text, + flags=re.IGNORECASE, + ) + + text = _fix_shared_grammar(text).rstrip(".!?").strip() + if not text: + return "" + if not re.match( + r"(?i)^we\s+(?:should|will|must|can|need to|have to|do not|don't|are|were|[a-z]+)\b", + text, + ): + lowered = text[:1].lower() + text[1:] if len(text) > 1 else text.lower() + text = f"We should {lowered}" + text = text[:1].upper() + text[1:] + return f"{text}{punctuation}" + + def _rewrite_shared_text(value): + sentences = _split_sentences(value) + if not sentences: + return "" + adapted = [_rewrite_shared_sentence(sentence) for sentence in sentences] + adapted = [part for part in adapted if part] + return " ".join(adapted).strip() + + preview_lines = [] + 
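+    # Both lists receive identical content below, so the operator preview mirrors
+    # the outbound text exactly.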
outbound_lines = [] + + def _format_artifact_message(lines): + lines = [ + f"**{artifact_name}** ({artifact_type_label})", + "", + "Guidance:", + ] + [line.strip() for line in (lines or []) if (line or "").strip()] + if lines[-1] == "Guidance:": + lines.append("No guidance text available.") + return "\n".join(lines).strip() + + if framing_key == "shared": + shared_line = _rewrite_shared_text(insight_text) + preview_lines = [shared_line] + outbound_lines = [shared_line] + else: + unchanged = _clean_text(insight_text) or (insight_text or "").strip() + preview_lines = [unchanged] + outbound_lines = [unchanged] + + preview = _format_artifact_message(preview_lines) + outbound = _format_artifact_message(outbound_lines) + + # Context note is metadata for the operator and should not alter shared outbound text. + _ = (context_note or "").strip() + + return { + "preview": preview, + "outbound": outbound, + "share_target": share_key, + "framing": framing_key, + } + + +def _get_or_create_auto_settings(user, conversation): + settings_obj, _ = PatternMitigationAutoSettings.objects.get_or_create( + user=user, + conversation=conversation, + ) + return settings_obj + + +def _detect_violation_candidates(plan, recent_rows): + candidates = [] + for row in recent_rows: + text = (row.get("text") or "").strip() + if not text: + continue + upper_ratio = ( + (sum(1 for c in text if c.isupper()) / max(1, sum(1 for c in text if c.isalpha()))) + if any(c.isalpha() for c in text) + else 0 + ) + if upper_ratio > 0.6 and len(text) > 10: + candidates.append( + { + "title": "Escalated tone spike", + "source_phrase": text[:500], + "clarification": "Rephrase into one direct request and one feeling statement.", + "severity": "medium", + } + ) + lowered = text.lower() + if "you always" in lowered or "you never" in lowered: + candidates.append( + { + "title": "Absolute framing", + "source_phrase": text[:500], + "clarification": "Replace absolutes with one concrete example and a bounded request.", + "severity": "medium", + } + ) + return candidates + + +def _normalize_violation_items(raw_items): + normalized = [] + seen = set() + for item in raw_items or []: + title = _normalize_correction_title(item.get("title") or "", fallback="Correction") + phrase = str(item.get("source_phrase") or "").strip() + clarification = str(item.get("clarification") or item.get("correction") or "").strip() + severity = str(item.get("severity") or "medium").strip().lower() + if severity not in {"low", "medium", "high"}: + severity = "medium" + if not title or not clarification: + continue + key = _correction_signature(title, clarification) + if key in seen: + continue + seen.add(key) + normalized.append( + { + "title": title[:255], + "source_phrase": phrase[:1000], + "clarification": clarification[:2000], + "severity": severity, + } + ) + return normalized + + +def _normalize_correction_text(value): + cleaned = re.sub(r"\s+", " ", str(value or "").strip()) + cleaned = cleaned.strip("\"'` ") + cleaned = cleaned.rstrip(" .;:") + return cleaned + + +def _correction_signature(title, clarification): + """ + Normalized key used to deduplicate corrections before persisting. + Source phrase is intentionally excluded so the same correction guidance + cannot be stored repeatedly with minor phrase variations. 
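+
+    Example (illustrative): ("  Absolute Framing ", "Replace absolutes.") maps to
+    ("absolute framing", "replace absolutes").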
+ """ + normalized_title = _normalize_correction_text(title).lower() + normalized_clarification = _normalize_correction_text(clarification).lower() + return (normalized_title, normalized_clarification) + + +def _existing_correction_signatures(plan, exclude_id=None): + query = plan.corrections.all() + if exclude_id is not None: + query = query.exclude(id=exclude_id) + signatures = set() + for row in query.values("title", "clarification"): + signatures.add( + _correction_signature( + row.get("title") or "", + row.get("clarification") or "", + ) + ) + return signatures + + +def _ai_detect_violations(user, plan, person, recent_rows): + ai_obj = AI.objects.filter(user=user).first() + if ai_obj is None: + return [] + + rules_payload = [ + {"id": str(rule.id), "title": rule.title, "content": rule.content} + for rule in plan.rules.filter(enabled=True).order_by("created_at")[:30] + ] + games_payload = [ + {"id": str(game.id), "title": game.title, "instructions": game.instructions} + for game in plan.games.filter(enabled=True).order_by("created_at")[:30] + ] + corrections_payload = [ + { + "id": str(correction.id), + "title": correction.title, + "source_phrase": correction.source_phrase, + "clarification": correction.clarification, + } + for correction in plan.corrections.filter(enabled=True).order_by("created_at")[:30] + ] + source_payload = { + "person": person.name, + "plan": { + "id": str(plan.id), + "title": plan.title, + "objective": plan.objective, + "fundamentals": plan.fundamental_items or [], + "rules": rules_payload, + "games": games_payload, + "corrections": corrections_payload, + }, + "recent_messages": recent_rows, + "output_schema": { + "violations": [ + { + "title": "short string", + "source_phrase": "exact snippet from recent_messages", + "clarification": "correction-style guidance", + "severity": "low|medium|high", + } + ] + }, + } + prompt = [ + { + "role": "system", + "content": ( + "You detect violations of mitigation patterns in a conversation. " + "Return strict JSON only. No markdown. No prose wrapper. " + "Use only schema keys requested." 
+ ), + }, + { + "role": "user", + "content": json.dumps(source_payload, ensure_ascii=False), + }, + ] + try: + raw = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj) + except Exception: + return [] + + parsed = _extract_json_object(raw) or {} + return _normalize_violation_items(parsed.get("violations") or []) + + +def _maybe_send_auto_notification(user, auto_settings, title, body): + topic_override = (auto_settings.ntfy_topic_override or "").strip() + if topic_override: + raw_sendmsg( + body, + title=title, + url=(auto_settings.ntfy_url_override or None), + topic=topic_override, + ) + return + user.sendmsg(body, title=title) + + +def _run_auto_analysis_for_plan(user, person, conversation, plan, auto_settings, trigger="manual"): + if not auto_settings.enabled: + return { + "ran": False, + "summary": "Automation is disabled.", + "violations": [], + "created_corrections": 0, + "notified": False, + } + + if trigger == "auto" and not auto_settings.auto_pattern_recognition: + return { + "ran": False, + "summary": "Automatic pattern recognition is disabled.", + "violations": [], + "created_corrections": 0, + "notified": False, + } + + now = dj_timezone.now() + if trigger == "auto" and auto_settings.last_run_at and auto_settings.check_cooldown_seconds: + elapsed = (now - auto_settings.last_run_at).total_seconds() + if elapsed < auto_settings.check_cooldown_seconds: + return { + "ran": False, + "summary": "Skipped: cooldown active.", + "violations": [], + "created_corrections": 0, + "notified": False, + } + + limit = max(10, min(int(auto_settings.sample_message_window or 40), 200)) + sessions = ChatSession.objects.filter(user=user, identifier__person=person) + messages = ( + Message.objects.filter(user=user, session__in=sessions) + .order_by("-ts") + .values("id", "ts", "sender_uuid", "text")[:limit] + ) + recent_rows = [] + for row in reversed(list(messages)): + recent_rows.append( + { + "id": str(row["id"]), + "ts": row["ts"], + "sender_uuid": row["sender_uuid"] or "", + "text": row["text"] or "", + } + ) + if not recent_rows: + auto_settings.last_result_summary = "No recent messages available for automation." 
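+ # Record the attempt even when the window is empty so the auto-trigger
+ # cooldown (last_run_at + check_cooldown_seconds) still applies.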
+ auto_settings.last_run_at = now + auto_settings.save(update_fields=["last_result_summary", "last_run_at", "updated_at"]) + return { + "ran": True, + "summary": auto_settings.last_result_summary, + "violations": [], + "created_corrections": 0, + "notified": False, + } + + latest_message_ts = recent_rows[-1]["ts"] + if trigger == "auto" and auto_settings.last_checked_event_ts and latest_message_ts <= auto_settings.last_checked_event_ts: + return { + "ran": False, + "summary": "Skipped: no new messages since last check.", + "violations": [], + "created_corrections": 0, + "notified": False, + } + + ai_candidates = _ai_detect_violations(user, plan, person, recent_rows) + heuristic_candidates = _detect_violation_candidates(plan, recent_rows) + violations = _normalize_violation_items(ai_candidates + heuristic_candidates) + + created_corrections = 0 + if auto_settings.auto_create_corrections and violations: + existing_signatures = _existing_correction_signatures(plan) + for item in violations[:8]: + signature = _correction_signature(item["title"], item["clarification"]) + if signature in existing_signatures: + continue + PatternMitigationCorrection.objects.create( + user=user, + plan=plan, + title=item["title"], + source_phrase=item["source_phrase"], + clarification=item["clarification"], + perspective="second_person", + share_target="both", + language_style="adapted", + enabled=True, + ) + existing_signatures.add(signature) + created_corrections += 1 + + notified = False + if auto_settings.auto_notify_enabled and violations: + title = f"[GIA] Auto pattern alerts for {person.name}" + preview = "\n".join( + [f"- {item['title']}: {item['clarification']}" for item in violations[:3]] + ) + body = ( + f"Detected {len(violations)} potential mitigation violations.\n" + f"Created corrections: {created_corrections}\n\n" + f"{preview}" + ) + _maybe_send_auto_notification(user, auto_settings, title, body) + notified = True + + summary = ( + f"Auto analysis ran on {len(recent_rows)} messages. " + f"Detected {len(violations)} candidates. " + f"Created {created_corrections} corrections." 
+ ) + auto_settings.last_result_summary = summary + auto_settings.last_run_at = now + auto_settings.last_checked_event_ts = latest_message_ts + auto_settings.save( + update_fields=[ + "last_result_summary", + "last_run_at", + "last_checked_event_ts", + "updated_at", + ] + ) + return { + "ran": True, + "summary": summary, + "violations": violations, + "created_corrections": created_corrections, + "notified": notified, + } + + +def _create_baseline_mitigation_plan(user, person, conversation, source_text=""): + artifacts = _default_artifacts_from_patterns( + source_text or f"{person.name} baseline mitigation", + person, + output_profile="framework", + ) + plan = PatternMitigationPlan.objects.create( + user=user, + conversation=conversation, + source_ai_result=None, + title=artifacts.get("title") or f"{person.name} Pattern Mitigation", + objective=artifacts.get("objective") or "", + fundamental_items=artifacts.get("fundamental_items") or [], + creation_mode="auto", + status="draft", + ) + for rule in artifacts.get("rules", []): + PatternMitigationRule.objects.create( + user=user, + plan=plan, + title=str(rule.get("title") or "Rule").strip()[:255], + content=str(rule.get("content") or "").strip(), + ) + for game in artifacts.get("games", []): + PatternMitigationGame.objects.create( + user=user, + plan=plan, + title=str(game.get("title") or "Game").strip()[:255], + instructions=str(game.get("instructions") or "").strip(), + ) + PatternMitigationMessage.objects.create( + user=user, + plan=plan, + role="system", + text="Baseline plan auto-created by automation settings.", + ) + return plan + + +def _mitigation_panel_context( + person, + plan, + notice_message="", + notice_level="info", + export_record=None, + engage_preview="", + engage_preview_flash=False, + engage_form=None, + active_tab="plan_board", + auto_settings=None, +): + engage_form = engage_form or {} + engage_options = _engage_source_options(plan) + selected_ref = engage_form.get("source_ref") or (engage_options[0]["value"] if engage_options else "") + auto_settings = auto_settings or _get_or_create_auto_settings(plan.user, plan.conversation) + return { + "person": person, + "plan": plan, + "rules": plan.rules.order_by("created_at"), + "games": plan.games.order_by("created_at"), + "corrections": plan.corrections.order_by("created_at"), + "fundamentals_text": "\n".join(plan.fundamental_items or []), + "mitigation_messages": plan.messages.order_by("created_at")[:40], + "latest_export": export_record, + "notice_message": notice_message, + "notice_level": notice_level, + "engage_preview": engage_preview, + "engage_preview_flash": engage_preview_flash, + "engage_options": engage_options, + "engage_form": { + "source_ref": selected_ref, + "share_target": engage_form.get("share_target") or "self", + "framing": engage_form.get("framing") or "dont_change", + "context_note": engage_form.get("context_note") or "", + }, + "send_state": _get_send_state(plan.user, person), + "active_tab": _sanitize_active_tab(active_tab), + "auto_settings": auto_settings, + } + + +def _latest_plan_bundle(conversation): + latest_plan = conversation.mitigation_plans.order_by("-updated_at").first() + latest_plan_rules = latest_plan.rules.order_by("created_at") if latest_plan else [] + latest_plan_games = latest_plan.games.order_by("created_at") if latest_plan else [] + latest_plan_corrections = latest_plan.corrections.order_by("created_at") if latest_plan else [] + latest_plan_messages = latest_plan.messages.order_by("created_at")[:40] if latest_plan else [] + 
latest_plan_export = latest_plan.exports.order_by("-created_at").first() if latest_plan else None + latest_auto_settings = _get_or_create_auto_settings(conversation.user, conversation) + return { + "latest_plan": latest_plan, + "latest_plan_rules": latest_plan_rules, + "latest_plan_games": latest_plan_games, + "latest_plan_corrections": latest_plan_corrections, + "latest_plan_messages": latest_plan_messages, + "latest_plan_export": latest_plan_export, + "latest_auto_settings": latest_auto_settings, + } + + +class AIWorkspace(LoginRequiredMixin, View): + template_name = "pages/ai-workspace.html" + + def get(self, request): + return render(request, self.template_name) + + +class AIWorkspaceContactsWidget(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def _contact_rows(self, user): + rows = [] + people = Person.objects.filter(user=user).order_by("name") + for person in people: + sessions = ChatSession.objects.filter(user=user, identifier__person=person) + message_qs = Message.objects.filter(user=user, session__in=sessions) + last_message = message_qs.order_by("-ts").first() + rows.append( + { + "person": person, + "message_count": message_qs.count(), + "last_text": (last_message.text or "")[:120] if last_message else "", + "last_ts": last_message.ts if last_message else None, + "last_ts_label": _format_unix_ms(last_message.ts) if last_message else "", + } + ) + rows.sort(key=lambda row: row["last_ts"] or 0, reverse=True) + return rows + + def get(self, request, type): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + context = { + "title": "AI Workspace", + "unique": "ai-workspace-contacts", + "window_content": "partials/ai-workspace-widget.html", + "widget_options": 'gs-w="4" gs-h="14" gs-x="0" gs-y="0" gs-min-w="3"', + "contact_rows": self._contact_rows(request.user), + "window_form": AIWorkspaceWindowForm(request.GET or None), + } + return render(request, "mixins/wm/widget.html", context) + + +class AIWorkspacePersonWidget(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def _message_rows(self, user, person, limit): + sessions = ChatSession.objects.filter(user=user, identifier__person=person) + identifiers = set( + PersonIdentifier.objects.filter(user=user, person=person).values_list("identifier", flat=True) + ) + messages = ( + Message.objects.filter(user=user, session__in=sessions) + .select_related("session", "session__identifier") + .order_by("-ts")[:limit] + ) + + rows = [] + for message in reversed(list(messages)): + inferred_direction = _infer_direction(message, identifiers) + rows.append( + { + "message": message, + "direction": inferred_direction, + "ts_label": _format_unix_ms(message.ts), + } + ) + return rows + + def _recent_messages(self, user, person, limit): + sessions = ChatSession.objects.filter(user=user, identifier__person=person) + messages = ( + Message.objects.filter(user=user, session__in=sessions) + .select_related("session", "session__identifier") + .order_by("-ts")[:limit] + ) + return list(reversed(list(messages))) + + def get(self, request, type, person_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + try: + limit = int(request.GET.get("limit", 20)) + except (TypeError, ValueError): + limit = 20 + limit = max(5, min(limit, 200)) + + context = { + "title": f"{person.name} Timeline", + "unique": f"ai-person-{person.id}", + "window_content": 
"partials/ai-workspace-person-widget.html", + "widget_options": 'gs-w="7" gs-h="16" gs-x="0" gs-y="0" gs-min-w="4"', + "person": person, + "limit": limit, + "message_rows": self._message_rows(request.user, person, limit), + "ai_operations": [ + ("artifacts", "Plan"), + ("summarise", "Summary"), + ("draft_reply", "Draft"), + ("extract_patterns", "Patterns"), + ], + "send_state": _get_send_state(request.user, person), + } + return render(request, "mixins/wm/widget.html", context) + + +class AIWorkspaceRunOperation(LoginRequiredMixin, View): + allowed_types = {"widget"} + allowed_operations = {"artifacts", "summarise", "draft_reply", "extract_patterns"} + + def _ensure_message_events(self, user, conversation, person_identifiers, messages): + """ + Materialize workspace MessageEvent rows from legacy Message rows and + return ordered event IDs for the selected window. + """ + event_ids = [] + for message in messages: + legacy_id = str(message.id) + event = MessageEvent.objects.filter( + user=user, + conversation=conversation, + raw_payload_ref__legacy_message_id=legacy_id, + ).first() + if event is None: + event = MessageEvent.objects.create( + user=user, + conversation=conversation, + source_system="signal", + ts=message.ts, + direction=_infer_direction(message, person_identifiers), + sender_uuid=message.sender_uuid or "", + text=message.text or "", + attachments=[], + raw_payload_ref={"legacy_message_id": legacy_id}, + ) + else: + # Keep event fields in sync if upstream message rows changed. + update_fields = [] + new_direction = _infer_direction(message, person_identifiers) + if event.ts != message.ts: + event.ts = message.ts + update_fields.append("ts") + if event.direction != new_direction: + event.direction = new_direction + update_fields.append("direction") + if event.sender_uuid != (message.sender_uuid or ""): + event.sender_uuid = message.sender_uuid or "" + update_fields.append("sender_uuid") + if event.text != (message.text or ""): + event.text = message.text or "" + update_fields.append("text") + if update_fields: + event.save(update_fields=update_fields) + event_ids.append(str(event.id)) + return event_ids + + def _build_prompt(self, operation, person, transcript, user_notes): + notes = (user_notes or "").strip() + if operation == "draft_reply": + instruction = ( + "Generate 3 concise reply options in different tones: soft, neutral, firm. " + "Return plain text with clear section labels." + ) + elif operation == "extract_patterns": + instruction = ( + "Extract recurring interaction patterns, friction loops, and practical next-step rules. " + "Keep it actionable and concise." + ) + else: + instruction = ( + "Summarize this conversation window with key points, emotional state shifts, and open loops." 
+ ) + prompt = [ + {"role": "system", "content": instruction}, + { + "role": "user", + "content": ( + f"Person: {person.name}\n" + f"Notes: {notes or 'None'}\n\n" + f"Conversation:\n{transcript}" + ), + }, + ] + return prompt + + def get(self, request, type, person_id, operation): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + if operation not in self.allowed_operations: + return HttpResponseBadRequest("Invalid operation specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + send_state = _get_send_state(request.user, person) + conversation = _conversation_for_person(request.user, person) + + if operation == "artifacts": + auto_settings = _get_or_create_auto_settings(request.user, conversation) + plan_bundle = _latest_plan_bundle(conversation) + mitigation_notice_message = "" + mitigation_notice_level = "info" + + if ( + plan_bundle["latest_plan"] is None + and auto_settings.enabled + and auto_settings.auto_create_mitigation + ): + recent_messages = AIWorkspacePersonWidget()._recent_messages( + request.user, + person, + max(20, min(auto_settings.sample_message_window, 200)), + ) + source_text = messages_to_string(recent_messages) if recent_messages else "" + _create_baseline_mitigation_plan( + user=request.user, + person=person, + conversation=conversation, + source_text=source_text, + ) + plan_bundle = _latest_plan_bundle(conversation) + mitigation_notice_message = "Baseline plan auto-created." + mitigation_notice_level = "success" + + if ( + plan_bundle["latest_plan"] is not None + and auto_settings.enabled + and auto_settings.auto_pattern_recognition + ): + auto_result = _run_auto_analysis_for_plan( + user=request.user, + person=person, + conversation=conversation, + plan=plan_bundle["latest_plan"], + auto_settings=auto_settings, + trigger="auto", + ) + if auto_result.get("ran"): + mitigation_notice_message = auto_result["summary"] + mitigation_notice_level = "info" + if auto_result.get("created_corrections"): + plan_bundle = _latest_plan_bundle(conversation) + + context = { + "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()), + "operation": operation, + "result_text": "", + "result_sections": [], + "error": False, + "person": person, + "send_state": send_state, + "ai_result_id": "", + "mitigation_notice_message": mitigation_notice_message, + "mitigation_notice_level": mitigation_notice_level, + **plan_bundle, + } + return render(request, "partials/ai-workspace-ai-result.html", context) + + ai_obj = AI.objects.filter(user=request.user).first() + if ai_obj is None: + context = { + "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()), + "operation": operation, + "result_text": "No AI configured for this user yet.", + "result_sections": _parse_result_sections("No AI configured for this user yet."), + "error": True, + "person": person, + "send_state": send_state, + "latest_plan": None, + "latest_plan_rules": [], + "latest_plan_games": [], + "latest_plan_corrections": [], + "latest_plan_messages": [], + "latest_plan_export": None, + } + return render(request, "partials/ai-workspace-ai-result.html", context) + + try: + limit = int(request.GET.get("limit", 20)) + except (TypeError, ValueError): + limit = 20 + limit = max(5, min(limit, 200)) + user_notes = request.GET.get("user_notes", "") + + messages = AIWorkspacePersonWidget()._recent_messages(request.user, person, limit) + transcript = messages_to_string(messages) + person_identifiers = 
set( + PersonIdentifier.objects.filter( + user=request.user, + person=person, + ).values_list("identifier", flat=True) + ) + + if messages: + conversation.last_event_ts = messages[-1].ts + conversation.save(update_fields=["last_event_ts"]) + message_event_ids = self._ensure_message_events( + request.user, + conversation, + person_identifiers, + messages, + ) + + ai_request = AIRequest.objects.create( + user=request.user, + conversation=conversation, + window_spec={"limit": limit}, + message_ids=message_event_ids, + user_notes=user_notes, + operation=operation, + policy_snapshot={"send_state": send_state}, + status="running", + started_at=dj_timezone.now(), + ) + + try: + prompt = self._build_prompt(operation, person, transcript, user_notes) + result_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj) + draft_options = _parse_draft_options(result_text) if operation == "draft_reply" else [] + ai_result = AIResult.objects.create( + user=request.user, + ai_request=ai_request, + working_summary=result_text if operation != "draft_reply" else "", + draft_replies=draft_options, + interaction_signals=[], + memory_proposals=[], + citations=message_event_ids, + ) + ai_request.status = "done" + ai_request.finished_at = dj_timezone.now() + ai_request.save(update_fields=["status", "finished_at"]) + conversation.last_ai_run_at = dj_timezone.now() + conversation.save(update_fields=["last_ai_run_at"]) + plan_bundle = _latest_plan_bundle(conversation) + context = { + "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()), + "operation": operation, + "result_text": result_text, + "result_sections": _parse_result_sections(result_text), + "draft_replies": ai_result.draft_replies, + "error": False, + "person": person, + "send_state": send_state, + "ai_result_id": str(ai_result.id), + **plan_bundle, + } + except Exception as exc: + ai_request.status = "failed" + ai_request.error = str(exc) + ai_request.finished_at = dj_timezone.now() + ai_request.save(update_fields=["status", "error", "finished_at"]) + context = { + "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()), + "operation": operation, + "result_text": str(exc), + "result_sections": _parse_result_sections(str(exc)), + "error": True, + "person": person, + "send_state": send_state, + "latest_plan": None, + "latest_plan_rules": [], + "latest_plan_games": [], + "latest_plan_corrections": [], + "latest_plan_messages": [], + "latest_plan_export": None, + } + + return render(request, "partials/ai-workspace-ai-result.html", context) + + +class AIWorkspaceSendDraft(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + send_state = _get_send_state(request.user, person) + text = (request.POST.get("draft_text") or "").strip() + force_send = _is_truthy(request.POST.get("force_send")) + if not text: + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": False, "message": "Draft is empty.", "level": "danger"}, + ) + if not send_state["can_send"] and not force_send: + return render( + request, + "partials/ai-workspace-send-status.html", + { + "ok": False, + "message": f"Send blocked. 
{send_state['text']}", + "level": "warning", + }, + ) + + identifier = _resolve_person_identifier(request.user, person) + if identifier is None: + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": False, "message": "No recipient identifier found.", "level": "danger"}, + ) + + try: + ts = async_to_sync(identifier.send)(text) + except Exception as exc: + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": False, "message": f"Send failed: {exc}", "level": "danger"}, + ) + + session, _ = ChatSession.objects.get_or_create( + user=request.user, + identifier=identifier, + ) + sent_ts = int(ts) if ts else int(dj_timezone.now().timestamp() * 1000) + Message.objects.create( + user=request.user, + session=session, + custom_author="BOT", + sender_uuid="", + text=text, + ts=sent_ts, + ) + success_message = "Draft sent." + if force_send and not send_state["can_send"]: + success_message = "Draft sent with override." + response = render( + request, + "partials/ai-workspace-send-status.html", + {"ok": True, "message": success_message, "level": "success"}, + ) + response["HX-Trigger"] = json.dumps( + { + "gia-message-sent": { + "person_id": str(person.id), + "ts": sent_ts, + "text": text, + "author": "BOT", + } + } + ) + return response + + +class AIWorkspaceQueueDraft(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + text = (request.POST.get("draft_text") or "").strip() + if not text: + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": False, "message": "Select a draft before queueing.", "level": "warning"}, + ) + + identifier = _resolve_person_identifier(request.user, person) + if identifier is None: + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": False, "message": "No recipient identifier found.", "level": "danger"}, + ) + + manipulation = _get_queue_manipulation(request.user, person) + if manipulation is None: + return render( + request, + "partials/ai-workspace-send-status.html", + { + "ok": False, + "message": "No enabled manipulation found for this recipient. 
Queue entry not created.", + "level": "warning", + }, + ) + + session, _ = ChatSession.objects.get_or_create( + user=request.user, + identifier=identifier, + ) + QueuedMessage.objects.create( + user=request.user, + session=session, + manipulation=manipulation, + ts=int(dj_timezone.now().timestamp() * 1000), + sender_uuid="", + text=text, + custom_author="BOT", + ) + return render( + request, + "partials/ai-workspace-send-status.html", + {"ok": True, "message": "Draft added to queue.", "level": "success"}, + ) + + +class AIWorkspaceCreateMitigation(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + ai_result_id = (request.POST.get("ai_result_id") or "").strip() + output_profile = (request.POST.get("output_profile") or "").strip() + if output_profile not in {"framework", "rule", "rules", "game", "games"}: + return render( + request, + "partials/ai-workspace-mitigation-status.html", + { + "person": person, + "level": "warning", + "message": "Choose one mitigation output type: framework, rules, or games.", + }, + ) + user_context = (request.POST.get("user_context") or "").strip() + creation_mode = "guided" if user_context else "auto" + seed_from_context = _extract_seed_entities_from_context(user_context) + fundamentals = seed_from_context.get("fundamentals", []) + + source_result = None + if ai_result_id: + source_result = AIResult.objects.filter( + id=ai_result_id, + user=request.user, + ).select_related("ai_request", "ai_request__conversation").first() + + conversation = ( + source_result.ai_request.conversation + if source_result is not None + else _conversation_for_person(request.user, person) + ) + conversation.participants.add(person) + + source_text = "" + if source_result is not None: + source_text = source_result.working_summary or "" + if not source_text: + source_text = (request.POST.get("source_text") or "").strip() + + ai_obj = AI.objects.filter(user=request.user).first() + artifacts = _build_mitigation_artifacts( + ai_obj=ai_obj, + person=person, + source_text=source_text, + creation_mode=creation_mode, + inspiration=user_context, + fundamentals=fundamentals, + output_profile=output_profile, + ) + # Deterministically seed from pasted context so long-form frameworks can + # create fundamentals/rules/games in one pass, even when AI output is sparse. + artifacts = _merge_seed_entities(artifacts, seed_from_context) + + plan = PatternMitigationPlan.objects.create( + user=request.user, + conversation=conversation, + source_ai_result=source_result, + title=artifacts.get("title") or f"{person.name} Pattern Mitigation", + objective=artifacts.get("objective") or "", + fundamental_items=artifacts.get("fundamental_items") or fundamentals, + creation_mode=creation_mode, + status="draft", + ) + + for rule in artifacts.get("rules", []): + PatternMitigationRule.objects.create( + user=request.user, + plan=plan, + title=str(rule.get("title") or "Rule").strip()[:255], + content=str(rule.get("content") or "").strip(), + ) + + for game in artifacts.get("games", []): + PatternMitigationGame.objects.create( + user=request.user, + plan=plan, + title=str(game.get("title") or "Game").strip()[:255], + instructions=str(game.get("instructions") or "").strip(), + ) + + PatternMitigationMessage.objects.create( + user=request.user, + plan=plan, + role="system", + text="Plan created. 
Use the tabs below to refine rules, games, fundamentals, corrections, and AI guidance.", + ) + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Mitigation plan created.", + notice_level="success", + active_tab="plan_board", + ), + ) + + +class AIWorkspaceMitigationChat(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id, plan_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404( + PatternMitigationPlan, + id=plan_id, + user=request.user, + ) + text = (request.POST.get("message") or "").strip() + active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="ask_ai") + if not text: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Message is empty.", + notice_level="warning", + active_tab=active_tab, + ), + ) + + PatternMitigationMessage.objects.create( + user=request.user, + plan=plan, + role="user", + text=text, + ) + + ai_obj = AI.objects.filter(user=request.user).first() + assistant_text = "" + if ai_obj: + rules_text = "\n".join([f"- {r.title}: {r.content}" for r in plan.rules.order_by("created_at")]) + games_text = "\n".join([f"- {g.title}: {g.instructions}" for g in plan.games.order_by("created_at")]) + corrections_text = "\n".join([f"- {c.title}: {c.clarification}" for c in plan.corrections.order_by("created_at")]) + recent_msgs = plan.messages.order_by("-created_at")[:10] + recent_msgs = list(reversed(list(recent_msgs))) + transcript = "\n".join([f"{m.role.upper()}: {m.text}" for m in recent_msgs]) + prompt = [ + { + "role": "system", + "content": ( + "You are refining a mitigation protocol. " + "Give concise practical updates to rules/games/corrections and explain tradeoffs." + ), + }, + { + "role": "user", + "content": ( + f"Plan objective: {plan.objective}\n" + f"Fundamentals: {json.dumps(plan.fundamental_items or [])}\n" + f"Rules:\n{rules_text or '(none)'}\n\n" + f"Games:\n{games_text or '(none)'}\n\n" + f"Corrections:\n{corrections_text or '(none)'}\n\n" + f"Conversation:\n{transcript}" + ), + }, + ] + try: + assistant_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj) + except Exception as exc: + assistant_text = f"Failed to run AI refinement: {exc}" + else: + assistant_text = "No AI configured. Add an AI config to use mitigation chat." 
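+ # Persist the assistant turn unconditionally so the plan's refinement
+ # transcript stays complete, including error and "no AI configured" replies.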
+ + PatternMitigationMessage.objects.create( + user=request.user, + plan=plan, + role="assistant", + text=assistant_text, + ) + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + active_tab=active_tab, + ), + ) + + +class AIWorkspaceExportArtifact(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id, plan_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404( + PatternMitigationPlan, + id=plan_id, + user=request.user, + ) + + artifact_type = (request.POST.get("artifact_type") or "rulebook").strip() + if artifact_type not in {"rulebook", "rules", "games", "corrections"}: + artifact_type = "rulebook" + + export_format = (request.POST.get("export_format") or "markdown").strip() + active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="ask_ai") + if export_format not in {"markdown", "json", "text"}: + export_format = "markdown" + + payload, meta = _serialize_export_payload(plan, artifact_type, export_format) + export_record = PatternArtifactExport.objects.create( + user=request.user, + plan=plan, + artifact_type=artifact_type, + export_format=export_format, + protocol_version="artifact-v1", + payload=payload, + meta=meta, + ) + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"Exported {artifact_type} ({export_format}).", + notice_level="success", + export_record=export_record, + active_tab=active_tab, + ), + ) + + +class AIWorkspaceCreateArtifact(LoginRequiredMixin, View): + allowed_types = {"widget"} + kind_map = { + "rule": (PatternMitigationRule, "content", "Rule"), + "game": (PatternMitigationGame, "instructions", "Game"), + "correction": (PatternMitigationCorrection, "clarification", "Correction"), + } + + def post(self, request, type, person_id, plan_id, kind): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + kind_key = (kind or "").strip().lower() + if kind_key not in self.kind_map: + return HttpResponseBadRequest("Invalid artifact kind") + + model, body_field, label = self.kind_map[kind_key] + if kind_key == "correction": + candidate_signature = _correction_signature(f"New {label}", "") + if candidate_signature in _existing_correction_signatures(plan): + tab = _sanitize_active_tab(request.POST.get("active_tab"), default="corrections") + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Duplicate correction skipped.", + notice_level="warning", + active_tab=tab, + ), + ) + payload = { + "user": request.user, + "plan": plan, + "title": f"New {label}", + body_field: "", + "enabled": True, + } + model.objects.create(**payload) + tab = _sanitize_active_tab( + request.POST.get("active_tab"), + default=("corrections" if kind_key == "correction" else "plan_board"), + ) + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"{label} created.", + notice_level="success", + active_tab=tab, + ), + ) + + 
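+# Fields read by the update view below (summarised from its request.POST
+# reads; illustrative, not an exhaustive form contract): title, body,
+# enabled and active_tab for every kind, plus source_phrase,
+# perspective (first_person|second_person|third_person),
+# share_target (self|other|both) and language_style (same|adapted)
+# when kind == "correction".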
+class AIWorkspaceUpdateArtifact(LoginRequiredMixin, View): + allowed_types = {"widget"} + kind_map = { + "rule": (PatternMitigationRule, "content", "Rule"), + "game": (PatternMitigationGame, "instructions", "Game"), + "correction": (PatternMitigationCorrection, "clarification", "Correction"), + } + + def post(self, request, type, person_id, plan_id, kind, artifact_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + kind_key = (kind or "").strip().lower() + if kind_key not in self.kind_map: + return HttpResponseBadRequest("Invalid artifact kind") + + model, body_field, label = self.kind_map[kind_key] + artifact = get_object_or_404( + model, + id=artifact_id, + user=request.user, + plan=plan, + ) + title = (request.POST.get("title") or "").strip() or artifact.title + body = (request.POST.get("body") or "").strip() + enabled = _is_truthy(request.POST.get("enabled")) + tab = _sanitize_active_tab( + request.POST.get("active_tab"), + default=("corrections" if kind_key == "correction" else "plan_board"), + ) + + if kind_key == "correction": + title = _normalize_correction_title(title, fallback=artifact.title or "Correction") + candidate_signature = _correction_signature(title, body) + if candidate_signature in _existing_correction_signatures(plan, exclude_id=artifact.id): + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Duplicate correction not saved.", + notice_level="warning", + active_tab=tab, + ), + ) + + artifact.title = title[:255] + setattr(artifact, body_field, body) + artifact.enabled = enabled + if kind_key == "correction": + artifact.source_phrase = (request.POST.get("source_phrase") or "").strip() + perspective = (request.POST.get("perspective") or "third_person").strip() + if perspective in {"first_person", "second_person", "third_person"}: + artifact.perspective = perspective + share_target = (request.POST.get("share_target") or "both").strip() + if share_target in {"self", "other", "both"}: + artifact.share_target = share_target + language_style = (request.POST.get("language_style") or "adapted").strip() + if language_style in {"same", "adapted"}: + artifact.language_style = language_style + artifact.save() + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"{label} saved.", + notice_level="success", + active_tab=tab, + ), + ) + + +class AIWorkspaceDeleteArtifact(LoginRequiredMixin, View): + allowed_types = {"widget"} + kind_map = { + "rule": (PatternMitigationRule, "Rule"), + "game": (PatternMitigationGame, "Game"), + "correction": (PatternMitigationCorrection, "Correction"), + } + + def post(self, request, type, person_id, plan_id, kind, artifact_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + kind_key = (kind or "").strip().lower() + if kind_key not in self.kind_map: + return HttpResponseBadRequest("Invalid artifact kind") + + model, label = self.kind_map[kind_key] + artifact = get_object_or_404( + model, + id=artifact_id, + user=request.user, + plan=plan, + ) + 
artifact.delete() + tab = _sanitize_active_tab( + request.POST.get("active_tab"), + default=("corrections" if kind_key == "correction" else "plan_board"), + ) + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"{label} deleted.", + notice_level="success", + active_tab=tab, + ), + ) + + +class AIWorkspaceDeleteArtifactList(LoginRequiredMixin, View): + allowed_types = {"widget"} + kind_map = { + "rule": (PatternMitigationRule, "rules"), + "game": (PatternMitigationGame, "games"), + "correction": (PatternMitigationCorrection, "corrections"), + } + + def post(self, request, type, person_id, plan_id, kind): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + kind_key = (kind or "").strip().lower() + if kind_key not in self.kind_map: + return HttpResponseBadRequest("Invalid artifact kind") + + model, label = self.kind_map[kind_key] + rows = model.objects.filter(user=request.user, plan=plan) + delete_count = rows.count() + if delete_count: + rows.delete() + notice_message = f"Deleted {delete_count} {label}." + notice_level = "success" + else: + notice_message = f"No {label} to delete." + notice_level = "info" + + tab = _sanitize_active_tab( + request.POST.get("active_tab"), + default=("corrections" if kind_key == "correction" else "plan_board"), + ) + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=notice_message, + notice_level=notice_level, + active_tab=tab, + ), + ) + + +class AIWorkspaceEngageShare(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id, plan_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + + source_ref = (request.POST.get("source_ref") or "").strip() + share_target = (request.POST.get("share_target") or "self").strip() + framing = (request.POST.get("framing") or "dont_change").strip() + context_note = (request.POST.get("context_note") or "").strip() + action = (request.POST.get("action") or "preview").strip().lower() + force_send = _is_truthy(request.POST.get("force_send")) + + engage_form = { + "source_ref": source_ref, + "share_target": share_target, + "framing": framing, + "context_note": context_note, + } + active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="engage") + + if ":" not in source_ref: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Select a source item to engage.", + notice_level="warning", + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + source_kind, source_id = source_ref.split(":", 1) + source_kind = source_kind.strip().lower() + source_id = source_id.strip() + model_map = { + "rule": PatternMitigationRule, + "game": PatternMitigationGame, + "correction": PatternMitigationCorrection, + } + if source_kind not in model_map: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + 
notice_message="Invalid source type for engage.", + notice_level="danger", + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + source_obj = get_object_or_404( + model_map[source_kind], + id=source_id, + user=request.user, + plan=plan, + ) + payload = _build_engage_payload( + source_obj=source_obj, + source_kind=source_kind, + share_target=share_target, + framing=framing, + context_note=context_note, + owner_name=( + request.user.first_name + or request.user.get_full_name().strip() + or request.user.username + or "You" + ), + recipient_name=person.name or "Other", + ) + engage_preview = payload["preview"] + outbound_text = payload["outbound"] + share_target = payload["share_target"] + + if action == "preview": + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + engage_preview=engage_preview, + engage_preview_flash=True, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + if action == "send": + send_state = _get_send_state(request.user, person) + if not send_state["can_send"] and not force_send: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"Send blocked. {send_state['text']}", + notice_level="warning", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + identifier = _resolve_person_identifier(request.user, person) + if identifier is None: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="No recipient identifier found.", + notice_level="danger", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + try: + ts = async_to_sync(identifier.send)(outbound_text) + except Exception as exc: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=f"Send failed: {exc}", + notice_level="danger", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + session, _ = ChatSession.objects.get_or_create( + user=request.user, + identifier=identifier, + ) + sent_ts = int(ts) if ts else int(dj_timezone.now().timestamp() * 1000) + Message.objects.create( + user=request.user, + session=session, + custom_author="BOT", + sender_uuid="", + text=outbound_text, + ts=sent_ts, + ) + notice = "Shared via engage." + if force_send and not send_state["can_send"]: + notice = "Shared via engage with override." 
+ response = render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=notice, + notice_level="success", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + response["HX-Trigger"] = json.dumps( + { + "gia-message-sent": { + "person_id": str(person.id), + "ts": sent_ts, + "text": outbound_text, + "author": "BOT", + } + } + ) + return response + + if action == "queue": + identifier = _resolve_person_identifier(request.user, person) + if identifier is None: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="No recipient identifier found.", + notice_level="danger", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + manipulation = _get_queue_manipulation(request.user, person) + if manipulation is None: + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="No enabled manipulation found for this recipient. Queue entry not created.", + notice_level="warning", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + session, _ = ChatSession.objects.get_or_create( + user=request.user, + identifier=identifier, + ) + QueuedMessage.objects.create( + user=request.user, + session=session, + manipulation=manipulation, + ts=int(dj_timezone.now().timestamp() * 1000), + sender_uuid="", + text=outbound_text, + custom_author="BOT", + ) + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Engage text added to queue.", + notice_level="success", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Unknown engage action.", + notice_level="warning", + engage_preview=engage_preview, + engage_form=engage_form, + active_tab=active_tab, + ), + ) + + +class AIWorkspaceAutoSettings(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id, plan_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + auto_settings = _get_or_create_auto_settings(request.user, plan.conversation) + + auto_settings.enabled = _is_truthy(request.POST.get("enabled")) + auto_settings.auto_pattern_recognition = _is_truthy( + request.POST.get("auto_pattern_recognition") + ) + auto_settings.auto_create_mitigation = _is_truthy( + request.POST.get("auto_create_mitigation") + ) + auto_settings.auto_create_corrections = _is_truthy( + request.POST.get("auto_create_corrections") + ) + auto_settings.auto_notify_enabled = _is_truthy( + request.POST.get("auto_notify_enabled") + ) + auto_settings.ntfy_topic_override = ( + (request.POST.get("ntfy_topic_override") or "").strip() or None + ) + auto_settings.ntfy_url_override = ( + (request.POST.get("ntfy_url_override") or "").strip() or None + ) + try: + auto_settings.sample_message_window = max( + 10, min(int(request.POST.get("sample_message_window") or 40), 200) + ) + 
except Exception: + auto_settings.sample_message_window = 40 + try: + auto_settings.check_cooldown_seconds = max( + 0, min(int(request.POST.get("check_cooldown_seconds") or 300), 86400) + ) + except Exception: + auto_settings.check_cooldown_seconds = 300 + auto_settings.save() + + action = (request.POST.get("action") or "save").strip().lower() + if action == "run_now": + result = _run_auto_analysis_for_plan( + user=request.user, + person=person, + conversation=plan.conversation, + plan=plan, + auto_settings=auto_settings, + trigger="manual", + ) + notice_message = result["summary"] + notice_level = "success" if result.get("ran") else "info" + else: + notice_message = "Automation settings saved." + notice_level = "success" + + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message=notice_message, + notice_level=notice_level, + active_tab="auto", + auto_settings=auto_settings, + ), + ) + + +class AIWorkspaceUpdateFundamentals(LoginRequiredMixin, View): + allowed_types = {"widget"} + + def post(self, request, type, person_id, plan_id): + if type not in self.allowed_types: + return HttpResponseBadRequest("Invalid type specified") + + person = get_object_or_404(Person, pk=person_id, user=request.user) + plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user) + fundamentals_text = request.POST.get("fundamentals_text") or "" + active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="fundamentals") + plan.fundamental_items = _parse_fundamentals(fundamentals_text) + plan.save(update_fields=["fundamental_items", "updated_at"]) + return render( + request, + "partials/ai-workspace-mitigation-panel.html", + _mitigation_panel_context( + person=person, + plan=plan, + notice_message="Fundamentals saved.", + notice_level="success", + active_tab=active_tab, + ), + )