Files
GIA/core/views/workspace.py

2865 lines
102 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

from datetime import datetime, timezone
import json
import re
from asgiref.sync import async_to_sync
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render
from django.utils import timezone as dj_timezone
from django.views import View
from core.forms import AIWorkspaceWindowForm
from core.lib.notify import raw_sendmsg
from core.messaging import ai as ai_runner
from core.messaging.utils import messages_to_string
from core.models import (
AI,
AIRequest,
AIResult,
ChatSession,
Message,
MessageEvent,
Manipulation,
PatternArtifactExport,
PatternMitigationAutoSettings,
PatternMitigationCorrection,
PatternMitigationGame,
PatternMitigationMessage,
PatternMitigationPlan,
PatternMitigationRule,
Person,
PersonIdentifier,
QueuedMessage,
WorkspaceConversation,
)
# Manipulation modes in which outbound sending is permitted.
SEND_ENABLED_MODES = {"active", "instant"}
# Display labels for workspace AI operations, keyed by operation slug.
OPERATION_LABELS = {
    "summarise": "Summarise",
    "draft_reply": "Draft Reply",
    "extract_patterns": "Extract Patterns",
    "artifacts": "Plan",
}
# Valid tab identifiers for the pattern-mitigation UI; unknown values are
# rejected when sanitizing user-supplied tab names.
MITIGATION_TABS = {
    "plan_board",
    "corrections",
    "engage",
    "fundamentals",
    "ask_ai",
    "auto",
}
def _format_unix_ms(ts):
if not ts:
return ""
dt = datetime.fromtimestamp(ts / 1000, tz=timezone.utc)
return dt.strftime("%Y-%m-%d %H:%M UTC")
def _infer_direction(message, person_identifiers):
"""
Infer message direction relative to workspace owner.
"""
sender = message.sender_uuid or ""
if sender and sender in person_identifiers:
return "in"
return "out"
def _get_send_state(user, person):
    """
    Resolve current send capability from user's enabled manipulations for this person.

    Returns a dict with keys:
      can_send (bool), level ('success' or 'warning'), text (operator-facing
      explanation), manipulation_id (stringified id or None).
    """
    # All enabled manipulations whose group contains the recipient.
    manipulations = (
        Manipulation.objects.filter(
            user=user,
            enabled=True,
            group__people=person,
        )
        .select_related("group")
        .distinct()
    )
    if not manipulations.exists():
        return {
            "can_send": False,
            "level": "warning",
            "text": "Sending is blocked: no enabled manipulation matched this recipient.",
            "manipulation_id": None,
        }
    # Sending requires at least one matching manipulation in a send-enabled mode.
    send_manip = manipulations.filter(mode__in=SEND_ENABLED_MODES).first()
    if send_manip:
        return {
            "can_send": True,
            "level": "success",
            "text": f"Enabled by manipulation '{send_manip.name}' ({send_manip.mode}).",
            "manipulation_id": str(send_manip.id),
        }
    # Enabled manipulations exist, but none permit sending: report their modes.
    mode_list = ", ".join(sorted({(m.mode or "unset") for m in manipulations}))
    return {
        "can_send": False,
        "level": "warning",
        "text": f"Sending is blocked by active mode(s): {mode_list}.",
        "manipulation_id": None,
    }
def _get_queue_manipulation(user, person):
    """
    Pick the manipulation to attach to queue entries for this recipient:
    a send-enabled one when available, otherwise any enabled match.
    """
    candidates = (
        Manipulation.objects.filter(
            user=user,
            enabled=True,
            group__people=person,
        )
        .select_related("group")
        .distinct()
    )
    preferred = candidates.filter(mode__in=SEND_ENABLED_MODES).first()
    if preferred is not None:
        return preferred
    return candidates.first()
def _resolve_person_identifier(user, person):
    """
    Resolve the best identifier for outbound share/send operations.

    A Signal identifier wins when one exists; otherwise any identifier for the
    person is returned (None when there are none at all).
    """
    base_query = PersonIdentifier.objects.filter(user=user, person=person)
    signal_match = base_query.filter(service="signal").first()
    if signal_match is not None:
        return signal_match
    return base_query.first()
def _is_truthy(value):
return str(value or "").strip().lower() in {"1", "true", "on", "yes"}
def _sanitize_active_tab(value, default="plan_board"):
    """Return value when it names a known mitigation tab, else the default."""
    candidate = (value or "").strip()
    return candidate if candidate in MITIGATION_TABS else default
def _parse_draft_options(result_text):
    """
    Parse model output into labeled draft options shown simultaneously in UI.

    Four strategies are tried in order and the first that yields options wins
    (capped at three):
      1. line-based Soft/Neutral/Firm labeled blocks,
      2. inline 'Soft: ... Neutral: ...' segments within one paragraph,
      3. 'Option N' separated chunks,
      4. the first non-empty paragraphs.
    Each option is {"label": str, "text": str}; empty input yields [].
    """
    content = (result_text or "").strip()
    if not content:
        return []
    def clean_option_text(value):
        # Trim list/emphasis residue around the option body.
        value = (value or "").strip(" \n\r\t*:")
        # Strip surrounding quotes when the whole option is wrapped.
        if len(value) >= 2 and ((value[0] == '"' and value[-1] == '"') or (value[0] == "'" and value[-1] == "'")):
            value = value[1:-1].strip()
        return value
    def dedupe_by_label(seq):
        # Keep the first occurrence of each label (case-insensitive) and drop
        # entries whose label or cleaned text is empty.
        ordered = []
        seen = set()
        for item in seq:
            label = (item.get("label") or "").strip().title()
            text = clean_option_text(item.get("text") or "")
            if not label or not text:
                continue
            key = label.lower()
            if key in seen:
                continue
            seen.add(key)
            ordered.append({"label": label, "text": text})
        return ordered
    # Primary parser: line-based labeled blocks.
    # Accepts:
    # - Soft/Neutral/Firm
    # - optional Tone/Response/Reply suffix
    # - optional markdown bold markers
    # - content on same line or subsequent lines
    block_re = re.compile(
        r"(?ims)^\s*(?:[-*]\s*)?(?:\*\*)?\s*(Soft|Neutral|Firm)\s*"
        r"(?:(?:Tone|Response|Reply))?\s*:?\s*(?:\*\*)?\s*"
        r"(.*?)(?=^\s*(?:[-*]\s*)?(?:\*\*)?\s*(?:Soft|Neutral|Firm)\s*"
        r"(?:(?:Tone|Response|Reply))?\s*:?\s*(?:\*\*)?\s*|\Z)"
    )
    options = [
        {"label": match.group(1).strip().title(), "text": match.group(2)}
        for match in block_re.finditer(content)
    ]
    options = dedupe_by_label(options)
    if options:
        return options[:3]
    # Secondary parser: inline labeled segments in one paragraph.
    inline_re = re.compile(
        r"(?is)\b(Soft|Neutral|Firm)\s*(?:(?:Tone|Response|Reply))?\s*:\s*(.*?)"
        r"(?=\b(?:Soft|Neutral|Firm)\s*(?:(?:Tone|Response|Reply))?\s*:|$)"
    )
    options = [
        {"label": match.group(1).strip().title(), "text": match.group(2)}
        for match in inline_re.finditer(content)
    ]
    options = dedupe_by_label(options)
    if options:
        return options[:3]
    # Secondary parser: Option 1/2/3 blocks.
    option_split_re = re.compile(r"(?im)^\s*Option\s+\d+\s*$")
    chunks = [chunk.strip() for chunk in option_split_re.split(content) if chunk.strip()]
    parsed = []
    prefix_re = re.compile(r"(?im)^(?:\*\*)?\s*(Soft|Neutral|Firm)\s*(?:Tone|Response|Reply)?\s*:?\s*(?:\*\*)?\s*")
    for idx, chunk in enumerate(chunks, start=1):
        label = f"Option {idx}"
        # A chunk that itself starts with a tone label adopts that label.
        prefix_match = prefix_re.match(chunk)
        if prefix_match:
            label = prefix_match.group(1).strip().title()
            chunk = prefix_re.sub("", chunk, count=1).strip(" \n\r\t*:")
        if chunk:
            parsed.append({"label": label, "text": chunk})
    if parsed:
        return dedupe_by_label(parsed)[:3]
    # Final fallback: use first non-empty paragraphs.
    paragraphs = [para.strip() for para in re.split(r"\n\s*\n", content) if para.strip()]
    return dedupe_by_label([
        {"label": f"Option {idx}", "text": para}
        for idx, para in enumerate(paragraphs[:3], start=1)
    ])
def _extract_seed_entities_from_context(raw_context):
    """
    Heuristic extractor for pasted long-form frameworks.
    Returns candidate fundamentals/rules/games without model dependency.

    The text is scanned line by line for 'Rule N:' / 'Game N:' / 'Mantra:'
    markers plus CORE PRINCIPLE and QUICK CHEAT SHEET section headers; body
    lines accumulate under the most recent rule/game until a blank line or a
    new marker flushes it. Output shape:
    {"fundamentals": [str], "rules": [{"title", "content"}],
     "games": [{"title", "instructions"}]}, each deduplicated and capped.
    """
    text = (raw_context or "").strip()
    if not text:
        return {"fundamentals": [], "rules": [], "games": []}
    lines = [line.strip() for line in text.splitlines()]
    fundamentals = []
    rules = []
    games = []
    current_rule = None  # {"title": str, "body": [str]} while collecting
    current_game = None  # same shape as current_rule, for games
    in_core_principle = False  # inside a CORE PRINCIPLE section
    in_quick_cheat = False  # inside a QUICK CHEAT SHEET section
    def flush_rule():
        # Persist the in-progress rule; a title with no body reuses the
        # title as content so the rule is not dropped.
        nonlocal current_rule
        if not current_rule:
            return
        title = current_rule.get("title", "").strip()
        content = " ".join(current_rule.get("body", [])).strip()
        if title and content:
            rules.append({"title": title, "content": content})
        elif title:
            rules.append({"title": title, "content": title})
        current_rule = None
    def flush_game():
        # Same as flush_rule, for games.
        nonlocal current_game
        if not current_game:
            return
        title = current_game.get("title", "").strip()
        instructions = " ".join(current_game.get("body", [])).strip()
        if title and instructions:
            games.append({"title": title, "instructions": instructions})
        elif title:
            games.append({"title": title, "instructions": title})
        current_game = None
    for line in lines:
        # Blank line: close any open rule/game and leave special sections.
        if not line:
            flush_rule()
            flush_game()
            in_core_principle = False
            in_quick_cheat = False
            continue
        if re.match(r"^SECTION\s+\d+", line, re.IGNORECASE):
            in_core_principle = False
            in_quick_cheat = False
            flush_rule()
            flush_game()
            continue
        if re.match(r"^CORE PRINCIPLE", line, re.IGNORECASE):
            in_core_principle = True
            in_quick_cheat = False
            continue
        if re.match(r"^QUICK CHEAT SHEET", line, re.IGNORECASE):
            in_quick_cheat = True
            in_core_principle = False
            continue
        rule_match = re.match(r"^Rule\s+(\d+)\s*:\s*(.+)$", line, re.IGNORECASE)
        game_match = re.match(r"^Game\s+(\d+)\s*:\s*(.+)$", line, re.IGNORECASE)
        mantra_match = re.match(r"^Mantra\s*:\s*(.+)$", line, re.IGNORECASE)
        if rule_match:
            flush_rule()
            flush_game()
            title = rule_match.group(2).strip()
            current_rule = {"title": title, "body": []}
            # Rules numbered 1 and 11 are additionally promoted to
            # fundamentals (framework-specific heuristic).
            if rule_match.group(1) in {"1", "11"} and title:
                fundamentals.append(title)
            continue
        if game_match:
            flush_rule()
            flush_game()
            title = game_match.group(2).strip()
            current_game = {"title": title, "body": []}
            continue
        if mantra_match:
            fundamentals.append(mantra_match.group(1).strip())
            continue
        # Short headline-style lines inside CORE PRINCIPLE become fundamentals.
        if in_core_principle and len(line) <= 120 and ":" not in line:
            fundamentals.append(line)
            continue
        if in_quick_cheat:
            # Strip list markers from cheat-sheet bullets before keeping them.
            quick_line = re.sub(r"^\s*(?:[-*]|\d+\.)\s*", "", line).strip()
            if quick_line and len(quick_line) <= 120 and not quick_line.lower().startswith("if you want"):
                fundamentals.append(quick_line)
            continue
        if "Emotional safety > Accuracy > Analysis" in line:
            fundamentals.append("Emotional safety > Accuracy > Analysis")
            continue
        # Otherwise the line is body text for whichever item is open.
        if current_rule:
            current_rule["body"].append(line)
            continue
        if current_game:
            current_game["body"].append(line)
            continue
    flush_rule()
    flush_game()
    # Keep order, remove duplicates.
    def dedupe_strings(seq):
        seen = set()
        out = []
        for item in seq:
            key = item.strip().lower()
            if not key or key in seen:
                continue
            seen.add(key)
            out.append(item.strip())
        return out
    def dedupe_dicts(seq):
        # Dedupe by case-insensitive title, keeping the first occurrence.
        seen = set()
        out = []
        for item in seq:
            title = (item.get("title") or "").strip()
            key = title.lower()
            if not key or key in seen:
                continue
            seen.add(key)
            out.append(item)
        return out
    return {
        "fundamentals": dedupe_strings(fundamentals)[:20],
        "rules": dedupe_dicts(rules)[:40],
        "games": dedupe_dicts(games)[:40],
    }
def _merge_seed_entities(artifacts, seed):
merged = dict(artifacts or {})
seed = seed or {}
fundamentals = list(merged.get("fundamental_items") or [])
fundamentals = list(dict.fromkeys(fundamentals + list(seed.get("fundamentals") or [])))
merged["fundamental_items"] = fundamentals
def merge_artifact_list(existing, injected, body_key):
existing = list(existing or [])
injected = list(injected or [])
seen = {(item.get("title") or "").strip().lower() for item in existing}
for item in injected:
title = (item.get("title") or "").strip()
body = (item.get(body_key) or "").strip()
if not title or not body:
continue
key = title.lower()
if key in seen:
continue
existing.append({"title": title, body_key: body})
seen.add(key)
return existing
merged["rules"] = merge_artifact_list(merged.get("rules"), seed.get("rules"), "content")
merged["games"] = merge_artifact_list(merged.get("games"), seed.get("games"), "instructions")
return merged
def _normalize_markdown_titles(text):
"""
Minimal markdown cleanup:
- convert '**Title:**' style lines into markdown headings so Bulma headers can style them.
"""
out = []
for line in (text or "").splitlines():
match = re.match(r"^\s*\*\*(.+?)\*\*\s*:?\s*$", line)
if match:
out.append(f"## {match.group(1).strip()}")
else:
out.append(line)
return "\n".join(out)
def _clean_inline_markdown(value):
value = re.sub(r"\*\*(.*?)\*\*", r"\1", value)
value = re.sub(r"\*(.*?)\*", r"\1", value)
value = re.sub(r"`(.*?)`", r"\1", value)
return value.strip()
def _append_block(section, block_type, values):
if not values:
return
section["blocks"].append({"type": block_type, "items": values})
def _parse_result_sections(result_text):
    """
    Minimal markdown-ish parser used by UI:
    - '#/##/### Title' become section headers
    - bullet lines become lists
    - remaining lines are grouped as paragraphs
    Returned structure is template-safe (no raw HTML).

    Returns a list of {"title", "level", "blocks"} dicts where each block is
    {"type": "p"|"ul", "items": [str]}; always yields at least one section.
    """
    text = _normalize_markdown_titles(result_text or "")
    lines = text.splitlines()
    sections = []
    current = {"title": "Output", "level": 3, "blocks": []}
    paragraph = []  # pending paragraph lines, joined on flush
    bullets = []  # pending bullet run, emitted as one 'ul' block
    def flush_paragraph():
        # Close the open paragraph into a single joined 'p' block.
        nonlocal paragraph
        if paragraph:
            _append_block(current, "p", [" ".join(paragraph)])
            paragraph = []
    def flush_bullets():
        # Close the open bullet run into one 'ul' block.
        nonlocal bullets
        if bullets:
            _append_block(current, "ul", bullets)
            bullets = []
    def flush_section(force=False):
        # Shallow copy is safe: 'current' is rebound (not reused) afterwards.
        if force or current["blocks"]:
            sections.append(current.copy())
    for raw_line in lines:
        line = raw_line.rstrip()
        heading_match = re.match(r"^\s*(#{1,6})\s+(.+?)\s*$", line)
        if heading_match:
            flush_paragraph()
            flush_bullets()
            flush_section()
            level = len(heading_match.group(1))
            title = _clean_inline_markdown(heading_match.group(2))
            current = {"title": title or "Section", "level": level, "blocks": []}
            continue
        bullet_match = re.match(r"^\s*(?:[-*]|\d+\.)\s+(.+?)\s*$", line)
        if bullet_match:
            flush_paragraph()
            bullets.append(_clean_inline_markdown(bullet_match.group(1)))
            continue
        if not line.strip():
            # Blank line terminates both the paragraph and the bullet run.
            flush_paragraph()
            flush_bullets()
            continue
        flush_bullets()
        paragraph.append(_clean_inline_markdown(line))
    flush_paragraph()
    flush_bullets()
    flush_section(force=True)
    cleaned = [sec for sec in sections if sec.get("blocks")]
    if cleaned:
        return cleaned
    # Nothing parseable: wrap the raw text in a single fallback section.
    fallback = _clean_inline_markdown(result_text or "")
    return [{"title": "Output", "level": 3, "blocks": [{"type": "p", "items": [fallback]}]}]
def _extract_json_object(raw):
text = (raw or "").strip()
if not text:
return None
try:
parsed = json.loads(text)
if isinstance(parsed, dict):
return parsed
except Exception:
pass
start = text.find("{")
if start == -1:
return None
depth = 0
end = None
for index, char in enumerate(text[start:], start=start):
if char == "{":
depth += 1
elif char == "}":
depth -= 1
if depth == 0:
end = index + 1
break
if end is None:
return None
try:
parsed = json.loads(text[start:end])
if isinstance(parsed, dict):
return parsed
except Exception:
return None
return None
def _section_lines(section):
lines = []
for block in section.get("blocks", []):
lines.extend([item for item in block.get("items", []) if item])
return lines
def _shape_artifacts_for_profile(rules, games, output_profile):
"""
Apply lightweight profile shaping for generated mitigation artifacts.
"""
profile = (output_profile or "framework").strip().lower()
if profile in {"rules", "rule"}:
return rules[:12], games[:2]
if profile in {"games", "game"}:
return rules[:3], games[:12]
# framework: balanced
return rules[:10], games[:10]
def _default_artifacts_from_patterns(result_text, person, output_profile="framework"):
    """
    Model-free fallback: derive mitigation rules/games from a pattern-analysis
    text by scanning its parsed sections, with canned defaults when nothing
    matches. Returns the artifact dict shape used by
    _build_mitigation_artifacts (title, objective, fundamental_items, rules,
    games, corrections).
    """
    sections = _parse_result_sections(result_text)
    rules = []
    games = []
    for section in sections:
        title = (section.get("title") or "").lower()
        lines = _section_lines(section)
        if not lines:
            continue
        # Section titles decide whether lines become rules or games.
        if "rule" in title or "next-step" in title or "mitigation" in title:
            for idx, line in enumerate(lines, start=1):
                rules.append({"title": f"Rule {idx}", "content": line})
        elif "game" in title or "protocol" in title:
            for idx, line in enumerate(lines, start=1):
                games.append({"title": f"Game {idx}", "instructions": line})
    # Canned defaults keep the plan usable when the analysis text had no
    # recognizable rule/game sections.
    if not rules:
        rules = [
            {
                "title": "Safety Before Analysis",
                "content": "Prioritize reducing emotional escalation before introducing analysis.",
            },
            {
                "title": "State Matching",
                "content": "If either side is flooded, pause first and resume with a time-bound return.",
            },
        ]
    if not games:
        games = [
            {
                "title": "Two-Turn Pause",
                "instructions": "Limit conflict responses to two short turns, then pause with a clear return time.",
            },
            {
                "title": "Mirror Then Ask",
                "instructions": "Mirror what you heard, validate emotion, then ask whether comfort or solutions are wanted.",
            },
        ]
    rules, games = _shape_artifacts_for_profile(rules, games, output_profile)
    return {
        "title": f"{person.name} Pattern Mitigation",
        "objective": "Reduce repeated friction loops while preserving trust and clarity.",
        "fundamental_items": [],
        "rules": rules,
        "games": games,
        "corrections": [],
    }
def _build_mitigation_artifacts(ai_obj, person, source_text, creation_mode, inspiration, fundamentals, output_profile):
    """
    Produce the mitigation-plan artifact dict (title, objective,
    fundamental_items, rules, games, corrections).

    When `ai_obj` is set the model is prompted for strict JSON; every missing
    or malformed part of the response falls back to the heuristic output of
    _default_artifacts_from_patterns, so this never fails outright.
    Pre-agreed `fundamentals` are always kept at the front of the merged list.
    """
    fallback = _default_artifacts_from_patterns(source_text, person, output_profile)
    if not ai_obj:
        if fundamentals:
            fallback["fundamental_items"] = fundamentals
        return fallback
    prompt = [
        {
            "role": "system",
            "content": (
                "You design practical relationship mitigation protocols. "
                "Return strict JSON only with keys: title, objective, fundamental_items, rules, games. "
                "Each rule item must have title and content. "
                "Each game item must have title and instructions. "
                "If mode is auto, choose strongest artifacts. If mode is guided, strongly follow inspiration. "
                "Output profile controls emphasis: framework (balanced), rules (rules-first), games (games-first)."
            ),
        },
        {
            "role": "user",
            "content": (
                f"Person: {person.name}\n"
                f"Mode: {creation_mode}\n"
                f"Output profile: {output_profile}\n"
                f"User inspiration: {inspiration or 'None'}\n"
                f"Fundamental items (pre-agreed): {json.dumps(fundamentals)}\n\n"
                f"Pattern analysis:\n{source_text}"
            ),
        },
    ]
    # Model call is best-effort; an empty response routes to the fallback.
    try:
        raw = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
    except Exception:
        raw = ""
    parsed = _extract_json_object(raw) or {}
    title = (parsed.get("title") or "").strip() or fallback["title"]
    objective = (parsed.get("objective") or "").strip() or fallback["objective"]
    parsed_fundamentals = parsed.get("fundamental_items")
    if isinstance(parsed_fundamentals, list):
        merged_fundamentals = [str(item).strip() for item in parsed_fundamentals if str(item).strip()]
    else:
        merged_fundamentals = []
    if fundamentals:
        # Pre-agreed fundamentals take precedence; dict.fromkeys keeps order
        # while removing duplicates.
        merged_fundamentals = list(dict.fromkeys(fundamentals + merged_fundamentals))
    # Validate each model-provided rule/game; drop malformed entries.
    raw_rules = parsed.get("rules")
    rules = []
    if isinstance(raw_rules, list):
        for item in raw_rules:
            if not isinstance(item, dict):
                continue
            title_i = str(item.get("title") or "").strip()
            content_i = str(item.get("content") or "").strip()
            if title_i and content_i:
                rules.append({"title": title_i, "content": content_i})
    raw_games = parsed.get("games")
    games = []
    if isinstance(raw_games, list):
        for item in raw_games:
            if not isinstance(item, dict):
                continue
            title_i = str(item.get("title") or "").strip()
            instructions_i = str(item.get("instructions") or "").strip()
            if title_i and instructions_i:
                games.append({"title": title_i, "instructions": instructions_i})
    if not rules:
        rules = fallback["rules"]
    if not games:
        games = fallback["games"]
    rules, games = _shape_artifacts_for_profile(rules, games, output_profile)
    return {
        "title": title,
        "objective": objective,
        "fundamental_items": merged_fundamentals,
        "rules": rules,
        "games": games,
        "corrections": [],
    }
def _serialize_export_payload(plan, artifact_type, export_format):
    """
    Render a plan (or one artifact subset) into an export payload.

    Args:
        plan: plan instance with related rules/games/corrections.
        artifact_type: 'rulebook' for everything, or 'rules'/'games'/
            'corrections' to export a single category.
        export_format: 'json' for pretty-printed JSON; anything else yields a
            markdown document.

    Returns:
        (payload_str, meta_dict); meta carries the full plan's per-category
        counts (counts are not reduced for subset exports).
    """
    rules = list(plan.rules.order_by("created_at").values("title", "content", "enabled"))
    games = list(plan.games.order_by("created_at").values("title", "instructions", "enabled"))
    corrections = list(plan.corrections.order_by("created_at").values("title", "clarification", "enabled"))
    body = {
        "protocol_version": "artifact-v1",
        "plan_id": str(plan.id),
        "plan_title": plan.title,
        "objective": plan.objective,
        "fundamental_items": plan.fundamental_items or [],
        "rules": rules,
        "games": games,
        "corrections": corrections,
    }
    # Subset exports blank out the other categories (affects the JSON body).
    if artifact_type == "rules":
        body = {
            **body,
            "games": [],
            "corrections": [],
        }
    elif artifact_type == "games":
        body = {
            **body,
            "rules": [],
            "corrections": [],
        }
    elif artifact_type == "corrections":
        body = {
            **body,
            "rules": [],
            "games": [],
        }
    if export_format == "json":
        payload = json.dumps(body, indent=2)
    else:
        lines = [
            f"# {plan.title or 'Pattern Mitigation Artifact'}",
            "",
            "Protocol: artifact-v1",
            f"Artifact Type: {artifact_type}",
            "",
            "## Objective",
            plan.objective or "(none)",
            "",
            "## Fundamental Items",
        ]
        fundamentals = plan.fundamental_items or []
        if fundamentals:
            lines.extend([f"- {item}" for item in fundamentals])
        else:
            lines.append("- (none)")
        # Markdown sections are gated per artifact type; 'rulebook' gets all.
        if artifact_type in {"rulebook", "rules"}:
            lines.append("")
            lines.append("## Rules")
            if rules:
                for idx, rule in enumerate(rules, start=1):
                    lines.append(f"{idx}. **{rule['title']}** - {rule['content']}")
            else:
                lines.append("- (none)")
        if artifact_type in {"rulebook", "games"}:
            lines.append("")
            lines.append("## Games")
            if games:
                for idx, game in enumerate(games, start=1):
                    lines.append(f"{idx}. **{game['title']}** - {game['instructions']}")
            else:
                lines.append("- (none)")
        if artifact_type in {"rulebook", "corrections"}:
            lines.append("")
            lines.append("## Corrections")
            if corrections:
                for idx, correction in enumerate(corrections, start=1):
                    lines.append(f"{idx}. **{correction['title']}** - {correction['clarification']}")
            else:
                lines.append("- (none)")
        payload = "\n".join(lines)
    meta = {
        "rule_count": len(rules),
        "game_count": len(games),
        "correction_count": len(corrections),
        "fundamental_count": len(plan.fundamental_items or []),
    }
    return payload, meta
def _conversation_for_person(user, person):
    """
    Return the per-person Signal workspace conversation, creating it on first
    access and ensuring the person is registered as a participant.
    """
    lookup = {
        "user": user,
        "platform_type": "signal",
        "title": f"{person.name} Workspace",
    }
    conversation, _created = WorkspaceConversation.objects.get_or_create(
        defaults={"platform_thread_id": str(person.id)},
        **lookup,
    )
    conversation.participants.add(person)
    return conversation
def _parse_fundamentals(raw_text):
lines = []
for line in (raw_text or "").splitlines():
cleaned = line.strip()
if cleaned:
lines.append(cleaned)
return lines
def _engage_source_options(plan):
    """
    Build select-widget options for every artifact on the plan, encoded as
    'kind:id' values with 'Kind: title' labels (rules, then games, then
    corrections, each in creation order).
    """
    groups = (
        ("rule", "Rule", plan.rules.order_by("created_at")),
        ("game", "Game", plan.games.order_by("created_at")),
        ("correction", "Correction", plan.corrections.order_by("created_at")),
    )
    options = []
    for kind, label, queryset in groups:
        for artifact in queryset:
            options.append(
                {
                    "value": f"{kind}:{artifact.id}",
                    "label": f"{label}: {artifact.title}",
                }
            )
    return options
def _normalize_correction_title(value, fallback="Correction"):
cleaned = re.sub(r"\s+", " ", str(value or "").strip())
cleaned = cleaned.strip("\"'` ")
if not cleaned:
return fallback
# Capitalize each lexical token for consistent correction naming.
words = []
for token in cleaned.split(" "):
if not token:
continue
if token.isupper() and len(token) <= 4:
words.append(token)
else:
words.append(token[:1].upper() + token[1:])
return " ".join(words)
def _build_engage_payload(
source_obj,
source_kind,
share_target,
framing,
context_note,
owner_name,
recipient_name,
):
share_key = (share_target or "self").strip().lower()
framing_key = (framing or "dont_change").strip().lower()
if share_key not in {"self", "other", "both"}:
share_key = "self"
if framing_key != "shared":
framing_key = "dont_change"
artifact_type_label = {
"rule": "Rule",
"game": "Game",
"correction": "Correction",
}.get(source_kind, (source_kind or "Artifact").title())
artifact_name_raw = (getattr(source_obj, "title", None) or f"{artifact_type_label} Item").strip()
artifact_name = (
_normalize_correction_title(artifact_name_raw, fallback=f"{artifact_type_label} Item")
if source_kind == "correction"
else artifact_name_raw
)
if source_kind == "rule":
insight_text = source_obj.content.strip() or source_obj.title.strip()
elif source_kind == "game":
insight_text = source_obj.instructions.strip() or source_obj.title.strip()
else:
insight_text = source_obj.clarification.strip() or source_obj.title.strip()
owner_label = (owner_name or "You").strip()
recipient_label = (recipient_name or "Other").strip()
def _clean_text(value):
cleaned = re.sub(r"\s+", " ", (value or "").strip())
cleaned = cleaned.strip("\"' ")
cleaned = re.sub(r"^\s*#{1,6}\s*", "", cleaned)
cleaned = re.sub(r"\*\*(.*?)\*\*", r"\1", cleaned)
cleaned = re.sub(r"__(.*?)__", r"\1", cleaned)
cleaned = re.sub(r"`(.*?)`", r"\1", cleaned)
cleaned = re.sub(r"^[\-*•]\s*", "", cleaned)
cleaned = re.sub(r"^\d+[.)]\s*", "", cleaned)
return cleaned.strip()
def _split_sentences(value):
parts = []
for line in (value or "").splitlines():
line = _clean_text(line)
if not line:
continue
for piece in re.split(r"(?<=[.!?;])\s+", line.strip()):
piece = piece.strip()
if piece:
parts.append(piece)
return parts
def _expand_shorthand_tokens(value):
text = value or ""
alias_map = {}
for name in [owner_label, recipient_label]:
lowered = name.lower()
if lowered in {"you", "we", "us", "our", "i", "me", "other"}:
continue
initial = lowered[:1]
if initial and initial not in alias_map:
alias_map[initial] = name
# Expand any known initial shorthand before modal verbs.
for initial, name in alias_map.items():
text = re.sub(
rf"(?i)\b{re.escape(initial)}\s+(?=(?:should|will|must|can|need to|needs to|have to|has to|am|are|is|was|were)\b)",
f"{name} ",
text,
)
def replace_quoted_marker(match):
marker = match.group(1) or ""
lower_marker = marker.strip().lower()
replacement = marker
if lower_marker in alias_map:
replacement = alias_map[lower_marker]
elif lower_marker in {"you", "we", "i"}:
replacement = "both parties"
return f"('{replacement}')"
text = re.sub(
r"\(\s*['\"]([A-Za-z]{1,8})['\"]\s*\)",
replace_quoted_marker,
text,
)
return text
def _fix_shared_grammar(value):
text = value
replacements = [
(r"(?i)\bwe needs to\b", "we need to"),
(r"(?i)\bwe has to\b", "we have to"),
(r"(?i)\bwe is\b", "we are"),
(r"(?i)\bwe was\b", "we were"),
(r"(?i)\bus needs to\b", "we need to"),
(r"(?i)\bus need to\b", "we need to"),
(r"(?i)\bus is\b", "we are"),
(r"(?i)\bus are\b", "we are"),
(r"(?i)\bwe does not\b", "we do not"),
(r"(?i)\bwe doesn't\b", "we don't"),
(r"(?i)\bwe says\b", "we say"),
(r"(?i)\bwe responds\b", "we respond"),
(r"(?i)\bwe follows\b", "we follow"),
]
for pattern, replacement in replacements:
text = re.sub(pattern, replacement, text)
return re.sub(r"\s+", " ", text).strip()
def _rewrite_shared_sentence(sentence):
text = _clean_text(sentence)
if not text:
return ""
punctuation = "."
if text[-1] in ".!?":
punctuation = text[-1]
text = text[:-1].strip()
text = _clean_text(text)
if not text:
return ""
# Shared-only edge-case logic.
text = _expand_shorthand_tokens(text)
shared_replacements = [
(r"\bi[']m\b", "we're"),
(r"\bi[']ve\b", "we've"),
(r"\bi[']ll\b", "we'll"),
(r"\bi[']d\b", "we'd"),
(r"\byou[']re\b", "we're"),
(r"\byou[']ve\b", "we've"),
(r"\byou[']ll\b", "we'll"),
(r"\byou[']d\b", "we'd"),
(r"\bmy\b", "our"),
(r"\bmine\b", "ours"),
(r"\bmyself\b", "ourselves"),
(r"\byour\b", "our"),
(r"\byours\b", "ours"),
(r"\byourself\b", "ourselves"),
(r"\byourselves\b", "ourselves"),
(r"\bi\b", "we"),
(r"\bme\b", "us"),
(r"\byou\b", "we"),
(r"\bhis\b", "our"),
(r"\bher\b", "our"),
(r"\btheir\b", "our"),
(r"\bhim\b", "us"),
(r"\bthem\b", "us"),
(r"\bhe\b", "we"),
(r"\bshe\b", "we"),
(r"\bthey\b", "we"),
]
for pattern, replacement in shared_replacements:
text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
for name in [owner_label, recipient_label]:
name = str(name or "").strip()
if not name:
continue
if name.lower() in {"you", "we", "us", "our", "i", "me", "other"}:
continue
text = re.sub(
r"(?<!\w)" + re.escape(name) + r"'s(?!\w)",
"our",
text,
flags=re.IGNORECASE,
)
text = re.sub(
r"(?<!\w)" + re.escape(name) + r"(?!\w)",
"we",
text,
flags=re.IGNORECASE,
)
text = _fix_shared_grammar(text).rstrip(".!?").strip()
if not text:
return ""
if not re.match(
r"(?i)^we\s+(?:should|will|must|can|need to|have to|do not|don't|are|were|[a-z]+)\b",
text,
):
lowered = text[:1].lower() + text[1:] if len(text) > 1 else text.lower()
text = f"We should {lowered}"
text = text[:1].upper() + text[1:]
return f"{text}{punctuation}"
def _rewrite_shared_text(value):
sentences = _split_sentences(value)
if not sentences:
return ""
adapted = [_rewrite_shared_sentence(sentence) for sentence in sentences]
adapted = [part for part in adapted if part]
return " ".join(adapted).strip()
preview_lines = []
outbound_lines = []
def _format_artifact_message(lines):
lines = [
f"**{artifact_name}** ({artifact_type_label})",
"",
"Guidance:",
] + [line.strip() for line in (lines or []) if (line or "").strip()]
if lines[-1] == "Guidance:":
lines.append("No guidance text available.")
return "\n".join(lines).strip()
if framing_key == "shared":
shared_line = _rewrite_shared_text(insight_text)
preview_lines = [shared_line]
outbound_lines = [shared_line]
else:
unchanged = _clean_text(insight_text) or (insight_text or "").strip()
preview_lines = [unchanged]
outbound_lines = [unchanged]
preview = _format_artifact_message(preview_lines)
outbound = _format_artifact_message(outbound_lines)
# Context note is metadata for the operator and should not alter shared outbound text.
_ = (context_note or "").strip()
return {
"preview": preview,
"outbound": outbound,
"share_target": share_key,
"framing": framing_key,
}
def _get_or_create_auto_settings(user, conversation):
    """Fetch the per-conversation automation settings row, creating it on first use."""
    record, _created = PatternMitigationAutoSettings.objects.get_or_create(
        user=user,
        conversation=conversation,
    )
    return record
def _detect_violation_candidates(plan, recent_rows):
candidates = []
for row in recent_rows:
text = (row.get("text") or "").strip()
if not text:
continue
upper_ratio = (
(sum(1 for c in text if c.isupper()) / max(1, sum(1 for c in text if c.isalpha())))
if any(c.isalpha() for c in text)
else 0
)
if upper_ratio > 0.6 and len(text) > 10:
candidates.append(
{
"title": "Escalated tone spike",
"source_phrase": text[:500],
"clarification": "Rephrase into one direct request and one feeling statement.",
"severity": "medium",
}
)
lowered = text.lower()
if "you always" in lowered or "you never" in lowered:
candidates.append(
{
"title": "Absolute framing",
"source_phrase": text[:500],
"clarification": "Replace absolutes with one concrete example and a bounded request.",
"severity": "medium",
}
)
return candidates
def _normalize_violation_items(raw_items):
    """
    Validate and deduplicate model-reported violations into the shape stored
    on corrections, clamping field lengths and unknown severities to 'medium'.
    """
    accepted = []
    seen_keys = set()
    for item in raw_items or []:
        title = _normalize_correction_title(item.get("title") or "", fallback="Correction")
        phrase = str(item.get("source_phrase") or "").strip()
        clarification = str(item.get("clarification") or item.get("correction") or "").strip()
        severity = str(item.get("severity") or "medium").strip().lower()
        if severity not in {"low", "medium", "high"}:
            severity = "medium"
        if not title or not clarification:
            continue
        signature = _correction_signature(title, clarification)
        if signature in seen_keys:
            continue
        seen_keys.add(signature)
        accepted.append(
            {
                "title": title[:255],
                "source_phrase": phrase[:1000],
                "clarification": clarification[:2000],
                "severity": severity,
            }
        )
    return accepted
def _normalize_correction_text(value):
cleaned = re.sub(r"\s+", " ", str(value or "").strip())
cleaned = cleaned.strip("\"'` ")
cleaned = cleaned.rstrip(" .;:")
return cleaned
def _correction_signature(title, clarification):
    """
    Normalized key used to deduplicate corrections before persisting.
    Source phrase is intentionally excluded so the same correction guidance
    cannot be stored repeatedly with minor phrase variations.
    """
    return (
        _normalize_correction_text(title).lower(),
        _normalize_correction_text(clarification).lower(),
    )
def _existing_correction_signatures(plan, exclude_id=None):
    """
    Collect dedupe signatures for the plan's stored corrections, optionally
    skipping one row (useful when editing it in place).
    """
    rows = plan.corrections.all()
    if exclude_id is not None:
        rows = rows.exclude(id=exclude_id)
    return {
        _correction_signature(row.get("title") or "", row.get("clarification") or "")
        for row in rows.values("title", "clarification")
    }
def _ai_detect_violations(user, plan, person, recent_rows):
    """
    Ask the user's first configured AI to flag violations of the plan's
    enabled rules/games/corrections within `recent_rows`.

    Returns normalized violation dicts (see _normalize_violation_items);
    returns [] when no AI is configured or the model call/parse fails.
    """
    ai_obj = AI.objects.filter(user=user).first()
    if ai_obj is None:
        return []
    # Cap each artifact category at 30 rows to bound the prompt size.
    rules_payload = [
        {"id": str(rule.id), "title": rule.title, "content": rule.content}
        for rule in plan.rules.filter(enabled=True).order_by("created_at")[:30]
    ]
    games_payload = [
        {"id": str(game.id), "title": game.title, "instructions": game.instructions}
        for game in plan.games.filter(enabled=True).order_by("created_at")[:30]
    ]
    corrections_payload = [
        {
            "id": str(correction.id),
            "title": correction.title,
            "source_phrase": correction.source_phrase,
            "clarification": correction.clarification,
        }
        for correction in plan.corrections.filter(enabled=True).order_by("created_at")[:30]
    ]
    source_payload = {
        "person": person.name,
        "plan": {
            "id": str(plan.id),
            "title": plan.title,
            "objective": plan.objective,
            "fundamentals": plan.fundamental_items or [],
            "rules": rules_payload,
            "games": games_payload,
            "corrections": corrections_payload,
        },
        "recent_messages": recent_rows,
        # Describes the exact JSON shape the model must return.
        "output_schema": {
            "violations": [
                {
                    "title": "short string",
                    "source_phrase": "exact snippet from recent_messages",
                    "clarification": "correction-style guidance",
                    "severity": "low|medium|high",
                }
            ]
        },
    }
    prompt = [
        {
            "role": "system",
            "content": (
                "You detect violations of mitigation patterns in a conversation. "
                "Return strict JSON only. No markdown. No prose wrapper. "
                "Use only schema keys requested."
            ),
        },
        {
            "role": "user",
            "content": json.dumps(source_payload, ensure_ascii=False),
        },
    ]
    # Model failures are treated as "no violations found".
    try:
        raw = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
    except Exception:
        return []
    parsed = _extract_json_object(raw) or {}
    return _normalize_violation_items(parsed.get("violations") or [])
def _maybe_send_auto_notification(user, auto_settings, title, body):
    """Deliver an automation alert, honouring any per-plan ntfy overrides.

    When no topic override is configured, fall back to the user's default
    notification channel.
    """
    override_topic = (auto_settings.ntfy_topic_override or "").strip()
    if not override_topic:
        user.sendmsg(body, title=title)
        return
    raw_sendmsg(
        body,
        title=title,
        url=(auto_settings.ntfy_url_override or None),
        topic=override_topic,
    )
def _run_auto_analysis_for_plan(user, person, conversation, plan, auto_settings, trigger="manual"):
    """Run one automation pass for ``plan``: detect violations, optionally
    create corrections, and optionally notify the user.

    ``trigger`` is "manual" or "auto". The "auto" path additionally honours
    the auto_pattern_recognition toggle, the cooldown window, and the
    last-checked message timestamp; a manual run skips those gates.

    Returns a dict with keys: ran, summary, violations,
    created_corrections, notified.
    """
    if not auto_settings.enabled:
        return {
            "ran": False,
            "summary": "Automation is disabled.",
            "violations": [],
            "created_corrections": 0,
            "notified": False,
        }
    if trigger == "auto" and not auto_settings.auto_pattern_recognition:
        return {
            "ran": False,
            "summary": "Automatic pattern recognition is disabled.",
            "violations": [],
            "created_corrections": 0,
            "notified": False,
        }
    now = dj_timezone.now()
    # Cooldown gate applies only to automatic runs.
    if trigger == "auto" and auto_settings.last_run_at and auto_settings.check_cooldown_seconds:
        elapsed = (now - auto_settings.last_run_at).total_seconds()
        if elapsed < auto_settings.check_cooldown_seconds:
            return {
                "ran": False,
                "summary": "Skipped: cooldown active.",
                "violations": [],
                "created_corrections": 0,
                "notified": False,
            }
    # Clamp the sample window to 10..200 messages; default 40 when unset.
    limit = max(10, min(int(auto_settings.sample_message_window or 40), 200))
    sessions = ChatSession.objects.filter(user=user, identifier__person=person)
    messages = (
        Message.objects.filter(user=user, session__in=sessions)
        .order_by("-ts")
        .values("id", "ts", "sender_uuid", "text")[:limit]
    )
    # Re-order oldest-first and coerce fields into plain JSON-safe values.
    recent_rows = []
    for row in reversed(list(messages)):
        recent_rows.append(
            {
                "id": str(row["id"]),
                "ts": row["ts"],
                "sender_uuid": row["sender_uuid"] or "",
                "text": row["text"] or "",
            }
        )
    if not recent_rows:
        auto_settings.last_result_summary = "No recent messages available for automation."
        auto_settings.last_run_at = now
        # NOTE(review): "updated_at" in update_fields presumably forces an
        # auto-now timestamp to persist — confirm against the model definition.
        auto_settings.save(update_fields=["last_result_summary", "last_run_at", "updated_at"])
        return {
            "ran": True,
            "summary": auto_settings.last_result_summary,
            "violations": [],
            "created_corrections": 0,
            "notified": False,
        }
    latest_message_ts = recent_rows[-1]["ts"]
    # Skip automatic runs when nothing new arrived since the last check.
    if trigger == "auto" and auto_settings.last_checked_event_ts and latest_message_ts <= auto_settings.last_checked_event_ts:
        return {
            "ran": False,
            "summary": "Skipped: no new messages since last check.",
            "violations": [],
            "created_corrections": 0,
            "notified": False,
        }
    # Merge AI-detected and heuristic candidates, then normalize/de-duplicate.
    ai_candidates = _ai_detect_violations(user, plan, person, recent_rows)
    heuristic_candidates = _detect_violation_candidates(plan, recent_rows)
    violations = _normalize_violation_items(ai_candidates + heuristic_candidates)
    created_corrections = 0
    if auto_settings.auto_create_corrections and violations:
        # Cap at 8 new corrections per run; skip signature duplicates.
        existing_signatures = _existing_correction_signatures(plan)
        for item in violations[:8]:
            signature = _correction_signature(item["title"], item["clarification"])
            if signature in existing_signatures:
                continue
            PatternMitigationCorrection.objects.create(
                user=user,
                plan=plan,
                title=item["title"],
                source_phrase=item["source_phrase"],
                clarification=item["clarification"],
                perspective="second_person",
                share_target="both",
                language_style="adapted",
                enabled=True,
            )
            existing_signatures.add(signature)
            created_corrections += 1
    notified = False
    if auto_settings.auto_notify_enabled and violations:
        # Notification previews at most the first three violations.
        title = f"[GIA] Auto pattern alerts for {person.name}"
        preview = "\n".join(
            [f"- {item['title']}: {item['clarification']}" for item in violations[:3]]
        )
        body = (
            f"Detected {len(violations)} potential mitigation violations.\n"
            f"Created corrections: {created_corrections}\n\n"
            f"{preview}"
        )
        _maybe_send_auto_notification(user, auto_settings, title, body)
        notified = True
    summary = (
        f"Auto analysis ran on {len(recent_rows)} messages. "
        f"Detected {len(violations)} candidates. "
        f"Created {created_corrections} corrections."
    )
    # Persist run bookkeeping so subsequent auto runs can gate correctly.
    auto_settings.last_result_summary = summary
    auto_settings.last_run_at = now
    auto_settings.last_checked_event_ts = latest_message_ts
    auto_settings.save(
        update_fields=[
            "last_result_summary",
            "last_run_at",
            "last_checked_event_ts",
            "updated_at",
        ]
    )
    return {
        "ran": True,
        "summary": summary,
        "violations": violations,
        "created_corrections": created_corrections,
        "notified": notified,
    }
def _create_baseline_mitigation_plan(user, person, conversation, source_text=""):
    """Auto-create a default mitigation plan seeded from pattern heuristics.

    Persists the plan plus its seeded rules and games, and records a system
    message noting the automated creation. Returns the new plan.
    """
    seed = _default_artifacts_from_patterns(
        source_text or f"{person.name} baseline mitigation",
        person,
        output_profile="framework",
    )
    plan = PatternMitigationPlan.objects.create(
        user=user,
        conversation=conversation,
        source_ai_result=None,
        title=seed.get("title") or f"{person.name} Pattern Mitigation",
        objective=seed.get("objective") or "",
        fundamental_items=seed.get("fundamental_items") or [],
        creation_mode="auto",
        status="draft",
    )
    for rule_item in seed.get("rules", []):
        # Titles are truncated to the model's 255-char limit.
        PatternMitigationRule.objects.create(
            user=user,
            plan=plan,
            title=str(rule_item.get("title") or "Rule").strip()[:255],
            content=str(rule_item.get("content") or "").strip(),
        )
    for game_item in seed.get("games", []):
        PatternMitigationGame.objects.create(
            user=user,
            plan=plan,
            title=str(game_item.get("title") or "Game").strip()[:255],
            instructions=str(game_item.get("instructions") or "").strip(),
        )
    PatternMitigationMessage.objects.create(
        user=user,
        plan=plan,
        role="system",
        text="Baseline plan auto-created by automation settings.",
    )
    return plan
def _mitigation_panel_context(
    person,
    plan,
    notice_message="",
    notice_level="info",
    export_record=None,
    engage_preview="",
    engage_preview_flash=False,
    engage_form=None,
    active_tab="plan_board",
    auto_settings=None,
):
    """Assemble the full template context for the mitigation-panel partial."""
    form_data = engage_form or {}
    source_options = _engage_source_options(plan)
    # Default the engage source to the first available option.
    fallback_ref = source_options[0]["value"] if source_options else ""
    chosen_ref = form_data.get("source_ref") or fallback_ref
    auto_settings = auto_settings or _get_or_create_auto_settings(plan.user, plan.conversation)
    context = {
        "person": person,
        "plan": plan,
        "rules": plan.rules.order_by("created_at"),
        "games": plan.games.order_by("created_at"),
        "corrections": plan.corrections.order_by("created_at"),
        "fundamentals_text": "\n".join(plan.fundamental_items or []),
        "mitigation_messages": plan.messages.order_by("created_at")[:40],
        "latest_export": export_record,
        "notice_message": notice_message,
        "notice_level": notice_level,
        "engage_preview": engage_preview,
        "engage_preview_flash": engage_preview_flash,
        "engage_options": source_options,
        "engage_form": {
            "source_ref": chosen_ref,
            "share_target": form_data.get("share_target") or "self",
            "framing": form_data.get("framing") or "dont_change",
            "context_note": form_data.get("context_note") or "",
        },
        "send_state": _get_send_state(plan.user, person),
        "active_tab": _sanitize_active_tab(active_tab),
        "auto_settings": auto_settings,
    }
    return context
def _latest_plan_bundle(conversation):
    """Fetch the most recently updated plan plus its related rows.

    Returns a dict of template-ready values; list-like entries are empty
    (and the export is None) when the conversation has no plan yet.
    """
    plan = conversation.mitigation_plans.order_by("-updated_at").first()
    if plan is None:
        rules = []
        games = []
        corrections = []
        plan_messages = []
        export_row = None
    else:
        rules = plan.rules.order_by("created_at")
        games = plan.games.order_by("created_at")
        corrections = plan.corrections.order_by("created_at")
        plan_messages = plan.messages.order_by("created_at")[:40]
        export_row = plan.exports.order_by("-created_at").first()
    return {
        "latest_plan": plan,
        "latest_plan_rules": rules,
        "latest_plan_games": games,
        "latest_plan_corrections": corrections,
        "latest_plan_messages": plan_messages,
        "latest_plan_export": export_row,
        "latest_auto_settings": _get_or_create_auto_settings(conversation.user, conversation),
    }
class AIWorkspace(LoginRequiredMixin, View):
    """Shell page for the AI workspace; widgets load via separate views."""

    template_name = "pages/ai-workspace.html"

    def get(self, request):
        """Render the workspace page for the logged-in user."""
        template = self.template_name
        return render(request, template)
class AIWorkspaceContactsWidget(LoginRequiredMixin, View):
    """Widget listing the user's contacts, most recently active first."""

    allowed_types = {"widget"}

    def _contact_rows(self, user):
        """Build one row per person: message count, last-message preview, ts.

        NOTE(review): this issues several queries per person; acceptable for
        small contact lists, revisit with aggregation if it grows.
        """
        entries = []
        for person in Person.objects.filter(user=user).order_by("name"):
            person_sessions = ChatSession.objects.filter(user=user, identifier__person=person)
            person_messages = Message.objects.filter(user=user, session__in=person_sessions)
            newest = person_messages.order_by("-ts").first()
            entries.append(
                {
                    "person": person,
                    "message_count": person_messages.count(),
                    "last_text": (newest.text or "")[:120] if newest else "",
                    "last_ts": newest.ts if newest else None,
                    "last_ts_label": _format_unix_ms(newest.ts) if newest else "",
                }
            )
        # Newest activity first; people with no messages sort to the bottom.
        entries.sort(key=lambda entry: entry["last_ts"] or 0, reverse=True)
        return entries

    def get(self, request, type):
        """Render the contacts widget; only type == "widget" is accepted."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        context = {
            "title": "AI Workspace",
            "unique": "ai-workspace-contacts",
            "window_content": "partials/ai-workspace-widget.html",
            "widget_options": 'gs-w="4" gs-h="14" gs-x="0" gs-y="0" gs-min-w="3"',
            "contact_rows": self._contact_rows(request.user),
            "window_form": AIWorkspaceWindowForm(request.GET or None),
        }
        return render(request, "mixins/wm/widget.html", context)
class AIWorkspacePersonWidget(LoginRequiredMixin, View):
    """Per-person timeline widget with recent messages and AI actions."""

    allowed_types = {"widget"}

    def _message_rows(self, user, person, limit):
        """Build template rows (message, inferred direction, ts label) for
        the person's last ``limit`` messages, oldest first.

        Delegates the message query to _recent_messages so both code paths
        stay in sync (previously the same query was duplicated here).
        """
        identifiers = set(
            PersonIdentifier.objects.filter(user=user, person=person).values_list("identifier", flat=True)
        )
        rows = []
        for message in self._recent_messages(user, person, limit):
            rows.append(
                {
                    "message": message,
                    "direction": _infer_direction(message, identifiers),
                    "ts_label": _format_unix_ms(message.ts),
                }
            )
        return rows

    def _recent_messages(self, user, person, limit):
        """Return the person's last ``limit`` messages in ascending ts order."""
        sessions = ChatSession.objects.filter(user=user, identifier__person=person)
        messages = (
            Message.objects.filter(user=user, session__in=sessions)
            .select_related("session", "session__identifier")
            .order_by("-ts")[:limit]
        )
        return list(reversed(list(messages)))

    def get(self, request, type, person_id):
        """Render the timeline widget for one person.

        ``limit`` comes from the query string, clamped to 5..200 (default 20).
        """
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        try:
            limit = int(request.GET.get("limit", 20))
        except (TypeError, ValueError):
            limit = 20
        limit = max(5, min(limit, 200))
        context = {
            "title": f"{person.name} Timeline",
            "unique": f"ai-person-{person.id}",
            "window_content": "partials/ai-workspace-person-widget.html",
            "widget_options": 'gs-w="7" gs-h="16" gs-x="0" gs-y="0" gs-min-w="4"',
            "person": person,
            "limit": limit,
            "message_rows": self._message_rows(request.user, person, limit),
            "ai_operations": [
                ("artifacts", "Plan"),
                ("summarise", "Summary"),
                ("draft_reply", "Draft"),
                ("extract_patterns", "Patterns"),
            ],
            "send_state": _get_send_state(request.user, person),
        }
        return render(request, "mixins/wm/widget.html", context)
class AIWorkspaceRunOperation(LoginRequiredMixin, View):
    """Run one AI operation (artifacts/summarise/draft_reply/extract_patterns)
    over a person's recent messages and render the result partial.

    NOTE(review): this GET handler performs writes (conversation bookkeeping,
    MessageEvent materialization, AIRequest/AIResult rows) — confirm that is
    intentional for the htmx flow.
    """
    allowed_types = {"widget"}
    allowed_operations = {"artifacts", "summarise", "draft_reply", "extract_patterns"}
    def _ensure_message_events(self, user, conversation, person_identifiers, messages):
        """
        Materialize workspace MessageEvent rows from legacy Message rows and
        return ordered event IDs for the selected window.

        Events are keyed by raw_payload_ref.legacy_message_id; an existing
        event is updated in place when its legacy message row changed.
        """
        event_ids = []
        for message in messages:
            legacy_id = str(message.id)
            event = MessageEvent.objects.filter(
                user=user,
                conversation=conversation,
                raw_payload_ref__legacy_message_id=legacy_id,
            ).first()
            if event is None:
                event = MessageEvent.objects.create(
                    user=user,
                    conversation=conversation,
                    source_system="signal",
                    ts=message.ts,
                    direction=_infer_direction(message, person_identifiers),
                    sender_uuid=message.sender_uuid or "",
                    text=message.text or "",
                    attachments=[],
                    raw_payload_ref={"legacy_message_id": legacy_id},
                )
            else:
                # Keep event fields in sync if upstream message rows changed.
                update_fields = []
                new_direction = _infer_direction(message, person_identifiers)
                if event.ts != message.ts:
                    event.ts = message.ts
                    update_fields.append("ts")
                if event.direction != new_direction:
                    event.direction = new_direction
                    update_fields.append("direction")
                if event.sender_uuid != (message.sender_uuid or ""):
                    event.sender_uuid = message.sender_uuid or ""
                    update_fields.append("sender_uuid")
                if event.text != (message.text or ""):
                    event.text = message.text or ""
                    update_fields.append("text")
                if update_fields:
                    event.save(update_fields=update_fields)
            event_ids.append(str(event.id))
        return event_ids
    def _build_prompt(self, operation, person, transcript, user_notes):
        """Build the chat prompt for ``operation``.

        The else branch is the summarise instruction; "artifacts" never
        reaches this method because get() returns earlier for it.
        """
        notes = (user_notes or "").strip()
        if operation == "draft_reply":
            instruction = (
                "Generate 3 concise reply options in different tones: soft, neutral, firm. "
                "Return plain text with clear section labels."
            )
        elif operation == "extract_patterns":
            instruction = (
                "Extract recurring interaction patterns, friction loops, and practical next-step rules. "
                "Keep it actionable and concise."
            )
        else:
            instruction = (
                "Summarize this conversation window with key points, emotional state shifts, and open loops."
            )
        prompt = [
            {"role": "system", "content": instruction},
            {
                "role": "user",
                "content": (
                    f"Person: {person.name}\n"
                    f"Notes: {notes or 'None'}\n\n"
                    f"Conversation:\n{transcript}"
                ),
            },
        ]
        return prompt
    def get(self, request, type, person_id, operation):
        """Validate inputs, run (or short-circuit) the operation, render result."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        if operation not in self.allowed_operations:
            return HttpResponseBadRequest("Invalid operation specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        send_state = _get_send_state(request.user, person)
        conversation = _conversation_for_person(request.user, person)
        if operation == "artifacts":
            # "artifacts" renders the plan panel without an AI prompt run;
            # automation may auto-create a baseline plan or run analysis.
            auto_settings = _get_or_create_auto_settings(request.user, conversation)
            plan_bundle = _latest_plan_bundle(conversation)
            mitigation_notice_message = ""
            mitigation_notice_level = "info"
            if (
                plan_bundle["latest_plan"] is None
                and auto_settings.enabled
                and auto_settings.auto_create_mitigation
            ):
                # NOTE(review): unlike _run_auto_analysis_for_plan this has no
                # "or 40" fallback on sample_message_window — confirm the
                # field can never be None here.
                recent_messages = AIWorkspacePersonWidget()._recent_messages(
                    request.user,
                    person,
                    max(20, min(auto_settings.sample_message_window, 200)),
                )
                source_text = messages_to_string(recent_messages) if recent_messages else ""
                _create_baseline_mitigation_plan(
                    user=request.user,
                    person=person,
                    conversation=conversation,
                    source_text=source_text,
                )
                plan_bundle = _latest_plan_bundle(conversation)
                mitigation_notice_message = "Baseline plan auto-created."
                mitigation_notice_level = "success"
            if (
                plan_bundle["latest_plan"] is not None
                and auto_settings.enabled
                and auto_settings.auto_pattern_recognition
            ):
                auto_result = _run_auto_analysis_for_plan(
                    user=request.user,
                    person=person,
                    conversation=conversation,
                    plan=plan_bundle["latest_plan"],
                    auto_settings=auto_settings,
                    trigger="auto",
                )
                if auto_result.get("ran"):
                    mitigation_notice_message = auto_result["summary"]
                    mitigation_notice_level = "info"
                    if auto_result.get("created_corrections"):
                        # Refresh the bundle so new corrections are shown.
                        plan_bundle = _latest_plan_bundle(conversation)
            context = {
                "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()),
                "operation": operation,
                "result_text": "",
                "result_sections": [],
                "error": False,
                "person": person,
                "send_state": send_state,
                "ai_result_id": "",
                "mitigation_notice_message": mitigation_notice_message,
                "mitigation_notice_level": mitigation_notice_level,
                **plan_bundle,
            }
            return render(request, "partials/ai-workspace-ai-result.html", context)
        ai_obj = AI.objects.filter(user=request.user).first()
        if ai_obj is None:
            # No AI configured: render the partial in its error state.
            context = {
                "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()),
                "operation": operation,
                "result_text": "No AI configured for this user yet.",
                "result_sections": _parse_result_sections("No AI configured for this user yet."),
                "error": True,
                "person": person,
                "send_state": send_state,
                "latest_plan": None,
                "latest_plan_rules": [],
                "latest_plan_games": [],
                "latest_plan_corrections": [],
                "latest_plan_messages": [],
                "latest_plan_export": None,
            }
            return render(request, "partials/ai-workspace-ai-result.html", context)
        # Message window size from the query string, clamped to 5..200.
        try:
            limit = int(request.GET.get("limit", 20))
        except (TypeError, ValueError):
            limit = 20
        limit = max(5, min(limit, 200))
        user_notes = request.GET.get("user_notes", "")
        messages = AIWorkspacePersonWidget()._recent_messages(request.user, person, limit)
        transcript = messages_to_string(messages)
        person_identifiers = set(
            PersonIdentifier.objects.filter(
                user=request.user,
                person=person,
            ).values_list("identifier", flat=True)
        )
        if messages:
            # messages are oldest-first, so [-1] is the newest in the window.
            conversation.last_event_ts = messages[-1].ts
            conversation.save(update_fields=["last_event_ts"])
        message_event_ids = self._ensure_message_events(
            request.user,
            conversation,
            person_identifiers,
            messages,
        )
        # Record the request before running so failures are auditable.
        ai_request = AIRequest.objects.create(
            user=request.user,
            conversation=conversation,
            window_spec={"limit": limit},
            message_ids=message_event_ids,
            user_notes=user_notes,
            operation=operation,
            policy_snapshot={"send_state": send_state},
            status="running",
            started_at=dj_timezone.now(),
        )
        try:
            prompt = self._build_prompt(operation, person, transcript, user_notes)
            result_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
            # Only draft_reply output is parsed into discrete options.
            draft_options = _parse_draft_options(result_text) if operation == "draft_reply" else []
            ai_result = AIResult.objects.create(
                user=request.user,
                ai_request=ai_request,
                working_summary=result_text if operation != "draft_reply" else "",
                draft_replies=draft_options,
                interaction_signals=[],
                memory_proposals=[],
                citations=message_event_ids,
            )
            ai_request.status = "done"
            ai_request.finished_at = dj_timezone.now()
            ai_request.save(update_fields=["status", "finished_at"])
            conversation.last_ai_run_at = dj_timezone.now()
            conversation.save(update_fields=["last_ai_run_at"])
            plan_bundle = _latest_plan_bundle(conversation)
            context = {
                "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()),
                "operation": operation,
                "result_text": result_text,
                "result_sections": _parse_result_sections(result_text),
                "draft_replies": ai_result.draft_replies,
                "error": False,
                "person": person,
                "send_state": send_state,
                "ai_result_id": str(ai_result.id),
                **plan_bundle,
            }
        except Exception as exc:
            # Mark the request failed and surface the error text in the UI.
            ai_request.status = "failed"
            ai_request.error = str(exc)
            ai_request.finished_at = dj_timezone.now()
            ai_request.save(update_fields=["status", "error", "finished_at"])
            context = {
                "operation_label": OPERATION_LABELS.get(operation, operation.replace("_", " ").title()),
                "operation": operation,
                "result_text": str(exc),
                "result_sections": _parse_result_sections(str(exc)),
                "error": True,
                "person": person,
                "send_state": send_state,
                "latest_plan": None,
                "latest_plan_rules": [],
                "latest_plan_games": [],
                "latest_plan_corrections": [],
                "latest_plan_messages": [],
                "latest_plan_export": None,
            }
        return render(request, "partials/ai-workspace-ai-result.html", context)
class AIWorkspaceSendDraft(LoginRequiredMixin, View):
    """Send a drafted message to a person and record it as a local Message.

    Responds with the send-status partial; on success also sets an
    HX-Trigger header so the timeline widget can refresh.
    """
    allowed_types = {"widget"}
    def post(self, request, type, person_id):
        """Validate the draft and send state, send, then log the message."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        send_state = _get_send_state(request.user, person)
        text = (request.POST.get("draft_text") or "").strip()
        # force_send lets the operator override a blocked send_state.
        force_send = _is_truthy(request.POST.get("force_send"))
        if not text:
            return render(
                request,
                "partials/ai-workspace-send-status.html",
                {"ok": False, "message": "Draft is empty.", "level": "danger"},
            )
        if not send_state["can_send"] and not force_send:
            return render(
                request,
                "partials/ai-workspace-send-status.html",
                {
                    "ok": False,
                    "message": f"Send blocked. {send_state['text']}",
                    "level": "warning",
                },
            )
        identifier = _resolve_person_identifier(request.user, person)
        if identifier is None:
            return render(
                request,
                "partials/ai-workspace-send-status.html",
                {"ok": False, "message": "No recipient identifier found.", "level": "danger"},
            )
        # identifier.send is async; bridge to this sync view.
        try:
            ts = async_to_sync(identifier.send)(text)
        except Exception as exc:
            return render(
                request,
                "partials/ai-workspace-send-status.html",
                {"ok": False, "message": f"Send failed: {exc}", "level": "danger"},
            )
        session, _ = ChatSession.objects.get_or_create(
            user=request.user,
            identifier=identifier,
        )
        # Prefer the transport-reported timestamp; fall back to local ms time.
        sent_ts = int(ts) if ts else int(dj_timezone.now().timestamp() * 1000)
        Message.objects.create(
            user=request.user,
            session=session,
            custom_author="BOT",
            sender_uuid="",
            text=text,
            ts=sent_ts,
        )
        success_message = "Draft sent."
        if force_send and not send_state["can_send"]:
            success_message = "Draft sent with override."
        response = render(
            request,
            "partials/ai-workspace-send-status.html",
            {"ok": True, "message": success_message, "level": "success"},
        )
        # Client-side listeners use this event to append the sent message.
        response["HX-Trigger"] = json.dumps(
            {
                "gia-message-sent": {
                    "person_id": str(person.id),
                    "ts": sent_ts,
                    "text": text,
                    "author": "BOT",
                }
            }
        )
        return response
class AIWorkspaceQueueDraft(LoginRequiredMixin, View):
    """Queue a draft message for later delivery via an enabled manipulation."""

    allowed_types = {"widget"}

    def _status(self, request, ok, message, level):
        # All outcomes render the same send-status partial.
        return render(
            request,
            "partials/ai-workspace-send-status.html",
            {"ok": ok, "message": message, "level": level},
        )

    def post(self, request, type, person_id):
        """Validate the draft and recipient, then create a QueuedMessage."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        draft = (request.POST.get("draft_text") or "").strip()
        if not draft:
            return self._status(request, False, "Select a draft before queueing.", "warning")
        identifier = _resolve_person_identifier(request.user, person)
        if identifier is None:
            return self._status(request, False, "No recipient identifier found.", "danger")
        manipulation = _get_queue_manipulation(request.user, person)
        if manipulation is None:
            return self._status(
                request,
                False,
                "No enabled manipulation found for this recipient. Queue entry not created.",
                "warning",
            )
        session, _ = ChatSession.objects.get_or_create(
            user=request.user,
            identifier=identifier,
        )
        QueuedMessage.objects.create(
            user=request.user,
            session=session,
            manipulation=manipulation,
            ts=int(dj_timezone.now().timestamp() * 1000),
            sender_uuid="",
            text=draft,
            custom_author="BOT",
        )
        return self._status(request, True, "Draft added to queue.", "success")
class AIWorkspaceCreateMitigation(LoginRequiredMixin, View):
    """Create a mitigation plan for a person, optionally seeded from an
    AIResult and/or pasted user context, and render the mitigation panel."""
    allowed_types = {"widget"}
    def post(self, request, type, person_id):
        """Validate the output profile, build artifacts, persist the plan."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        ai_result_id = (request.POST.get("ai_result_id") or "").strip()
        output_profile = (request.POST.get("output_profile") or "").strip()
        if output_profile not in {"framework", "rule", "rules", "game", "games"}:
            return render(
                request,
                "partials/ai-workspace-mitigation-status.html",
                {
                    "person": person,
                    "level": "warning",
                    "message": "Choose one mitigation output type: framework, rules, or games.",
                },
            )
        user_context = (request.POST.get("user_context") or "").strip()
        # Any pasted context switches creation from "auto" to "guided".
        creation_mode = "guided" if user_context else "auto"
        seed_from_context = _extract_seed_entities_from_context(user_context)
        fundamentals = seed_from_context.get("fundamentals", [])
        source_result = None
        if ai_result_id:
            source_result = AIResult.objects.filter(
                id=ai_result_id,
                user=request.user,
            ).select_related("ai_request", "ai_request__conversation").first()
        # Prefer the conversation attached to the source AI result.
        conversation = (
            source_result.ai_request.conversation
            if source_result is not None
            else _conversation_for_person(request.user, person)
        )
        conversation.participants.add(person)
        # Source text priority: AI working summary, then posted source_text.
        source_text = ""
        if source_result is not None:
            source_text = source_result.working_summary or ""
        if not source_text:
            source_text = (request.POST.get("source_text") or "").strip()
        ai_obj = AI.objects.filter(user=request.user).first()
        artifacts = _build_mitigation_artifacts(
            ai_obj=ai_obj,
            person=person,
            source_text=source_text,
            creation_mode=creation_mode,
            inspiration=user_context,
            fundamentals=fundamentals,
            output_profile=output_profile,
        )
        # Deterministically seed from pasted context so long-form frameworks can
        # create fundamentals/rules/games in one pass, even when AI output is sparse.
        artifacts = _merge_seed_entities(artifacts, seed_from_context)
        plan = PatternMitigationPlan.objects.create(
            user=request.user,
            conversation=conversation,
            source_ai_result=source_result,
            title=artifacts.get("title") or f"{person.name} Pattern Mitigation",
            objective=artifacts.get("objective") or "",
            fundamental_items=artifacts.get("fundamental_items") or fundamentals,
            creation_mode=creation_mode,
            status="draft",
        )
        for rule in artifacts.get("rules", []):
            # Titles truncated to the model's 255-char limit.
            PatternMitigationRule.objects.create(
                user=request.user,
                plan=plan,
                title=str(rule.get("title") or "Rule").strip()[:255],
                content=str(rule.get("content") or "").strip(),
            )
        for game in artifacts.get("games", []):
            PatternMitigationGame.objects.create(
                user=request.user,
                plan=plan,
                title=str(game.get("title") or "Game").strip()[:255],
                instructions=str(game.get("instructions") or "").strip(),
            )
        PatternMitigationMessage.objects.create(
            user=request.user,
            plan=plan,
            role="system",
            text="Plan created. Use the tabs below to refine rules, games, fundamentals, corrections, and AI guidance.",
        )
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message="Mitigation plan created.",
                notice_level="success",
                active_tab="plan_board",
            ),
        )
class AIWorkspaceMitigationChat(LoginRequiredMixin, View):
    """Chat with the AI about refining a mitigation plan; both the user's
    message and the assistant reply are persisted on the plan."""
    allowed_types = {"widget"}
    def post(self, request, type, person_id, plan_id):
        """Store the user message, run the refinement prompt, store the reply."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(
            PatternMitigationPlan,
            id=plan_id,
            user=request.user,
        )
        text = (request.POST.get("message") or "").strip()
        active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="ask_ai")
        if not text:
            return render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    notice_message="Message is empty.",
                    notice_level="warning",
                    active_tab=active_tab,
                ),
            )
        PatternMitigationMessage.objects.create(
            user=request.user,
            plan=plan,
            role="user",
            text=text,
        )
        ai_obj = AI.objects.filter(user=request.user).first()
        assistant_text = ""
        if ai_obj:
            # Summarize current artifacts for the prompt context.
            rules_text = "\n".join([f"- {r.title}: {r.content}" for r in plan.rules.order_by("created_at")])
            games_text = "\n".join([f"- {g.title}: {g.instructions}" for g in plan.games.order_by("created_at")])
            corrections_text = "\n".join([f"- {c.title}: {c.clarification}" for c in plan.corrections.order_by("created_at")])
            # Last 10 chat messages, re-ordered oldest-first for the transcript.
            recent_msgs = plan.messages.order_by("-created_at")[:10]
            recent_msgs = list(reversed(list(recent_msgs)))
            transcript = "\n".join([f"{m.role.upper()}: {m.text}" for m in recent_msgs])
            prompt = [
                {
                    "role": "system",
                    "content": (
                        "You are refining a mitigation protocol. "
                        "Give concise practical updates to rules/games/corrections and explain tradeoffs."
                    ),
                },
                {
                    "role": "user",
                    "content": (
                        f"Plan objective: {plan.objective}\n"
                        f"Fundamentals: {json.dumps(plan.fundamental_items or [])}\n"
                        f"Rules:\n{rules_text or '(none)'}\n\n"
                        f"Games:\n{games_text or '(none)'}\n\n"
                        f"Corrections:\n{corrections_text or '(none)'}\n\n"
                        f"Conversation:\n{transcript}"
                    ),
                },
            ]
            try:
                assistant_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
            except Exception as exc:
                # Failures are surfaced as the assistant's message text.
                assistant_text = f"Failed to run AI refinement: {exc}"
        else:
            assistant_text = "No AI configured. Add an AI config to use mitigation chat."
        PatternMitigationMessage.objects.create(
            user=request.user,
            plan=plan,
            role="assistant",
            text=assistant_text,
        )
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                active_tab=active_tab,
            ),
        )
class AIWorkspaceExportArtifact(LoginRequiredMixin, View):
    """Export a plan's artifacts in a chosen format and re-render the panel."""

    allowed_types = {"widget"}

    def post(self, request, type, person_id, plan_id):
        """Create a PatternArtifactExport row and show it in the panel."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(
            PatternMitigationPlan,
            id=plan_id,
            user=request.user,
        )
        # Unknown selections silently fall back to safe defaults.
        artifact_type = (request.POST.get("artifact_type") or "rulebook").strip()
        export_format = (request.POST.get("export_format") or "markdown").strip()
        active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="ask_ai")
        if artifact_type not in {"rulebook", "rules", "games", "corrections"}:
            artifact_type = "rulebook"
        if export_format not in {"markdown", "json", "text"}:
            export_format = "markdown"
        payload, meta = _serialize_export_payload(plan, artifact_type, export_format)
        export_record = PatternArtifactExport.objects.create(
            user=request.user,
            plan=plan,
            artifact_type=artifact_type,
            export_format=export_format,
            protocol_version="artifact-v1",
            payload=payload,
            meta=meta,
        )
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message=f"Exported {artifact_type} ({export_format}).",
                notice_level="success",
                export_record=export_record,
                active_tab=active_tab,
            ),
        )
class AIWorkspaceCreateArtifact(LoginRequiredMixin, View):
    """Create an empty rule/game/correction on a plan and re-render the panel."""

    allowed_types = {"widget"}
    kind_map = {
        "rule": (PatternMitigationRule, "content", "Rule"),
        "game": (PatternMitigationGame, "instructions", "Game"),
        "correction": (PatternMitigationCorrection, "clarification", "Correction"),
    }

    def post(self, request, type, person_id, plan_id, kind):
        """Validate kind, skip duplicate corrections, then create the artifact."""
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)
        kind_key = (kind or "").strip().lower()
        if kind_key not in self.kind_map:
            return HttpResponseBadRequest("Invalid artifact kind")
        model, body_field, label = self.kind_map[kind_key]
        is_correction = kind_key == "correction"
        # Corrections land on their own tab; rules/games on the plan board.
        tab = _sanitize_active_tab(
            request.POST.get("active_tab"),
            default=("corrections" if is_correction else "plan_board"),
        )
        if is_correction:
            # Skip creating a second blank "New Correction".
            candidate_signature = _correction_signature(f"New {label}", "")
            if candidate_signature in _existing_correction_signatures(plan):
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message="Duplicate correction skipped.",
                        notice_level="warning",
                        active_tab=tab,
                    ),
                )
        model.objects.create(
            **{
                "user": request.user,
                "plan": plan,
                "title": f"New {label}",
                body_field: "",
                "enabled": True,
            }
        )
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message=f"{label} created.",
                notice_level="success",
                active_tab=tab,
            ),
        )
class AIWorkspaceUpdateArtifact(LoginRequiredMixin, View):
    """Persist edits to an existing mitigation artifact, rejecting edits that
    would duplicate another correction on the same plan."""

    allowed_types = {"widget"}
    # kind slug -> (model, name of the body field on that model, display label)
    kind_map = {
        "rule": (PatternMitigationRule, "content", "Rule"),
        "game": (PatternMitigationGame, "instructions", "Game"),
        "correction": (PatternMitigationCorrection, "clarification", "Correction"),
    }
    # Closed vocabularies for correction-only option fields; an out-of-range
    # submission silently keeps the artifact's current value.
    _PERSPECTIVES = {"first_person", "second_person", "third_person"}
    _SHARE_TARGETS = {"self", "other", "both"}
    _LANGUAGE_STYLES = {"same", "adapted"}

    def post(self, request, type, person_id, plan_id, kind, artifact_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)

        normalized_kind = (kind or "").strip().lower()
        if normalized_kind not in self.kind_map:
            return HttpResponseBadRequest("Invalid artifact kind")
        artifact_model, body_field, label = self.kind_map[normalized_kind]
        artifact = get_object_or_404(
            artifact_model,
            id=artifact_id,
            user=request.user,
            plan=plan,
        )

        # Blank titles fall back to the artifact's existing title.
        title = (request.POST.get("title") or "").strip() or artifact.title
        body = (request.POST.get("body") or "").strip()
        is_correction = normalized_kind == "correction"
        tab = _sanitize_active_tab(
            request.POST.get("active_tab"),
            default="corrections" if is_correction else "plan_board",
        )

        if is_correction:
            title = _normalize_correction_title(title, fallback=artifact.title or "Correction")
            # Refuse the save when it would make this correction identical to
            # another correction on the same plan.
            if _correction_signature(title, body) in _existing_correction_signatures(
                plan, exclude_id=artifact.id
            ):
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message="Duplicate correction not saved.",
                        notice_level="warning",
                        active_tab=tab,
                    ),
                )

        artifact.title = title[:255]
        setattr(artifact, body_field, body)
        artifact.enabled = _is_truthy(request.POST.get("enabled"))
        if is_correction:
            artifact.source_phrase = (request.POST.get("source_phrase") or "").strip()
            perspective = (request.POST.get("perspective") or "third_person").strip()
            if perspective in self._PERSPECTIVES:
                artifact.perspective = perspective
            share_target = (request.POST.get("share_target") or "both").strip()
            if share_target in self._SHARE_TARGETS:
                artifact.share_target = share_target
            language_style = (request.POST.get("language_style") or "adapted").strip()
            if language_style in self._LANGUAGE_STYLES:
                artifact.language_style = language_style
        artifact.save()
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message=f"{label} saved.",
                notice_level="success",
                active_tab=tab,
            ),
        )
class AIWorkspaceDeleteArtifact(LoginRequiredMixin, View):
    """Delete a single mitigation artifact and re-render the mitigation panel."""

    allowed_types = {"widget"}
    # kind slug -> (model, display label for the notice)
    kind_map = {
        "rule": (PatternMitigationRule, "Rule"),
        "game": (PatternMitigationGame, "Game"),
        "correction": (PatternMitigationCorrection, "Correction"),
    }

    def post(self, request, type, person_id, plan_id, kind, artifact_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)

        normalized_kind = (kind or "").strip().lower()
        if normalized_kind not in self.kind_map:
            return HttpResponseBadRequest("Invalid artifact kind")
        artifact_model, label = self.kind_map[normalized_kind]

        # 404 unless the artifact belongs to this user AND this plan.
        get_object_or_404(
            artifact_model,
            id=artifact_id,
            user=request.user,
            plan=plan,
        ).delete()

        default_tab = "corrections" if normalized_kind == "correction" else "plan_board"
        tab = _sanitize_active_tab(request.POST.get("active_tab"), default=default_tab)
        context = _mitigation_panel_context(
            person=person,
            plan=plan,
            notice_message=f"{label} deleted.",
            notice_level="success",
            active_tab=tab,
        )
        return render(request, "partials/ai-workspace-mitigation-panel.html", context)
class AIWorkspaceDeleteArtifactList(LoginRequiredMixin, View):
    """Bulk-delete every artifact of a given kind on a plan."""

    allowed_types = {"widget"}
    # kind slug -> (model, plural label for the notice text)
    kind_map = {
        "rule": (PatternMitigationRule, "rules"),
        "game": (PatternMitigationGame, "games"),
        "correction": (PatternMitigationCorrection, "corrections"),
    }

    def post(self, request, type, person_id, plan_id, kind):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)

        normalized_kind = (kind or "").strip().lower()
        if normalized_kind not in self.kind_map:
            return HttpResponseBadRequest("Invalid artifact kind")
        artifact_model, plural_label = self.kind_map[normalized_kind]

        queryset = artifact_model.objects.filter(user=request.user, plan=plan)
        removed = queryset.count()
        if removed:
            queryset.delete()
            notice_message = f"Deleted {removed} {plural_label}."
            notice_level = "success"
        else:
            notice_message = f"No {plural_label} to delete."
            notice_level = "info"

        tab = _sanitize_active_tab(
            request.POST.get("active_tab"),
            default="corrections" if normalized_kind == "correction" else "plan_board",
        )
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message=notice_message,
                notice_level=notice_level,
                active_tab=tab,
            ),
        )
class AIWorkspaceEngageShare(LoginRequiredMixin, View):
    """Preview, send immediately, or queue an "engage" message built from a
    mitigation artifact (rule, game, or correction) on a plan."""
    allowed_types = {"widget"}
    def post(self, request, type, person_id, plan_id):
        """Handle the engage form of the mitigation panel.

        POST fields:
            source_ref: "<kind>:<id>" identifying the source artifact.
            share_target, framing, context_note: shaping options forwarded to
                ``_build_engage_payload``.
            action: "preview" (default), "send", or "queue".
            force_send: truthy value bypasses a blocked send-state.
            active_tab: panel tab to restore in the response.

        Every branch responds with the re-rendered mitigation panel partial.
        """
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)
        source_ref = (request.POST.get("source_ref") or "").strip()
        share_target = (request.POST.get("share_target") or "self").strip()
        framing = (request.POST.get("framing") or "dont_change").strip()
        context_note = (request.POST.get("context_note") or "").strip()
        action = (request.POST.get("action") or "preview").strip().lower()
        force_send = _is_truthy(request.POST.get("force_send"))
        # Echoed back on every response so the form keeps the submitted values.
        engage_form = {
            "source_ref": source_ref,
            "share_target": share_target,
            "framing": framing,
            "context_note": context_note,
        }
        active_tab = _sanitize_active_tab(request.POST.get("active_tab"), default="engage")
        # A valid source_ref has the shape "<kind>:<id>".
        if ":" not in source_ref:
            return render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    notice_message="Select a source item to engage.",
                    notice_level="warning",
                    engage_form=engage_form,
                    active_tab=active_tab,
                ),
            )
        source_kind, source_id = source_ref.split(":", 1)
        source_kind = source_kind.strip().lower()
        source_id = source_id.strip()
        # Resolve the ref kind onto the owning artifact model.
        model_map = {
            "rule": PatternMitigationRule,
            "game": PatternMitigationGame,
            "correction": PatternMitigationCorrection,
        }
        if source_kind not in model_map:
            return render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    notice_message="Invalid source type for engage.",
                    notice_level="danger",
                    engage_form=engage_form,
                    active_tab=active_tab,
                ),
            )
        # 404 unless the artifact belongs to this user and this plan.
        # NOTE(review): source_id comes straight from the form; a non-numeric
        # id may raise inside the ORM lookup rather than 404 — confirm pk type.
        source_obj = get_object_or_404(
            model_map[source_kind],
            id=source_id,
            user=request.user,
            plan=plan,
        )
        payload = _build_engage_payload(
            source_obj=source_obj,
            source_kind=source_kind,
            share_target=share_target,
            framing=framing,
            context_note=context_note,
            # Best available display name for the workspace owner.
            owner_name=(
                request.user.first_name
                or request.user.get_full_name().strip()
                or request.user.username
                or "You"
            ),
            recipient_name=person.name or "Other",
        )
        engage_preview = payload["preview"]
        outbound_text = payload["outbound"]
        # Adopt the share target the payload builder actually used.
        share_target = payload["share_target"]
        if action == "preview":
            # Render the generated text only; nothing is sent or stored.
            return render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    engage_preview=engage_preview,
                    engage_preview_flash=True,
                    engage_form=engage_form,
                    active_tab=active_tab,
                ),
            )
        if action == "send":
            # Sending is gated on the user's manipulation-derived send-state
            # unless the caller explicitly forced the send.
            send_state = _get_send_state(request.user, person)
            if not send_state["can_send"] and not force_send:
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message=f"Send blocked. {send_state['text']}",
                        notice_level="warning",
                        engage_preview=engage_preview,
                        engage_form=engage_form,
                        active_tab=active_tab,
                    ),
                )
            identifier = _resolve_person_identifier(request.user, person)
            if identifier is None:
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message="No recipient identifier found.",
                        notice_level="danger",
                        engage_preview=engage_preview,
                        engage_form=engage_form,
                        active_tab=active_tab,
                    ),
                )
            try:
                # identifier.send is a coroutine; bridge it into this sync view.
                ts = async_to_sync(identifier.send)(outbound_text)
            except Exception as exc:
                # Broad catch is deliberate: any transport failure is surfaced
                # to the user as a panel notice rather than a 500.
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message=f"Send failed: {exc}",
                        notice_level="danger",
                        engage_preview=engage_preview,
                        engage_form=engage_form,
                        active_tab=active_tab,
                    ),
                )
            session, _ = ChatSession.objects.get_or_create(
                user=request.user,
                identifier=identifier,
            )
            # Prefer the transport-provided timestamp; fall back to "now" in ms.
            sent_ts = int(ts) if ts else int(dj_timezone.now().timestamp() * 1000)
            # Record the outbound text in the chat history as a BOT message.
            Message.objects.create(
                user=request.user,
                session=session,
                custom_author="BOT",
                sender_uuid="",
                text=outbound_text,
                ts=sent_ts,
            )
            notice = "Shared via engage."
            if force_send and not send_state["can_send"]:
                notice = "Shared via engage with override."
            response = render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    notice_message=notice,
                    notice_level="success",
                    engage_preview=engage_preview,
                    engage_form=engage_form,
                    active_tab=active_tab,
                ),
            )
            # Notify client-side HTMX listeners so the conversation view can refresh.
            response["HX-Trigger"] = json.dumps(
                {
                    "gia-message-sent": {
                        "person_id": str(person.id),
                        "ts": sent_ts,
                        "text": outbound_text,
                        "author": "BOT",
                    }
                }
            )
            return response
        if action == "queue":
            identifier = _resolve_person_identifier(request.user, person)
            if identifier is None:
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message="No recipient identifier found.",
                        notice_level="danger",
                        engage_preview=engage_preview,
                        engage_form=engage_form,
                        active_tab=active_tab,
                    ),
                )
            # A queue entry requires an enabled manipulation to attach to.
            manipulation = _get_queue_manipulation(request.user, person)
            if manipulation is None:
                return render(
                    request,
                    "partials/ai-workspace-mitigation-panel.html",
                    _mitigation_panel_context(
                        person=person,
                        plan=plan,
                        notice_message="No enabled manipulation found for this recipient. Queue entry not created.",
                        notice_level="warning",
                        engage_preview=engage_preview,
                        engage_form=engage_form,
                        active_tab=active_tab,
                    ),
                )
            session, _ = ChatSession.objects.get_or_create(
                user=request.user,
                identifier=identifier,
            )
            # Store the text for later delivery instead of sending now.
            QueuedMessage.objects.create(
                user=request.user,
                session=session,
                manipulation=manipulation,
                ts=int(dj_timezone.now().timestamp() * 1000),
                sender_uuid="",
                text=outbound_text,
                custom_author="BOT",
            )
            return render(
                request,
                "partials/ai-workspace-mitigation-panel.html",
                _mitigation_panel_context(
                    person=person,
                    plan=plan,
                    notice_message="Engage text added to queue.",
                    notice_level="success",
                    engage_preview=engage_preview,
                    engage_form=engage_form,
                    active_tab=active_tab,
                ),
            )
        # Unrecognised action value — warn rather than failing hard.
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message="Unknown engage action.",
                notice_level="warning",
                engage_preview=engage_preview,
                engage_form=engage_form,
                active_tab=active_tab,
            ),
        )
class AIWorkspaceAutoSettings(LoginRequiredMixin, View):
    """Persist automation settings for a plan's conversation.

    Backs the "auto" tab of the mitigation panel. An ``action=run_now``
    submission additionally triggers an immediate analysis pass after saving.
    """

    allowed_types = {"widget"}

    @staticmethod
    def _clamped_int(raw, *, default, low, high):
        """Return *raw* parsed as an int and clamped to ``[low, high]``.

        Falls back to *default* when *raw* cannot be parsed. Catches only
        ``(TypeError, ValueError)`` — the errors ``int()`` can raise — instead
        of the previous blanket ``except Exception``, which could also mask
        unrelated programming errors.
        """
        try:
            return max(low, min(int(raw), high))
        except (TypeError, ValueError):
            return default

    def post(self, request, type, person_id, plan_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)
        auto_settings = _get_or_create_auto_settings(request.user, plan.conversation)

        # Boolean toggles: unchecked checkboxes simply arrive absent from POST.
        auto_settings.enabled = _is_truthy(request.POST.get("enabled"))
        auto_settings.auto_pattern_recognition = _is_truthy(
            request.POST.get("auto_pattern_recognition")
        )
        auto_settings.auto_create_mitigation = _is_truthy(
            request.POST.get("auto_create_mitigation")
        )
        auto_settings.auto_create_corrections = _is_truthy(
            request.POST.get("auto_create_corrections")
        )
        auto_settings.auto_notify_enabled = _is_truthy(
            request.POST.get("auto_notify_enabled")
        )
        # Empty overrides are stored as None so defaults apply elsewhere.
        auto_settings.ntfy_topic_override = (
            (request.POST.get("ntfy_topic_override") or "").strip() or None
        )
        auto_settings.ntfy_url_override = (
            (request.POST.get("ntfy_url_override") or "").strip() or None
        )
        auto_settings.sample_message_window = self._clamped_int(
            request.POST.get("sample_message_window") or 40,
            default=40,
            low=10,
            high=200,
        )
        auto_settings.check_cooldown_seconds = self._clamped_int(
            request.POST.get("check_cooldown_seconds") or 300,
            default=300,
            low=0,
            high=86400,
        )
        auto_settings.save()

        action = (request.POST.get("action") or "save").strip().lower()
        if action == "run_now":
            # Kick off an analysis pass right away with the freshly-saved settings.
            result = _run_auto_analysis_for_plan(
                user=request.user,
                person=person,
                conversation=plan.conversation,
                plan=plan,
                auto_settings=auto_settings,
                trigger="manual",
            )
            notice_message = result["summary"]
            notice_level = "success" if result.get("ran") else "info"
        else:
            notice_message = "Automation settings saved."
            notice_level = "success"
        return render(
            request,
            "partials/ai-workspace-mitigation-panel.html",
            _mitigation_panel_context(
                person=person,
                plan=plan,
                notice_message=notice_message,
                notice_level=notice_level,
                active_tab="auto",
                auto_settings=auto_settings,
            ),
        )
class AIWorkspaceUpdateFundamentals(LoginRequiredMixin, View):
    """Replace a plan's fundamental items from a free-text submission."""

    allowed_types = {"widget"}

    def post(self, request, type, person_id, plan_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
        person = get_object_or_404(Person, pk=person_id, user=request.user)
        plan = get_object_or_404(PatternMitigationPlan, id=plan_id, user=request.user)

        raw_text = request.POST.get("fundamentals_text") or ""
        plan.fundamental_items = _parse_fundamentals(raw_text)
        plan.save(update_fields=["fundamental_items", "updated_at"])

        tab = _sanitize_active_tab(request.POST.get("active_tab"), default="fundamentals")
        context = _mitigation_panel_context(
            person=person,
            plan=plan,
            notice_message="Fundamentals saved.",
            notice_level="success",
            active_tab=tab,
        )
        return render(request, "partials/ai-workspace-mitigation-panel.html", context)