Improve and condense related controls

2026-02-15 22:11:17 +00:00
parent ae3365e165
commit 981ee56de7
18 changed files with 1340 additions and 209 deletions

@@ -24,6 +24,7 @@ from core.models import (
    AI,
    ChatSession,
    Message,
    MessageEvent,
    PatternMitigationPlan,
    Person,
    PersonIdentifier,
@@ -46,6 +47,12 @@ IMAGE_EXTENSIONS = (
".avif",
".svg",
)
EMPTY_TEXT_VALUES = {
"",
"[No Body]",
"[no body]",
"(no text)",
}
def _uniq_ordered(values):
@@ -144,6 +151,122 @@ def _image_urls_from_text(text_value: str) -> list[str]:
    return []

def _looks_like_image_name(name_value: str) -> bool:
    value = str(name_value or "").strip().lower()
    return bool(value) and value.endswith(IMAGE_EXTENSIONS)

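# Accepts a bare URL string, an attachment dict (optionally with a nested
# "attachments" list), or a list of either, and returns any image URLs found.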
def _extract_attachment_image_urls(blob) -> list[str]:
    urls = []
    if isinstance(blob, str):
        normalized = _clean_url(blob)
        if normalized and _looks_like_image_url(normalized):
            urls.append(normalized)
        return urls
    if isinstance(blob, dict):
        content_type = str(
            blob.get("content_type")
            or blob.get("contentType")
            or blob.get("mime_type")
            or blob.get("mimetype")
            or ""
        ).strip().lower()
        filename = str(blob.get("filename") or blob.get("fileName") or "").strip()
        image_hint = content_type.startswith("image/") or _looks_like_image_name(filename)
        for key in ("url", "source_url", "download_url", "proxy_url", "href", "uri"):
            normalized = _clean_url(blob.get(key))
            if not normalized:
                continue
            if image_hint or _looks_like_image_url(normalized):
                urls.append(normalized)
        nested = blob.get("attachments")
        if isinstance(nested, list):
            for row in nested:
                urls.extend(_extract_attachment_image_urls(row))
        return urls
    if isinstance(blob, list):
        for row in blob:
            urls.extend(_extract_attachment_image_urls(row))
    return urls

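# Recover image URLs for messages whose bodies are empty. Two passes: first,
# MessageEvent rows linked via raw_payload_ref["legacy_message_id"]; then a
# fallback that matches XMPP events carrying attachments within +/-3000 on
# the ts field (3 seconds if ts is in milliseconds). As a hypothetical
# example, an event with raw_payload_ref {"legacy_message_id": "42"} and an
# attachment {"url": "https://example.com/a.png"} maps message "42" to that URL.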
def _attachment_image_urls_by_message(messages):
    rows = list(messages or [])
    if not rows:
        return {}
    by_message = {}
    unresolved = []
    for msg in rows:
        text_value = str(msg.text or "").strip()
        if text_value and text_value not in EMPTY_TEXT_VALUES:
            continue
        unresolved.append(msg)
    if not unresolved:
        return by_message
    # First pass: events explicitly linked to the legacy message id.
    legacy_ids = [str(msg.id) for msg in unresolved]
    linked_events = MessageEvent.objects.filter(
        user=rows[0].user,
        raw_payload_ref__legacy_message_id__in=legacy_ids,
    ).order_by("ts")
    for event in linked_events:
        legacy_id = str((event.raw_payload_ref or {}).get("legacy_message_id") or "").strip()
        if not legacy_id:
            continue
        urls = _uniq_ordered(
            _extract_attachment_image_urls(event.attachments)
            + _extract_attachment_image_urls(event.raw_payload_ref or {})
        )
        if urls:
            by_message.setdefault(legacy_id, urls)
    missing = [msg for msg in unresolved if str(msg.id) not in by_message]
    if not missing:
        return by_message
    # Second pass: match remaining messages to XMPP events with attachments
    # inside a 3000-unit ts window (3 s for millisecond timestamps) on
    # either side of the message timestamp.
    min_ts = min(int(msg.ts or 0) for msg in missing) - 3000
    max_ts = max(int(msg.ts or 0) for msg in missing) + 3000
    fallback_events = (
        MessageEvent.objects.filter(
            user=rows[0].user,
            source_system="xmpp",
            ts__gte=min_ts,
            ts__lte=max_ts,
        )
        .exclude(attachments=[])
        .order_by("ts")
    )
    fallback_list = list(fallback_events)
    for msg in missing:
        if str(msg.id) in by_message:
            continue
        msg_ts = int(msg.ts or 0)
        candidates = [
            event
            for event in fallback_list
            if abs(int(event.ts or 0) - msg_ts) <= 3000
        ]
        if not candidates:
            continue
        # Earliest event inside the window wins.
        event = candidates[0]
        urls = _uniq_ordered(
            _extract_attachment_image_urls(event.attachments)
            + _extract_attachment_image_urls(event.raw_payload_ref or {})
        )
        if urls:
            by_message[str(msg.id)] = urls
    return by_message

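# _serialize_message itself only picks up images embedded in the message text;
# the attachment-based recovery above is layered on afterwards in
# _serialize_messages_with_artifacts.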
def _serialize_message(msg: Message) -> dict:
    text_value = str(msg.text or "")
    image_urls = _image_urls_from_text(text_value)
@@ -448,6 +571,21 @@ def _serialize_messages_with_artifacts(
):
    rows = list(messages or [])
    serialized = [_serialize_message(msg) for msg in rows]
    attachment_images = _attachment_image_urls_by_message(rows)
    for idx, msg in enumerate(rows):
        item = serialized[idx]
        if item.get("image_urls"):
            continue
        recovered = _uniq_ordered(attachment_images.get(str(msg.id)) or [])
        if not recovered:
            continue
        item["image_urls"] = recovered
        item["image_url"] = recovered[0]
        text_value = str(msg.text or "").strip()
        if text_value in EMPTY_TEXT_VALUES:
            item["hide_text"] = True
            item["display_text"] = ""
    for item in serialized:
        item["gap_fragments"] = []
        item["metric_fragments"] = []
@@ -815,6 +953,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "stability_score",
            "label": "Stability Score",
            "doc_slug": "stability_score",
            "field": "stability_score",
            "source": "conversation",
            "kind": "score",
@@ -824,6 +963,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "stability_confidence",
            "label": "Stability Confidence",
            "doc_slug": "stability_confidence",
            "field": "stability_confidence",
            "source": "conversation",
            "kind": "confidence",
@@ -833,6 +973,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "sample_messages",
            "label": "Sample Messages",
            "doc_slug": "sample_messages",
            "field": "stability_sample_messages",
            "source": "conversation",
            "kind": "count",
@@ -842,6 +983,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "sample_days",
            "label": "Sample Days",
            "doc_slug": "sample_days",
            "field": "stability_sample_days",
            "source": "conversation",
            "kind": "count",
@@ -851,6 +993,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "commitment_inbound",
            "label": "Commit In",
            "doc_slug": "commitment_inbound",
            "field": "commitment_inbound_score",
            "source": "conversation",
            "kind": "score",
@@ -860,6 +1003,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "commitment_outbound",
            "label": "Commit Out",
            "doc_slug": "commitment_outbound",
            "field": "commitment_outbound_score",
            "source": "conversation",
            "kind": "score",
@@ -869,6 +1013,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "commitment_confidence",
            "label": "Commit Confidence",
            "doc_slug": "commitment_confidence",
            "field": "commitment_confidence",
            "source": "conversation",
            "kind": "confidence",
@@ -878,6 +1023,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "reciprocity",
            "label": "Reciprocity",
            "doc_slug": "reciprocity_score",
            "field": "reciprocity_score",
            "source": "snapshot",
            "kind": "score",
@@ -887,6 +1033,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "continuity",
            "label": "Continuity",
            "doc_slug": "continuity_score",
            "field": "continuity_score",
            "source": "snapshot",
            "kind": "score",
@@ -896,6 +1043,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "response",
            "label": "Response",
            "doc_slug": "response_score",
            "field": "response_score",
            "source": "snapshot",
            "kind": "score",
@@ -905,6 +1053,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "volatility",
            "label": "Volatility",
            "doc_slug": "volatility_score",
            "field": "volatility_score",
            "source": "snapshot",
            "kind": "score",
@@ -914,6 +1063,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "inbound_messages",
            "label": "Inbound Messages",
            "doc_slug": "inbound_messages",
            "field": "inbound_messages",
            "source": "snapshot",
            "kind": "count",
@@ -923,6 +1073,7 @@ def _quick_insights_rows(conversation):
        {
            "key": "outbound_messages",
            "label": "Outbound Messages",
            "doc_slug": "outbound_messages",
            "field": "outbound_messages",
            "source": "snapshot",
            "kind": "count",
@@ -933,6 +1084,7 @@ def _quick_insights_rows(conversation):
    rows = []
    for spec in metric_specs:
        field_name = spec["field"]
        # Documentation copy is keyed by doc_slug when present, else the row key.
        metric_copy = _metric_copy(spec.get("doc_slug") or spec["key"], spec["label"])
        if spec["source"] == "conversation":
            current = getattr(conversation, field_name, None)
            previous_value = getattr(previous, field_name, None) if previous else None
@@ -964,6 +1116,8 @@ def _quick_insights_rows(conversation):
"point_count": point_count,
"trend": trend,
"emotion": emotion,
"calculation": metric_copy.get("calculation") or "",
"psychology": metric_copy.get("psychology") or "",
}
)
return {
@@ -1092,14 +1246,22 @@ def _engage_source_from_ref(plan, source_ref):
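# Resolve the panel's PersonIdentifier with progressively looser lookups:
# exact user/person/service/identifier match, then person+service, then any
# identifier for the person.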
def _context_base(user, service, identifier, person):
    person_identifier = None
    if person is not None:
        if identifier:
            person_identifier = PersonIdentifier.objects.filter(
                user=user,
                person=person,
                service=service,
                identifier=identifier,
            ).first()
        if person_identifier is None:
            person_identifier = (
                PersonIdentifier.objects.filter(
                    user=user,
                    person=person,
                    service=service,
                ).first()
                or PersonIdentifier.objects.filter(user=user, person=person).first()
            )
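    # Last resort: nothing matched via the person (or no person was given),
    # so fall back to an identifier-only lookup.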
    if person_identifier is None and identifier:
        person_identifier = PersonIdentifier.objects.filter(
            user=user,
@@ -1190,7 +1352,9 @@ def _panel_context(
    )
    ws_url = f"/ws/compose/thread/?{urlencode({'token': ws_token})}"
    # time_ns() makes unique_raw differ per render, so each panel id is unique.
    unique_raw = (
        f"{base['service']}|{base['identifier']}|{request.user.id}|{time.time_ns()}"
    )
    unique = hashlib.sha1(unique_raw.encode("utf-8")).hexdigest()[:12]
    typing_state = get_person_typing_state(
        user_id=request.user.id,
@@ -1228,6 +1392,14 @@ def _panel_context(
if base["person"]
else reverse("ai_workspace")
),
"ai_workspace_widget_url": (
(
f"{reverse('ai_workspace_person', kwargs={'type': 'widget', 'person_id': base['person'].id})}"
f"?{urlencode({'limit': limit})}"
)
if base["person"]
else ""
),
"manual_icon_class": "fa-solid fa-paper-plane",
"panel_id": f"compose-panel-{unique}",
"typing_state_json": json.dumps(typing_state),
@@ -1635,9 +1807,21 @@ class ComposeQuickInsights(LoginRequiredMixin, View):
"thread": "",
"last_event": "",
"last_ai_run": "",
"workspace_created": "",
"snapshot_count": 0,
"workspace_created": "",
"snapshot_count": 0,
"platform_docs": _metric_copy("platform", "Platform"),
"state_docs": _metric_copy("stability_state", "Participant State"),
"thread_docs": _metric_copy("thread", "Thread"),
"snapshot_docs": {
"calculation": (
"Count of stored workspace metric snapshots for this person."
),
"psychology": (
"More points improve trend reliability; sparse points are "
"best treated as directional signals."
),
},
},
"rows": [],
"docs": [
"Quick Insights needs at least one workspace conversation snapshot.",
@@ -1673,6 +1857,18 @@ class ComposeQuickInsights(LoginRequiredMixin, View):
                    conversation.created_at
                ).strftime("%Y-%m-%d %H:%M"),
                "snapshot_count": payload["snapshot_count"],
                "platform_docs": _metric_copy("platform", "Platform"),
                "state_docs": _metric_copy("stability_state", "Participant State"),
                "thread_docs": _metric_copy("thread", "Thread"),
                "snapshot_docs": {
                    "calculation": (
                        "Count of stored workspace metric snapshots for this person."
                    ),
                    "psychology": (
                        "More points improve trend reliability; sparse points are "
                        "best treated as directional signals."
                    ),
                },
            },
            "rows": payload["rows"],
            "docs": [