Implement more information displays
@@ -629,6 +629,41 @@ def _compose_page_url_for_person(user, person):
    return f"{reverse('compose_page')}?{query}"


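# Module-level counterparts of the widget helpers removed from
# AIWorkspacePersonWidget below, so other views (such as the new timeline
# widget) can build the same message rows and recent-message lists.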
def _message_rows_for_person(user, person, limit):
    sessions = ChatSession.objects.filter(user=user, identifier__person=person)
    identifiers = set(
        PersonIdentifier.objects.filter(user=user, person=person).values_list(
            "identifier", flat=True
        )
    )
    messages = (
        Message.objects.filter(user=user, session__in=sessions)
        .select_related("session", "session__identifier")
        .order_by("-ts")[:limit]
    )

    rows = []
    for message in reversed(list(messages)):
        rows.append(
            {
                "message": message,
                "direction": _infer_direction(message, identifiers),
                "ts_label": _format_unix_ms(message.ts),
            }
        )
    return rows


def _recent_messages_for_person(user, person, limit):
    sessions = ChatSession.objects.filter(user=user, identifier__person=person)
    messages = (
        Message.objects.filter(user=user, session__in=sessions)
        .select_related("session", "session__identifier")
        .order_by("-ts")[:limit]
    )
    return list(reversed(list(messages)))


def _is_truthy(value):
    return str(value or "").strip().lower() in {"1", "true", "on", "yes"}

@@ -807,6 +842,274 @@ def _all_graph_payload(conversation):
    return graphs


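# Classifies commitment directionality from the stored inbound/outbound scores:
# a delta magnitude under 4 is treated as balanced, otherwise the sign of
# (outbound - inbound) decides which side is leaning.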
def _commitment_directionality_payload(conversation):
    latest_snapshot = conversation.metric_snapshots.first()
    inbound = conversation.commitment_inbound_score
    outbound = conversation.commitment_outbound_score
    confidence = conversation.commitment_confidence or 0.0

    if inbound is None or outbound is None:
        return {
            "direction_key": "calibrating",
            "direction_label": "Calibrating",
            "magnitude": None,
            "delta": None,
            "confidence": confidence,
            "conclusion": (
                "Directionality cannot be inferred yet. Collect more exchanges to "
                "stabilize directional signal."
            ),
            "factors": [],
            "graph_refs": [],
        }

    delta = round(float(outbound) - float(inbound), 2)
    magnitude = round(abs(delta), 2)
    if magnitude < 4:
        direction_key = "balanced"
        direction_label = "Balanced"
        conclusion = (
            "Commitment appears symmetric. Keep current cadence and focus on "
            "maintaining clarity."
        )
    elif delta > 0:
        direction_key = "outbound"
        direction_label = "Outbound-Leaning"
        conclusion = (
            "You are carrying relatively more directional effort right now. "
            "Consider reducing over-functioning and asking for explicit reciprocity."
        )
    else:
        direction_key = "inbound"
        direction_label = "Inbound-Leaning"
        conclusion = (
            "The other party is carrying relatively more directional effort right now. "
            "Acknowledge this and match consistency to reduce asymmetry."
        )

    graph_refs = [
        {"slug": "commitment_inbound", "title": "Commit In"},
        {"slug": "commitment_outbound", "title": "Commit Out"},
        {"slug": "inbound_response_score", "title": "Inbound Response Score"},
        {"slug": "outbound_response_score", "title": "Outbound Response Score"},
        {"slug": "balance_inbound_score", "title": "Inbound Balance Score"},
        {"slug": "balance_outbound_score", "title": "Outbound Balance Score"},
        {"slug": "commitment_confidence", "title": "Commit Confidence"},
    ]
    factor_lookup = {
        "inbound_response_score": (
            latest_snapshot.inbound_response_score if latest_snapshot else None
        ),
        "outbound_response_score": (
            latest_snapshot.outbound_response_score if latest_snapshot else None
        ),
        "balance_inbound_score": (
            latest_snapshot.balance_inbound_score if latest_snapshot else None
        ),
        "balance_outbound_score": (
            latest_snapshot.balance_outbound_score if latest_snapshot else None
        ),
        "commitment_confidence": confidence,
    }
    factors = [
        {
            "title": "Inbound Response",
            "icon": "fa-solid fa-inbox",
            "weight": "60% of Commit In",
            "value": factor_lookup["inbound_response_score"],
            "slug": "inbound_response_score",
        },
        {
            "title": "Inbound Balance",
            "icon": "fa-solid fa-scale-balanced",
            "weight": "40% of Commit In",
            "value": factor_lookup["balance_inbound_score"],
            "slug": "balance_inbound_score",
        },
        {
            "title": "Outbound Response",
            "icon": "fa-solid fa-paper-plane",
            "weight": "60% of Commit Out",
            "value": factor_lookup["outbound_response_score"],
            "slug": "outbound_response_score",
        },
        {
            "title": "Outbound Balance",
            "icon": "fa-solid fa-arrows-left-right",
            "weight": "40% of Commit Out",
            "value": factor_lookup["balance_outbound_score"],
            "slug": "balance_outbound_score",
        },
        {
            "title": "Confidence",
            "icon": "fa-solid fa-shield-check",
            "weight": "Applies To Direction",
            "value": confidence,
            "slug": "commitment_confidence",
        },
    ]
    return {
        "direction_key": direction_key,
        "direction_label": direction_label,
        "magnitude": magnitude,
        "delta": delta,
        "confidence": confidence,
        "conclusion": conclusion,
        "commit_in": inbound,
        "commit_out": outbound,
        "factors": factors,
        "graph_refs": graph_refs,
    }


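# Assembles the metric context used by mitigation prompts: stability and
# commitment summaries, the latest snapshot components, and up to eight
# threshold-based risk signals.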
def _metric_pattern_context(conversation):
    latest_snapshot = conversation.metric_snapshots.first()
    directionality = _commitment_directionality_payload(conversation)
    confidence = conversation.stability_confidence or 0.0
    risk_signals = []

    state_key = str(conversation.stability_state or "").lower()
    if state_key == WorkspaceConversation.StabilityState.FRAGILE:
        risk_signals.append(
            {
                "key": "stability_fragile",
                "label": "Fragile Stability",
                "severity": "high",
                "explanation": (
                    "Stability is in fragile range. Bias corrections toward "
                    "de-escalation and explicit repair loops."
                ),
            }
        )
    elif state_key == WorkspaceConversation.StabilityState.WATCH:
        risk_signals.append(
            {
                "key": "stability_watch",
                "label": "Watch Stability",
                "severity": "medium",
                "explanation": (
                    "Stability is watch-range. Reinforce concise requests and "
                    "misinterpretation checks before escalation."
                ),
            }
        )

    if confidence < 0.25:
        risk_signals.append(
            {
                "key": "low_confidence",
                "label": "Low Confidence Window",
                "severity": "low",
                "explanation": (
                    "Confidence is low. Prefer reversible, low-risk corrections "
                    "that can be validated quickly."
                ),
            }
        )

    magnitude = directionality.get("magnitude")
    if magnitude is not None:
        severity = "high" if magnitude >= 15 else "medium" if magnitude >= 8 else None
        if severity:
            risk_signals.append(
                {
                    "key": "commitment_asymmetry",
                    "label": "Commitment Asymmetry",
                    "severity": severity,
                    "explanation": (
                        "Directional commitment is asymmetric. Add corrections "
                        "that restore reciprocity and explicit confirmation."
                    ),
                }
            )

    if latest_snapshot:
        if (
            latest_snapshot.volatility_score is not None
            and latest_snapshot.volatility_score >= 70
        ):
            risk_signals.append(
                {
                    "key": "volatility_spike",
                    "label": "Volatility Spike",
                    "severity": "medium",
                    "explanation": (
                        "Volatility is elevated. Use short, bounded wording to "
                        "reduce sudden interaction swings."
                    ),
                }
            )
        if (
            latest_snapshot.reciprocity_score is not None
            and latest_snapshot.reciprocity_score <= 35
        ):
            risk_signals.append(
                {
                    "key": "reciprocity_drop",
                    "label": "Reciprocity Drop",
                    "severity": "medium",
                    "explanation": (
                        "Reciprocity is low. Add corrections that request and "
                        "acknowledge balanced turn-taking."
                    ),
                }
            )
        if (
            latest_snapshot.response_score is not None
            and latest_snapshot.response_score <= 35
        ):
            risk_signals.append(
                {
                    "key": "response_drag",
                    "label": "Response Drag",
                    "severity": "medium",
                    "explanation": (
                        "Response pace is slow. Prefer corrections that set timing "
                        "expectations and explicit follow-up windows."
                    ),
                }
            )

    state_label = (
        conversation.get_stability_state_display()
        if hasattr(conversation, "get_stability_state_display")
        else str(conversation.stability_state or "")
    )
    return {
        "stability": {
            "state": state_label,
            "score": conversation.stability_score,
            "confidence": confidence,
            "sample_messages": conversation.stability_sample_messages,
            "sample_days": conversation.stability_sample_days,
            "computed_at": conversation.stability_last_computed_at,
        },
        "commitment": {
            "inbound": conversation.commitment_inbound_score,
            "outbound": conversation.commitment_outbound_score,
            "confidence": conversation.commitment_confidence,
            "computed_at": conversation.commitment_last_computed_at,
            "directionality": directionality,
        },
        "components": (
            {
                "reciprocity": latest_snapshot.reciprocity_score,
                "continuity": latest_snapshot.continuity_score,
                "response": latest_snapshot.response_score,
                "volatility": latest_snapshot.volatility_score,
                "inbound_response": latest_snapshot.inbound_response_score,
                "outbound_response": latest_snapshot.outbound_response_score,
                "balance_inbound": latest_snapshot.balance_inbound_score,
                "balance_outbound": latest_snapshot.balance_outbound_score,
                "source_event_ts": latest_snapshot.source_event_ts,
            }
            if latest_snapshot
            else {}
        ),
        "risk_signals": risk_signals[:8],
    }


def _store_metric_snapshot(conversation, payload):
    compare_keys = [
        "source_event_ts",
@@ -1476,6 +1779,7 @@ def _build_mitigation_artifacts(
    inspiration,
    fundamentals,
    output_profile,
    metric_context=None,
):
    fallback = _default_artifacts_from_patterns(source_text, person, output_profile)

@@ -1489,11 +1793,15 @@ def _build_mitigation_artifacts(
            "role": "system",
            "content": (
                "You design practical relationship mitigation protocols. "
-               "Return strict JSON only with keys: title, objective, fundamental_items, rules, games. "
+               "Return strict JSON only with keys: title, objective, "
+               "fundamental_items, rules, games, corrections. "
                "Each rule item must have title and content. "
                "Each game item must have title and instructions. "
+               "Each correction item must have title and clarification. "
                "If mode is auto, choose strongest artifacts. If mode is guided, strongly follow inspiration. "
-               "Output profile controls emphasis: framework (balanced), rules (rules-first), games (games-first)."
+               "Use provided metrics as risk context to tighten corrections. "
+               "Output profile controls emphasis: framework (balanced), "
+               "rules (rules-first), games (games-first)."
            ),
        },
        {
@@ -1504,6 +1812,8 @@ def _build_mitigation_artifacts(
                f"Output profile: {output_profile}\n"
                f"User inspiration: {inspiration or 'None'}\n"
                f"Fundamental items (pre-agreed): {json.dumps(fundamentals)}\n\n"
                "Metric context:\n"
                f"{json.dumps(metric_context or {}, ensure_ascii=False, default=str)}\n\n"
                f"Pattern analysis:\n{source_text}"
            ),
        },
@@ -1551,6 +1861,28 @@ def _build_mitigation_artifacts(
            if title_i and instructions_i:
                games.append({"title": title_i, "instructions": instructions_i})

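    # Parse optional "corrections" items from the model output; entries missing
    # a title or clarification are skipped and fields are length-capped.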
    raw_corrections = parsed.get("corrections")
    corrections = []
    if isinstance(raw_corrections, list):
        for item in raw_corrections:
            if not isinstance(item, dict):
                continue
            title_i = _normalize_correction_title(
                item.get("title") or "", fallback="Correction"
            )
            clarification_i = str(
                item.get("clarification") or item.get("content") or ""
            ).strip()
            source_phrase_i = str(item.get("source_phrase") or "").strip()
            if title_i and clarification_i:
                corrections.append(
                    {
                        "title": title_i[:255],
                        "clarification": clarification_i[:2000],
                        "source_phrase": source_phrase_i[:1000],
                    }
                )

    if not rules:
        rules = fallback["rules"]
    if not games:
@@ -1563,7 +1895,7 @@ def _build_mitigation_artifacts(
        "fundamental_items": merged_fundamentals,
        "rules": rules,
        "games": games,
-       "corrections": [],
+       "corrections": _normalize_violation_items(corrections),
    }


@@ -2342,6 +2674,74 @@ def _get_or_create_auto_settings(user, conversation):
    return settings_obj


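# Pairs each metric risk signal with an enabled rule or game (falling back to
# fundamental items) in round-robin order to propose proactive corrections.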
def _metric_guided_artifact_candidates(plan, metric_context):
    signals = list((metric_context or {}).get("risk_signals") or [])
    if not signals:
        return []

    artifacts = []
    for rule in plan.rules.filter(enabled=True).order_by("created_at")[:10]:
        artifacts.append(
            {
                "kind": "rule",
                "title": str(rule.title or "").strip(),
                "body": str(rule.content or "").strip(),
            }
        )
    for game in plan.games.filter(enabled=True).order_by("created_at")[:10]:
        artifacts.append(
            {
                "kind": "game",
                "title": str(game.title or "").strip(),
                "body": str(game.instructions or "").strip(),
            }
        )
    if not artifacts:
        for item in (plan.fundamental_items or [])[:10]:
            text = str(item or "").strip()
            if not text:
                continue
            artifacts.append(
                {
                    "kind": "fundamental",
                    "title": text[:100],
                    "body": text,
                }
            )
    if not artifacts:
        return []

    out = []
    for idx, signal in enumerate(signals[:8]):
        artifact = artifacts[idx % len(artifacts)]
        kind_label = {
            "rule": "Rule",
            "game": "Game",
            "fundamental": "Fundamental",
        }.get(artifact["kind"], "Artifact")
        title = _normalize_correction_title(
            f"{signal.get('label') or 'Metric Signal'} Safeguard"
        )
        clarification = (
            f"{str(signal.get('explanation') or '').strip()} "
            f"Apply {kind_label.lower()} '{artifact['title']}' in the next exchange: "
            f"{artifact['body']}"
        ).strip()
        source_phrase = (
            f"Metric signal: {signal.get('label') or 'Metric Signal'}; "
            f"Artifact: {kind_label} '{artifact['title']}'"
        )
        out.append(
            {
                "title": title,
                "source_phrase": source_phrase[:1000],
                "clarification": clarification[:2000],
                "severity": str(signal.get("severity") or "medium"),
            }
        )
    return _normalize_violation_items(out)


def _detect_violation_candidates(plan, recent_rows):
    candidates = []
    for row in recent_rows:
@@ -2442,10 +2842,10 @@ def _existing_correction_signatures(plan, exclude_id=None):
    return signatures


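# Now returns a dict with "violations" and "artifact_corrections" keys instead
# of a bare list, so callers can handle reactive and proactive items separately.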
-def _ai_detect_violations(user, plan, person, recent_rows):
+def _ai_detect_violations(user, plan, person, recent_rows, metric_context=None):
    ai_obj = AI.objects.filter(user=user).first()
    if ai_obj is None:
-        return []
+        return {"violations": [], "artifact_corrections": []}

    rules_payload = [
        {"id": str(rule.id), "title": rule.title, "content": rule.content}
@@ -2477,6 +2877,7 @@ def _ai_detect_violations(user, plan, person, recent_rows):
            "games": games_payload,
            "corrections": corrections_payload,
        },
        "metrics": metric_context or {},
        "recent_messages": recent_rows,
        "output_schema": {
            "violations": [
@@ -2486,6 +2887,14 @@ def _ai_detect_violations(user, plan, person, recent_rows):
                    "clarification": "correction-style guidance",
                    "severity": "low|medium|high",
                }
            ],
            "artifact_corrections": [
                {
                    "title": "short string",
                    "source_phrase": "artifact reference + metric rationale",
                    "clarification": "proactive correction mapped to an artifact",
                    "severity": "low|medium|high",
                }
            ]
        },
    }
@@ -2494,22 +2903,31 @@ def _ai_detect_violations(user, plan, person, recent_rows):
            "role": "system",
            "content": (
                "You detect violations of mitigation patterns in a conversation. "
                "Use recent_messages for direct violations. "
                "Use plan artifacts plus metrics for proactive artifact_corrections. "
                "Return strict JSON only. No markdown. No prose wrapper. "
                "Use only schema keys requested."
            ),
        },
        {
            "role": "user",
-            "content": json.dumps(source_payload, ensure_ascii=False),
+            "content": json.dumps(source_payload, ensure_ascii=False, default=str),
        },
    ]
    try:
        raw = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
    except Exception:
-        return []
+        return {"violations": [], "artifact_corrections": []}

    parsed = _extract_json_object(raw) or {}
-    return _normalize_violation_items(parsed.get("violations") or [])
+    return {
+        "violations": _normalize_violation_items(parsed.get("violations") or []),
+        "artifact_corrections": _normalize_violation_items(
+            parsed.get("artifact_corrections")
+            or parsed.get("artifact_based_corrections")
+            or []
+        ),
+    }


def _maybe_send_auto_notification(user, auto_settings, title, body):
@@ -2563,6 +2981,8 @@ def _run_auto_analysis_for_plan(
        }

    limit = max(10, min(int(auto_settings.sample_message_window or 40), 200))
    _refresh_conversation_stability(conversation, user, person)
    metric_context = _metric_pattern_context(conversation)
    sessions = ChatSession.objects.filter(user=user, identifier__person=person)
    messages = (
        Message.objects.filter(user=user, session__in=sessions)
@@ -2609,9 +3029,25 @@ def _run_auto_analysis_for_plan(
            "notified": False,
        }

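    # Combine AI-detected violations, heuristic matches, and the AI- and
    # metric-guided artifact corrections into one normalized list.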
-    ai_candidates = _ai_detect_violations(user, plan, person, recent_rows)
+    ai_detection = _ai_detect_violations(
+        user,
+        plan,
+        person,
+        recent_rows,
+        metric_context=metric_context,
+    )
+    ai_candidates = list(ai_detection.get("violations") or [])
+    artifact_candidates_ai = list(ai_detection.get("artifact_corrections") or [])
    heuristic_candidates = _detect_violation_candidates(plan, recent_rows)
-    violations = _normalize_violation_items(ai_candidates + heuristic_candidates)
+    artifact_candidates_metric = _metric_guided_artifact_candidates(
+        plan, metric_context
+    )
+    violations = _normalize_violation_items(
+        ai_candidates
+        + heuristic_candidates
+        + artifact_candidates_ai
+        + artifact_candidates_metric
+    )

    created_corrections = 0
    if auto_settings.auto_create_corrections and violations:
@@ -2847,40 +3283,6 @@ class AIWorkspaceContactsWidget(LoginRequiredMixin, View):
class AIWorkspacePersonWidget(LoginRequiredMixin, View):
    allowed_types = {"widget"}

    def _message_rows(self, user, person, limit):
        sessions = ChatSession.objects.filter(user=user, identifier__person=person)
        identifiers = set(
            PersonIdentifier.objects.filter(user=user, person=person).values_list(
                "identifier", flat=True
            )
        )
        messages = (
            Message.objects.filter(user=user, session__in=sessions)
            .select_related("session", "session__identifier")
            .order_by("-ts")[:limit]
        )

        rows = []
        for message in reversed(list(messages)):
            inferred_direction = _infer_direction(message, identifiers)
            rows.append(
                {
                    "message": message,
                    "direction": inferred_direction,
                    "ts_label": _format_unix_ms(message.ts),
                }
            )
        return rows

    def _recent_messages(self, user, person, limit):
        sessions = ChatSession.objects.filter(user=user, identifier__person=person)
        messages = (
            Message.objects.filter(user=user, session__in=sessions)
            .select_related("session", "session__identifier")
            .order_by("-ts")[:limit]
        )
        return list(reversed(list(messages)))

    def get(self, request, type, person_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")
@@ -2894,14 +3296,13 @@ class AIWorkspacePersonWidget(LoginRequiredMixin, View):
        limit = max(5, min(limit, 200))

        context = {
-            "title": f"{person.name} Timeline",
+            "title": f"{person.name} AI",
            "unique": f"ai-person-{person.id}",
            "window_content": "partials/ai-workspace-person-widget.html",
-            "widget_options": 'gs-w="7" gs-h="16" gs-x="0" gs-y="0" gs-min-w="4"',
+            "widget_options": 'gs-w="8" gs-h="11" gs-x="4" gs-y="0" gs-min-w="4"',
            "person": person,
            "workspace_conversation": conversation,
            "limit": limit,
-            "message_rows": self._message_rows(request.user, person, limit),
            "ai_operations": [
                ("artifacts", "Plan"),
                ("summarise", "Summary"),
@@ -2915,6 +3316,32 @@ class AIWorkspacePersonWidget(LoginRequiredMixin, View):
        return render(request, "mixins/wm/widget.html", context)


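# New widget that renders only the message timeline for a person, built on the
# module-level _message_rows_for_person helper.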
class AIWorkspacePersonTimelineWidget(LoginRequiredMixin, View):
    allowed_types = {"widget"}

    def get(self, request, type, person_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")

        person = get_object_or_404(Person, pk=person_id, user=request.user)
        try:
            limit = int(request.GET.get("limit", 20))
        except (TypeError, ValueError):
            limit = 20
        limit = max(5, min(limit, 200))

        context = {
            "title": f"{person.name} Timeline",
            "unique": f"ai-timeline-{person.id}",
            "window_content": "partials/ai-workspace-person-timeline-widget.html",
            "widget_options": 'gs-w="8" gs-h="10" gs-x="4" gs-y="11" gs-min-w="4"',
            "person": person,
            "limit": limit,
            "message_rows": _message_rows_for_person(request.user, person, limit),
        }
        return render(request, "mixins/wm/widget.html", context)


class AIWorkspaceInsightDetail(LoginRequiredMixin, View):
    allowed_types = {"page", "widget"}

@@ -2953,6 +3380,10 @@ class AIWorkspaceInsightDetail(LoginRequiredMixin, View):
                "ai_workspace_insight_help",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "information_url": reverse(
                "ai_workspace_information",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "workspace_url": f"{reverse('ai_workspace')}?person={person.id}",
        }
        return render(request, "pages/ai-workspace-insight-detail.html", context)
@@ -2976,11 +3407,62 @@ class AIWorkspaceInsightGraphs(LoginRequiredMixin, View):
                "ai_workspace_insight_help",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "information_url": reverse(
                "ai_workspace_information",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "workspace_url": f"{reverse('ai_workspace')}?person={person.id}",
        }
        return render(request, "pages/ai-workspace-insight-graphs.html", context)


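# Information page combining commitment directionality with the commitment
# graph cards; each referenced graph is annotated with its formatted value.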
class AIWorkspaceInformation(LoginRequiredMixin, View):
    allowed_types = {"page", "widget"}

    def get(self, request, type, person_id):
        if type not in self.allowed_types:
            return HttpResponseBadRequest("Invalid type specified")

        person = get_object_or_404(Person, pk=person_id, user=request.user)
        conversation = _conversation_for_person(request.user, person)
        latest_snapshot = conversation.metric_snapshots.first()
        directionality = _commitment_directionality_payload(conversation)
        commitment_graph_cards = [
            card for card in _all_graph_payload(conversation) if card["group"] == "commitment"
        ]

        graph_refs = []
        for ref in directionality.get("graph_refs", []):
            slug = ref.get("slug")
            if not slug:
                continue
            graph_refs.append(
                {
                    **ref,
                    "slug": slug,
                    "value": _format_metric_value(conversation, slug, latest_snapshot),
                }
            )
        directionality["graph_refs"] = graph_refs

        context = {
            "person": person,
            "workspace_conversation": conversation,
            "directionality": directionality,
            "commitment_graph_cards": commitment_graph_cards,
            "graphs_url": reverse(
                "ai_workspace_insight_graphs",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "help_url": reverse(
                "ai_workspace_insight_help",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "workspace_url": f"{reverse('ai_workspace')}?person={person.id}",
        }
        return render(request, "pages/ai-workspace-information.html", context)


class AIWorkspaceInsightHelp(LoginRequiredMixin, View):
    allowed_types = {"page", "widget"}

@@ -3018,6 +3500,10 @@ class AIWorkspaceInsightHelp(LoginRequiredMixin, View):
                "ai_workspace_insight_graphs",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "information_url": reverse(
                "ai_workspace_information",
                kwargs={"type": "page", "person_id": person.id},
            ),
            "workspace_url": f"{reverse('ai_workspace')}?person={person.id}",
        }
        return render(request, "pages/ai-workspace-insight-help.html", context)
@@ -3164,7 +3650,7 @@ class AIWorkspaceRunOperation(LoginRequiredMixin, View):
            and auto_settings.enabled
            and auto_settings.auto_create_mitigation
        ):
-            recent_messages = AIWorkspacePersonWidget()._recent_messages(
+            recent_messages = _recent_messages_for_person(
                request.user,
                person,
                max(20, min(auto_settings.sample_message_window, 200)),
@@ -3609,6 +4095,8 @@ class AIWorkspaceCreateMitigation(LoginRequiredMixin, View):
            else _conversation_for_person(request.user, person)
        )
        conversation.participants.add(person)
        _refresh_conversation_stability(conversation, request.user, person)
        metric_context = _metric_pattern_context(conversation)

        source_text = ""
        if source_result is not None:
@@ -3625,6 +4113,7 @@ class AIWorkspaceCreateMitigation(LoginRequiredMixin, View):
            inspiration=user_context,
            fundamentals=fundamentals,
            output_profile=output_profile,
            metric_context=metric_context,
        )
        # Deterministically seed from pasted context so long-form frameworks can
        # create fundamentals/rules/games in one pass, even when AI output is sparse.
@@ -3657,6 +4146,31 @@ class AIWorkspaceCreateMitigation(LoginRequiredMixin, View):
                instructions=str(game.get("instructions") or "").strip(),
            )

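        # Persist AI-proposed corrections, de-duplicating within this batch by
        # title/clarification signature.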
        existing_signatures = set()
        for correction in artifacts.get("corrections", []):
            title = _normalize_correction_title(
                correction.get("title") or "", fallback="Correction"
            )
            clarification = str(correction.get("clarification") or "").strip()
            source_phrase = str(correction.get("source_phrase") or "").strip()
            if not clarification:
                continue
            signature = _correction_signature(title, clarification)
            if signature in existing_signatures:
                continue
            PatternMitigationCorrection.objects.create(
                user=request.user,
                plan=plan,
                title=title[:255],
                clarification=clarification[:2000],
                source_phrase=source_phrase[:1000],
                perspective="second_person",
                share_target="both",
                language_style="adapted",
                enabled=True,
            )
            existing_signatures.add(signature)

        PatternMitigationMessage.objects.create(
            user=request.user,
            plan=plan,