@@ -163,7 +196,8 @@
// One-time migration flush to avoid stale cached pane HTML from earlier UI schemas.
localStorage.removeItem("gia_workspace_cache_v1");
localStorage.removeItem("gia_workspace_cache_v2");
- return JSON.parse(localStorage.getItem("gia_workspace_cache_v3") || "{}");
+ localStorage.removeItem("gia_workspace_cache_v3");
+ return JSON.parse(localStorage.getItem("gia_workspace_cache_v4") || "{}");
} catch (e) {
return {};
}
@@ -171,7 +205,7 @@
function persistCache() {
try {
- localStorage.setItem("gia_workspace_cache_v3", JSON.stringify(window.giaWorkspaceCache));
+ localStorage.setItem("gia_workspace_cache_v4", JSON.stringify(window.giaWorkspaceCache));
} catch (e) {
// Ignore storage write issues.
}
@@ -282,6 +316,46 @@
indicator.style.display = show ? "inline-flex" : "none";
}
+ const OPERATION_TABS = ["summarise", "draft_reply", "extract_patterns"];
+ const MITIGATION_TABS = ["plan_board", "corrections", "engage", "fundamentals", "auto", "ask_ai"];
+ const ALL_TOP_TABS = MITIGATION_TABS.concat(OPERATION_TABS);
+
+ function isMitigationTab(tabKey) {
+ return MITIGATION_TABS.indexOf(tabKey) !== -1;
+ }
+
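+ // Mitigation sub-tabs all render inside the shared "artifacts" pane; the three AI operation tabs map 1:1 to their own operations.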
+ function operationForTab(tabKey) {
+ return isMitigationTab(tabKey) ? "artifacts" : tabKey;
+ }
+
+ function setTopCapsuleActive(tabKey) {
+ ALL_TOP_TABS.forEach(function(name) {
+ const tab = document.getElementById("ai-tab-" + personId + "-" + name);
+ if (tab) {
+ tab.classList.toggle("is-active", name === tabKey);
+ }
+ });
+ }
+
+ function showOperationPane(operation) {
+ ["artifacts", "summarise", "draft_reply", "extract_patterns"].forEach(function(op) {
+ const pane = document.getElementById("ai-pane-" + personId + "-" + op);
+ if (!pane) {
+ return;
+ }
+ pane.style.display = op === operation ? "block" : "none";
+ });
+ }
+
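+ // Re-applies the remembered mitigation sub-tab (defaulting to plan_board) and highlights its capsule once the artifacts pane is shown.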
+ function applyMitigationTabSelection() {
+ const state = window.giaWorkspaceState[personId] || {};
+ const targetTab = state.currentMitigationTab || "plan_board";
+ setTopCapsuleActive(targetTab);
+ if (typeof window.giaMitigationShowTab === "function") {
+ window.giaMitigationShowTab(personId, targetTab);
+ }
+ }
+
function hydrateCachedIfAvailable(operation) {
if (operation === "artifacts") {
return false;
@@ -303,29 +377,35 @@
return false;
}
- window.giaWorkspaceShowTab = function(pid, operation) {
+ window.giaWorkspaceShowTab = function(pid, operation, tabKey) {
if (pid !== personId) {
return;
}
- ["artifacts", "summarise", "draft_reply", "extract_patterns"].forEach(function(op) {
- const tab = document.getElementById("ai-tab-" + personId + "-" + op);
- const pane = document.getElementById("ai-pane-" + personId + "-" + op);
- if (!tab || !pane) {
- return;
- }
- if (op === operation) {
- tab.classList.add("is-active");
- pane.style.display = "block";
- } else {
- tab.classList.remove("is-active");
- pane.style.display = "none";
- }
- });
+ showOperationPane(operation);
+ const activeTab = tabKey || (
+ operation === "artifacts"
+ ? ((window.giaWorkspaceState[personId] || {}).currentMitigationTab || "plan_board")
+ : operation
+ );
+ setTopCapsuleActive(activeTab);
const hydrated = hydrateCachedIfAvailable(operation);
const entry = operation === "artifacts" ? null : getCacheEntry(operation);
setCachedIndicator(hydrated || !!entry, entry ? entry.ts : null);
window.giaWorkspaceState[personId] = window.giaWorkspaceState[personId] || {};
window.giaWorkspaceState[personId].current = operation;
+ window.giaWorkspaceState[personId].currentTab = activeTab;
+ };
+
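+ // Entry point for the top capsule tabs: remember which tab was requested, then run the operation that backs it.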
+ window.giaWorkspaceOpenTab = function(pid, tabKey, forceRefresh) {
+ if (pid !== personId) {
+ return;
+ }
+ window.giaWorkspaceState[personId] = window.giaWorkspaceState[personId] || {};
+ if (isMitigationTab(tabKey)) {
+ window.giaWorkspaceState[personId].currentMitigationTab = tabKey;
+ }
+ window.giaWorkspaceState[personId].pendingTabKey = tabKey;
+ window.giaWorkspaceRun(personId, operationForTab(tabKey), !!forceRefresh);
};
window.giaWorkspaceRun = function(pid, operation, forceRefresh) {
@@ -339,11 +419,25 @@
return;
}
const currentState = window.giaWorkspaceState[personId] || {};
+ const targetTabKey = currentState.pendingTabKey || (
+ operation === "artifacts"
+ ? (currentState.currentMitigationTab || "plan_board")
+ : operation
+ );
if (!forceRefresh && currentState.current === operation && pane.dataset.loaded === "1") {
- window.giaWorkspaceShowTab(personId, operation);
+ window.giaWorkspaceShowTab(personId, operation, targetTabKey);
+ if (operation === "artifacts") {
+ applyMitigationTabSelection();
+ }
+ if (window.giaWorkspaceState[personId]) {
+ window.giaWorkspaceState[personId].pendingTabKey = "";
+ }
return;
}
- window.giaWorkspaceShowTab(personId, operation);
+ window.giaWorkspaceShowTab(personId, operation, targetTabKey);
+ if (operation === "artifacts") {
+ applyMitigationTabSelection();
+ }
const key = cacheKey(operation);
const entry = getCacheEntry(operation);
@@ -360,6 +454,12 @@
if (operation === "draft_reply" && typeof window.giaWorkspaceUseDraft === "function") {
window.giaWorkspaceUseDraft(personId, operation, 0);
}
+ if (operation === "artifacts") {
+ applyMitigationTabSelection();
+ }
+ if (window.giaWorkspaceState[personId]) {
+ window.giaWorkspaceState[personId].pendingTabKey = "";
+ }
return;
}
@@ -391,6 +491,12 @@
if (operation === "draft_reply" && typeof window.giaWorkspaceUseDraft === "function") {
window.giaWorkspaceUseDraft(personId, operation, 0);
}
+ if (operation === "artifacts") {
+ applyMitigationTabSelection();
+ }
+ if (window.giaWorkspaceState[personId]) {
+ window.giaWorkspaceState[personId].pendingTabKey = "";
+ }
})
.catch(function() {
pane.innerHTML = 'Failed to load AI response.';
@@ -401,8 +507,13 @@
if (pid !== personId) {
return;
}
- const current = (window.giaWorkspaceState[personId] && window.giaWorkspaceState[personId].current) || "summarise";
- window.giaWorkspaceRun(personId, current, true);
+ const state = window.giaWorkspaceState[personId] || {};
+ const currentTab = state.currentTab || (
+ state.current === "artifacts"
+ ? (state.currentMitigationTab || "plan_board")
+ : (state.current || "plan_board")
+ );
+ window.giaWorkspaceOpenTab(personId, currentTab, true);
};
window.giaWorkspaceUseDraft = function(pid, operation, index) {
@@ -503,12 +614,14 @@
names.forEach(function(name) {
const pane = document.getElementById("mitigation-tab-" + pid + "-" + name);
const tab = document.getElementById("mitigation-tab-btn-" + pid + "-" + name);
- if (!pane || !tab) {
+ if (!pane) {
return;
}
const active = (name === tabName);
pane.style.display = active ? "block" : "none";
- tab.classList.toggle("is-active", active);
+ if (tab) {
+ tab.classList.toggle("is-active", active);
+ }
});
const shell = document.getElementById("mitigation-shell-" + pid);
if (!shell) {
@@ -584,6 +697,6 @@
};
}
- window.giaWorkspaceRun(personId, "artifacts", false);
+ window.giaWorkspaceOpenTab(personId, "plan_board", false);
})();
diff --git a/core/templates/partials/compose-panel.html b/core/templates/partials/compose-panel.html
index 42bcbaa..f01fff4 100644
--- a/core/templates/partials/compose-panel.html
+++ b/core/templates/partials/compose-panel.html
@@ -121,6 +121,25 @@
data-engage-send-url="{{ compose_engage_send_url }}">
{% for msg in serialized_messages %}
+      {% if msg.gap_fragments %}
+        <div class="compose-gap-artifacts">
+          {% for frag in msg.gap_fragments %}
+            <article class="compose-artifact compose-artifact-gap">
+              <p class="compose-artifact-head">
+                <span>{{ frag.focus }} · {{ frag.lag }}</span>
+                <span class="compose-artifact-score">Score {{ frag.score }}</span>
+              </p>
+              {% if frag.calculation %}
+                <p class="compose-artifact-detail">How: {{ frag.calculation }}</p>
+              {% endif %}
+              {% if frag.psychology %}
+                <p class="compose-artifact-detail">Meaning: {{ frag.psychology }}</p>
+              {% endif %}
+            </article>
+          {% endfor %}
+        </div>
+      {% endif %}
{% if msg.image_urls %}
{% for image_url in msg.image_urls %}
@@ -152,6 +171,21 @@
{{ msg.display_ts }}{% if msg.author %} · {{ msg.author }}{% endif %}
+        {% if msg.metric_fragments %}
+          <div class="compose-metric-artifacts">
+            {% for frag in msg.metric_fragments %}
+              <article class="compose-artifact compose-artifact-metric">
+                <p class="compose-artifact-head">
+                  <span>{{ frag.title }}</span>
+                  <span class="compose-artifact-score">{{ frag.value }}</span>
+                </p>
+              </article>
+            {% endfor %}
+          </div>
+        {% endif %}
{% empty %}
No stored messages for this contact yet.
@@ -219,13 +253,15 @@
}
#{{ panel_id }} .compose-row {
display: flex;
+ flex-direction: column;
+ gap: 0.3rem;
margin-bottom: 0.5rem;
}
#{{ panel_id }} .compose-row.is-in {
- justify-content: flex-start;
+ align-items: flex-start;
}
#{{ panel_id }} .compose-row.is-out {
- justify-content: flex-end;
+ align-items: flex-end;
}
#{{ panel_id }} .compose-bubble {
max-width: min(85%, 46rem);
@@ -265,6 +301,49 @@
#{{ panel_id }} .compose-msg-meta {
margin: 0;
}
+ #{{ panel_id }} .compose-gap-artifacts {
+ align-self: center;
+ width: min(92%, 34rem);
+ }
+ #{{ panel_id }} .compose-metric-artifacts {
+ width: min(86%, 46rem);
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(9.4rem, 1fr));
+ gap: 0.28rem;
+ }
+ #{{ panel_id }} .compose-artifact {
+ border: 1px dashed rgba(0, 0, 0, 0.16);
+ border-radius: 8px;
+ background: rgba(252, 253, 255, 0.96);
+ padding: 0.28rem 0.38rem;
+ }
+ #{{ panel_id }} .compose-artifact.compose-artifact-gap {
+ margin-bottom: 0.2rem;
+ }
+ #{{ panel_id }} .compose-artifact-head {
+ margin: 0;
+ display: flex;
+ gap: 0.3rem;
+ align-items: center;
+ color: #3f4f67;
+ font-size: 0.68rem;
+ line-height: 1.25;
+ }
+ #{{ panel_id }} .compose-artifact-head .icon {
+ color: #6a88b4;
+ }
+ #{{ panel_id }} .compose-artifact-score {
+ margin-left: auto;
+ color: #2f4f7a;
+ font-weight: 700;
+ font-size: 0.66rem;
+ }
+ #{{ panel_id }} .compose-artifact-detail {
+ margin: 0.15rem 0 0;
+ color: #637185;
+ font-size: 0.64rem;
+ line-height: 1.25;
+ }
#{{ panel_id }} .compose-empty {
margin: 0;
color: #6f6f6f;
@@ -456,6 +535,7 @@
border-radius: 8px;
padding: 0.35rem 0.42rem;
background: #fff;
+ min-width: 0;
}
#{{ panel_id }} .compose-qi-chip p {
margin: 0;
@@ -468,9 +548,22 @@
#{{ panel_id }} .compose-qi-chip .v {
font-size: 0.78rem;
font-weight: 600;
+ display: flex;
+ align-items: flex-start;
+ min-width: 0;
+ gap: 0.28rem;
+ white-space: normal;
+ overflow-wrap: anywhere;
+ word-break: break-word;
+ }
+ #{{ panel_id }} .compose-qi-chip .v > span:last-child {
+ min-width: 0;
+ overflow-wrap: anywhere;
+ word-break: break-all;
}
#{{ panel_id }} .compose-qi-list {
display: grid;
+ grid-template-columns: repeat(2, minmax(0, 1fr));
gap: 0.36rem;
}
#{{ panel_id }} .compose-qi-row {
@@ -478,6 +571,7 @@
border-radius: 8px;
background: #fff;
padding: 0.42rem 0.46rem;
+ height: 100%;
}
#{{ panel_id }} .compose-qi-row-head {
display: flex;
@@ -750,6 +844,88 @@
row.className = "compose-row " + (outgoing ? "is-out" : "is-in");
row.dataset.ts = String(msg.ts || 0);
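+ // Renders the centred "Unseen Gap" chips above a bubble when the payload carries gap fragments.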
+ const appendGapArtifacts = function (fragments) {
+ if (!Array.isArray(fragments) || !fragments.length) {
+ return;
+ }
+ const wrap = document.createElement("div");
+ wrap.className = "compose-gap-artifacts";
+ fragments.forEach(function (fragment) {
+ const artifact = document.createElement("article");
+ artifact.className = "compose-artifact compose-artifact-gap";
+ const head = document.createElement("p");
+ head.className = "compose-artifact-head";
+ const icon = document.createElement("span");
+ icon.className = "icon is-small";
+ icon.innerHTML = '
';
+ const focus = document.createElement("span");
+ const focusText = String(fragment.focus || "Response gap");
+ const lagText = String(fragment.lag || "");
+ focus.textContent = lagText ? (focusText + " · " + lagText) : focusText;
+ const score = document.createElement("span");
+ score.className = "compose-artifact-score";
+ score.textContent = "Score " + String(fragment.score || "-");
+ head.appendChild(icon);
+ head.appendChild(focus);
+ head.appendChild(score);
+ artifact.appendChild(head);
+ if (fragment.calculation) {
+ const calc = document.createElement("p");
+ calc.className = "compose-artifact-detail";
+ calc.textContent = "How: " + String(fragment.calculation || "");
+ artifact.appendChild(calc);
+ }
+ if (fragment.psychology) {
+ const psych = document.createElement("p");
+ psych.className = "compose-artifact-detail";
+ psych.textContent = "Meaning: " + String(fragment.psychology || "");
+ artifact.appendChild(psych);
+ }
+ wrap.appendChild(artifact);
+ });
+ row.appendChild(wrap);
+ };
+
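+ // Renders compact metric chips after the bubble; calculation and psychology copy is exposed via the hover title.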
+ const appendMetricArtifacts = function (fragments) {
+ if (!Array.isArray(fragments) || !fragments.length) {
+ return;
+ }
+ const wrap = document.createElement("div");
+ wrap.className = "compose-metric-artifacts";
+ fragments.forEach(function (fragment) {
+ const artifact = document.createElement("article");
+ artifact.className = "compose-artifact compose-artifact-metric";
+ const calc = String(fragment.calculation || "");
+ const psych = String(fragment.psychology || "");
+ const tips = [];
+ if (calc) {
+ tips.push("How it is calculated: " + calc);
+ }
+ if (psych) {
+ tips.push("Psychological interpretation: " + psych);
+ }
+ artifact.title = tips.join(" | ");
+ const head = document.createElement("p");
+ head.className = "compose-artifact-head";
+ const icon = document.createElement("span");
+ icon.className = "icon is-small";
+ icon.innerHTML = '
';
+ const title = document.createElement("span");
+ title.textContent = String(fragment.title || "Metric");
+ const value = document.createElement("span");
+ value.className = "compose-artifact-score";
+ value.textContent = String(fragment.value || "-");
+ head.appendChild(icon);
+ head.appendChild(title);
+ head.appendChild(value);
+ artifact.appendChild(head);
+ wrap.appendChild(artifact);
+ });
+ row.appendChild(wrap);
+ };
+
+ appendGapArtifacts(msg.gap_fragments);
+
const bubble = document.createElement("article");
bubble.className = "compose-bubble " + (outgoing ? "is-out" : "is-in");
@@ -787,6 +963,7 @@
bubble.appendChild(meta);
row.appendChild(bubble);
+ appendMetricArtifacts(msg.metric_fragments);
const empty = thread.querySelector(".compose-empty");
if (empty) {
empty.remove();
@@ -1144,20 +1321,83 @@
const docs = Array.isArray(payload.docs) ? payload.docs : [];
container.innerHTML = "";
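+ // Maps the reported state text (participant feedback or stability) to a face icon, colour class, and short label.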
+ const stateFaceMeta = function (stateText) {
+ const state = String(stateText || "").toLowerCase();
+ if (state.includes("balanced")) {
+ return {
+ icon: "fa-regular fa-face-smile",
+ className: "has-text-success",
+ label: "Balanced"
+ };
+ }
+ if (state.includes("withdrawing")) {
+ return {
+ icon: "fa-regular fa-face-frown",
+ className: "has-text-danger",
+ label: "Withdrawing"
+ };
+ }
+ if (state.includes("overextending")) {
+ return {
+ icon: "fa-regular fa-face-meh",
+ className: "has-text-warning",
+ label: "Overextending"
+ };
+ }
+ if (state.includes("stable")) {
+ return {
+ icon: "fa-regular fa-face-smile",
+ className: "has-text-success",
+ label: "Positive"
+ };
+ }
+ if (state.includes("watch")) {
+ return {
+ icon: "fa-regular fa-face-meh",
+ className: "has-text-warning",
+ label: "Mixed"
+ };
+ }
+ if (state.includes("fragile")) {
+ return {
+ icon: "fa-regular fa-face-frown",
+ className: "has-text-danger",
+ label: "Strained"
+ };
+ }
+ return {
+ icon: "fa-regular fa-face-meh-blank",
+ className: "has-text-grey",
+ label: "Unknown"
+ };
+ };
+ const stateFace = stateFaceMeta(summary.state);
+
const head = document.createElement("div");
head.className = "compose-qi-head";
[
- ["Platform", summary.platform || "-"],
- ["State", summary.state || "-"],
- ["Data Points", String(summary.snapshot_count || 0)],
- ["Thread", summary.thread || "-"],
+ { key: "Platform", value: summary.platform || "-" },
+ {
+ key: "Participant State",
+ value: summary.state || "-",
+ icon: stateFace.icon,
+ className: stateFace.className,
+ },
+ { key: "Data Points", value: String(summary.snapshot_count || 0) },
+ { key: "Thread", value: summary.thread || "-" },
].forEach(function (pair) {
const chip = document.createElement("div");
chip.className = "compose-qi-chip";
- chip.innerHTML = (
- '
' + pair[0] + "
"
- + '
' + pair[1] + "
"
- );
+ let valueHtml = String(pair.value || "-");
+ if (pair.icon) {
+ valueHtml = (
+ '
'
+ + ''
+ + ""
+ + "
" + valueHtml + ""
+ );
+ }
+ chip.innerHTML = '
' + pair.key + "
" + '
' + valueHtml + "
";
head.appendChild(chip);
});
container.appendChild(head);
@@ -1205,15 +1445,6 @@
});
container.appendChild(docsList);
}
- const openBtn = document.createElement("a");
- openBtn.className = "button is-light is-small is-rounded";
- openBtn.href = "{{ ai_workspace_url }}";
- openBtn.innerHTML = (
- '
'
- + "
Open Minimal AI Workspace"
- );
- openBtn.style.marginTop = "0.45rem";
- container.appendChild(openBtn);
} catch (err) {
setCardLoading(card, false);
card.querySelector(".compose-ai-content").textContent =
diff --git a/core/templates/partials/osint/list-table.html b/core/templates/partials/osint/list-table.html
index 36b004b..d89123b 100644
--- a/core/templates/partials/osint/list-table.html
+++ b/core/templates/partials/osint/list-table.html
@@ -51,12 +51,46 @@
{% endif %}
+
+
{% for column in osint_columns %}
- |
+ |
{% if column.sortable %}
{% for cell in row.cells %}
- |
+ |
{% if cell.kind == "id_copy" %}
{% endif %}
-
- {{ osint_result_count }} result{% if osint_result_count != 1 %}s{% endif %}
-
+
+
+
diff --git a/core/views/compose.py b/core/views/compose.py
index e385be1..6440547 100644
--- a/core/views/compose.py
+++ b/core/views/compose.py
@@ -30,7 +30,7 @@ from core.models import (
WorkspaceConversation,
)
from core.realtime.typing_state import get_person_typing_state
-from core.views.workspace import _build_engage_payload, _parse_draft_options
+from core.views.workspace import INSIGHT_METRICS, _build_engage_payload, _parse_draft_options
COMPOSE_WS_TOKEN_SALT = "compose-ws"
COMPOSE_ENGAGE_TOKEN_SALT = "compose-engage"
@@ -169,6 +169,320 @@ def _serialize_message(msg: Message) -> dict:
}
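+# Metric chips appended after the newest thread message: source object (conversation or snapshot), field, and display precision.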
+THREAD_METRIC_FRAGMENT_SPECS = (
+ {
+ "slug": "stability_score",
+ "title": "Stability Score",
+ "source": "conversation",
+ "field": "stability_score",
+ "precision": 2,
+ },
+ {
+ "slug": "stability_confidence",
+ "title": "Stability Confidence",
+ "source": "conversation",
+ "field": "stability_confidence",
+ "precision": 3,
+ },
+ {
+ "slug": "sample_messages",
+ "title": "Sample Messages",
+ "source": "conversation",
+ "field": "stability_sample_messages",
+ "precision": 0,
+ },
+ {
+ "slug": "sample_days",
+ "title": "Sample Days",
+ "source": "conversation",
+ "field": "stability_sample_days",
+ "precision": 0,
+ },
+ {
+ "slug": "commitment_inbound",
+ "title": "Commit In",
+ "source": "conversation",
+ "field": "commitment_inbound_score",
+ "precision": 2,
+ },
+ {
+ "slug": "commitment_outbound",
+ "title": "Commit Out",
+ "source": "conversation",
+ "field": "commitment_outbound_score",
+ "precision": 2,
+ },
+ {
+ "slug": "commitment_confidence",
+ "title": "Commit Confidence",
+ "source": "conversation",
+ "field": "commitment_confidence",
+ "precision": 3,
+ },
+ {
+ "slug": "inbound_messages",
+ "title": "Inbound Messages",
+ "source": "snapshot",
+ "field": "inbound_messages",
+ "precision": 0,
+ },
+ {
+ "slug": "outbound_messages",
+ "title": "Outbound Messages",
+ "source": "snapshot",
+ "field": "outbound_messages",
+ "precision": 0,
+ },
+ {
+ "slug": "reciprocity_score",
+ "title": "Reciprocity",
+ "source": "snapshot",
+ "field": "reciprocity_score",
+ "precision": 2,
+ },
+ {
+ "slug": "continuity_score",
+ "title": "Continuity",
+ "source": "snapshot",
+ "field": "continuity_score",
+ "precision": 2,
+ },
+ {
+ "slug": "response_score",
+ "title": "Response",
+ "source": "snapshot",
+ "field": "response_score",
+ "precision": 2,
+ },
+ {
+ "slug": "volatility_score",
+ "title": "Volatility",
+ "source": "snapshot",
+ "field": "volatility_score",
+ "precision": 2,
+ },
+ {
+ "slug": "inbound_response_score",
+ "title": "Inbound Response",
+ "source": "snapshot",
+ "field": "inbound_response_score",
+ "precision": 2,
+ },
+ {
+ "slug": "outbound_response_score",
+ "title": "Outbound Response",
+ "source": "snapshot",
+ "field": "outbound_response_score",
+ "precision": 2,
+ },
+ {
+ "slug": "balance_inbound_score",
+ "title": "Inbound Balance",
+ "source": "snapshot",
+ "field": "balance_inbound_score",
+ "precision": 2,
+ },
+ {
+ "slug": "balance_outbound_score",
+ "title": "Outbound Balance",
+ "source": "snapshot",
+ "field": "balance_outbound_score",
+ "precision": 2,
+ },
+)
+
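+# Copy overrides for chips where the shared INSIGHT_METRICS wording does not fit raw message counts.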
+THREAD_METRIC_COPY_OVERRIDES = {
+ "inbound_messages": {
+ "calculation": (
+ "Count of counterpart-to-user messages in the sampled analysis window."
+ ),
+ "psychology": (
+ "Lower counts can indicate reduced reach-back or temporary withdrawal."
+ ),
+ },
+ "outbound_messages": {
+ "calculation": (
+ "Count of user-to-counterpart messages in the sampled analysis window."
+ ),
+ "psychology": (
+ "Large imbalances can reflect chasing or over-functioning dynamics."
+ ),
+ },
+}
+
+
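+# Most recently active WorkspaceConversation that includes this person, or None.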
+def _workspace_conversation_for_person(user, person):
+ if person is None:
+ return None
+ return (
+ WorkspaceConversation.objects.filter(
+ user=user,
+ participants=person,
+ )
+ .order_by("-last_event_ts", "-created_at")
+ .first()
+ )
+
+
+def _counterpart_identifiers_for_person(user, person):
+ if person is None:
+ return set()
+ values = (
+ PersonIdentifier.objects.filter(user=user, person=person)
+ .values_list("identifier", flat=True)
+ )
+ return {str(value or "").strip() for value in values if str(value or "").strip()}
+
+
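+# A sender UUID that matches a counterpart identifier forces inbound; otherwise fall back to _is_outgoing().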
+def _message_is_outgoing_for_analysis(msg, counterpart_identifiers):
+ sender = str(getattr(msg, "sender_uuid", "") or "").strip()
+ if sender and sender in counterpart_identifiers:
+ return False
+ return _is_outgoing(msg)
+
+
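+# Render a millisecond gap as a compact "32s" / "5m" / "3h" / "3h 20m" style label.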
+def _format_gap_duration(ms_value):
+ value = max(0, int(ms_value or 0))
+ seconds = value // 1000
+ if seconds < 60:
+ return f"{seconds}s"
+ minutes = seconds // 60
+ if minutes < 60:
+ return f"{minutes}m"
+ hours = minutes // 60
+ rem_minutes = minutes % 60
+ if rem_minutes == 0:
+ return f"{hours}h"
+ return f"{hours}h {rem_minutes}m"
+
+
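+# Fallback gap score when no snapshot value exists: 100 at zero lag, 50 at the target lag, tending toward 0 for long waits.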
+def _score_from_lag_for_thread(lag_ms, target_hours=4):
+ if lag_ms is None:
+ return 50.0
+ target_ms = max(1, target_hours) * 60 * 60 * 1000
+ return max(0.0, min(100.0, 100.0 / (1.0 + (lag_ms / target_ms))))
+
+
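+# Merge the shared INSIGHT_METRICS copy for a metric with any thread-specific override.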
+def _metric_copy(slug, fallback_title):
+ spec = INSIGHT_METRICS.get(slug) or {}
+ override = THREAD_METRIC_COPY_OVERRIDES.get(slug) or {}
+ return {
+ "title": spec.get("title") or fallback_title,
+ "calculation": override.get("calculation") or spec.get("calculation") or "",
+ "psychology": override.get("psychology") or spec.get("psychology") or "",
+ }
+
+
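+# Format a metric value at the requested precision, trimming trailing zeroes and passing non-numeric values through.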
+def _format_metric_fragment_value(value, precision):
+ if value is None:
+ return "-"
+ try:
+ number = float(value)
+ except (TypeError, ValueError):
+ return str(value)
+ if int(precision or 0) <= 0:
+ return str(int(round(number)))
+ rounded = round(number, int(precision))
+ if float(rounded).is_integer():
+ return str(int(rounded))
+ return f"{rounded:.{int(precision)}f}"
+
+
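+# Collect metric chips from the conversation row and its first related metric snapshot, skipping missing sources.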
+def _build_thread_metric_fragments(conversation):
+ if conversation is None:
+ return []
+ snapshot = conversation.metric_snapshots.first()
+ fragments = []
+ for spec in THREAD_METRIC_FRAGMENT_SPECS:
+ if spec["source"] == "snapshot":
+ source_obj = snapshot
+ else:
+ source_obj = conversation
+ if source_obj is None:
+ continue
+ value = getattr(source_obj, spec["field"], None)
+ copy = _metric_copy(spec["slug"], spec["title"])
+ fragments.append(
+ {
+ "slug": spec["slug"],
+ "title": copy["title"],
+ "value": _format_metric_fragment_value(value, spec.get("precision", 2)),
+ "calculation": copy["calculation"],
+ "psychology": copy["psychology"],
+ }
+ )
+ return fragments
+
+
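+# Describe one reply-delay gap, preferring the stored response score for that direction over the lag-based estimate.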
+def _build_gap_fragment(is_outgoing_reply, lag_ms, snapshot):
+ metric_slug = "outbound_response_score" if is_outgoing_reply else "inbound_response_score"
+ copy = _metric_copy(metric_slug, "Response Score")
+ score_value = None
+ if snapshot is not None:
+ score_value = getattr(
+ snapshot,
+ "outbound_response_score" if is_outgoing_reply else "inbound_response_score",
+ None,
+ )
+ if score_value is None:
+ score_value = _score_from_lag_for_thread(lag_ms)
+ return {
+ "title": "Unseen Gap",
+ "focus": "Your reply delay" if is_outgoing_reply else "Counterpart reply delay",
+ "lag": _format_gap_duration(lag_ms),
+ "score": _format_metric_fragment_value(score_value, 2),
+ "calculation": copy["calculation"],
+ "psychology": copy["psychology"],
+ }
+
+
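+# Serialize messages and attach a gap chip wherever the sender direction flips, plus thread metrics on the final message.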
+def _serialize_messages_with_artifacts(
+ messages,
+ counterpart_identifiers=None,
+ conversation=None,
+ seed_previous=None,
+):
+ rows = list(messages or [])
+ serialized = [_serialize_message(msg) for msg in rows]
+ for item in serialized:
+ item["gap_fragments"] = []
+ item["metric_fragments"] = []
+
+ counterpart_identifiers = set(counterpart_identifiers or [])
+ snapshot = conversation.metric_snapshots.first() if conversation is not None else None
+
+ prev_msg = seed_previous
+ prev_ts = int(prev_msg.ts or 0) if prev_msg is not None else None
+ prev_outgoing = (
+ _message_is_outgoing_for_analysis(prev_msg, counterpart_identifiers)
+ if prev_msg is not None
+ else None
+ )
+
+ for idx, msg in enumerate(rows):
+ current_ts = int(msg.ts or 0)
+ current_outgoing = _message_is_outgoing_for_analysis(msg, counterpart_identifiers)
+ if (
+ prev_msg is not None
+ and prev_ts is not None
+ and prev_outgoing is not None
+ and current_outgoing != prev_outgoing
+ and current_ts >= prev_ts
+ ):
+ lag_ms = current_ts - prev_ts
+ serialized[idx]["gap_fragments"].append(
+ _build_gap_fragment(current_outgoing, lag_ms, snapshot)
+ )
+ prev_msg = msg
+ prev_ts = current_ts
+ prev_outgoing = current_outgoing
+
+ if serialized:
+ serialized[-1]["metric_fragments"] = _build_thread_metric_fragments(conversation)
+
+ return serialized
+
+
def _owner_name(user) -> str:
return (
user.first_name
@@ -571,6 +885,21 @@ def _quick_insights_rows(conversation):
}
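+# Human-readable participant-feedback state (Withdrawing/Overextending/Balanced) for this person, or "" when none is stored.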
+def _participant_feedback_state_label(conversation, person):
+ payload = conversation.participant_feedback or {}
+ if not isinstance(payload, dict) or person is None:
+ return ""
+ raw = payload.get(str(person.id)) or {}
+ if not isinstance(raw, dict):
+ return ""
+ state_key = str(raw.get("state") or "").strip().lower()
+ return {
+ "withdrawing": "Withdrawing",
+ "overextending": "Overextending",
+ "balanced": "Balanced",
+ }.get(state_key, "")
+
+
def _build_engage_prompt(owner_name, person_name, transcript):
return [
{
@@ -743,6 +1072,10 @@ def _panel_context(
base = _context_base(request.user, service, identifier, person)
limit = _safe_limit(request.GET.get("limit") or request.POST.get("limit"))
session_bundle = _load_messages(request.user, base["person_identifier"], limit)
+ conversation = _workspace_conversation_for_person(request.user, base["person"])
+ counterpart_identifiers = _counterpart_identifiers_for_person(
+ request.user, base["person"]
+ )
last_ts = 0
if session_bundle["messages"]:
last_ts = int(session_bundle["messages"][-1].ts or 0)
@@ -773,9 +1106,11 @@ def _panel_context(
"person_identifier": base["person_identifier"],
"session": session_bundle["session"],
"messages": session_bundle["messages"],
- "serialized_messages": [
- _serialize_message(msg) for msg in session_bundle["messages"]
- ],
+ "serialized_messages": _serialize_messages_with_artifacts(
+ session_bundle["messages"],
+ counterpart_identifiers=counterpart_identifiers,
+ conversation=conversation,
+ ),
"last_ts": last_ts,
"limit": limit,
"notice_message": notice,
@@ -900,13 +1235,18 @@ class ComposeThread(LoginRequiredMixin, View):
base = _context_base(request.user, service, identifier, person)
latest_ts = after_ts
messages = []
+ seed_previous = None
if base["person_identifier"] is not None:
session, _ = ChatSession.objects.get_or_create(
user=request.user,
identifier=base["person_identifier"],
)
- queryset = Message.objects.filter(user=request.user, session=session)
+ base_queryset = Message.objects.filter(user=request.user, session=session)
+ queryset = base_queryset
if after_ts > 0:
+ seed_previous = (
+ base_queryset.filter(ts__lte=after_ts).order_by("-ts").first()
+ )
queryset = queryset.filter(ts__gt=after_ts)
messages = list(
queryset.select_related(
@@ -924,8 +1264,17 @@ class ComposeThread(LoginRequiredMixin, View):
)
if newest:
latest_ts = max(latest_ts, int(newest))
+ conversation = _workspace_conversation_for_person(request.user, base["person"])
+ counterpart_identifiers = _counterpart_identifiers_for_person(
+ request.user, base["person"]
+ )
payload = {
- "messages": [_serialize_message(msg) for msg in messages],
+ "messages": _serialize_messages_with_artifacts(
+ messages,
+ counterpart_identifiers=counterpart_identifiers,
+ conversation=conversation,
+ seed_previous=seed_previous,
+ ),
"last_ts": latest_ts,
"typing": get_person_typing_state(
user_id=request.user.id,
@@ -1122,6 +1471,7 @@ class ComposeQuickInsights(LoginRequiredMixin, View):
)
payload = _quick_insights_rows(conversation)
+ participant_state = _participant_feedback_state_label(conversation, person)
return JsonResponse(
{
"ok": True,
@@ -1129,7 +1479,9 @@ class ComposeQuickInsights(LoginRequiredMixin, View):
"summary": {
"person_name": person.name,
"platform": conversation.get_platform_type_display(),
- "state": conversation.get_stability_state_display(),
+ "state": participant_state
+ or conversation.get_stability_state_display(),
+ "stability_state": conversation.get_stability_state_display(),
"thread": conversation.platform_thread_id or "",
"last_event": _format_ts_label(conversation.last_event_ts or 0)
if conversation.last_event_ts
@@ -1150,6 +1502,7 @@ class ComposeQuickInsights(LoginRequiredMixin, View):
"docs": [
"Each row shows current value, percent change vs previous point, and data-point count.",
"Arrow color indicates improving or risk direction for that metric.",
+ "State uses participant feedback (Withdrawing/Overextending/Balanced) when available.",
"Face indicator maps value range to positive, mixed, or strained climate.",
"Use this card for fast triage; open AI Workspace for full graphs and details.",
],
diff --git a/core/views/osint.py b/core/views/osint.py
index b32bda8..e6d1462 100644
--- a/core/views/osint.py
+++ b/core/views/osint.py
@@ -498,6 +498,7 @@ class OSINTListBase(ObjectList):
object_list: list[Any],
request_type: str,
) -> list[dict[str, Any]]:
+ context_type = _context_type(request_type)
rows = []
for item in object_list:
row = {"id": str(item.pk), "cells": [], "actions": []}
diff --git a/core/views/personas.py b/core/views/personas.py
index d7f112c..38b361d 100644
--- a/core/views/personas.py
+++ b/core/views/personas.py
@@ -23,6 +23,7 @@ class PersonaList(LoginRequiredMixin, OSINTListBase):
class PersonaCreate(LoginRequiredMixin, ObjectCreate):
model = Persona
form_class = PersonaForm
+ window_content = "mixins/window-content/persona-form.html"
submit_url_name = "persona_create"
@@ -30,6 +31,7 @@ class PersonaCreate(LoginRequiredMixin, ObjectCreate):
class PersonaUpdate(LoginRequiredMixin, ObjectUpdate):
model = Persona
form_class = PersonaForm
+ window_content = "mixins/window-content/persona-form.html"
submit_url_name = "persona_update"
diff --git a/core/views/workspace.py b/core/views/workspace.py
index e4a6d0a..b063633 100644
--- a/core/views/workspace.py
+++ b/core/views/workspace.py
@@ -629,6 +629,56 @@ def _compose_page_url_for_person(user, person):
return f"{reverse('compose_page')}?{query}"
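+# Normalises the stored per-person participant_feedback entry into display fields: state label, icon, colour class, counts, and a local-time updated label.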
+def _participant_feedback_display(conversation, person):
+ payload = conversation.participant_feedback or {}
+ if not isinstance(payload, dict):
+ return None
+
+ raw = payload.get(str(person.id)) or {}
+ if not isinstance(raw, dict):
+ return None
+
+ state_key = str(raw.get("state") or "").strip().lower()
+ state_label = {
+ "withdrawing": "Withdrawing",
+ "overextending": "Overextending",
+ "balanced": "Balanced",
+ }.get(state_key, "Unknown")
+ state_icon = {
+ "withdrawing": "fa-regular fa-face-frown",
+ "overextending": "fa-regular fa-face-meh",
+ "balanced": "fa-regular fa-face-smile",
+ }.get(state_key, "fa-regular fa-face-meh-blank")
+ state_class = {
+ "withdrawing": "has-text-danger",
+ "overextending": "has-text-warning",
+ "balanced": "has-text-success",
+ }.get(state_key, "has-text-grey")
+
+ updated_label = ""
+ updated_raw = raw.get("updated_at")
+ if updated_raw:
+ try:
+ dt_value = datetime.fromisoformat(str(updated_raw))
+ if dt_value.tzinfo is None:
+ dt_value = dt_value.replace(tzinfo=timezone.utc)
+ updated_label = dj_timezone.localtime(dt_value).strftime("%Y-%m-%d %H:%M")
+ except Exception:
+ updated_label = str(updated_raw)
+
+ return {
+ "state_key": state_key or "unknown",
+ "state_label": state_label,
+ "state_icon": state_icon,
+ "state_class": state_class,
+ "inbound_messages": raw.get("inbound_messages"),
+ "outbound_messages": raw.get("outbound_messages"),
+ "sample_messages": raw.get("sample_messages"),
+ "sample_days": raw.get("sample_days"),
+ "updated_at_label": updated_label,
+ }
+
+
def _message_rows_for_person(user, person, limit):
sessions = ChatSession.objects.filter(user=user, identifier__person=person)
identifiers = set(
@@ -3302,6 +3352,9 @@ class AIWorkspacePersonWidget(LoginRequiredMixin, View):
"widget_options": 'gs-w="8" gs-h="11" gs-x="4" gs-y="0" gs-min-w="4"',
"person": person,
"workspace_conversation": conversation,
+ "participant_feedback_display": _participant_feedback_display(
+ conversation, person
+ ),
"limit": limit,
"ai_operations": [
("artifacts", "Plan"),
@@ -3454,6 +3507,10 @@ class AIWorkspaceInformation(LoginRequiredMixin, View):
"ai_workspace_insight_graphs",
kwargs={"type": "page", "person_id": person.id},
),
+ "information_url": reverse(
+ "ai_workspace_information",
+ kwargs={"type": "page", "person_id": person.id},
+ ),
"help_url": reverse(
"ai_workspace_insight_help",
kwargs={"type": "page", "person_id": person.id},
@@ -3734,9 +3791,7 @@ class AIWorkspaceRunOperation(LoginRequiredMixin, View):
limit = max(5, min(limit, 200))
user_notes = request.GET.get("user_notes", "")
- messages = AIWorkspacePersonWidget()._recent_messages(
- request.user, person, limit
- )
+ messages = _recent_messages_for_person(request.user, person, limit)
owner_name = (
request.user.first_name
or request.user.get_full_name().strip()
|