Increase platform abstraction cohesion

This commit is contained in:
2026-03-06 17:47:58 +00:00
parent 438e561da0
commit 8c091b1e6d
55 changed files with 6555 additions and 440 deletions

View File

@@ -200,8 +200,61 @@ Query memory backend:
podman exec ur_gia /venv/bin/python manage.py memory_search_query --user-id 1 --query "reply style" podman exec ur_gia /venv/bin/python manage.py memory_search_query --user-id 1 --query "reply style"
``` ```
Generate proposed memories from recent inbound messages:
```bash
podman exec ur_gia /venv/bin/python manage.py memory_suggest_from_messages --user-id 1 --limit-messages 300 --max-items 30
```
Run memory hygiene (expiry decay + contradiction queueing):
```bash
podman exec ur_gia /venv/bin/python manage.py memory_hygiene --user-id 1
```
Performance defaults now applied in GIA:
- Batched Manticore reindex writes (`REPLACE ... VALUES (...)` in chunks) for lower ingest latency.
- Cached table-ensure checks to avoid `CREATE TABLE IF NOT EXISTS` overhead on every query.
- Runtime table maintenance available through MCP (`FLUSH RAMCHUNK`, `OPTIMIZE TABLE`) for steady query responsiveness.
### F) MCP server for task + memory tooling (VS Code)
The workspace includes an MCP config at `/code/xf/.vscode/mcp.json` for server `manticore`.
It launches inside the running `ur_gia` container and forces:
- `MEMORY_SEARCH_BACKEND=manticore`
`MANTICORE_HTTP_URL` is read from the container environment (`stack.env` / app settings).
Start requirements first:
```bash
make run
./utilities/memory/manage_manticore_container.sh up
```
Then approve/enable the `manticore` MCP server in VS Code when prompted.
Optional ultra-light Rust MCP worker:
```bash
cd /code/xf/GIA
make mcp-rust-build
```
Then enable `manticore-rust-worker` in `/code/xf/.vscode/mcp.json`.
It is intentionally `disabled: true` by default so the existing Python MCP server remains the baseline.
### C) Signal or WhatsApp send failures ### C) Signal or WhatsApp send failures
- Verify account/link status in service pages. - Verify account/link status in service pages.
- Verify `ur` service is running. - Verify `ur` service is running.
- Inspect `ur` logs for transport-specific errors. - Inspect `ur` logs for transport-specific errors.
### G) XMPP reconnect loop in logs
- Confirm `XMPP_ADDRESS`, `XMPP_JID`, `XMPP_PORT`, and `XMPP_SECRET` are populated in `stack.env`.
- `XMPP_PORT` is parsed as an integer in settings; invalid values can cause repeated reconnect failures.
- The runtime now uses a single reconnect loop with exponential backoff to avoid overlapping reconnect churn.

View File

@@ -61,3 +61,6 @@ token:
echo "Container '$(APP_CONTAINER)' is not running. Start the stack first with 'make run'." >&2; \ echo "Container '$(APP_CONTAINER)' is not running. Start the stack first with 'make run'." >&2; \
exit 125; \ exit 125; \
fi fi
mcp-rust-build:
cd rust/manticore-mcp-worker && cargo build --release

View File

@@ -119,6 +119,7 @@ Core components:
- `core/clients/signal.py`, `core/clients/signalapi.py`: Signal event + REST transport handling. - `core/clients/signal.py`, `core/clients/signalapi.py`: Signal event + REST transport handling.
- `core/clients/whatsapp.py`: Neonize-backed runtime transport. - `core/clients/whatsapp.py`: Neonize-backed runtime transport.
- `core/clients/xmpp.py`: XMPP component bridge and media upload relay. - `core/clients/xmpp.py`: XMPP component bridge and media upload relay.
- `rust/manticore-mcp-worker`: optional ultra-light MCP frontend for direct Manticore status/query/maintenance.
- `core/views/compose.py`: Manual compose UX, polling/ws, send pipeline, media blob endpoint. - `core/views/compose.py`: Manual compose UX, polling/ws, send pipeline, media blob endpoint.
- `core/views/workspace.py`: AI workspace operations and insight surfaces. - `core/views/workspace.py`: AI workspace operations and insight surfaces.
- `core/views/osint.py`: Search/workspace OSINT interactions. - `core/views/osint.py`: Search/workspace OSINT interactions.

View File

@@ -1,16 +1,39 @@
from os import getenv from os import getenv
from urllib.parse import urlparse
trues = ("t", "true", "yes", "y", "1") trues = ("t", "true", "yes", "y", "1")
def _csv_env(name: str, default: str) -> list[str]:
return [item.strip() for item in getenv(name, default).split(",") if item.strip()]
# URLs # URLs
DOMAIN = getenv("DOMAIN", "example.com") DOMAIN = getenv("DOMAIN", "example.com")
URL = getenv("URL", f"https://{DOMAIN}") URL = getenv("URL", f"https://{DOMAIN}")
URL_HOST = urlparse(URL).hostname or ""
DEBUG = getenv("DEBUG", "false").lower() in trues
# Access control # Access control
ALLOWED_HOSTS = getenv("ALLOWED_HOSTS", f"localhost,{DOMAIN}").split(",") ALLOWED_HOSTS = _csv_env(
"ALLOWED_HOSTS",
",".join(
item
for item in (
"localhost",
"127.0.0.1",
DOMAIN,
URL_HOST,
)
if item
),
)
if DEBUG:
# Local/dev stack runs behind varying hostnames/tunnels.
ALLOWED_HOSTS = ["*"]
# CSRF # CSRF
CSRF_TRUSTED_ORIGINS = getenv("CSRF_TRUSTED_ORIGINS", URL).split(",") CSRF_TRUSTED_ORIGINS = _csv_env("CSRF_TRUSTED_ORIGINS", URL)
# Stripe # Stripe
BILLING_ENABLED = getenv("BILLING_ENABLED", "false").lower() in trues BILLING_ENABLED = getenv("BILLING_ENABLED", "false").lower() in trues
@@ -23,7 +46,10 @@ STRIPE_PUBLIC_API_KEY_PROD = getenv("STRIPE_PUBLIC_API_KEY_PROD", "")
STRIPE_ENDPOINT_SECRET = getenv("STRIPE_ENDPOINT_SECRET", "") STRIPE_ENDPOINT_SECRET = getenv("STRIPE_ENDPOINT_SECRET", "")
STATIC_ROOT = getenv("STATIC_ROOT", "") STATIC_ROOT = getenv("STATIC_ROOT", "")
SECRET_KEY = getenv("SECRET_KEY", "") SECRET_KEY = (getenv("SECRET_KEY", "") or "").strip()
if not SECRET_KEY:
# Keep local developer stacks usable when stack.env is uninitialized.
SECRET_KEY = "gia-dev-secret-key"
STRIPE_ADMIN_COUPON = getenv("STRIPE_ADMIN_COUPON", "") STRIPE_ADMIN_COUPON = getenv("STRIPE_ADMIN_COUPON", "")
@@ -33,7 +59,6 @@ LAGO_API_KEY = getenv("LAGO_API_KEY", "")
LAGO_ORG_ID = getenv("LAGO_ORG_ID", "") LAGO_ORG_ID = getenv("LAGO_ORG_ID", "")
LAGO_URL = getenv("LAGO_URL", "") LAGO_URL = getenv("LAGO_URL", "")
DEBUG = getenv("DEBUG", "false") in trues
PROFILER = getenv("PROFILER", "false") in trues PROFILER = getenv("PROFILER", "false") in trues
if DEBUG: if DEBUG:
@@ -62,7 +87,8 @@ INSTAGRAM_HTTP_URL = getenv("INSTAGRAM_HTTP_URL", "http://instagram:8080")
XMPP_ADDRESS = getenv("XMPP_ADDRESS") XMPP_ADDRESS = getenv("XMPP_ADDRESS")
XMPP_JID = getenv("XMPP_JID") XMPP_JID = getenv("XMPP_JID")
XMPP_PORT = getenv("XMPP_PORT") XMPP_USER_DOMAIN = getenv("XMPP_USER_DOMAIN", "")
XMPP_PORT = int(getenv("XMPP_PORT", "8888") or 8888)
XMPP_SECRET = getenv("XMPP_SECRET") XMPP_SECRET = getenv("XMPP_SECRET")
EVENT_LEDGER_DUAL_WRITE = getenv("EVENT_LEDGER_DUAL_WRITE", "false").lower() in trues EVENT_LEDGER_DUAL_WRITE = getenv("EVENT_LEDGER_DUAL_WRITE", "false").lower() in trues

View File

@@ -35,6 +35,7 @@ from core.views import (
osint, osint,
people, people,
personas, personas,
prosody,
queues, queues,
sessions, sessions,
signal, signal,
@@ -98,6 +99,11 @@ urlpatterns = [
system.MemorySearchQueryAPI.as_view(), system.MemorySearchQueryAPI.as_view(),
name="system_memory_search_query", name="system_memory_search_query",
), ),
path(
"internal/prosody/auth/",
prosody.ProsodyAuthBridge.as_view(),
name="prosody_auth_bridge",
),
path( path(
"settings/command-routing/", "settings/command-routing/",
automation.CommandRoutingSettings.as_view(), automation.CommandRoutingSettings.as_view(),

View File

@@ -0,0 +1,71 @@
# Manticore MCP Server (GIA)
This document describes the MCP server wired for task + memory operations in GIA.
## Server entrypoint
- Django management command: `python manage.py mcp_manticore_server`
- Python module: `core.mcp.server`
- Tool handlers: `core.mcp.tools`
## Rust worker frontend (optional)
For low-overhead direct Manticore operations, a Rust stdio MCP worker is included:
- Project: `rust/manticore-mcp-worker`
- Build: `make mcp-rust-build`
- Binary: `rust/manticore-mcp-worker/target/release/manticore-mcp-worker`
- VS Code server name: `manticore-rust-worker` (disabled by default in `/code/xf/.vscode/mcp.json`)
This worker exposes fast table/status/query/maintenance operations and can be enabled when you want a minimal MCP process in front of Manticore.
## VS Code wiring
Workspace config is in `/code/xf/.vscode/mcp.json`:
- Server name: `manticore`
- Launch method: `podman exec -i ur_gia /venv/bin/python manage.py mcp_manticore_server`
- Forced env:
- `MEMORY_SEARCH_BACKEND=manticore`
`MANTICORE_HTTP_URL` is inherited from container environment so each deployment can set the correct reachable address.
This allows MCP tool calls from VS Code to run against the live GIA container without requiring local Django dependencies.
## Implemented MCP tools
- `manticore.status`
- `manticore.query`
- `manticore.reindex`
- `memory.list`
- `memory.propose`
- `memory.pending`
- `memory.review`
- `memory.suggest_from_messages`
- `tasks.list`
- `tasks.search`
- `tasks.get`
- `tasks.events`
- `tasks.create_note`
- `tasks.link_artifact`
- `wiki.create_article`
- `wiki.update_article`
- `wiki.list`
- `wiki.get`
- `project.get_guidelines`
- `project.get_layout`
- `project.get_runbook`
- `docs.append_run_note`
`docs.append_run_note` appends markdown notes to `/tmp/gia-mcp-run-notes.md` by default (or a project path you pass explicitly).
All MCP tool invocations are audit-logged in `core_mcptoolauditlog` (`MCPToolAuditLog` model).
## Runtime notes
1. Ensure GIA services are running (`make run`).
2. Start Manticore container:
- `./utilities/memory/manage_manticore_container.sh up`
3. Optional initial index:
- `podman exec ur_gia /venv/bin/python manage.py memory_search_reindex --user-id <id> --statuses active`
4. In VS Code, approve/enable the workspace MCP server when prompted.

View File

@@ -1,28 +0,0 @@
# Feature Plan: Personal AI Memory (Per Person)
## Goal
Store and manage long-lived person-specific memory for better continuity and assistant quality.
## Why This Fits GIA
- Person-centric data model already exists.
- Current approvals pattern can gate memory writes.
## Scope
- Memory entries: preferences, commitments, facts, communication style.
- Confidence/expiry fields and provenance links.
- Approval-required writes with edit/delete controls.
## Implementation
1. Add memory model linked to `Person` with source references.
2. Add extraction pipeline (suggested memory from messages).
3. Add approval queue for memory create/update/delete.
4. Add retrieval service for compose/AI workspace prompts.
5. Add memory hygiene jobs: stale decay, contradiction detection.
## Acceptance Criteria
- Every memory has provenance and last-verified timestamp.
- Unapproved memory never influences generated output.
- Users can inspect, edit, and purge memory entries.
## Out of Scope
- Cross-user shared memory graph.

View File

@@ -1,60 +0,0 @@
# Feature Plan: MCP Server for Tasks + Wiki/Knowledgebase
## Goal
Create an MCP server that allows agents to:
- read/write task context,
- create/update knowledgebase/wiki artifacts during task execution,
- retrieve coding guidelines/project layout for continuity between runs.
## Why This Fits GIA
- Tasks, approvals, and command-driven automation already exist.
- This provides durable agent memory and operator visibility of "what changed, why, and how to use it".
## Scope
- MCP server with authenticated tools for:
- `tasks.list`, `tasks.get`, `tasks.search`, `tasks.events`
- `tasks.create_note`, `tasks.link_artifact`
- `wiki.create_article`, `wiki.update_article`, `wiki.list`, `wiki.get`
- `project.get_layout`, `project.get_guidelines`, `project.get_runbook`
- Permission model tied to user and chat/project scope.
- Audit log for all MCP tool calls.
## Proposed Data Additions
- `KnowledgeArticle` (title, slug, markdown, tags, status, owner, related_task).
- `KnowledgeRevision` (article, revision, author/tool, diff, created_at).
- Optional `TaskArtifactLink` (task, kind, uri/path, summary).
## Implementation
1. Build MCP server process (Python) with JSON-RPC transport and token auth.
2. Implement task read tools against existing task models/views.
3. Implement wiki CRUD tools with revision history.
4. Implement project context tools that read:
- `AGENTS.md`,
- coding standards docs,
- key architecture docs.
5. Add agent-run convention:
- on task start: fetch task + related wiki + guidelines,
- during run: append execution notes,
- on completion: publish "what was done / how to use" article and link to task.
6. Add web UI page for knowledge articles and task-linked docs.
7. Add approvals for destructive knowledge actions (delete/overwrite).
## Acceptance Criteria
- Agent can fetch full task context in one MCP call sequence.
- Agent can publish/update wiki pages tied to tasks.
- Operators can open a task and see linked implementation notes + usage docs.
- MCP actions are fully auditable and scoped by user permissions.
## Security and Guardrails
- Tool-level RBAC and per-user scoping.
- Redact secrets from returned context.
- Rate limits and request signing for external agent clients.
## Rollout
1. Read-only task tools.
2. Wiki write tools with revisioning.
3. Task artifact linking + UI surfaces.
4. Agent workflow templates and docs.
## Out of Scope
- Autonomous code execution from MCP itself.

View File

@@ -1,16 +0,0 @@
Perfect, so it all works?
the message saying "the recipient does the same" has been reacted to with a heart but it is not shown in web compose
I also sent an erroneous message: a literal reply to a message that I said I would react to with a heart. The message contained a heart emoji, so it is a reply with a heart and not a reaction
after some confusion I deleted this message
can deleted messages be noted and collected for storage in a deleted message tab in compose that lists what each recipient deleted and when
ensure message edit history is shown, and preserved if the message is deleted, seamlessly reusing the navigation code to preserve a unified interface
work on implementing edit message tracking and delete message indications
consider how to implement

View File

@@ -1,2 +0,0 @@
# 14) Run security audit using artifacts/1-initial.json. Generated using ship-safe.
https://github.com/asamassekou10/ship-safe

View File

@@ -1,19 +0,0 @@
No Tasks Yet
This group has no derived tasks yet. To start populating this view:
Open Task Settings and confirm this chat is mapped under Group Mapping.
Send task-like messages in this group, for example: task: ship v1, todo: write tests, please review PR.
Mark completion explicitly with a phrase + reference, for example: done #12, completed #12, fixed #12.
Refresh this page; new derived tasks and events should appear automatically.
task settings sound complicated, make them simpler
--
# https://gia.zm.is/settings/system/
assume the user cannot access the log
Use a trace id from the dropdown (recent traces), Event Ledger Smoke `sample[].trace_id`, or UR logs.

View File

@@ -1,34 +0,0 @@
# Feature Plan: Agent Knowledge Memory Foundation (Pre-11/12)
## Goal
Establish a scalable, queryable memory substrate so wiki and MCP features can rely on fast retrieval instead of markdown-file scans.
## Why This Comes Before 11/12
- Plan 11 (personal memory) needs performant retrieval and indexing guarantees.
- Plan 12 (MCP wiki/tools) needs a stable backend abstraction independent of UI and tool transport.
## Scope
- Pluggable memory search backend interface.
- Default Django backend for zero-infra operation.
- Optional Manticore backend for scalable full-text/vector-ready indexing.
- Reindex + query operational commands.
- System diagnostics endpoints for backend status and query inspection.
## Implementation Slice
1. Add `core/memory/search_backend.py` abstraction and backends.
2. Add `memory_search_reindex` and `memory_search_query` management commands.
3. Add system APIs:
- backend status
- memory query
4. Add lightweight Podman utility script for Manticore runtime.
5. Add tests for diagnostics and query behavior.
## Acceptance Criteria
- Memory retrieval works with `MEMORY_SEARCH_BACKEND=django` out of the box.
- Switching to `MEMORY_SEARCH_BACKEND=manticore` requires only env/config + container startup.
- Operators can verify backend health and query output from system settings.
## Out of Scope
- Full wiki article model/UI.
- Full MCP server process/tooling.
- Embedding generation pipeline (next slice after backend foundation).

View File

@@ -1,25 +0,0 @@
# Memory Backend Evaluation: Manticore vs Alternatives
## Decision Summary
- **Recommended now:** Manticore for indexed text retrieval and future vector layering.
- **Default fallback:** Django/ORM backend for zero-infra environments.
- **Revisit later:** dedicated vector DB only if recall quality or ANN latency requires it.
## Why Manticore Fits This Stage
- Already present in adjacent infra and codebase history.
- Runs well as a small standalone container with low operational complexity.
- Supports SQL-like querying and fast full-text retrieval for agent memory/wiki content.
- Lets us keep one retrieval abstraction while deferring embedding complexity.
## Tradeoff Notes
- Manticore-first gives immediate performance over markdown scans.
- For advanced ANN/vector-only workloads, Qdrant/pgvector/Weaviate may outperform with less custom shaping.
- A hybrid approach remains possible:
- Manticore for lexical + metadata filtering,
- optional vector store for semantic recall.
## Practical Rollout
1. Start with `MEMORY_SEARCH_BACKEND=django` and verify API/command workflows.
2. Start Manticore container and switch to `MEMORY_SEARCH_BACKEND=manticore`.
3. Run reindex and validate query latency/quality on real agent workflows.
4. Add embedding pipeline only after baseline lexical retrieval is stable.

View File

@@ -0,0 +1,95 @@
# Feature Plan: Person Model Enrichment (Non-LLM First)
## Goal
Populate `Person` fields from existing message history without spending OpenAI tokens by default:
- `summary`
- `profile`
- `revealed`
- `likes`
- `dislikes`
- `sentiment`
- `timezone`
- `last_interaction`
## Problem We Are Solving
- We have high-volume message data but limited durable person intelligence.
- LLM analysis is expensive for continuous/background processing.
- We need fast, deterministic extraction first, with optional semantic ranking.
## Design Decisions
1. Config scope:
- global defaults
- optional group-level overrides
- per-user overrides
2. Resolution order:
- `user > group > global`
3. Global toggle:
- hard kill-switch (`PERSON_ENRICHMENT_ENABLED`)
4. Per-user/group controls:
- enable/disable enrichment
- write mode (`proposal_required` or `direct`)
- confidence threshold
- max messages scanned per run
- semantic-ranking toggle
## Proposed Data Additions
- `PersonEnrichmentSettings`:
- scope fields (`user`, optional `group`)
- toggle/threshold/runtime limits
- `PersonSignal`:
- normalized extracted clue
- source references (message ids/events)
- confidence and detector name
- `PersonUpdateProposal`:
- pending/approved/rejected person field updates
- reason and provenance
- Optional `PersonFieldRevision`:
- before/after snapshots for auditability
## Processing Flow
1. Select message window:
- recent inbound/outbound messages per person/service
- bounded by configurable caps
2. Fast extraction:
- deterministic rules/regex for:
- timezone cues
- explicit likes/dislikes
- self-revealed facts
- interaction-derived sentiment hints
3. Semantic ranking (optional):
- use Manticore-backed similarity search for classifier labels
- rank candidate signals; do not call OpenAI in default path
4. Signal aggregation:
- merge repeated evidence
- decay stale evidence
- detect contradictions
5. Apply update:
- `proposal_required`: create `PersonUpdateProposal`
- `direct`: write only above confidence threshold and with no conflict
6. Persist audit trail:
- record detector/classifier source and exact message provenance
## Field-Specific Policy
- `summary/profile`: generated from stable high-confidence aggregates only.
- `revealed`: only explicit self-disclosures.
- `likes/dislikes`: require explicit statement or repeated pattern.
- `sentiment`: rolling value with recency decay; never absolute truth label.
- `timezone`: explicit declaration preferred; behavioral inference secondary.
- `last_interaction`: deterministic from most recent message timestamps.
## Rollout
1. Schema and settings models.
2. Deterministic extractor pipeline and commands.
3. Proposal queue + review flow.
4. Optional Manticore semantic ranking layer.
5. Backfill job for existing persons with safe rate limits.
## Acceptance Criteria
- Default enrichment path runs with zero OpenAI usage.
- Person updates are traceable to concrete message evidence.
- Config hierarchy behaves predictably (`user > group > global`).
- Operators can switch between proposal and direct write modes per scope.
## Out of Scope
- Cross-user shared person graph.
- Autonomous LLM-generated profile writing as default.

View File

@@ -193,6 +193,85 @@ def _extract_signal_reaction(envelope):
} }
def _extract_signal_edit(envelope):
    """Extract an edit-message payload from a Signal envelope.

    Returns a dict with ``target_ts`` (the edited message's original send
    timestamp), ``new_text`` (replacement body), and ``raw`` (a shallow copy
    of the edit node), or None when the envelope carries no usable edit.
    """
    # Edit payloads can arrive under several envelope shapes depending on
    # whether the event is a direct message or a linked-device sync.
    candidate_paths = (
        ("dataMessage", "editMessage"),
        ("syncMessage", "sentMessage", "editMessage"),
        ("syncMessage", "editMessage"),
    )
    edit_node = None
    for candidate_path in candidate_paths:
        found = _get_nested(envelope, candidate_path)
        if isinstance(found, dict):
            edit_node = found
            break
    if not isinstance(edit_node, dict):
        return None
    # The target-timestamp key name varies across signal-cli versions.
    raw_ts = None
    for ts_key in ("targetSentTimestamp", "targetTimestamp", "targetTs"):
        raw_ts = edit_node.get(ts_key)
        if raw_ts is not None:
            break
    try:
        parsed_ts = int(raw_ts)
    except Exception:
        parsed_ts = 0
    if parsed_ts <= 0:
        return None
    # Replacement text may live in a nested data message or directly on the node.
    body_node = edit_node.get("dataMessage") or edit_node.get("message") or {}
    replacement_text = ""
    if isinstance(body_node, dict):
        for text_key in ("message", "text", "body", "caption"):
            candidate_text = str(body_node.get(text_key) or "").strip()
            if candidate_text:
                replacement_text = candidate_text
                break
    if not replacement_text:
        replacement_text = str(edit_node.get("message") or "").strip()
    if not replacement_text:
        return None
    return {
        "target_ts": parsed_ts,
        "new_text": replacement_text,
        "raw": dict(edit_node),
    }
def _extract_signal_delete(envelope):
    """Extract a remote-delete payload from a Signal envelope.

    Returns a dict with ``target_ts`` (the deleted message's original send
    timestamp) and ``raw`` (a shallow copy of the delete node), or None when
    the envelope carries no usable delete event.
    """
    # Delete payloads can arrive under several envelope shapes depending on
    # whether the event is a direct message or a linked-device sync.
    candidate_paths = (
        ("dataMessage", "delete"),
        ("dataMessage", "remoteDelete"),
        ("syncMessage", "sentMessage", "delete"),
        ("syncMessage", "delete"),
    )
    delete_node = None
    for candidate_path in candidate_paths:
        found = _get_nested(envelope, candidate_path)
        if isinstance(found, dict):
            delete_node = found
            break
    if not isinstance(delete_node, dict):
        return None
    # The target-timestamp key name varies across signal-cli versions.
    raw_ts = None
    for ts_key in ("targetSentTimestamp", "targetTimestamp", "targetTs"):
        raw_ts = delete_node.get(ts_key)
        if raw_ts is not None:
            break
    try:
        parsed_ts = int(raw_ts)
    except Exception:
        parsed_ts = 0
    if parsed_ts <= 0:
        return None
    return {
        "target_ts": parsed_ts,
        "raw": dict(delete_node),
    }
def _extract_signal_text(raw_payload, default_text=""): def _extract_signal_text(raw_payload, default_text=""):
text = str(default_text or "").strip() text = str(default_text or "").strip()
if text: if text:
@@ -1299,6 +1378,8 @@ class SignalClient(ClientBase):
destination_number, destination_number,
) )
reaction_payload = _extract_signal_reaction(envelope) reaction_payload = _extract_signal_reaction(envelope)
edit_payload = _extract_signal_edit(envelope)
delete_payload = _extract_signal_delete(envelope)
if identifiers and isinstance(reaction_payload, dict): if identifiers and isinstance(reaction_payload, dict):
source_uuid = str( source_uuid = str(
envelope.get("sourceUuid") or envelope.get("source") or "" envelope.get("sourceUuid") or envelope.get("source") or ""
@@ -1343,6 +1424,61 @@ class SignalClient(ClientBase):
self.log.warning( self.log.warning(
"signal raw sync reaction relay to XMPP failed: %s", exc "signal raw sync reaction relay to XMPP failed: %s", exc
) )
if identifiers and isinstance(edit_payload, dict):
source_uuid = str(
envelope.get("sourceUuid") or envelope.get("source") or ""
).strip()
source_number = str(envelope.get("sourceNumber") or "").strip()
for identifier in identifiers:
try:
await history.apply_message_edit(
identifier.user,
identifier,
target_message_id="",
target_ts=int(edit_payload.get("target_ts") or 0),
new_text=str(edit_payload.get("new_text") or ""),
source_service="signal",
actor=(source_uuid or source_number or ""),
payload=edit_payload.get("raw") or {},
)
except Exception as exc:
self.log.warning(
"signal raw sync edit history apply failed: %s", exc
)
transport.update_runtime_state(
self.service,
last_inbound_ok_ts=int(time.time() * 1000),
last_inbound_exception_type="",
last_inbound_exception_message="",
)
return
if identifiers and isinstance(delete_payload, dict):
source_uuid = str(
envelope.get("sourceUuid") or envelope.get("source") or ""
).strip()
source_number = str(envelope.get("sourceNumber") or "").strip()
for identifier in identifiers:
try:
await history.apply_message_delete(
identifier.user,
identifier,
target_message_id="",
target_ts=int(delete_payload.get("target_ts") or 0),
source_service="signal",
actor=(source_uuid or source_number or ""),
payload=delete_payload.get("raw") or {},
)
except Exception as exc:
self.log.warning(
"signal raw sync delete history apply failed: %s", exc
)
transport.update_runtime_state(
self.service,
last_inbound_ok_ts=int(time.time() * 1000),
last_inbound_exception_type="",
last_inbound_exception_message="",
)
return
if identifiers and text: if identifiers and text:
ts_raw = ( ts_raw = (
sync_sent_message.get("timestamp") sync_sent_message.get("timestamp")
@@ -1427,8 +1563,14 @@ class SignalClient(ClientBase):
identifiers = await self._resolve_signal_identifiers(source_uuid, source_number) identifiers = await self._resolve_signal_identifiers(source_uuid, source_number)
reaction_payload = _extract_signal_reaction(envelope) reaction_payload = _extract_signal_reaction(envelope)
if (not identifiers) and isinstance(reaction_payload, dict): edit_payload = _extract_signal_edit(envelope)
# Sync reactions from our own linked device can arrive with source=our delete_payload = _extract_signal_delete(envelope)
if (not identifiers) and (
isinstance(reaction_payload, dict)
or isinstance(edit_payload, dict)
or isinstance(delete_payload, dict)
):
# Sync events from our own linked device can arrive with source=our
# account and destination=<contact>. Resolve by destination as fallback. # account and destination=<contact>. Resolve by destination as fallback.
destination_uuid = str( destination_uuid = str(
envelope.get("destinationServiceId") envelope.get("destinationServiceId")
@@ -1497,6 +1639,49 @@ class SignalClient(ClientBase):
last_inbound_exception_message="", last_inbound_exception_message="",
) )
return return
if isinstance(edit_payload, dict):
for identifier in identifiers:
try:
await history.apply_message_edit(
identifier.user,
identifier,
target_message_id="",
target_ts=int(edit_payload.get("target_ts") or 0),
new_text=str(edit_payload.get("new_text") or ""),
source_service="signal",
actor=(source_uuid or source_number or ""),
payload=edit_payload.get("raw") or {},
)
except Exception as exc:
self.log.warning("signal raw edit history apply failed: %s", exc)
transport.update_runtime_state(
self.service,
last_inbound_ok_ts=int(time.time() * 1000),
last_inbound_exception_type="",
last_inbound_exception_message="",
)
return
if isinstance(delete_payload, dict):
for identifier in identifiers:
try:
await history.apply_message_delete(
identifier.user,
identifier,
target_message_id="",
target_ts=int(delete_payload.get("target_ts") or 0),
source_service="signal",
actor=(source_uuid or source_number or ""),
payload=delete_payload.get("raw") or {},
)
except Exception as exc:
self.log.warning("signal raw delete history apply failed: %s", exc)
transport.update_runtime_state(
self.service,
last_inbound_ok_ts=int(time.time() * 1000),
last_inbound_exception_type="",
last_inbound_exception_message="",
)
return
text = _extract_signal_text(payload, str(data_message.get("message") or "").strip()) text = _extract_signal_text(payload, str(data_message.get("message") or "").strip())
if not text: if not text:

View File

@@ -776,6 +776,14 @@ async def send_message_raw(
Unified outbound send path used by models/views/UR. Unified outbound send path used by models/views/UR.
""" """
service_key = _service_key(service) service_key = _service_key(service)
if _capability_checks_enabled() and not supports(service_key, "send"):
reason = unsupported_reason(service_key, "send")
log.warning(
"capability-check failed service=%s feature=send: %s",
service_key,
reason,
)
return False
if service_key == "signal": if service_key == "signal":
prepared_attachments = await prepare_outbound_attachments( prepared_attachments = await prepare_outbound_attachments(
service_key, attachments or [] service_key, attachments or []

View File

@@ -141,10 +141,14 @@ class XMPPComponent(ComponentXMPP):
self._reconnect_task = None self._reconnect_task = None
self._reconnect_delay_seconds = 1.0 self._reconnect_delay_seconds = 1.0
self._reconnect_delay_max_seconds = 30.0 self._reconnect_delay_max_seconds = 30.0
self._connect_inflight = False
self._session_live = False
self.log = logs.get_logger("XMPP") self.log = logs.get_logger("XMPP")
super().__init__(jid, secret, server, port) super().__init__(jid, secret, server, port)
# Use one reconnect strategy (our backoff loop) to avoid reconnect churn.
self.auto_reconnect = False
# Register chat state plugins # Register chat state plugins
register_stanza_plugin(Message, Active) register_stanza_plugin(Message, Active)
register_stanza_plugin(Message, Composing) register_stanza_plugin(Message, Composing)
@@ -178,6 +182,21 @@ class XMPPComponent(ComponentXMPP):
self.add_event_handler("chatstate_inactive", self.on_chatstate_inactive) self.add_event_handler("chatstate_inactive", self.on_chatstate_inactive)
self.add_event_handler("chatstate_gone", self.on_chatstate_gone) self.add_event_handler("chatstate_gone", self.on_chatstate_gone)
def _user_xmpp_domain(self):
    """Resolve the XMPP domain used for local user (bare account) JIDs.

    Preference order: explicit XMPP_USER_DOMAIN setting, then the component
    JID with its first label stripped, then DOMAIN, then XMPP_ADDRESS.
    """
    explicit = str(getattr(settings, "XMPP_USER_DOMAIN", "") or "").strip()
    if explicit:
        return explicit
    # A component JID like "gateway.example.com" implies user domain "example.com".
    jid_value = str(getattr(settings, "XMPP_JID", "") or "").strip()
    if "." in jid_value:
        _, parent_domain = jid_value.split(".", 1)
        return parent_domain
    fallback = str(getattr(settings, "DOMAIN", "") or "").strip()
    return fallback or str(getattr(settings, "XMPP_ADDRESS", "") or "").strip()
def _user_jid(self, username):
    """Build the bare JID for a local username on the resolved user domain."""
    return "{}@{}".format(username, self._user_xmpp_domain())
async def enable_carbons(self): async def enable_carbons(self):
"""Enable XMPP Message Carbons (XEP-0280)""" """Enable XMPP Message Carbons (XEP-0280)"""
try: try:
@@ -827,25 +846,33 @@ class XMPPComponent(ComponentXMPP):
async def session_start(self, *args): async def session_start(self, *args):
self.log.info("XMPP session started") self.log.info("XMPP session started")
self._session_live = True
self._connect_inflight = False
self._reconnect_delay_seconds = 1.0 self._reconnect_delay_seconds = 1.0
if self._reconnect_task and not self._reconnect_task.done(): if self._reconnect_task and not self._reconnect_task.done():
self._reconnect_task.cancel() self._reconnect_task.cancel()
self._reconnect_task = None self._reconnect_task = None
await self.enable_carbons() # This client connects as an external component, not a user client;
# XEP-0280 (carbons) is client-scoped and not valid here.
self.log.debug("Skipping carbons enable for component session")
async def _reconnect_loop(self): async def _reconnect_loop(self):
try: try:
while True: while True:
delay = float(self._reconnect_delay_seconds) delay = float(self._reconnect_delay_seconds)
await asyncio.sleep(delay) await asyncio.sleep(delay)
if self._session_live or self._connect_inflight:
return
try: try:
self.log.info("XMPP reconnect attempt delay_s=%.1f", delay) self.log.info("XMPP reconnect attempt delay_s=%.1f", delay)
self._connect_inflight = True
connected = self.connect() connected = self.connect()
if connected is False: if connected is False:
raise RuntimeError("connect returned false") raise RuntimeError("connect returned false")
return return
except Exception as exc: except Exception as exc:
self.log.warning("XMPP reconnect attempt failed: %s", exc) self.log.warning("XMPP reconnect attempt failed: %s", exc)
self._connect_inflight = False
self._reconnect_delay_seconds = min( self._reconnect_delay_seconds = min(
self._reconnect_delay_max_seconds, self._reconnect_delay_max_seconds,
max(1.0, float(self._reconnect_delay_seconds) * 2.0), max(1.0, float(self._reconnect_delay_seconds) * 2.0),
@@ -853,6 +880,8 @@ class XMPPComponent(ComponentXMPP):
except asyncio.CancelledError: except asyncio.CancelledError:
return return
finally: finally:
if not self._session_live:
self._connect_inflight = False
self._reconnect_task = None self._reconnect_task = None
def _schedule_reconnect(self): def _schedule_reconnect(self):
@@ -864,6 +893,8 @@ class XMPPComponent(ComponentXMPP):
""" """
Handles XMPP disconnection and triggers a reconnect loop. Handles XMPP disconnection and triggers a reconnect loop.
""" """
self._session_live = False
self._connect_inflight = False
self.log.warning( self.log.warning(
"XMPP disconnected, scheduling reconnect attempt in %.1fs", "XMPP disconnected, scheduling reconnect attempt in %.1fs",
float(self._reconnect_delay_seconds), float(self._reconnect_delay_seconds),
@@ -1576,7 +1607,7 @@ class XMPPComponent(ComponentXMPP):
f"{person_identifier.person.name.lower()}|" f"{person_identifier.person.name.lower()}|"
f"{person_identifier.service}@{settings.XMPP_JID}" f"{person_identifier.service}@{settings.XMPP_JID}"
) )
recipient_jid = f"{user.username}@{settings.XMPP_ADDRESS}" recipient_jid = self._user_jid(user.username)
await self.send_xmpp_reaction( await self.send_xmpp_reaction(
recipient_jid, recipient_jid,
sender_jid, sender_jid,
@@ -1625,7 +1656,7 @@ class XMPPComponent(ComponentXMPP):
f"{person_identifier.person.name.lower()}|" f"{person_identifier.person.name.lower()}|"
f"{person_identifier.service}@{settings.XMPP_JID}" f"{person_identifier.service}@{settings.XMPP_JID}"
) )
recipient_jid = f"{user.username}@{settings.XMPP_ADDRESS}" recipient_jid = self._user_jid(user.username)
await self.send_chat_state(recipient_jid, sender_jid, started) await self.send_chat_state(recipient_jid, sender_jid, started)
async def send_from_external( async def send_from_external(
@@ -1640,7 +1671,7 @@ class XMPPComponent(ComponentXMPP):
"""Handles sending XMPP messages with text and attachments.""" """Handles sending XMPP messages with text and attachments."""
sender_jid = f"{person_identifier.person.name.lower()}|{person_identifier.service}@{settings.XMPP_JID}" sender_jid = f"{person_identifier.person.name.lower()}|{person_identifier.service}@{settings.XMPP_JID}"
recipient_jid = f"{person_identifier.user.username}@{settings.XMPP_ADDRESS}" recipient_jid = self._user_jid(person_identifier.user.username)
if is_outgoing_message: if is_outgoing_message:
xmpp_id = await self.send_xmpp_message( xmpp_id = await self.send_xmpp_message(
recipient_jid, recipient_jid,
@@ -1767,22 +1798,45 @@ class XMPPComponent(ComponentXMPP):
class XMPPClient(ClientBase): class XMPPClient(ClientBase):
def __init__(self, ur, *args, **kwargs): def __init__(self, ur, *args, **kwargs):
super().__init__(ur, *args, **kwargs) super().__init__(ur, *args, **kwargs)
self.client = XMPPComponent( self._enabled = True
ur, self.client = None
jid=settings.XMPP_JID, jid = str(getattr(settings, "XMPP_JID", "") or "").strip()
secret=settings.XMPP_SECRET, secret = str(getattr(settings, "XMPP_SECRET", "") or "").strip()
server=settings.XMPP_ADDRESS, server = str(getattr(settings, "XMPP_ADDRESS", "") or "").strip()
port=settings.XMPP_PORT, port = int(getattr(settings, "XMPP_PORT", 8888) or 8888)
) missing = []
if not jid:
missing.append("XMPP_JID")
if not secret:
missing.append("XMPP_SECRET")
if not server:
missing.append("XMPP_ADDRESS")
if missing:
self._enabled = False
self.log.warning(
"XMPP client disabled due to missing configuration: %s",
", ".join(missing),
)
self.client.register_plugin("xep_0030") # Service Discovery if self._enabled:
self.client.register_plugin("xep_0004") # Data Forms self.client = XMPPComponent(
self.client.register_plugin("xep_0060") # PubSub ur,
self.client.register_plugin("xep_0199") # XMPP Ping jid=jid,
self.client.register_plugin("xep_0085") # Chat State Notifications secret=secret,
self.client.register_plugin("xep_0363") # HTTP File Upload server=server,
port=port,
)
self.client.register_plugin("xep_0030") # Service Discovery
self.client.register_plugin("xep_0004") # Data Forms
self.client.register_plugin("xep_0060") # PubSub
self.client.register_plugin("xep_0199") # XMPP Ping
self.client.register_plugin("xep_0085") # Chat State Notifications
self.client.register_plugin("xep_0363") # HTTP File Upload
def start(self): def start(self):
if not self._enabled or self.client is None:
return
self.log.info("XMPP client starting...") self.log.info("XMPP client starting...")
# ensure slixmpp uses the same asyncio loop as the router # ensure slixmpp uses the same asyncio loop as the router
@@ -1791,7 +1845,11 @@ class XMPPClient(ClientBase):
self.client.connect() self.client.connect()
async def start_typing_for_person(self, user, person_identifier): async def start_typing_for_person(self, user, person_identifier):
if self.client is None:
return
await self.client.send_typing_for_person(user, person_identifier, True) await self.client.send_typing_for_person(user, person_identifier, True)
async def stop_typing_for_person(self, user, person_identifier): async def stop_typing_for_person(self, user, person_identifier):
if self.client is None:
return
await self.client.send_typing_for_person(user, person_identifier, False) await self.client.send_typing_for_person(user, person_identifier, False)

View File

@@ -0,0 +1,11 @@
from django.core.management.base import BaseCommand
from core.mcp.server import run_stdio_server
class Command(BaseCommand):
    """Expose the GIA MCP stdio server as a Django management command."""

    help = "Run GIA MCP stdio server with manticore/task/documentation tools."

    def handle(self, *args, **options):
        # BaseCommand supplies parsed CLI options; this command takes none.
        del args, options
        run_stdio_server()

View File

@@ -0,0 +1,40 @@
from __future__ import annotations
import json
from django.core.management.base import BaseCommand
from core.memory.pipeline import run_memory_hygiene
class Command(BaseCommand):
    """Run the memory hygiene pass and report the counts it produced."""

    help = "Run memory hygiene checks (stale decay + contradiction queueing)."

    def add_arguments(self, parser):
        parser.add_argument("--user-id", default="")
        parser.add_argument("--dry-run", action="store_true", default=False)
        parser.add_argument("--json", action="store_true", default=False)

    def handle(self, *args, **options):
        raw_user = str(options.get("user_id") or "").strip()
        target_user = int(raw_user) if raw_user else None
        dry_run = bool(options.get("dry_run"))
        emit_json = bool(options.get("json"))
        outcome = run_memory_hygiene(user_id=target_user, dry_run=dry_run)
        report = {
            "user_id": target_user,
            "dry_run": dry_run,
            "result": outcome,
        }
        if emit_json:
            self.stdout.write(json.dumps(report, indent=2, sort_keys=True))
            return
        # One-line human-readable summary for plain terminal use.
        self.stdout.write(
            "memory-hygiene "
            f"user={target_user if target_user is not None else '-'} "
            f"dry_run={'yes' if dry_run else 'no'} "
            f"expired={int(outcome.get('expired') or 0)} "
            f"contradictions={int(outcome.get('contradictions') or 0)} "
            f"queued={int(outcome.get('queued_requests') or 0)}"
        )

View File

@@ -0,0 +1,46 @@
from __future__ import annotations
import json
from django.core.management.base import BaseCommand, CommandError
from core.memory.pipeline import suggest_memories_from_recent_messages
class Command(BaseCommand):
    """Scan recent inbound messages and queue proposed MemoryItem rows."""

    help = "Suggest proposed MemoryItem rows from recent inbound message text."

    def add_arguments(self, parser):
        parser.add_argument("--user-id", required=True)
        parser.add_argument("--limit-messages", type=int, default=300)
        parser.add_argument("--max-items", type=int, default=30)
        parser.add_argument("--json", action="store_true", default=False)

    def handle(self, *args, **options):
        raw_user = str(options.get("user_id") or "").strip()
        if not raw_user:
            raise CommandError("--user-id is required")
        target_user = int(raw_user)
        # Keep at least one message / one item even if 0 was passed.
        limit_messages = max(1, int(options.get("limit_messages") or 300))
        max_items = max(1, int(options.get("max_items") or 30))
        outcome = suggest_memories_from_recent_messages(
            user_id=target_user,
            limit_messages=limit_messages,
            max_items=max_items,
        )
        report = {
            "user_id": target_user,
            "limit_messages": limit_messages,
            "max_items": max_items,
            "result": outcome,
        }
        if bool(options.get("json")):
            self.stdout.write(json.dumps(report, indent=2, sort_keys=True))
            return
        self.stdout.write(
            "memory-suggest-from-messages "
            f"user={target_user} "
            f"scanned={int(outcome.get('scanned') or 0)} "
            f"queued={int(outcome.get('queued') or 0)}"
        )

3
core/mcp/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
from .server import run_stdio_server
__all__ = ["run_stdio_server"]

149
core/mcp/server.py Normal file
View File

@@ -0,0 +1,149 @@
from __future__ import annotations
import json
import os
import sys
from typing import Any
import django
from core.mcp.tools import execute_tool, format_tool_content, tool_specs
from core.util import logs
log = logs.get_logger("mcp-server")
_compat_newline_mode = False
def _setup_django() -> None:
    """Initialise Django so the MCP tools can use the ORM and settings."""
    # Respect an explicitly configured settings module; fall back to app.settings.
    if "DJANGO_SETTINGS_MODULE" not in os.environ:
        os.environ["DJANGO_SETTINGS_MODULE"] = "app.settings"
    django.setup()
def _response(msg_id: Any, result: dict[str, Any]) -> dict[str, Any]:
return {"jsonrpc": "2.0", "id": msg_id, "result": result}
def _error(msg_id: Any, code: int, message: str) -> dict[str, Any]:
return {"jsonrpc": "2.0", "id": msg_id, "error": {"code": code, "message": message}}
def _read_message() -> dict[str, Any] | None:
    """Read one JSON-RPC message from stdin.

    Supports two framings:
      * LSP-style "Content-Length: N" headers followed by an N-byte JSON body;
      * bare newline-delimited JSON (compat mode) — detected when the first
        line already starts with '{' or '['.
    Detecting compat mode flips the module-global `_compat_newline_mode` so
    `_write_message` replies with matching framing.

    Returns the parsed message dict, or None on EOF / empty body.
    """
    global _compat_newline_mode
    headers: dict[str, str] = {}
    pending_body = b""  # body bytes that arrived on the same line as a header
    while True:
        line = sys.stdin.buffer.readline()
        if not line:
            # EOF: the client closed the pipe.
            return None
        if not headers and line.lstrip().startswith((b"{", b"[")):
            # No headers seen yet and the line is already JSON: compat mode.
            _compat_newline_mode = True
            return json.loads(line.decode("utf-8").strip())
        # A single readline may contain "Header\r\n\r\n{body...}"; split it.
        sep = None
        if b"\r\n\r\n" in line:
            sep = b"\r\n\r\n"
        elif b"\n\n" in line:
            sep = b"\n\n"
        if sep is not None:
            header_line, tail = line.split(sep, 1)
            pending_body = tail
        else:
            header_line = line
        if header_line in (b"\r\n", b"\n"):
            # Blank line terminates the header section.
            break
        decoded = header_line.decode("utf-8").strip()
        if ":" in decoded:
            key, value = decoded.split(":", 1)
            # Header names are case-insensitive; normalise to lowercase.
            headers[key.strip().lower()] = value.strip()
        if sep is not None:
            break
    length_raw = headers.get("content-length")
    if not length_raw:
        # Headers without Content-Length: treat the next line as a bare body.
        if not pending_body:
            pending_body = sys.stdin.buffer.readline()
        if not pending_body:
            return None
        _compat_newline_mode = True
        return json.loads(pending_body.decode("utf-8").strip())
    length = int(length_raw)
    body = pending_body
    if len(body) < length:
        # Read the remainder of the declared body size.
        body += sys.stdin.buffer.read(length - len(body))
    body = body[:length]
    if not body:
        return None
    return json.loads(body.decode("utf-8"))
def _write_message(payload: dict[str, Any]) -> None:
    """Serialise a JSON-RPC payload to stdout using the negotiated framing."""
    text = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    out = sys.stdout.buffer
    if _compat_newline_mode:
        # Bare newline-delimited JSON, mirroring how the client spoke to us.
        out.write((text + "\n").encode("utf-8"))
    else:
        # LSP-style Content-Length framing.
        encoded = text.encode("utf-8")
        out.write(f"Content-Length: {len(encoded)}\r\n\r\n".encode("utf-8"))
        out.write(encoded)
    out.flush()
def _handle_message(message: dict[str, Any]) -> dict[str, Any] | None:
    """Dispatch one JSON-RPC request; return a reply dict, or None for notifications."""
    msg_id = message.get("id")
    method = str(message.get("method") or "")
    params = message.get("params") or {}
    if method == "notifications/initialized":
        # Notifications never get a response.
        return None
    if method == "initialize":
        result = {
            "protocolVersion": "2025-06-18",
            "serverInfo": {"name": "gia-manticore-mcp", "version": "0.1.0"},
            "capabilities": {"tools": {}},
        }
        return _response(msg_id, result)
    if method == "ping":
        return _response(msg_id, {})
    if method == "tools/list":
        return _response(msg_id, {"tools": tool_specs()})
    if method != "tools/call":
        return _error(msg_id, -32601, f"Method not found: {method}")
    name = str(params.get("name") or "").strip()
    arguments = params.get("arguments") or {}
    try:
        # Both execution and formatting failures are reported as tool errors.
        payload = execute_tool(name, arguments)
        return _response(msg_id, format_tool_content(payload))
    except Exception as exc:
        log.warning("tool call failed name=%s err=%s", name, exc)
        failure = {
            "isError": True,
            "content": [{"type": "text", "text": json.dumps({"error": str(exc)})}],
        }
        return _response(msg_id, failure)
def run_stdio_server() -> None:
    """Serve JSON-RPC over stdio until stdin reaches EOF."""
    _setup_django()
    while True:
        message = _read_message()
        if message is None:
            # EOF: the client disconnected; exit cleanly.
            return
        try:
            reply = _handle_message(message)
            if reply is not None:
                _write_message(reply)
        except Exception as exc:
            # Surface handler/serialisation failures as JSON-RPC errors.
            _write_message(_error(message.get("id"), -32000, str(exc)))
if __name__ == "__main__":
run_stdio_server()

1220
core/mcp/tools.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +1,4 @@
from .search_backend import get_memory_search_backend from .search_backend import get_memory_search_backend
from .retrieval import retrieve_memories_for_prompt
__all__ = ["get_memory_search_backend"] __all__ = ["get_memory_search_backend", "retrieve_memories_for_prompt"]

419
core/memory/pipeline.py Normal file
View File

@@ -0,0 +1,419 @@
from __future__ import annotations
import re
from datetime import timezone as dt_timezone
from typing import Any
from django.db import transaction
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from core.models import (
MemoryChangeRequest,
MemoryItem,
MemorySourceReference,
MessageEvent,
WorkspaceConversation,
)
from core.util import logs
log = logs.get_logger("memory-pipeline")
_LIKE_RE = re.compile(
r"\b(?:i (?:like|love|prefer)|my favorite)\s+(?P<value>[^.!?]{2,120})",
re.IGNORECASE,
)
_DISLIKE_RE = re.compile(
r"\b(?:i (?:dislike|hate|avoid)|i don't like)\s+(?P<value>[^.!?]{2,120})",
re.IGNORECASE,
)
_STYLE_RE = re.compile(
r"\b(?:please|pls)\s+(?P<value>[^.!?]{3,120})",
re.IGNORECASE,
)
def _clean_value(value: str) -> str:
return " ".join(str(value or "").strip().split())
def extract_memory_candidates(text: str) -> list[dict[str, Any]]:
source = str(text or "").strip()
if not source:
return []
candidates: list[dict[str, Any]] = []
for regex, field, kind, confidence in (
(_LIKE_RE, "likes", "fact", 0.68),
(_DISLIKE_RE, "dislikes", "fact", 0.68),
(_STYLE_RE, "communication_style", "state", 0.52),
):
for match in regex.finditer(source):
value = _clean_value(match.group("value"))
if len(value) < 3:
continue
candidates.append(
{
"memory_kind": kind,
"field": field,
"text": value,
"confidence_score": confidence,
}
)
return candidates
def _existing_fingerprints(user_id: int) -> set[tuple[str, str, str, str]]:
    """Collect dedupe keys (kind, conversation, person, "field:text") for a user.

    Mirrors the fingerprint built in suggest_memories_from_recent_messages so
    newly extracted candidates can be compared against stored memories.
    """
    rows = MemoryItem.objects.filter(user_id=int(user_id)).only(
        "memory_kind",
        "conversation_id",
        "person_id",
        "content",
    )
    seen: set[tuple[str, str, str, str]] = set()
    for row in rows:
        payload = row.content or {}
        field = str(payload.get("field") or "").strip().lower()
        text = _clean_value(str(payload.get("text") or "")).lower()
        seen.add(
            (
                str(row.memory_kind or "").strip().lower(),
                str(row.conversation_id or "").strip(),
                str(row.person_id or "").strip(),
                f"{field}:{text}",
            )
        )
    return seen
def _infer_single_person_id(conversation: WorkspaceConversation) -> str:
    """Return the sole participant's id, or "" when the conversation is not 1:1."""
    # Fetch at most two ids: exactly one means unambiguous, two means a group.
    ids = list(conversation.participants.values_list("id", flat=True)[:2])
    if len(ids) == 1:
        return str(ids[0] or "")
    return ""
@transaction.atomic
def suggest_memories_from_recent_messages(
    *,
    user_id: int,
    limit_messages: int = 300,
    max_items: int = 30,
) -> dict[str, int]:
    """Scan recent inbound messages and queue proposed memories for review.

    Walks the user's inbound MessageEvents newest-first, extracts regex
    candidates, skips any whose (kind, conversation, person, field:text)
    fingerprint already exists, then creates a proposed MemoryItem plus a
    MemorySourceReference back to the message and a pending
    MemoryChangeRequest. The whole pass runs in one transaction.

    Returns {"scanned": <events examined>, "queued": <memories proposed>}.
    """
    # Clamp caller-supplied limits to protective bounds.
    safe_limit_messages = max(1, min(2000, int(limit_messages or 300)))
    safe_max_items = max(1, min(500, int(max_items or 30)))
    existing = _existing_fingerprints(int(user_id))
    scanned = 0
    queued = 0
    rows = (
        MessageEvent.objects.filter(user_id=int(user_id), direction="in")
        .select_related("conversation")
        .order_by("-ts")[:safe_limit_messages]
    )
    for event in rows:
        scanned += 1
        # Only attribute a person when the conversation has exactly one participant.
        person_id = _infer_single_person_id(event.conversation)
        for candidate in extract_memory_candidates(event.text or ""):
            field = str(candidate.get("field") or "").strip().lower()
            text = _clean_value(str(candidate.get("text") or ""))
            if not text:
                continue
            # Fingerprint mirrors _existing_fingerprints so dedupe is symmetric.
            fingerprint = (
                str(candidate.get("memory_kind") or "fact").strip().lower(),
                str(event.conversation_id or "").strip(),
                person_id,
                f"{field}:{text.lower()}",
            )
            if fingerprint in existing:
                continue
            item = MemoryItem.objects.create(
                user_id=int(user_id),
                conversation=event.conversation,
                person_id=person_id or None,
                memory_kind=str(candidate.get("memory_kind") or "fact"),
                status="proposed",
                content={"field": field, "text": text},
                provenance={
                    "pipeline": "message_regex",
                    "message_event_id": str(event.id),
                },
                confidence_score=float(candidate.get("confidence_score") or 0.5),
            )
            MemorySourceReference.objects.create(
                memory=item,
                message_event=event,
                source_label="message_event",
            )
            MemoryChangeRequest.objects.create(
                user_id=int(user_id),
                memory=item,
                conversation=event.conversation,
                person_id=person_id or None,
                action="create",
                status="pending",
                proposed_memory_kind=item.memory_kind,
                proposed_content=item.content,
                proposed_confidence_score=item.confidence_score,
                reason="Auto-suggested from recent inbound messages.",
                requested_by_identifier="memory-pipeline",
            )
            # Remember the new fingerprint so duplicates within this run are skipped.
            existing.add(fingerprint)
            queued += 1
            if queued >= safe_max_items:
                return {"scanned": scanned, "queued": queued}
    return {"scanned": scanned, "queued": queued}
def _coerce_expires_at(value: Any):
    """Parse an ISO datetime string; "" -> None; naive datetimes become UTC-aware.

    Raises ValueError when the string is non-empty but not parseable.
    """
    text = str(value or "").strip()
    if not text:
        return None
    stamp = parse_datetime(text)
    if stamp is None:
        raise ValueError("expires_at must be an ISO datetime")
    if stamp.tzinfo is not None:
        return stamp
    return timezone.make_aware(stamp, dt_timezone.utc)
@transaction.atomic
def create_memory_change_request(
    *,
    user_id: int,
    action: str,
    conversation_id: str = "",
    person_id: str = "",
    memory_id: str = "",
    memory_kind: str = "",
    content: dict[str, Any] | None = None,
    confidence_score: float | None = None,
    expires_at: str = "",
    reason: str = "",
    requested_by_identifier: str = "",
) -> MemoryChangeRequest:
    """Validate inputs and queue a pending create/update/delete memory request.

    Raises ValueError when: the action is unknown; a referenced memory or
    conversation does not exist for this user; a create lacks a conversation;
    or an update/delete lacks a memory. expires_at is an ISO datetime string
    (see _coerce_expires_at).
    """
    normalized_action = str(action or "").strip().lower()
    if normalized_action not in {"create", "update", "delete"}:
        raise ValueError("action must be create/update/delete")
    # Resolve the target memory, scoped to the user.
    memory = None
    if memory_id:
        memory = MemoryItem.objects.filter(user_id=int(user_id), id=memory_id).first()
        if memory is None:
            raise ValueError("memory_id not found")
    # Resolve the conversation, scoped to the user.
    conversation = None
    if conversation_id:
        conversation = WorkspaceConversation.objects.filter(
            user_id=int(user_id),
            id=conversation_id,
        ).first()
        if conversation is None:
            raise ValueError("conversation_id not found")
    if normalized_action == "create" and conversation is None:
        raise ValueError("conversation_id is required for create")
    if normalized_action in {"update", "delete"} and memory is None:
        raise ValueError("memory_id is required for update/delete")
    return MemoryChangeRequest.objects.create(
        user_id=int(user_id),
        memory=memory,
        # Fall back to the memory's own conversation/person when not supplied.
        conversation=conversation or (memory.conversation if memory else None),
        person_id=person_id or (str(memory.person_id or "") if memory else "") or None,
        action=normalized_action,
        status="pending",
        proposed_memory_kind=str(memory_kind or (memory.memory_kind if memory else "")).strip(),
        proposed_content=dict(content or {}),
        proposed_confidence_score=(
            float(confidence_score)
            if confidence_score is not None
            else (float(memory.confidence_score) if memory else None)
        ),
        proposed_expires_at=_coerce_expires_at(expires_at),
        reason=str(reason or "").strip(),
        requested_by_identifier=str(requested_by_identifier or "").strip(),
    )
@transaction.atomic
def review_memory_change_request(
    *,
    user_id: int,
    request_id: str,
    decision: str,
    reviewer_identifier: str = "",
    note: str = "",
) -> MemoryChangeRequest:
    """Approve or reject a pending change request, applying it when approved.

    On "reject" the request is marked rejected. On "approve" it is first
    marked approved, then its action is applied to the MemoryItem
    (create/update/soft-delete) and the request moves to "applied".

    Raises ValueError for a non-pending request or unknown decision;
    MemoryChangeRequest.DoesNotExist when the id is not the user's.
    """
    req = MemoryChangeRequest.objects.select_related("memory", "conversation").get(
        id=request_id,
        user_id=int(user_id),
    )
    if req.status != "pending":
        raise ValueError("request is not pending")
    now = timezone.now()
    normalized_decision = str(decision or "").strip().lower()
    if normalized_decision not in {"approve", "reject"}:
        raise ValueError("decision must be approve/reject")
    req.reviewed_by_identifier = str(reviewer_identifier or "").strip()
    req.reviewed_at = now
    if note:
        # Reviewer note is appended onto the existing reason text.
        req.reason = f"{req.reason}\n\nReview note: {str(note).strip()}".strip()
    if normalized_decision == "reject":
        req.status = "rejected"
        req.save(
            update_fields=[
                "status",
                "reviewed_by_identifier",
                "reviewed_at",
                "reason",
                "updated_at",
            ]
        )
        return req
    # Persist the "approved" state before applying, so a failure during apply
    # leaves an approved-but-unapplied request rather than a pending one.
    req.status = "approved"
    req.save(
        update_fields=[
            "status",
            "reviewed_by_identifier",
            "reviewed_at",
            "reason",
            "updated_at",
        ]
    )
    memory = req.memory
    if req.action == "create":
        if memory is None:
            if req.conversation is None:
                raise ValueError("approved create request missing conversation")
            memory = MemoryItem.objects.create(
                user_id=int(user_id),
                conversation=req.conversation,
                person_id=req.person_id,
                memory_kind=req.proposed_memory_kind or "fact",
                status="active",
                content=req.proposed_content or {},
                confidence_score=float(req.proposed_confidence_score or 0.5),
                expires_at=req.proposed_expires_at,
                last_verified_at=now,
                provenance={"approved_request_id": str(req.id)},
            )
            req.memory = memory
        else:
            # Create request already bound to an item: just (re)activate it.
            memory.status = "active"
            memory.last_verified_at = now
            memory.save(update_fields=["status", "last_verified_at", "updated_at"])
    elif req.action == "update":
        if memory is None:
            raise ValueError("approved update request missing memory")
        if req.proposed_memory_kind:
            memory.memory_kind = req.proposed_memory_kind
        if req.proposed_content:
            memory.content = req.proposed_content
        if req.proposed_confidence_score is not None:
            memory.confidence_score = float(req.proposed_confidence_score)
        # Expiry is always taken from the proposal, even when it clears it.
        memory.expires_at = req.proposed_expires_at
        memory.last_verified_at = now
        memory.status = "active"
        memory.save()
    else:
        # Delete: soft-delete by deprecating rather than removing the row.
        if memory is None:
            raise ValueError("approved delete request missing memory")
        memory.status = "deprecated"
        memory.last_verified_at = now
        memory.save(update_fields=["status", "last_verified_at", "updated_at"])
    req.status = "applied"
    req.save(update_fields=["status", "memory", "updated_at"])
    return req
@transaction.atomic
def run_memory_hygiene(*, user_id: int | None = None, dry_run: bool = False) -> dict[str, int]:
    """Expire stale memories and queue review requests for contradictions.

    Two passes over active memories (optionally scoped to one user):
      1. Deprecate memories whose expires_at has passed.
      2. Group memories by (user, person, conversation, kind, field); when a
         group holds more than one distinct text value, queue a pending
         "update" MemoryChangeRequest for each member, unless one mentioning
         "contradiction" is already pending for it.
    With dry_run=True nothing is written; counts are still computed.

    Returns {"expired": ..., "contradictions": ..., "queued_requests": ...}.
    """
    now = timezone.now()
    queryset = MemoryItem.objects.filter(status="active")
    if user_id is not None:
        queryset = queryset.filter(user_id=int(user_id))
    expired_ids = list(
        queryset.filter(expires_at__isnull=False, expires_at__lte=now).values_list(
            "id",
            flat=True,
        )
    )
    expired = len(expired_ids)
    if expired and not dry_run:
        MemoryItem.objects.filter(id__in=expired_ids).update(status="deprecated")
    contradictions = 0
    queued = 0
    # scope key -> {normalised text -> items carrying that value}
    grouped: dict[tuple[int, str, str, str, str], dict[str, list[MemoryItem]]] = {}
    for item in queryset.select_related("conversation", "person"):
        content = item.content or {}
        # Accept both field/text and key/value content shapes.
        field = str(content.get("field") or content.get("key") or "").strip().lower()
        text = _clean_value(str(content.get("text") or content.get("value") or "")).lower()
        if not field or not text:
            continue
        scope = (
            int(item.user_id),
            str(item.person_id or ""),
            str(item.conversation_id or ""),
            str(item.memory_kind or ""),
            field,
        )
        grouped.setdefault(scope, {})
        grouped[scope].setdefault(text, [])
        grouped[scope][text].append(item)
    for values in grouped.values():
        # A single distinct value means no contradiction in this scope.
        if len(values.keys()) <= 1:
            continue
        flat = [item for subset in values.values() for item in subset]
        contradictions += len(flat)
        if dry_run:
            continue
        for item in flat:
            # Avoid duplicate queueing across repeated hygiene runs.
            already_pending = MemoryChangeRequest.objects.filter(
                user_id=item.user_id,
                memory=item,
                action="update",
                status="pending",
                reason__icontains="contradiction",
            ).exists()
            if already_pending:
                continue
            MemoryChangeRequest.objects.create(
                user_id=item.user_id,
                memory=item,
                conversation=item.conversation,
                person=item.person,
                action="update",
                status="pending",
                proposed_memory_kind=item.memory_kind,
                proposed_content=item.content,
                proposed_confidence_score=item.confidence_score,
                proposed_expires_at=item.expires_at,
                reason="Contradiction detected by hygiene job.",
                requested_by_identifier="memory-hygiene",
            )
            queued += 1
    log.info(
        "memory hygiene user=%s dry_run=%s expired=%s contradictions=%s queued=%s",
        user_id if user_id is not None else "-",
        dry_run,
        expired,
        contradictions,
        queued,
    )
    return {
        "expired": expired,
        "contradictions": contradictions,
        "queued_requests": queued,
    }

123
core/memory/retrieval.py Normal file
View File

@@ -0,0 +1,123 @@
from __future__ import annotations
from typing import Any
from django.db.models import Q
from django.utils import timezone
from core.memory.search_backend import get_memory_search_backend
from core.models import MemoryItem
def _coerce_statuses(value: Any, default: tuple[str, ...]) -> tuple[str, ...]:
if isinstance(value, (list, tuple, set)):
items = [str(item or "").strip().lower() for item in value]
else:
items = [item.strip().lower() for item in str(value or "").split(",")]
cleaned = tuple(item for item in items if item)
return cleaned or default
def _base_queryset(
    *,
    user_id: int,
    person_id: str = "",
    conversation_id: str = "",
    statuses: tuple[str, ...] = ("active",),
):
    """Build the base MemoryItem queryset for a user: unexpired rows,
    optionally narrowed by status set, person, and conversation."""
    now = timezone.now()
    queryset = MemoryItem.objects.filter(user_id=int(user_id))
    if statuses:
        queryset = queryset.filter(status__in=list(statuses))
    # Keep rows with no expiry, or an expiry still in the future.
    queryset = queryset.filter(Q(expires_at__isnull=True) | Q(expires_at__gt=now))
    if person_id:
        queryset = queryset.filter(person_id=person_id)
    if conversation_id:
        queryset = queryset.filter(conversation_id=conversation_id)
    return queryset
def _memory_row(item, *, search_score: float = 0.0, search_summary: str = "") -> dict[str, Any]:
    """Serialise one MemoryItem into the prompt-facing dict shape."""
    return {
        "id": str(item.id),
        "memory_kind": str(item.memory_kind or ""),
        "status": str(item.status or ""),
        "person_id": str(item.person_id or ""),
        "conversation_id": str(item.conversation_id or ""),
        "content": item.content or {},
        "provenance": item.provenance or {},
        "confidence_score": float(item.confidence_score or 0.0),
        "expires_at": item.expires_at.isoformat() if item.expires_at else "",
        "last_verified_at": (
            item.last_verified_at.isoformat() if item.last_verified_at else ""
        ),
        "updated_at": item.updated_at.isoformat() if item.updated_at else "",
        "search_score": search_score,
        "search_summary": search_summary,
    }


def retrieve_memories_for_prompt(
    *,
    user_id: int,
    query: str = "",
    person_id: str = "",
    conversation_id: str = "",
    statuses: tuple[str, ...] = ("active",),
    limit: int = 20,
) -> list[dict[str, Any]]:
    """Return serialised memories for prompt assembly.

    With a non-empty `query`, results come from the search backend (ranked by
    hit order, carrying search_score/search_summary) and are re-scoped through
    the ORM so person/conversation/status/expiry filters still apply. Without
    a query, the newest verified/updated memories are returned directly.
    `limit` is clamped to 1..200.
    """
    statuses = _coerce_statuses(statuses, ("active",))
    safe_limit = max(1, min(200, int(limit or 20)))
    search_text = str(query or "").strip()
    if not search_text:
        # No query: newest verified/updated memories first.
        queryset = _base_queryset(
            user_id=int(user_id),
            person_id=person_id,
            conversation_id=conversation_id,
            statuses=statuses,
        ).order_by("-last_verified_at", "-updated_at")
        return [_memory_row(item) for item in queryset[:safe_limit]]
    backend = get_memory_search_backend()
    hits = backend.search(
        user_id=int(user_id),
        query=search_text,
        conversation_id=conversation_id,
        limit=safe_limit,
        include_statuses=statuses,
    )
    ids = [str(hit.memory_id or "").strip() for hit in hits if str(hit.memory_id or "").strip()]
    # Re-scope hits through the ORM so person/expiry filters apply to search results too.
    scoped = _base_queryset(
        user_id=int(user_id),
        person_id=person_id,
        conversation_id=conversation_id,
        statuses=statuses,
    ).filter(id__in=ids)
    by_id = {str(item.id): item for item in scoped}
    rows = []
    for hit in hits:
        item = by_id.get(str(hit.memory_id))
        if not item:
            continue
        rows.append(
            _memory_row(
                item,
                search_score=float(hit.score or 0.0),
                search_summary=str(hit.summary or ""),
            )
        )
    return rows

View File

@@ -137,6 +137,8 @@ class DjangoMemorySearchBackend(BaseMemorySearchBackend):
class ManticoreMemorySearchBackend(BaseMemorySearchBackend): class ManticoreMemorySearchBackend(BaseMemorySearchBackend):
name = "manticore" name = "manticore"
_table_ready_cache: dict[str, float] = {}
_table_ready_ttl_seconds = 30.0
def __init__(self): def __init__(self):
self.base_url = str( self.base_url = str(
@@ -146,6 +148,7 @@ class ManticoreMemorySearchBackend(BaseMemorySearchBackend):
getattr(settings, "MANTICORE_MEMORY_TABLE", "gia_memory_items") getattr(settings, "MANTICORE_MEMORY_TABLE", "gia_memory_items")
).strip() or "gia_memory_items" ).strip() or "gia_memory_items"
self.timeout_seconds = int(getattr(settings, "MANTICORE_HTTP_TIMEOUT", 5) or 5) self.timeout_seconds = int(getattr(settings, "MANTICORE_HTTP_TIMEOUT", 5) or 5)
self._table_cache_key = f"{self.base_url}|{self.table}"
def _sql(self, query: str) -> dict[str, Any]: def _sql(self, query: str) -> dict[str, Any]:
response = requests.post( response = requests.post(
@@ -160,6 +163,9 @@ class ManticoreMemorySearchBackend(BaseMemorySearchBackend):
return dict(payload or {}) return dict(payload or {})
def ensure_table(self) -> None: def ensure_table(self) -> None:
last_ready = float(self._table_ready_cache.get(self._table_cache_key, 0.0) or 0.0)
if (time.time() - last_ready) <= float(self._table_ready_ttl_seconds):
return
self._sql( self._sql(
( (
f"CREATE TABLE IF NOT EXISTS {self.table} (" f"CREATE TABLE IF NOT EXISTS {self.table} ("
@@ -175,6 +181,7 @@ class ManticoreMemorySearchBackend(BaseMemorySearchBackend):
")" ")"
) )
) )
self._table_ready_cache[self._table_cache_key] = time.time()
def _doc_id(self, memory_id: str) -> int: def _doc_id(self, memory_id: str) -> int:
digest = hashlib.blake2b( digest = hashlib.blake2b(
@@ -206,11 +213,66 @@ class ManticoreMemorySearchBackend(BaseMemorySearchBackend):
) )
self._sql(query) self._sql(query)
def _build_upsert_values_clause(self, item: MemoryItem) -> str:
    """Render one VALUES(...) tuple for a batched REPLACE INTO row.

    Column order must match reindex()'s column list:
    (id, memory_uuid, user_id, conversation_id, memory_kind, status,
     updated_ts, summary, body).
    """
    memory_id = str(item.id)
    doc_id = self._doc_id(memory_id)
    # Flatten the content once; the summary column is the truncated body.
    body = _flatten_to_text(item.content)
    summary = body[:280]
    # Millisecond-precision timestamp used for ordering in Manticore.
    updated_ts = int(item.updated_at.timestamp() * 1000)
    return (
        f"({doc_id},'{self._escape(memory_id)}',{int(item.user_id)},"
        f"'{self._escape(item.conversation_id)}','{self._escape(item.memory_kind)}',"
        f"'{self._escape(item.status)}',{updated_ts},"
        f"'{self._escape(summary)}','{self._escape(body)}')"
    )
def delete(self, memory_id: str) -> None: def delete(self, memory_id: str) -> None:
self.ensure_table() self.ensure_table()
doc_id = self._doc_id(memory_id) doc_id = self._doc_id(memory_id)
self._sql(f"DELETE FROM {self.table} WHERE id={doc_id}") self._sql(f"DELETE FROM {self.table} WHERE id={doc_id}")
def reindex(
    self,
    *,
    user_id: int | None = None,
    include_statuses: tuple[str, ...] = ("active",),
    limit: int = 2000,
) -> dict[str, int]:
    """Rebuild the Manticore table from MemoryItem rows in batched REPLACEs.

    Optionally scoped to one user and a status set; at most `limit` rows are
    considered (newest first). Rows whose VALUES clause cannot be built are
    logged and skipped. Returns {"scanned": ..., "indexed": ...}.
    """
    self.ensure_table()
    queryset = MemoryItem.objects.all().order_by("-updated_at")
    if user_id is not None:
        queryset = queryset.filter(user_id=int(user_id))
    if include_statuses:
        queryset = queryset.filter(status__in=list(include_statuses))
    # Single source of truth for the statement prefix; the column order must
    # match _build_upsert_values_clause's tuple order.
    insert_prefix = (
        f"REPLACE INTO {self.table} "
        "(id,memory_uuid,user_id,conversation_id,memory_kind,status,updated_ts,summary,body) "
        "VALUES "
    )
    scanned = 0
    indexed = 0
    batch_size = 100
    values: list[str] = []

    def _flush() -> int:
        # Write the accumulated VALUES tuples in one statement; returns count written.
        if not values:
            return 0
        self._sql(insert_prefix + ",".join(values))
        written = len(values)
        values.clear()
        return written

    for item in queryset[: max(1, int(limit))]:
        scanned += 1
        try:
            values.append(self._build_upsert_values_clause(item))
        except Exception as exc:
            # Skip unrenderable rows; keep indexing the rest.
            log.warning("memory-search upsert build failed id=%s err=%s", item.id, exc)
            continue
        if len(values) >= batch_size:
            indexed += _flush()
    indexed += _flush()
    return {"scanned": scanned, "indexed": indexed}
def search( def search(
self, self,
*, *,

View File

@@ -1,5 +1,6 @@
from asgiref.sync import sync_to_async from asgiref.sync import sync_to_async
from django.conf import settings from django.conf import settings
import time
import uuid import uuid
from core.events.ledger import append_event from core.events.ledger import append_event
@@ -628,6 +629,277 @@ async def apply_reaction(
return target return target
async def _resolve_message_target(
    user,
    identifier,
    *,
    target_message_id="",
    target_ts=0,
    target_author="",
):
    """Locate the local ``Message`` a remote edit/delete event refers to.

    Candidates are restricted to the given user and session identifier.
    Strategies are tried strictly in order, from exact to fuzzy:

    1. ``local_message_id`` — primary-key match, only attempted when
       ``target_message_id`` parses as a UUID.
    2. ``source_message_id`` — exact match on the remote message id.
    3. ``exact_source_message_id_ts`` — ``source_message_id`` equal to the
       stringified ``target_ts``.
    4. ``strict_ts_match`` — exact ``ts`` match.
    5. ``nearest_ts_window`` — closest ``ts`` within a +/-10_000 window
       around ``target_ts`` (ties broken toward the newer message).

    When ``target_author`` is provided, timestamp-based strategies prefer
    rows whose ``sender_uuid`` matches it, but fall back to the unfiltered
    candidates rather than failing.

    Returns:
        ``(message, strategy)`` where ``strategy`` is the name of the
        matching step that succeeded, or ``(None, "none")`` on a miss.
    """
    queryset = Message.objects.filter(
        user=user,
        session__identifier=identifier,
    ).select_related("session")
    target = None
    match_strategy = "none"
    target_author_value = str(target_author or "").strip()
    target_uuid = str(target_message_id or "").strip()
    if target_uuid:
        # Only try a primary-key lookup if the identifier is a valid UUID;
        # otherwise fall straight through to source_message_id matching.
        is_uuid = True
        try:
            uuid.UUID(str(target_uuid))
        except Exception:
            is_uuid = False
        if is_uuid:
            target = await sync_to_async(
                lambda: queryset.filter(id=target_uuid).order_by("-ts").first()
            )()
            if target is not None:
                match_strategy = "local_message_id"
        if target is None:
            target = await sync_to_async(
                lambda: queryset.filter(source_message_id=target_uuid)
                .order_by("-ts")
                .first()
            )()
            if target is not None:
                match_strategy = "source_message_id"
    if target is None:
        # Fall back to timestamp-based matching; a non-numeric target_ts
        # disables all remaining strategies.
        try:
            ts_value = int(target_ts or 0)
        except Exception:
            ts_value = 0
        if ts_value > 0:
            # Some transports use the send timestamp as the message id.
            exact_candidates = await sync_to_async(list)(
                queryset.filter(source_message_id=str(ts_value)).order_by("-ts")[:20]
            )
            if target_author_value and exact_candidates:
                filtered = [
                    row
                    for row in exact_candidates
                    if str(row.sender_uuid or "").strip() == target_author_value
                ]
                if filtered:
                    exact_candidates = filtered
            if exact_candidates:
                target = exact_candidates[0]
                match_strategy = "exact_source_message_id_ts"
        if target is None and ts_value > 0:
            strict_ts_rows = await sync_to_async(list)(
                queryset.filter(ts=ts_value).order_by("-id")[:20]
            )
            if target_author_value and strict_ts_rows:
                filtered = [
                    row
                    for row in strict_ts_rows
                    if str(row.sender_uuid or "").strip() == target_author_value
                ]
                if filtered:
                    strict_ts_rows = filtered
            if strict_ts_rows:
                target = strict_ts_rows[0]
                match_strategy = "strict_ts_match"
        if target is None and ts_value > 0:
            # Last resort: nearest message within +/-10_000 ts units
            # (presumably milliseconds — TODO confirm against Message.ts).
            lower = ts_value - 10_000
            upper = ts_value + 10_000
            window_rows = await sync_to_async(list)(
                queryset.filter(ts__gte=lower, ts__lte=upper).order_by("ts")[:200]
            )
            if target_author_value and window_rows:
                author_rows = [
                    row
                    for row in window_rows
                    if str(row.sender_uuid or "").strip() == target_author_value
                ]
                if author_rows:
                    window_rows = author_rows
            if window_rows:
                # Minimise |ts delta|; on equal distance prefer the newer row.
                target = min(
                    window_rows,
                    key=lambda row: (
                        abs(int(row.ts or 0) - ts_value),
                        -int(row.ts or 0),
                    ),
                )
                match_strategy = "nearest_ts_window"
    return target, match_strategy
async def apply_message_edit(
    user,
    identifier,
    *,
    target_message_id="",
    target_ts=0,
    new_text="",
    source_service="",
    actor="",
    payload=None,
    trace_id="",
    target_author="",
):
    """Apply a remote message-edit event to the local history.

    Resolves the target via :func:`_resolve_message_target`, records the
    edit in ``receipt_payload["edit_history"]`` (capped at 200 entries),
    updates the message text when it changed, and appends a
    ``message_edited`` event to the ledger (best-effort — ledger failures
    are logged, not raised).

    Returns:
        The updated ``Message``, or ``None`` when no target was found
        (the miss is logged as a warning).
    """
    target, match_strategy = await _resolve_message_target(
        user,
        identifier,
        target_message_id=target_message_id,
        target_ts=target_ts,
        target_author=target_author,
    )
    if target is None:
        log.warning(
            "edit-sync history-apply miss user=%s person_identifier=%s target_message_id=%s target_ts=%s",
            getattr(user, "id", "-"),
            getattr(identifier, "id", "-"),
            str(target_message_id or "") or "-",
            int(target_ts or 0),
        )
        return None
    old_text = str(target.text or "")
    updated_text = str(new_text or "")
    # Prefer the event's own timestamp; fall back to the message's, then now.
    event_ts = int(target_ts or target.ts or int(time.time() * 1000))
    receipt_payload = dict(target.receipt_payload or {})
    edit_history = list(receipt_payload.get("edit_history") or [])
    edit_history.append(
        {
            "edited_ts": int(event_ts),
            "source_service": str(source_service or "").strip().lower(),
            "actor": str(actor or "").strip(),
            "previous_text": old_text,
            "new_text": updated_text,
            "match_strategy": str(match_strategy or ""),
            "payload": dict(payload or {}),
        }
    )
    # Keep only the most recent 200 edits to bound payload growth.
    if len(edit_history) > 200:
        edit_history = edit_history[-200:]
    receipt_payload["edit_history"] = edit_history
    receipt_payload["last_edited_ts"] = int(event_ts)
    receipt_payload["edit_count"] = len(edit_history)
    target.receipt_payload = receipt_payload
    update_fields = ["receipt_payload"]
    # Only touch the text column when the content actually changed.
    if old_text != updated_text:
        target.text = updated_text
        update_fields.append("text")
    await sync_to_async(target.save)(update_fields=update_fields)
    try:
        # Best-effort ledger append; history update above already persisted.
        await append_event(
            user=user,
            session=target.session,
            ts=int(event_ts),
            event_type="message_edited",
            direction="system",
            actor_identifier=str(actor or ""),
            origin_transport=str(source_service or ""),
            origin_message_id=str(target.source_message_id or target.id),
            origin_chat_id=str(target.source_chat_id or ""),
            payload={
                "message_id": str(target.id),
                "target_message_id": str(target_message_id or target.id),
                "target_ts": int(target_ts or target.ts or 0),
                "old_text": old_text,
                "new_text": updated_text,
                "source_service": str(source_service or "").strip().lower(),
                "actor": str(actor or ""),
                "match_strategy": str(match_strategy or ""),
            },
            raw_payload=dict(payload or {}),
            trace_id=ensure_trace_id(trace_id, payload or {}),
        )
    except Exception as exc:
        log.warning(
            "Event ledger append failed for message edit message=%s: %s",
            target.id,
            exc,
        )
    return target
async def apply_message_delete(
    user,
    identifier,
    *,
    target_message_id="",
    target_ts=0,
    source_service="",
    actor="",
    payload=None,
    trace_id="",
    target_author="",
):
    """Apply a remote message-delete event to the local history.

    Soft delete: the message row is kept, the deletion is recorded in
    ``receipt_payload`` (``delete_events`` list capped at 200, plus
    ``deleted``/``is_deleted`` markers), and a ``message_deleted`` event
    is appended to the ledger (best-effort — failures are logged).

    Returns:
        The updated ``Message``, or ``None`` when no target was found
        (the miss is logged as a warning).
    """
    target, match_strategy = await _resolve_message_target(
        user,
        identifier,
        target_message_id=target_message_id,
        target_ts=target_ts,
        target_author=target_author,
    )
    if target is None:
        log.warning(
            "delete-sync history-apply miss user=%s person_identifier=%s target_message_id=%s target_ts=%s",
            getattr(user, "id", "-"),
            getattr(identifier, "id", "-"),
            str(target_message_id or "") or "-",
            int(target_ts or 0),
        )
        return None
    # Prefer the event's own timestamp; fall back to the message's, then now.
    event_ts = int(target_ts or target.ts or int(time.time() * 1000))
    deleted_row = {
        "deleted_ts": int(event_ts),
        "source_service": str(source_service or "").strip().lower(),
        "actor": str(actor or "").strip(),
        "match_strategy": str(match_strategy or ""),
        "payload": dict(payload or {}),
    }
    receipt_payload = dict(target.receipt_payload or {})
    delete_events = list(receipt_payload.get("delete_events") or [])
    delete_events.append(dict(deleted_row))
    # Keep only the most recent 200 delete events to bound payload growth.
    if len(delete_events) > 200:
        delete_events = delete_events[-200:]
    receipt_payload["delete_events"] = delete_events
    receipt_payload["deleted"] = deleted_row
    receipt_payload["is_deleted"] = True
    target.receipt_payload = receipt_payload
    await sync_to_async(target.save)(update_fields=["receipt_payload"])
    try:
        # Best-effort ledger append; soft-delete above already persisted.
        await append_event(
            user=user,
            session=target.session,
            ts=int(event_ts),
            event_type="message_deleted",
            direction="system",
            actor_identifier=str(actor or ""),
            origin_transport=str(source_service or ""),
            origin_message_id=str(target.source_message_id or target.id),
            origin_chat_id=str(target.source_chat_id or ""),
            payload={
                "message_id": str(target.id),
                "target_message_id": str(target_message_id or target.id),
                "target_ts": int(target_ts or target.ts or 0),
                "source_service": str(source_service or "").strip().lower(),
                "actor": str(actor or ""),
                "match_strategy": str(match_strategy or ""),
            },
            raw_payload=dict(payload or {}),
            trace_id=ensure_trace_id(trace_id, payload or {}),
        )
    except Exception as exc:
        log.warning(
            "Event ledger append failed for message delete message=%s: %s",
            target.id,
            exc,
        )
    return target
def _iter_bridge_refs(receipt_payload, source_service): def _iter_bridge_refs(receipt_payload, source_service):
payload = dict(receipt_payload or {}) payload = dict(receipt_payload or {})
refs = payload.get("bridge_refs") or {} refs = payload.get("bridge_refs") or {}

View File

@@ -0,0 +1,444 @@
# Generated by ChatGPT on 2026-03-05
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Memory/knowledge schema expansion.

    Adds provenance/confidence/expiry/person fields to ``MemoryItem`` and
    creates the ``KnowledgeArticle``, ``MCPToolAuditLog``,
    ``MemoryChangeRequest``, ``MemorySourceReference``, ``TaskArtifactLink``
    and ``KnowledgeRevision`` models, plus composite indexes on
    ``MemoryItem``. Index/constraint names here must stay in sync with the
    model ``Meta`` declarations.
    """
    dependencies = [
        ("core", "0035_conversationevent_adapterhealthevent"),
    ]
    operations = [
        # --- MemoryItem: new scoring/lifecycle/provenance fields ---
        migrations.AddField(
            model_name="memoryitem",
            name="confidence_score",
            field=models.FloatField(
                default=0.5,
                help_text="Confidence score for this memory (0.0-1.0).",
            ),
        ),
        migrations.AddField(
            model_name="memoryitem",
            name="expires_at",
            field=models.DateTimeField(
                blank=True,
                help_text="Optional expiry timestamp for stale memory decay.",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="memoryitem",
            name="last_verified_at",
            field=models.DateTimeField(
                blank=True,
                help_text="Last operator verification timestamp.",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="memoryitem",
            name="person",
            field=models.ForeignKey(
                blank=True,
                help_text=(
                    "Optional person this memory is about for person-centric recall."
                ),
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="memory_items",
                to="core.person",
            ),
        ),
        migrations.AddField(
            model_name="memoryitem",
            name="provenance",
            field=models.JSONField(
                blank=True,
                default=dict,
                help_text=(
                    "Source metadata for this memory (agent/tool/message references)."
                ),
            ),
        ),
        # --- New model: user-owned markdown knowledge articles ---
        migrations.CreateModel(
            name="KnowledgeArticle",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("title", models.CharField(max_length=255)),
                ("slug", models.SlugField(max_length=255)),
                ("markdown", models.TextField(blank=True, default="")),
                ("tags", models.JSONField(blank=True, default=list)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("draft", "Draft"),
                            ("published", "Published"),
                            ("archived", "Archived"),
                        ],
                        default="draft",
                        max_length=16,
                    ),
                ),
                ("owner_identifier", models.CharField(blank=True, default="", max_length=255)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "related_task",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="knowledge_articles",
                        to="core.derivedtask",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="knowledge_articles",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(
                        fields=["user", "status", "updated_at"],
                        name="core_knowl_user_id_331625_idx",
                    ),
                    models.Index(
                        fields=["related_task", "updated_at"],
                        name="core_knowl_related_cf6071_idx",
                    ),
                ],
                "constraints": [
                    models.UniqueConstraint(
                        fields=("user", "slug"),
                        name="unique_knowledge_article_slug_per_user",
                    )
                ],
            },
        ),
        # --- New model: per-invocation audit log for MCP tools ---
        migrations.CreateModel(
            name="MCPToolAuditLog",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("tool_name", models.CharField(max_length=255)),
                ("request_args", models.JSONField(blank=True, default=dict)),
                ("response_meta", models.JSONField(blank=True, default=dict)),
                ("ok", models.BooleanField(default=True)),
                ("error", models.TextField(blank=True, default="")),
                ("duration_ms", models.PositiveIntegerField(default=0)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="mcp_tool_audit_logs",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(
                        fields=["tool_name", "created_at"],
                        name="core_mcpau_tool_na_2db9d7_idx",
                    ),
                    models.Index(
                        fields=["user", "created_at"],
                        name="core_mcpau_user_id_4a55f1_idx",
                    ),
                    models.Index(
                        fields=["ok", "created_at"],
                        name="core_mcpau_ok_1f5c91_idx",
                    ),
                ]
            },
        ),
        # --- New model: reviewed proposals to mutate MemoryItem rows ---
        migrations.CreateModel(
            name="MemoryChangeRequest",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "action",
                    models.CharField(
                        choices=[
                            ("create", "Create"),
                            ("update", "Update"),
                            ("delete", "Delete"),
                        ],
                        max_length=16,
                    ),
                ),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("pending", "Pending"),
                            ("approved", "Approved"),
                            ("rejected", "Rejected"),
                            ("applied", "Applied"),
                        ],
                        default="pending",
                        max_length=16,
                    ),
                ),
                ("proposed_memory_kind", models.CharField(blank=True, default="", max_length=16)),
                ("proposed_content", models.JSONField(blank=True, default=dict)),
                ("proposed_confidence_score", models.FloatField(blank=True, null=True)),
                ("proposed_expires_at", models.DateTimeField(blank=True, null=True)),
                ("reason", models.TextField(blank=True, default="")),
                ("requested_by_identifier", models.CharField(blank=True, default="", max_length=255)),
                ("reviewed_by_identifier", models.CharField(blank=True, default="", max_length=255)),
                ("reviewed_at", models.DateTimeField(blank=True, null=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "conversation",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="memory_change_requests",
                        to="core.workspaceconversation",
                    ),
                ),
                (
                    "memory",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="change_requests",
                        to="core.memoryitem",
                    ),
                ),
                (
                    "person",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="memory_change_requests",
                        to="core.person",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="memory_change_requests",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(
                        fields=["user", "status", "created_at"],
                        name="core_memor_user_id_31963a_idx",
                    ),
                    models.Index(
                        fields=["memory", "created_at"],
                        name="core_memor_memory__1b9d7e_idx",
                    ),
                ]
            },
        ),
        # --- New model: provenance links from memories to source artefacts ---
        migrations.CreateModel(
            name="MemorySourceReference",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("source_label", models.CharField(blank=True, default="", max_length=255)),
                ("source_uri", models.CharField(blank=True, default="", max_length=1024)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "memory",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="source_references",
                        to="core.memoryitem",
                    ),
                ),
                (
                    "message",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="memory_source_references",
                        to="core.message",
                    ),
                ),
                (
                    "message_event",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="memory_source_references",
                        to="core.messageevent",
                    ),
                ),
                (
                    "source_request",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="memory_source_references",
                        to="core.airequest",
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(
                        fields=["memory", "created_at"],
                        name="core_memor_memory__92752b_idx",
                    ),
                    models.Index(fields=["source_uri"], name="core_memor_source__5bb587_idx"),
                ]
            },
        ),
        # --- New model: artifacts (notes/files/URIs) attached to tasks ---
        migrations.CreateModel(
            name="TaskArtifactLink",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("kind", models.CharField(default="note", max_length=64)),
                ("uri", models.CharField(blank=True, default="", max_length=1024)),
                ("path", models.CharField(blank=True, default="", max_length=1024)),
                ("summary", models.TextField(blank=True, default="")),
                ("created_by_identifier", models.CharField(blank=True, default="", max_length=255)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "task",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="artifact_links",
                        to="core.derivedtask",
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(
                        fields=["task", "created_at"],
                        name="core_taskar_task_id_cf5572_idx",
                    ),
                    models.Index(
                        fields=["kind", "created_at"],
                        name="core_taskar_kind_5dbab7_idx",
                    ),
                ]
            },
        ),
        # --- New model: numbered revision history for knowledge articles ---
        migrations.CreateModel(
            name="KnowledgeRevision",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("revision", models.PositiveIntegerField()),
                ("author_tool", models.CharField(blank=True, default="", max_length=255)),
                ("author_identifier", models.CharField(blank=True, default="", max_length=255)),
                ("summary", models.TextField(blank=True, default="")),
                ("markdown", models.TextField(blank=True, default="")),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "article",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="revisions",
                        to="core.knowledgearticle",
                    ),
                ),
            ],
            options={
                "ordering": ["article", "revision"],
                "constraints": [
                    models.UniqueConstraint(
                        fields=("article", "revision"),
                        name="unique_knowledge_revision_per_article",
                    )
                ],
            },
        ),
        # --- MemoryItem: composite indexes for common recall queries ---
        migrations.AddIndex(
            model_name="memoryitem",
            index=models.Index(
                fields=["user", "status", "updated_at"],
                name="core_mem_user_stat_upd_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="memoryitem",
            index=models.Index(
                fields=["user", "person", "status", "updated_at"],
                name="core_mem_user_pers_stat_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="memoryitem",
            index=models.Index(
                fields=["user", "conversation", "status", "updated_at"],
                name="core_mem_user_conv_stat_idx",
            ),
        ),
    ]

View File

@@ -1129,6 +1129,14 @@ class MemoryItem(models.Model):
related_name="memory_items", related_name="memory_items",
help_text="Conversation scope this memory item belongs to.", help_text="Conversation scope this memory item belongs to.",
) )
person = models.ForeignKey(
Person,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="memory_items",
help_text="Optional person this memory is about for person-centric recall.",
)
memory_kind = models.CharField( memory_kind = models.CharField(
max_length=16, max_length=16,
choices=MEMORY_KIND_CHOICES, choices=MEMORY_KIND_CHOICES,
@@ -1145,6 +1153,25 @@ class MemoryItem(models.Model):
blank=True, blank=True,
help_text="Structured memory payload (schema can evolve by type).", help_text="Structured memory payload (schema can evolve by type).",
) )
provenance = models.JSONField(
default=dict,
blank=True,
help_text="Source metadata for this memory (agent/tool/message references).",
)
confidence_score = models.FloatField(
default=0.5,
help_text="Confidence score for this memory (0.0-1.0).",
)
expires_at = models.DateTimeField(
null=True,
blank=True,
help_text="Optional expiry timestamp for stale memory decay.",
)
last_verified_at = models.DateTimeField(
null=True,
blank=True,
help_text="Last operator verification timestamp.",
)
source_request = models.ForeignKey( source_request = models.ForeignKey(
AIRequest, AIRequest,
on_delete=models.SET_NULL, on_delete=models.SET_NULL,
@@ -1161,6 +1188,111 @@ class MemoryItem(models.Model):
help_text="Last update timestamp.", help_text="Last update timestamp.",
) )
class Meta:
indexes = [
models.Index(fields=["user", "status", "updated_at"]),
models.Index(fields=["user", "person", "status", "updated_at"]),
models.Index(fields=["user", "conversation", "status", "updated_at"]),
]
class MemorySourceReference(models.Model):
    """Provenance link from a :class:`MemoryItem` to its source artefact.

    A row may point at a message event, a message, and/or the AI request
    that produced the memory; ``source_label``/``source_uri`` cover
    free-form or external sources. Rows are cascade-deleted with their
    memory, while referenced artefacts are nullable so deleting them
    preserves the provenance record.
    """
    # Owning memory; deleting the memory removes its provenance rows.
    memory = models.ForeignKey(
        MemoryItem,
        on_delete=models.CASCADE,
        related_name="source_references",
    )
    # Optional originating message event (kept nullable on deletion).
    message_event = models.ForeignKey(
        "MessageEvent",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="memory_source_references",
    )
    # Optional originating message (kept nullable on deletion).
    message = models.ForeignKey(
        Message,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="memory_source_references",
    )
    # Optional AI request that generated/derived this memory.
    source_request = models.ForeignKey(
        AIRequest,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="memory_source_references",
    )
    # Human-readable source name, e.g. a document title.
    source_label = models.CharField(max_length=255, blank=True, default="")
    # Machine-usable locator for the source (URL, path, etc.).
    source_uri = models.CharField(max_length=1024, blank=True, default="")
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        indexes = [
            models.Index(fields=["memory", "created_at"]),
            models.Index(fields=["source_uri"]),
        ]
class MemoryChangeRequest(models.Model):
    """Reviewed proposal to create, update, or delete a :class:`MemoryItem`.

    Captures the proposed payload plus who requested and who reviewed it.
    Status flows pending -> approved/rejected, with ``applied`` marking a
    proposal whose change has been executed. ``memory`` is null for
    ``create`` actions (and after the target memory is deleted).
    """
    ACTION_CHOICES = (
        ("create", "Create"),
        ("update", "Update"),
        ("delete", "Delete"),
    )
    STATUS_CHOICES = (
        ("pending", "Pending"),
        ("approved", "Approved"),
        ("rejected", "Rejected"),
        ("applied", "Applied"),
    )
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name="memory_change_requests",
    )
    # Target memory; null for "create" proposals or if the memory is gone.
    memory = models.ForeignKey(
        MemoryItem,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="change_requests",
    )
    # Optional conversation scope for the proposed memory.
    conversation = models.ForeignKey(
        WorkspaceConversation,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="memory_change_requests",
    )
    # Optional person the proposed memory is about.
    person = models.ForeignKey(
        Person,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="memory_change_requests",
    )
    action = models.CharField(max_length=16, choices=ACTION_CHOICES)
    status = models.CharField(max_length=16, choices=STATUS_CHOICES, default="pending")
    # Proposed replacement values; blank/null fields mean "leave unchanged".
    proposed_memory_kind = models.CharField(max_length=16, blank=True, default="")
    proposed_content = models.JSONField(default=dict, blank=True)
    proposed_confidence_score = models.FloatField(null=True, blank=True)
    proposed_expires_at = models.DateTimeField(null=True, blank=True)
    # Free-text justification for the change.
    reason = models.TextField(blank=True, default="")
    # Audit trail: who asked for and who reviewed this change.
    requested_by_identifier = models.CharField(max_length=255, blank=True, default="")
    reviewed_by_identifier = models.CharField(max_length=255, blank=True, default="")
    reviewed_at = models.DateTimeField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        indexes = [
            models.Index(fields=["user", "status", "created_at"]),
            models.Index(fields=["memory", "created_at"]),
        ]
class AIResultSignal(models.Model): class AIResultSignal(models.Model):
""" """
@@ -2249,6 +2381,117 @@ class DerivedTaskEvent(models.Model):
] ]
class TaskArtifactLink(models.Model):
    """Artifact attached to a :class:`DerivedTask` (note, file, or URI).

    ``kind`` classifies the artifact (default ``"note"``); ``uri`` and
    ``path`` locate it externally or on disk. Rows are cascade-deleted
    with their task.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    task = models.ForeignKey(
        DerivedTask,
        on_delete=models.CASCADE,
        related_name="artifact_links",
    )
    # Artifact category, e.g. "note"; free-form up to 64 chars.
    kind = models.CharField(max_length=64, default="note")
    # External locator and/or local filesystem path for the artifact.
    uri = models.CharField(max_length=1024, blank=True, default="")
    path = models.CharField(max_length=1024, blank=True, default="")
    summary = models.TextField(blank=True, default="")
    # Who created the link (identifier string, not a FK).
    created_by_identifier = models.CharField(max_length=255, blank=True, default="")
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        indexes = [
            models.Index(fields=["task", "created_at"]),
            models.Index(fields=["kind", "created_at"]),
        ]
class KnowledgeArticle(models.Model):
    """User-owned markdown knowledge article.

    May optionally reference the :class:`DerivedTask` it documents.
    ``slug`` is unique per user (see the Meta constraint); status moves
    between draft, published, and archived.
    """
    STATUS_CHOICES = (
        ("draft", "Draft"),
        ("published", "Published"),
        ("archived", "Archived"),
    )
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name="knowledge_articles",
    )
    # Optional task this article documents; survives task deletion.
    related_task = models.ForeignKey(
        DerivedTask,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="knowledge_articles",
    )
    title = models.CharField(max_length=255)
    # URL-safe identifier; unique within a single user's articles.
    slug = models.SlugField(max_length=255)
    # Current article body; historical bodies live in KnowledgeRevision.
    markdown = models.TextField(blank=True, default="")
    tags = models.JSONField(default=list, blank=True)
    status = models.CharField(max_length=16, choices=STATUS_CHOICES, default="draft")
    # Owner identifier string (not a FK; presumably an external id — TODO confirm).
    owner_identifier = models.CharField(max_length=255, blank=True, default="")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=["user", "slug"],
                name="unique_knowledge_article_slug_per_user",
            ),
        ]
        indexes = [
            models.Index(fields=["user", "status", "updated_at"]),
            models.Index(fields=["related_task", "updated_at"]),
        ]
class KnowledgeRevision(models.Model):
    """Snapshot of a :class:`KnowledgeArticle` body at a numbered revision.

    ``(article, revision)`` is unique; default ordering walks revisions
    in ascending order within each article. Revisions are cascade-deleted
    with their article.
    """
    article = models.ForeignKey(
        KnowledgeArticle,
        on_delete=models.CASCADE,
        related_name="revisions",
    )
    # Monotonic revision number within the article (uniqueness enforced below).
    revision = models.PositiveIntegerField()
    # Which tool/author produced this revision (identifier strings, not FKs).
    author_tool = models.CharField(max_length=255, blank=True, default="")
    author_identifier = models.CharField(max_length=255, blank=True, default="")
    summary = models.TextField(blank=True, default="")
    # Full article body as of this revision.
    markdown = models.TextField(blank=True, default="")
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=["article", "revision"],
                name="unique_knowledge_revision_per_article",
            )
        ]
        ordering = ["article", "revision"]
class MCPToolAuditLog(models.Model):
    """Audit record for a single MCP tool invocation.

    Stores the request arguments, response metadata, success flag,
    error text, and wall-clock duration. ``user`` is nullable so audit
    history survives account deletion.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    tool_name = models.CharField(max_length=255)
    user = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="mcp_tool_audit_logs",
    )
    # Arguments the tool was called with and metadata about its response.
    request_args = models.JSONField(default=dict, blank=True)
    response_meta = models.JSONField(default=dict, blank=True)
    ok = models.BooleanField(default=True)
    # Error detail when ok is False; empty otherwise.
    error = models.TextField(blank=True, default="")
    duration_ms = models.PositiveIntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        indexes = [
            models.Index(fields=["tool_name", "created_at"]),
            models.Index(fields=["user", "created_at"]),
            models.Index(fields=["ok", "created_at"]),
        ]
class ExternalSyncEvent(models.Model): class ExternalSyncEvent(models.Model):
STATUS_CHOICES = ( STATUS_CHOICES = (
("pending", "Pending"), ("pending", "Pending"),

View File

@@ -1,12 +1,13 @@
{% load static %} {% load static %}
{% load cache %} {% load cache %}
{% load page_title %}
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en-GB"> <html lang="en-GB">
<head> <head>
<meta charset="utf-8"> <meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<title>{% block browser_title %}{{ request.resolver_match.url_name|default:request.path_info|cut:"_"|cut:"/"|cut:"-"|upper|slice:":3" }}{% endblock %}</title> <title>{% block browser_title %}{% firstof page_browser_title page_title as explicit_title %}{% if explicit_title %}{{ explicit_title }} · GIA{% else %}{% with route_value=request.resolver_match.url_name|default:request.path_info|humanize_route %}{% if route_value %}{{ route_value }} · GIA{% else %}GIA{% endif %}{% endwith %}{% endif %}{% endblock %}</title>
<link rel="shortcut icon" href="{% static 'favicon.ico' %}"> <link rel="shortcut icon" href="{% static 'favicon.ico' %}">
<link rel="manifest" href="{% static 'manifest.webmanifest' %}"> <link rel="manifest" href="{% static 'manifest.webmanifest' %}">
<link rel="stylesheet" href="{% static 'css/bulma.min.css' %}"> <link rel="stylesheet" href="{% static 'css/bulma.min.css' %}">

View File

@@ -1,117 +1,208 @@
{% extends "base.html" %} {% extends "base.html" %}
{% block content %} {% block content %}
<style>
.ai-stat-box {
height: 100%;
min-height: 92px;
margin: 0;
}
</style>
<section class="section"> <section class="section">
<div class="container"> <div class="container">
<h1 class="title is-4">AI Execution Log</h1> <div class="level">
<p class="subtitle is-6">Tracked model calls and usage metrics for this account.</p> <div class="level-left">
<div class="level-item">
<article class="box"> <div>
<div class="columns is-multiline"> <h1 class="title is-4">AI Execution Log</h1>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Total Runs</p><p class="title is-6">{{ stats.total_runs }}</p></div></div> <p class="subtitle is-6">Tracked model calls and usage metrics for this account.</p>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">OK</p><p class="title is-6 has-text-success">{{ stats.total_ok }}</p></div></div> </div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Failed</p><p class="title is-6 has-text-danger">{{ stats.total_failed }}</p></div></div> </div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Success Rate</p><p class="title is-6 has-text-info">{{ stats.success_rate }}%</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">24h Runs</p><p class="title is-6">{{ stats.last_24h_runs }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">24h Failed</p><p class="title is-6 has-text-warning">{{ stats.last_24h_failed }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">7d Runs</p><p class="title is-6">{{ stats.last_7d_runs }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Avg Duration</p><p class="title is-6">{{ stats.avg_duration_ms }}ms</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Prompt Chars</p><p class="title is-6">{{ stats.total_prompt_chars }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Response Chars</p><p class="title is-6">{{ stats.total_response_chars }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Avg Prompt</p><p class="title is-6">{{ stats.avg_prompt_chars }}</p></div></div>
<div class="column is-6-mobile is-4-tablet is-3-desktop"><div class="box ai-stat-box"><p class="heading">Avg Response</p><p class="title is-6">{{ stats.avg_response_chars }}</p></div></div>
</div> </div>
<div class="level-right">
<div class="level-item">
{% if stats.total_runs %}
<span class="tag is-success is-light">Tracking Active</span>
{% else %}
<span class="tag is-warning is-light">No Runs Yet</span>
{% endif %}
</div>
</div>
</div>
<article class="notification is-light">
<p class="is-size-7 has-text-grey-dark">Execution health at a glance</p>
<div class="tags mt-2">
<span class="tag is-light">Total {{ stats.total_runs }}</span>
<span class="tag is-success is-light">OK {{ stats.total_ok }}</span>
<span class="tag is-danger is-light">Failed {{ stats.total_failed }}</span>
<span class="tag is-info is-light">24h {{ stats.last_24h_runs }}</span>
<span class="tag is-warning is-light">24h Failed {{ stats.last_24h_failed }}</span>
<span class="tag is-link is-light">7d {{ stats.last_7d_runs }}</span>
</div>
<p class="is-size-7 has-text-grey-dark mt-3">Success Rate</p>
<progress class="progress is-link is-small" value="{{ stats.success_rate }}" max="100">{{ stats.success_rate }}%</progress>
</article> </article>
<div class="columns"> <div class="columns is-multiline">
<div class="column is-6"> <div class="column is-12-tablet is-4-desktop">
<article class="box"> <article class="card">
<h2 class="title is-6">By Operation</h2> <header class="card-header">
<table class="table is-fullwidth is-size-7 is-striped"> <p class="card-header-title is-size-6">Reliability</p>
<thead> </header>
<tr><th>Operation</th><th>Total</th><th>OK</th><th>Failed</th></tr> <div class="card-content">
</thead> <table class="table is-fullwidth is-narrow is-size-7">
<tbody> <tbody>
{% for row in operation_breakdown %} <tr><th>Total Runs</th><td>{{ stats.total_runs }}</td></tr>
<tr> <tr><th>OK</th><td class="has-text-success">{{ stats.total_ok }}</td></tr>
<td>{{ row.operation|default:"(none)" }}</td> <tr><th>Failed</th><td class="has-text-danger">{{ stats.total_failed }}</td></tr>
<td>{{ row.total }}</td> <tr><th>Success Rate</th><td>{{ stats.success_rate }}%</td></tr>
<td>{{ row.ok }}</td> </tbody>
<td>{{ row.failed }}</td> </table>
</tr> </div>
{% empty %}
<tr><td colspan="4">No runs yet.</td></tr>
{% endfor %}
</tbody>
</table>
</article> </article>
</div> </div>
<div class="column is-6"> <div class="column is-12-tablet is-4-desktop">
<article class="box"> <article class="card">
<h2 class="title is-6">By Model</h2> <header class="card-header">
<table class="table is-fullwidth is-size-7 is-striped"> <p class="card-header-title is-size-6">Throughput</p>
<thead> </header>
<tr><th>Model</th><th>Total</th><th>OK</th><th>Failed</th></tr> <div class="card-content">
</thead> <table class="table is-fullwidth is-narrow is-size-7">
<tbody> <tbody>
{% for row in model_breakdown %} <tr><th>Runs (24h)</th><td>{{ stats.last_24h_runs }}</td></tr>
<tr> <tr><th>Failed (24h)</th><td>{{ stats.last_24h_failed }}</td></tr>
<td>{{ row.model|default:"(none)" }}</td> <tr><th>Runs (7d)</th><td>{{ stats.last_7d_runs }}</td></tr>
<td>{{ row.total }}</td> <tr><th>Avg Duration</th><td>{{ stats.avg_duration_ms }}ms</td></tr>
<td>{{ row.ok }}</td> </tbody>
<td>{{ row.failed }}</td> </table>
</tr> </div>
{% empty %} </article>
<tr><td colspan="4">No runs yet.</td></tr> </div>
{% endfor %} <div class="column is-12-tablet is-4-desktop">
</tbody> <article class="card">
</table> <header class="card-header">
<p class="card-header-title is-size-6">Token Proxy (Chars)</p>
</header>
<div class="card-content">
<table class="table is-fullwidth is-narrow is-size-7">
<tbody>
<tr><th>Total Prompt</th><td>{{ stats.total_prompt_chars }}</td></tr>
<tr><th>Total Response</th><td>{{ stats.total_response_chars }}</td></tr>
<tr><th>Avg Prompt</th><td>{{ stats.avg_prompt_chars }}</td></tr>
<tr><th>Avg Response</th><td>{{ stats.avg_response_chars }}</td></tr>
</tbody>
</table>
</div>
</article> </article>
</div> </div>
</div> </div>
<article class="box"> <div class="columns">
<h2 class="title is-6">Recent Runs</h2> <div class="column is-6">
<div class="table-container"> <article class="card">
<table class="table is-fullwidth is-size-7 is-striped"> <header class="card-header">
<thead> <p class="card-header-title is-size-6">By Operation</p>
<tr> </header>
<th>Started</th> <div class="card-content">
<th>Status</th> <div class="table-container">
<th>Operation</th> <table class="table is-fullwidth is-size-7 is-striped is-hoverable">
<th>Model</th> <thead>
<th>Messages</th> <tr><th>Operation</th><th>Total</th><th>OK</th><th>Failed</th></tr>
<th>Prompt</th> </thead>
<th>Response</th> <tbody>
<th>Duration</th> {% for row in operation_breakdown %}
<th>Error</th> <tr>
</tr> <td>{{ row.operation|default:"(none)" }}</td>
</thead> <td>{{ row.total }}</td>
<tbody> <td class="has-text-success">{{ row.ok }}</td>
{% for run in runs %} <td class="has-text-danger">{{ row.failed }}</td>
</tr>
{% empty %}
<tr><td colspan="4">No runs yet.</td></tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</article>
</div>
<div class="column is-6">
<article class="card">
<header class="card-header">
<p class="card-header-title is-size-6">By Model</p>
</header>
<div class="card-content">
<div class="table-container">
<table class="table is-fullwidth is-size-7 is-striped is-hoverable">
<thead>
<tr><th>Model</th><th>Total</th><th>OK</th><th>Failed</th></tr>
</thead>
<tbody>
{% for row in model_breakdown %}
<tr>
<td>{{ row.model|default:"(none)" }}</td>
<td>{{ row.total }}</td>
<td class="has-text-success">{{ row.ok }}</td>
<td class="has-text-danger">{{ row.failed }}</td>
</tr>
{% empty %}
<tr><td colspan="4">No runs yet.</td></tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</article>
</div>
</div>
<article class="card">
<header class="card-header">
<p class="card-header-title is-size-6">Recent Runs</p>
</header>
<div class="card-content">
<div class="table-container">
<table class="table is-fullwidth is-size-7 is-striped is-hoverable">
<thead>
<tr> <tr>
<td>{{ run.started_at }}</td> <th>Started</th>
<td>{{ run.status }}</td> <th>Status</th>
<td>{{ run.operation|default:"-" }}</td> <th>Operation</th>
<td>{{ run.model|default:"-" }}</td> <th>Model</th>
<td>{{ run.message_count }}</td> <th>Messages</th>
<td>{{ run.prompt_chars }}</td> <th>Prompt</th>
<td>{{ run.response_chars }}</td> <th>Response</th>
<td>{% if run.duration_ms %}{{ run.duration_ms }}ms{% else %}-{% endif %}</td> <th>Duration</th>
<td style="max-width: 26rem; white-space: nowrap; overflow: hidden; text-overflow: ellipsis;" title="{{ run.error }}">{{ run.error|default:"-" }}</td> <th>Error</th>
</tr> </tr>
{% empty %} </thead>
<tr><td colspan="9">No runs yet.</td></tr> <tbody>
{% endfor %} {% for run in runs %}
</tbody> <tr>
</table> <td>{{ run.started_at }}</td>
<td>
{% if run.status == "ok" %}
<span class="tag is-success is-light">ok</span>
{% elif run.status == "failed" %}
<span class="tag is-danger is-light">failed</span>
{% else %}
<span class="tag is-light">{{ run.status }}</span>
{% endif %}
</td>
<td>{{ run.operation|default:"-" }}</td>
<td>{{ run.model|default:"-" }}</td>
<td>{{ run.message_count }}</td>
<td>{{ run.prompt_chars }}</td>
<td>{{ run.response_chars }}</td>
<td>{% if run.duration_ms %}{{ run.duration_ms }}ms{% else %}-{% endif %}</td>
<td>
{% if run.error %}
<span title="{{ run.error }}">{{ run.error|truncatechars:120 }}</span>
{% else %}
-
{% endif %}
</td>
</tr>
{% empty %}
<tr><td colspan="9">No runs yet.</td></tr>
{% endfor %}
</tbody>
</table>
</div>
</div> </div>
</article> </article>
</div> </div>

View File

@@ -381,6 +381,47 @@
</span> </span>
{% endfor %} {% endfor %}
</div> </div>
<div
id="{{ panel_id }}-availability-summary"
class="tags are-small mt-1{% if not availability_summary %} is-hidden{% endif %}"
data-summary='{{ availability_summary_json|default:"{}"|escapejs }}'
aria-label="Contact availability summary">
{% if availability_summary %}
<span class="tag is-light {% if availability_summary.state == 'available' %}is-success{% elif availability_summary.state == 'fading' %}is-warning{% elif availability_summary.state == 'unavailable' %}is-danger{% endif %}">
{{ availability_summary.state_label }}
</span>
<span class="tag is-light">{{ availability_summary.service|upper|default:"-" }}</span>
{% if availability_summary.ts_label %}
<span class="tag is-light">Updated {{ availability_summary.ts_label }}</span>
{% endif %}
{% if availability_summary.is_cross_service %}
<span class="tag is-light">Cross-service fallback</span>
{% endif %}
{% endif %}
</div>
<div class="compose-history-nav" role="tablist" aria-label="Conversation views">
<button
type="button"
class="compose-history-tab is-active"
data-target="thread"
aria-selected="true">
Thread
</button>
<button
type="button"
class="compose-history-tab"
data-target="deleted"
aria-selected="false">
Deleted
<span id="{{ panel_id }}-deleted-count" class="compose-history-count">0</span>
</button>
</div>
<div id="{{ panel_id }}-deleted" class="compose-deleted-pane is-hidden">
<p id="{{ panel_id }}-deleted-empty" class="compose-empty">No deleted messages noted yet.</p>
<div id="{{ panel_id }}-deleted-list" class="compose-deleted-list"></div>
</div>
<div <div
id="{{ panel_id }}-thread" id="{{ panel_id }}-thread"
@@ -397,12 +438,14 @@
data-quick-insights-url="{{ compose_quick_insights_url }}" data-quick-insights-url="{{ compose_quick_insights_url }}"
data-history-sync-url="{{ compose_history_sync_url }}" data-history-sync-url="{{ compose_history_sync_url }}"
data-react-url="{% url 'compose_react' %}" data-react-url="{% url 'compose_react' %}"
data-capability-reactions="{% if capability_reactions %}1{% else %}0{% endif %}"
data-capability-reactions-reason="{{ capability_reactions_reason|default:''|escape }}"
data-reaction-actor-prefix="web:{{ request.user.id }}:" data-reaction-actor-prefix="web:{{ request.user.id }}:"
data-toggle-command-url="{{ compose_toggle_command_url }}" data-toggle-command-url="{{ compose_toggle_command_url }}"
data-engage-preview-url="{{ compose_engage_preview_url }}" data-engage-preview-url="{{ compose_engage_preview_url }}"
data-engage-send-url="{{ compose_engage_send_url }}"> data-engage-send-url="{{ compose_engage_send_url }}">
{% for msg in serialized_messages %} {% for msg in serialized_messages %}
<div class="compose-row {% if msg.outgoing %}is-out{% else %}is-in{% endif %}" data-ts="{{ msg.ts }}" data-message-id="{{ msg.id }}" data-author="{{ msg.author|default:''|escape }}" data-sender-uuid="{{ msg.sender_uuid|default:''|escape }}" data-display-ts="{{ msg.display_ts|escape }}" data-source-service="{{ msg.source_service|default:''|escape }}" data-source-label="{{ msg.source_label|default:''|escape }}" data-source-message-id="{{ msg.source_message_id|default:''|escape }}" data-direction="{% if msg.outgoing %}outgoing{% else %}incoming{% endif %}"{% if msg.reply_to_id %} data-reply-to-id="{{ msg.reply_to_id }}"{% endif %} data-reply-snippet="{{ msg.display_text|default:msg.text|default:''|truncatechars:120|escape }}"> <div class="compose-row {% if msg.outgoing %}is-out{% else %}is-in{% endif %}{% if msg.is_deleted %} is-deleted{% endif %}" data-ts="{{ msg.ts }}" data-message-id="{{ msg.id }}" data-author="{{ msg.author|default:''|escape }}" data-sender-uuid="{{ msg.sender_uuid|default:''|escape }}" data-display-ts="{{ msg.display_ts|escape }}" data-source-service="{{ msg.source_service|default:''|escape }}" data-source-label="{{ msg.source_label|default:''|escape }}" data-source-message-id="{{ msg.source_message_id|default:''|escape }}" data-direction="{% if msg.outgoing %}outgoing{% else %}incoming{% endif %}" data-is-deleted="{% if msg.is_deleted %}1{% else %}0{% endif %}" data-deleted-ts="{{ msg.deleted_ts|default:0 }}" data-deleted-display="{{ msg.deleted_display|default:''|escape }}" data-deleted-actor="{{ msg.deleted_actor|default:''|escape }}" data-deleted-source="{{ msg.deleted_source_service|default:''|escape }}" data-edit-count="{{ msg.edit_count|default:0 }}" data-edit-history="{{ msg.edit_history_json|default:'[]'|escapejs }}" data-raw-text="{{ msg.text|default:''|truncatechars:220|escape }}"{% if msg.reply_to_id %} data-reply-to-id="{{ msg.reply_to_id }}"{% endif %} data-reply-snippet="{{ 
msg.display_text|default:msg.text|default:''|truncatechars:120|escape }}">
{% if msg.gap_fragments %} {% if msg.gap_fragments %}
{% with gap=msg.gap_fragments.0 %} {% with gap=msg.gap_fragments.0 %}
<p <p
@@ -460,7 +503,26 @@
{% else %} {% else %}
<p class="compose-body compose-image-fallback is-hidden">(no text)</p> <p class="compose-body compose-image-fallback is-hidden">(no text)</p>
{% endif %} {% endif %}
{% if service == "signal" or service == "whatsapp" %} {% if msg.edit_count %}
<details class="compose-edit-history">
<summary>Edited {{ msg.edit_count }} time{% if msg.edit_count != 1 %}s{% endif %}</summary>
<ul>
{% for edit in msg.edit_history %}
<li>
{% if edit.edited_display %}{{ edit.edited_display }}{% else %}Unknown time{% endif %}
{% if edit.actor %} · {{ edit.actor }}{% endif %}
{% if edit.source_service %} · {{ edit.source_service|upper }}{% endif %}
<div class="compose-edit-diff">
<span class="compose-edit-old">{{ edit.previous_text|default:"(empty)" }}</span>
<span class="compose-edit-arrow"></span>
<span class="compose-edit-new">{{ edit.new_text|default:"(empty)" }}</span>
</div>
</li>
{% endfor %}
</ul>
</details>
{% endif %}
{% if capability_reactions %}
<div class="compose-reaction-actions" data-message-id="{{ msg.id }}"> <div class="compose-reaction-actions" data-message-id="{{ msg.id }}">
<button type="button" class="compose-react-btn" data-emoji="👍" title="React with thumbs up">👍</button> <button type="button" class="compose-react-btn" data-emoji="👍" title="React with thumbs up">👍</button>
<button type="button" class="compose-react-btn" data-emoji="❤️" title="React with heart">❤️</button> <button type="button" class="compose-react-btn" data-emoji="❤️" title="React with heart">❤️</button>
@@ -495,6 +557,12 @@
{% endif %} {% endif %}
<p class="compose-msg-meta"> <p class="compose-msg-meta">
{{ msg.display_ts }}{% if msg.author %} · {{ msg.author }}{% endif %} {{ msg.display_ts }}{% if msg.author %} · {{ msg.author }}{% endif %}
{% if msg.is_edited %}
<span class="compose-msg-flag is-edited" title="Message edited{% if msg.last_edit_display %} at {{ msg.last_edit_display }}{% endif %}">edited</span>
{% endif %}
{% if msg.is_deleted %}
<span class="compose-msg-flag is-deleted" title="Deleted{% if msg.deleted_display %} at {{ msg.deleted_display }}{% endif %}{% if msg.deleted_actor %} by {{ msg.deleted_actor }}{% endif %}">deleted</span>
{% endif %}
{% if msg.read_ts %} {% if msg.read_ts %}
<span <span
class="compose-ticks js-receipt-trigger" class="compose-ticks js-receipt-trigger"
@@ -561,8 +629,11 @@
<input type="hidden" name="failsafe_confirm" value="0"> <input type="hidden" name="failsafe_confirm" value="0">
<div class="compose-send-safety"> <div class="compose-send-safety">
<label class="checkbox is-size-7"> <label class="checkbox is-size-7">
<input type="checkbox" class="manual-confirm"> Confirm Send <input type="checkbox" class="manual-confirm"{% if not capability_send %} disabled{% endif %}> Confirm Send
</label> </label>
{% if not capability_send %}
<p class="help is-size-7 has-text-grey">Send disabled: {{ capability_send_reason }}</p>
{% endif %}
</div> </div>
<div id="{{ panel_id }}-reply-banner" class="compose-reply-banner is-hidden"> <div id="{{ panel_id }}-reply-banner" class="compose-reply-banner is-hidden">
<span class="compose-reply-banner-label">Replying to:</span> <span class="compose-reply-banner-label">Replying to:</span>
@@ -576,7 +647,7 @@
name="text" name="text"
rows="1" rows="1"
placeholder="Type a message. Enter to send, Shift+Enter for newline."></textarea> placeholder="Type a message. Enter to send, Shift+Enter for newline."></textarea>
<button class="button is-link is-light compose-send-btn" type="submit" disabled> <button class="button is-link is-light compose-send-btn" type="submit" disabled{% if not capability_send %} title="{{ capability_send_reason }}"{% endif %}>
<span class="icon is-small"><i class="{{ manual_icon_class }}"></i></span> <span class="icon is-small"><i class="{{ manual_icon_class }}"></i></span>
<span>Send</span> <span>Send</span>
</button> </button>
@@ -605,6 +676,134 @@
padding: 0.65rem; padding: 0.65rem;
background: linear-gradient(180deg, rgba(248, 250, 252, 0.7), rgba(255, 255, 255, 0.98)); background: linear-gradient(180deg, rgba(248, 250, 252, 0.7), rgba(255, 255, 255, 0.98));
} }
#{{ panel_id }} .compose-history-nav {
margin-top: 0.45rem;
display: inline-flex;
gap: 0.35rem;
}
#{{ panel_id }} .compose-history-tab {
border: 1px solid rgba(38, 68, 111, 0.24);
background: #f3f7fc;
color: #2b4364;
border-radius: 999px;
padding: 0.2rem 0.58rem;
font-size: 0.68rem;
font-weight: 600;
line-height: 1.1;
cursor: pointer;
}
#{{ panel_id }} .compose-history-tab.is-active {
background: #2b4f7a;
color: #fff;
border-color: #2b4f7a;
}
#{{ panel_id }} .compose-history-count {
margin-left: 0.22rem;
display: inline-block;
min-width: 1.05rem;
text-align: center;
border-radius: 999px;
background: rgba(255, 255, 255, 0.35);
font-size: 0.62rem;
padding: 0.03rem 0.24rem;
}
#{{ panel_id }} .compose-deleted-pane {
margin-top: 0.55rem;
margin-bottom: 0.55rem;
min-height: 8rem;
max-height: 46vh;
overflow-y: auto;
border: 1px solid rgba(0, 0, 0, 0.12);
border-radius: 8px;
padding: 0.6rem;
background: linear-gradient(180deg, rgba(253, 248, 247, 0.85), rgba(255, 255, 255, 0.98));
}
#{{ panel_id }} .compose-deleted-pane.is-hidden {
display: none;
}
#{{ panel_id }} .compose-deleted-item {
border: 1px solid rgba(181, 96, 80, 0.2);
border-radius: 8px;
padding: 0.4rem 0.5rem;
margin-bottom: 0.4rem;
background: rgba(255, 248, 247, 0.98);
}
#{{ panel_id }} .compose-deleted-item:last-child {
margin-bottom: 0;
}
#{{ panel_id }} .compose-deleted-meta {
display: flex;
flex-wrap: wrap;
gap: 0.28rem;
font-size: 0.65rem;
color: #7b4c42;
margin-bottom: 0.18rem;
}
#{{ panel_id }} .compose-deleted-preview {
margin: 0;
font-size: 0.71rem;
color: #4b3b38;
white-space: pre-wrap;
word-break: break-word;
}
#{{ panel_id }} .compose-deleted-jump {
margin-top: 0.3rem;
}
#{{ panel_id }} .compose-msg-flag {
display: inline-block;
margin-left: 0.3rem;
border-radius: 999px;
padding: 0.03rem 0.34rem;
font-size: 0.58rem;
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.02em;
}
#{{ panel_id }} .compose-msg-flag.is-edited {
color: #7d5010;
background: rgba(255, 241, 214, 0.95);
border: 1px solid rgba(169, 115, 31, 0.28);
}
#{{ panel_id }} .compose-msg-flag.is-deleted {
color: #842f2f;
background: rgba(255, 228, 228, 0.95);
border: 1px solid rgba(173, 52, 52, 0.28);
}
#{{ panel_id }} .compose-edit-history {
margin-top: 0.28rem;
border-radius: 8px;
border: 1px solid rgba(124, 102, 63, 0.25);
background: rgba(255, 252, 245, 0.96);
padding: 0.24rem 0.4rem;
font-size: 0.64rem;
}
#{{ panel_id }} .compose-edit-history summary {
cursor: pointer;
color: #7a5a22;
font-weight: 600;
}
#{{ panel_id }} .compose-edit-history ul {
margin: 0.25rem 0 0;
padding-left: 1.05rem;
}
#{{ panel_id }} .compose-edit-diff {
margin-top: 0.08rem;
display: flex;
gap: 0.22rem;
align-items: baseline;
}
#{{ panel_id }} .compose-edit-old {
color: #6e6a66;
text-decoration: line-through;
}
#{{ panel_id }} .compose-edit-new {
color: #2f4f78;
font-weight: 600;
}
#{{ panel_id }} .compose-row.is-deleted .compose-bubble {
border-style: dashed;
opacity: 0.96;
}
#{{ panel_id }} .compose-availability-lane { #{{ panel_id }} .compose-availability-lane {
margin-top: 0.42rem; margin-top: 0.42rem;
display: flex; display: flex;
@@ -1932,6 +2131,12 @@
const exportClear = document.getElementById(panelId + "-export-clear"); const exportClear = document.getElementById(panelId + "-export-clear");
const exportBuffer = document.getElementById(panelId + "-export-buffer"); const exportBuffer = document.getElementById(panelId + "-export-buffer");
const availabilityLane = document.getElementById(panelId + "-availability"); const availabilityLane = document.getElementById(panelId + "-availability");
const availabilitySummaryNode = document.getElementById(panelId + "-availability-summary");
const deletedPane = document.getElementById(panelId + "-deleted");
const deletedList = document.getElementById(panelId + "-deleted-list");
const deletedEmpty = document.getElementById(panelId + "-deleted-empty");
const deletedCountNode = document.getElementById(panelId + "-deleted-count");
const historyTabs = Array.from(panel.querySelectorAll(".compose-history-tab"));
const csrfToken = "{{ csrf_token }}"; const csrfToken = "{{ csrf_token }}";
if (lightbox && lightbox.parentElement !== document.body) { if (lightbox && lightbox.parentElement !== document.body) {
document.body.appendChild(lightbox); document.body.appendChild(lightbox);
@@ -1976,6 +2181,7 @@
rangeStartId: "", rangeStartId: "",
rangeEndId: "", rangeEndId: "",
rangeMode: "inside", rangeMode: "inside",
historyView: "thread",
}; };
window.giaComposePanels[panelId] = panelState; window.giaComposePanels[panelId] = panelState;
const triggerButtons = Array.from(panel.querySelectorAll(".js-ai-trigger")); const triggerButtons = Array.from(panel.querySelectorAll(".js-ai-trigger"));
@@ -2014,6 +2220,13 @@
const parsed = parseInt(value || "0", 10); const parsed = parseInt(value || "0", 10);
return Number.isFinite(parsed) ? parsed : 0; return Number.isFinite(parsed) ? parsed : 0;
}; };
const parseJsonSafe = function (value, fallback) {
try {
return JSON.parse(String(value || ""));
} catch (_err) {
return fallback;
}
};
const minuteBucketFromTs = function (tsValue) { const minuteBucketFromTs = function (tsValue) {
const ts = toInt(tsValue); const ts = toInt(tsValue);
@@ -2848,9 +3061,9 @@
const QUICK_REACTION_EMOJIS = ["👍", "❤️", "😂", "😮", "😢", "😡"]; const QUICK_REACTION_EMOJIS = ["👍", "❤️", "😂", "😮", "😢", "😡"];
const supportsReactions = function () { const supportsReactions = function () {
const service = String(thread.dataset.service || "").trim().toLowerCase();
const reactUrl = String(thread.dataset.reactUrl || "").trim(); const reactUrl = String(thread.dataset.reactUrl || "").trim();
return !!reactUrl && (service === "signal" || service === "whatsapp"); const capability = String(thread.dataset.capabilityReactions || "").trim() === "1";
return !!reactUrl && capability;
}; };
const reactionActorKeyForService = function (service) { const reactionActorKeyForService = function (service) {
const prefix = String(thread.dataset.reactionActorPrefix || "web::"); const prefix = String(thread.dataset.reactionActorPrefix || "web::");
@@ -2977,6 +3190,46 @@
bar.appendChild(menu); bar.appendChild(menu);
return bar; return bar;
}; };
const renderEditHistoryDetails = function (bubble, msg) {
if (!bubble) {
return;
}
const rows = Array.isArray(msg && msg.edit_history) ? msg.edit_history : [];
if (!rows.length) {
return;
}
const details = document.createElement("details");
details.className = "compose-edit-history";
const summary = document.createElement("summary");
summary.textContent = "Edited " + rows.length + (rows.length === 1 ? " time" : " times");
details.appendChild(summary);
const list = document.createElement("ul");
rows.forEach(function (entry) {
const li = document.createElement("li");
const editedDisplay = String((entry && entry.edited_display) || "").trim() || "Unknown time";
const actor = String((entry && entry.actor) || "").trim();
const source = String((entry && entry.source_service) || "").trim();
li.textContent = editedDisplay + (actor ? (" · " + actor) : "") + (source ? (" · " + source.toUpperCase()) : "");
const diff = document.createElement("div");
diff.className = "compose-edit-diff";
const oldNode = document.createElement("span");
oldNode.className = "compose-edit-old";
oldNode.textContent = String((entry && entry.previous_text) || "(empty)");
const arrow = document.createElement("span");
arrow.className = "compose-edit-arrow";
arrow.textContent = "->";
const newNode = document.createElement("span");
newNode.className = "compose-edit-new";
newNode.textContent = String((entry && entry.new_text) || "(empty)");
diff.appendChild(oldNode);
diff.appendChild(arrow);
diff.appendChild(newNode);
li.appendChild(diff);
list.appendChild(li);
});
details.appendChild(list);
bubble.appendChild(details);
};
const appendBubble = function (msg) { const appendBubble = function (msg) {
const messageId = String(msg && msg.id ? msg.id : "").trim(); const messageId = String(msg && msg.id ? msg.id : "").trim();
@@ -2991,6 +3244,9 @@
const row = document.createElement("div"); const row = document.createElement("div");
const outgoing = !!msg.outgoing; const outgoing = !!msg.outgoing;
row.className = "compose-row " + (outgoing ? "is-out" : "is-in"); row.className = "compose-row " + (outgoing ? "is-out" : "is-in");
if (msg.is_deleted) {
row.classList.add("is-deleted");
}
row.dataset.ts = String(msg.ts || 0); row.dataset.ts = String(msg.ts || 0);
row.dataset.minute = minuteBucketFromTs(msg.ts || 0); row.dataset.minute = minuteBucketFromTs(msg.ts || 0);
row.dataset.replySnippet = normalizeSnippet( row.dataset.replySnippet = normalizeSnippet(
@@ -3003,6 +3259,20 @@
row.dataset.sourceLabel = String(msg.source_label || ""); row.dataset.sourceLabel = String(msg.source_label || "");
row.dataset.sourceMessageId = String(msg.source_message_id || ""); row.dataset.sourceMessageId = String(msg.source_message_id || "");
row.dataset.direction = outgoing ? "outgoing" : "incoming"; row.dataset.direction = outgoing ? "outgoing" : "incoming";
row.dataset.isDeleted = msg.is_deleted ? "1" : "0";
row.dataset.deletedTs = String(msg.deleted_ts || 0);
row.dataset.deletedDisplay = String(msg.deleted_display || "");
row.dataset.deletedActor = String(msg.deleted_actor || "");
row.dataset.deletedSource = String(msg.deleted_source_service || "");
row.dataset.editCount = String(msg.edit_count || 0);
try {
row.dataset.editHistory = JSON.stringify(
Array.isArray(msg.edit_history) ? msg.edit_history : []
);
} catch (_err) {
row.dataset.editHistory = "[]";
}
row.dataset.rawText = String(msg.text || "");
if (msg.reply_to_id) { if (msg.reply_to_id) {
row.dataset.replyToId = String(msg.reply_to_id || ""); row.dataset.replyToId = String(msg.reply_to_id || "");
} }
@@ -3078,6 +3348,7 @@
fallback.textContent = "(no text)"; fallback.textContent = "(no text)";
bubble.appendChild(fallback); bubble.appendChild(fallback);
} }
renderEditHistoryDetails(bubble, msg);
const reactionBar = buildReactionActions(messageId); const reactionBar = buildReactionActions(messageId);
if (reactionBar) { if (reactionBar) {
bubble.appendChild(reactionBar); bubble.appendChild(reactionBar);
@@ -3094,6 +3365,22 @@
metaText += " · " + String(msg.author); metaText += " · " + String(msg.author);
} }
meta.textContent = metaText; meta.textContent = metaText;
if (msg.is_edited) {
const editedFlag = document.createElement("span");
editedFlag.className = "compose-msg-flag is-edited";
editedFlag.title = "Message edited" + (msg.last_edit_display ? (" at " + String(msg.last_edit_display)) : "");
editedFlag.textContent = "edited";
meta.appendChild(editedFlag);
}
if (msg.is_deleted) {
const deletedFlag = document.createElement("span");
deletedFlag.className = "compose-msg-flag is-deleted";
deletedFlag.title = "Deleted"
+ (msg.deleted_display ? (" at " + String(msg.deleted_display)) : "")
+ (msg.deleted_actor ? (" by " + String(msg.deleted_actor)) : "");
deletedFlag.textContent = "deleted";
meta.appendChild(deletedFlag);
}
// Render delivery/read ticks and a small time label when available. // Render delivery/read ticks and a small time label when available.
if (msg.read_ts) { if (msg.read_ts) {
const tickWrap = document.createElement("span"); const tickWrap = document.createElement("span");
@@ -3164,6 +3451,7 @@
wireImageFallbacks(row); wireImageFallbacks(row);
bindReplyReferences(row); bindReplyReferences(row);
updateGlanceFromMessage(msg); updateGlanceFromMessage(msg);
renderDeletedList();
}; };
// Receipt popover (similar to contact info popover) // Receipt popover (similar to contact info popover)
@@ -3444,6 +3732,69 @@
availabilityLane.classList.remove("is-hidden"); availabilityLane.classList.remove("is-hidden");
}; };
// Render the compact availability summary tags (state / service / updated-at).
// Accepts a server-provided summary object; when it lacks a state, derives one
// from the newest availability slice. Hides the node when nothing is known.
const renderAvailabilitySummary = function (summary, slices) {
    if (!availabilitySummaryNode) {
        return;
    }
    // Shallow copy so the caller's summary object is never mutated.
    const info = (summary && typeof summary === "object") ? Object.assign({}, summary) : {};
    const sliceRows = Array.isArray(slices) ? slices : [];
    if (!info.state && sliceRows.length > 0) {
        // Pick the slice with the greatest end_ts as the freshest signal.
        let newest = null;
        sliceRows.forEach(function (item) {
            if (!newest || Number(item.end_ts || 0) > Number(newest.end_ts || 0)) {
                newest = item;
            }
        });
        if (newest) {
            info.state = String(newest.state || "unknown").toLowerCase();
            info.state_label = info.state.charAt(0).toUpperCase() + info.state.slice(1);
            info.service = String(newest.service || "").toLowerCase();
            info.confidence = Number(newest.confidence_end || 0);
            info.source_kind = String(
                ((newest.payload && newest.payload.inferred_from) || (newest.payload && newest.payload.extended_by) || "")
            ).trim();
            info.ts = Number(newest.end_ts || 0);
            info.ts_label = info.ts > 0 ? new Date(info.ts).toLocaleTimeString() : "";
        }
    }
    if (!info.state) {
        // Still nothing to show: clear and hide the summary strip.
        availabilitySummaryNode.classList.add("is-hidden");
        availabilitySummaryNode.innerHTML = "";
        return;
    }
    const stateKey = String(info.state || "unknown").toLowerCase();
    const stateTag = document.createElement("span");
    stateTag.className = "tag is-light";
    // Bulma color modifier per availability state.
    const stateModifiers = { available: "is-success", fading: "is-warning", unavailable: "is-danger" };
    if (stateModifiers[stateKey]) {
        stateTag.classList.add(stateModifiers[stateKey]);
    }
    stateTag.textContent = String(info.state_label || (stateKey.charAt(0).toUpperCase() + stateKey.slice(1)));
    const serviceTag = document.createElement("span");
    serviceTag.className = "tag is-light";
    serviceTag.textContent = String(info.service || "").toUpperCase() || "-";
    availabilitySummaryNode.innerHTML = "";
    availabilitySummaryNode.appendChild(stateTag);
    availabilitySummaryNode.appendChild(serviceTag);
    if (info.ts_label) {
        const updatedTag = document.createElement("span");
        updatedTag.className = "tag is-light";
        updatedTag.textContent = "Updated " + String(info.ts_label);
        availabilitySummaryNode.appendChild(updatedTag);
    }
    if (info.is_cross_service) {
        const crossTag = document.createElement("span");
        crossTag.className = "tag is-light";
        crossTag.textContent = "Cross-service fallback";
        availabilitySummaryNode.appendChild(crossTag);
    }
    availabilitySummaryNode.classList.remove("is-hidden");
};
const applyTyping = function (typingPayload) { const applyTyping = function (typingPayload) {
if (!typingNode || !typingPayload || typeof typingPayload !== "object") { if (!typingNode || !typingPayload || typeof typingPayload !== "object") {
return; return;
@@ -3481,6 +3832,9 @@
if (payload.availability_slices) { if (payload.availability_slices) {
renderAvailabilitySlices(payload.availability_slices); renderAvailabilitySlices(payload.availability_slices);
} }
if (payload.availability_summary || payload.availability_slices) {
renderAvailabilitySummary(payload.availability_summary, payload.availability_slices);
}
if (payload.last_ts !== undefined && payload.last_ts !== null) { if (payload.last_ts !== undefined && payload.last_ts !== null) {
lastTs = Math.max(lastTs, toInt(payload.last_ts)); lastTs = Math.max(lastTs, toInt(payload.last_ts));
thread.dataset.lastTs = String(lastTs); thread.dataset.lastTs = String(lastTs);
@@ -3521,6 +3875,9 @@
if (payload.availability_slices) { if (payload.availability_slices) {
renderAvailabilitySlices(payload.availability_slices); renderAvailabilitySlices(payload.availability_slices);
} }
if (payload.availability_summary || payload.availability_slices) {
renderAvailabilitySummary(payload.availability_summary, payload.availability_slices);
}
if (payload.last_ts !== undefined && payload.last_ts !== null) { if (payload.last_ts !== undefined && payload.last_ts !== null) {
lastTs = Math.max(lastTs, toInt(payload.last_ts)); lastTs = Math.max(lastTs, toInt(payload.last_ts));
thread.dataset.lastTs = String(lastTs); thread.dataset.lastTs = String(lastTs);
@@ -3544,6 +3901,7 @@
const armInput = form.querySelector("input[name='failsafe_arm']"); const armInput = form.querySelector("input[name='failsafe_arm']");
const confirmInput = form.querySelector("input[name='failsafe_confirm']"); const confirmInput = form.querySelector("input[name='failsafe_confirm']");
const sendButton = form.querySelector(".compose-send-btn"); const sendButton = form.querySelector(".compose-send-btn");
const sendCapabilityEnabled = {{ capability_send|yesno:"true,false" }};
const updateManualSafety = function () { const updateManualSafety = function () {
const confirm = !!(manualConfirm && manualConfirm.checked); const confirm = !!(manualConfirm && manualConfirm.checked);
if (armInput) { if (armInput) {
@@ -3553,7 +3911,7 @@
confirmInput.value = confirm ? "1" : "0"; confirmInput.value = confirm ? "1" : "0";
} }
if (sendButton) { if (sendButton) {
sendButton.disabled = !confirm; sendButton.disabled = (!confirm) || (!sendCapabilityEnabled);
} }
}; };
if (manualConfirm) { if (manualConfirm) {
@@ -3561,14 +3919,21 @@
} }
updateManualSafety(); updateManualSafety();
try { try {
const initialTyping = JSON.parse("{{ typing_state_json|escapejs }}"); const initialTyping = JSON.parse("{{ typing_state_json|escapejs }}");
applyTyping(initialTyping); applyTyping(initialTyping);
try { try {
const initialSlices = JSON.parse(String((availabilityLane && availabilityLane.dataset.slices) || "[]")); const initialSlices = JSON.parse(
renderAvailabilitySlices(initialSlices); String((availabilityLane && availabilityLane.dataset.slices) || "[]")
} catch (err) { );
renderAvailabilitySlices([]); const initialSummary = JSON.parse(
} String((availabilitySummaryNode && availabilitySummaryNode.dataset.summary) || "{}")
);
renderAvailabilitySlices(initialSlices);
renderAvailabilitySummary(initialSummary, initialSlices);
} catch (err) {
renderAvailabilitySlices([]);
renderAvailabilitySummary({}, []);
}
} catch (err) { } catch (err) {
// Ignore invalid initial typing state payload. // Ignore invalid initial typing state payload.
} }
@@ -3692,6 +4057,120 @@
const allMessageRows = function () { const allMessageRows = function () {
return Array.from(thread.querySelectorAll(".compose-row[data-message-id]")); return Array.from(thread.querySelectorAll(".compose-row[data-message-id]"));
}; };
// Parse the edit-history JSON stashed on a message row's dataset.
// Always returns an array; missing or malformed payloads yield [].
const readRowEditHistory = function (row) {
    const dataset = row ? row.dataset : null;
    if (!dataset) {
        return [];
    }
    const history = parseJsonSafe(dataset.editHistory || "[]", []);
    if (Array.isArray(history)) {
        return history;
    }
    return [];
};
// Rebuild the "Deleted messages" pane from the rows currently in the thread.
// Reads the per-row dataset fields written at render time (isDeleted,
// deletedTs, deletedDisplay, deletedActor, deletedSource, rawText,
// editHistory) and lists tombstones newest-first, each with its edit
// history and a jump-back button.
const renderDeletedList = function () {
    if (!deletedList || !deletedEmpty) {
        return;
    }
    // Newest deletions first; deletedTs is an epoch timestamp stored as a string.
    const deletedRows = allMessageRows()
        .filter(function (row) {
            return String(row.dataset.isDeleted || "0") === "1";
        })
        .sort(function (a, b) {
            return toInt(b.dataset.deletedTs || 0) - toInt(a.dataset.deletedTs || 0);
        });
    if (deletedCountNode) {
        deletedCountNode.textContent = String(deletedRows.length);
    }
    deletedList.innerHTML = "";
    if (!deletedRows.length) {
        deletedEmpty.classList.remove("is-hidden");
        return;
    }
    deletedEmpty.classList.add("is-hidden");
    deletedRows.forEach(function (row) {
        const messageId = String(row.dataset.messageId || "").trim();
        // Prefer the precomputed display label, fall back to the row timestamp.
        const deletedTs = String(row.dataset.deletedDisplay || "").trim() || String(row.dataset.displayTs || "").trim() || "-";
        const deletedActor = String(row.dataset.deletedActor || "").trim() || "unknown";
        const deletedSource = String(row.dataset.deletedSource || "").trim() || "unknown";
        const preview = normalizeSnippet(row.dataset.rawText || row.dataset.replySnippet || "(message deleted)");
        const edits = readRowEditHistory(row);
        const card = document.createElement("article");
        card.className = "compose-deleted-item";
        const meta = document.createElement("p");
        meta.className = "compose-deleted-meta";
        meta.textContent = "Deleted " + deletedTs + " by " + deletedActor + " via " + deletedSource.toUpperCase();
        card.appendChild(meta);
        const text = document.createElement("p");
        text.className = "compose-deleted-preview";
        text.textContent = preview;
        card.appendChild(text);
        // Collapsible old -> new diff list for every recorded edit.
        if (edits.length) {
            const details = document.createElement("details");
            details.className = "compose-edit-history";
            const summary = document.createElement("summary");
            summary.textContent = "Edit history (" + edits.length + ")";
            details.appendChild(summary);
            const list = document.createElement("ul");
            edits.forEach(function (entry) {
                const li = document.createElement("li");
                const editedDisplay = String((entry && entry.edited_display) || "").trim() || "Unknown time";
                const actor = String((entry && entry.actor) || "").trim();
                const source = String((entry && entry.source_service) || "").trim();
                const oldText = String((entry && entry.previous_text) || "(empty)");
                const newText = String((entry && entry.new_text) || "(empty)");
                li.textContent = editedDisplay + (actor ? (" · " + actor) : "") + (source ? (" · " + source.toUpperCase()) : "");
                const diff = document.createElement("div");
                diff.className = "compose-edit-diff";
                const oldNode = document.createElement("span");
                oldNode.className = "compose-edit-old";
                oldNode.textContent = oldText;
                const arrow = document.createElement("span");
                arrow.className = "compose-edit-arrow";
                arrow.textContent = "->";
                const newNode = document.createElement("span");
                newNode.className = "compose-edit-new";
                newNode.textContent = newText;
                diff.appendChild(oldNode);
                diff.appendChild(arrow);
                diff.appendChild(newNode);
                li.appendChild(diff);
                list.appendChild(li);
            });
            details.appendChild(list);
            card.appendChild(details);
        }
        // Jump button; clicks are handled by a delegated listener on deletedList.
        const jump = document.createElement("button");
        jump.type = "button";
        jump.className = "button is-light is-small compose-deleted-jump";
        jump.dataset.targetId = messageId;
        jump.textContent = "Jump to message";
        card.appendChild(jump);
        deletedList.appendChild(card);
    });
};
// Toggle between the live thread view and the deleted-messages pane.
// Anything other than "deleted" (case-insensitive) falls back to "thread".
const switchHistoryView = function (viewName) {
    const normalized = String(viewName || "thread").trim().toLowerCase();
    const target = normalized === "deleted" ? "deleted" : "thread";
    panelState.historyView = target;
    historyTabs.forEach(function (tab) {
        const isActive = String(tab.dataset.target || "") === target;
        tab.classList.toggle("is-active", isActive);
        tab.setAttribute("aria-selected", isActive ? "true" : "false");
    });
    const showDeleted = target === "deleted";
    if (thread) {
        thread.classList.toggle("is-hidden", showDeleted);
    }
    if (deletedPane) {
        deletedPane.classList.toggle("is-hidden", !showDeleted);
    }
};
const selectedRangeRows = function () { const selectedRangeRows = function () {
const rows = allMessageRows(); const rows = allMessageRows();
@@ -4172,7 +4651,33 @@
}); });
}); });
}; };
// Switch the history view when one of the tab headers is clicked.
historyTabs.forEach(function (tab) {
    tab.addEventListener("click", function () {
        switchHistoryView(String(tab.dataset.target || "thread"));
    });
});
// Delegated handler for "Jump to message" buttons inside the deleted pane:
// return to the thread view, scroll the original row into view, and flash it.
if (deletedList) {
    deletedList.addEventListener("click", function (ev) {
        const jumpBtn = ev.target.closest && ev.target.closest(".compose-deleted-jump");
        if (!jumpBtn) {
            return;
        }
        const targetId = String(jumpBtn.dataset.targetId || "").trim();
        if (!targetId) {
            return;
        }
        switchHistoryView("thread");
        const row = rowByMessageId(targetId);
        if (!row) {
            // Row not rendered (e.g. pruned from the DOM); nothing to scroll to.
            return;
        }
        row.scrollIntoView({ behavior: "smooth", block: "center" });
        flashReplyTarget(row);
    });
}
bindReplyReferences(panel); bindReplyReferences(panel);
renderDeletedList();
switchHistoryView(panelState.historyView);
initExportUi(); initExportUi();
if (replyClearBtn) { if (replyClearBtn) {
replyClearBtn.addEventListener("click", function () { replyClearBtn.addEventListener("click", function () {
@@ -4264,6 +4769,16 @@
panelState.websocketReady = false; panelState.websocketReady = false;
hideAllCards(); hideAllCards();
thread.innerHTML = '<p class="compose-empty">Loading messages...</p>'; thread.innerHTML = '<p class="compose-empty">Loading messages...</p>';
if (deletedList) {
deletedList.innerHTML = "";
}
if (deletedEmpty) {
deletedEmpty.classList.remove("is-hidden");
}
if (deletedCountNode) {
deletedCountNode.textContent = "0";
}
switchHistoryView("thread");
lastTs = 0; lastTs = 0;
thread.dataset.lastTs = "0"; thread.dataset.lastTs = "0";
panelState.seenMessageIds = new Set(); panelState.seenMessageIds = new Set();

View File

@@ -0,0 +1,18 @@
import re
from django import template
register = template.Library()
@register.filter
def humanize_route(value):
    """Turn a URL route like ``/api/user-profiles/`` into ``Api User Profiles``.

    Leading/trailing whitespace and slashes are dropped, every run of
    slashes, underscores, hyphens, or whitespace collapses to one space,
    and the result is title-cased. Falsy input yields an empty string.
    """
    raw = str(value or "").strip().strip("/")
    # One pass handles both separator replacement and whitespace collapsing.
    words = re.sub(r"[\s/_-]+", " ", raw).strip()
    return words.title() if words else ""

View File

@@ -0,0 +1,67 @@
from __future__ import annotations
import json
from unittest.mock import AsyncMock, patch
from django.test import TestCase
from django.urls import reverse
from core.models import User
class ComposeSendCapabilityTests(TestCase):
    """Compose-page behavior when the selected transport cannot send."""

    def setUp(self):
        # Authenticated user for every compose request in this class.
        self.user = User.objects.create_user("compose-send", "send@example.com", "pw")
        self.client.force_login(self.user)

    @patch("core.views.compose.transport.enqueue_runtime_command")
    @patch("core.views.compose.transport.send_message_raw", new_callable=AsyncMock)
    def test_unsupported_send_fails_fast_without_dispatch(
        self,
        mocked_send_message_raw,
        mocked_enqueue_runtime_command,
    ):
        """POSTing to an unsupported service warns and dispatches nothing."""
        response = self.client.post(
            reverse("compose_send"),
            {
                "service": "xmpp",
                "identifier": "person@example.com",
                "text": "hello",
                "failsafe_arm": "1",
                "failsafe_confirm": "1",
            },
        )
        self.assertEqual(200, response.status_code)
        # The result payload reaches the UI through the HX-Trigger header.
        payload = json.loads(response.headers["HX-Trigger"])["composeSendResult"]
        self.assertFalse(payload["ok"])
        self.assertEqual("warning", payload["level"])
        self.assertIn("Send not supported:", payload["message"])
        # Neither transport path may be invoked for an unsupported service.
        mocked_send_message_raw.assert_not_awaited()
        mocked_enqueue_runtime_command.assert_not_called()

    def test_compose_page_displays_send_disabled_reason_for_unsupported_service(self):
        """The page explains why sending is disabled for an unsupported service."""
        response = self.client.get(
            reverse("compose_page"),
            {
                "service": "xmpp",
                "identifier": "person@example.com",
            },
        )
        self.assertEqual(200, response.status_code)
        content = response.content.decode("utf-8")
        self.assertIn("Send disabled:", content)
        self.assertIn("compose-send-btn", content)

    def test_compose_page_uses_humanized_browser_title(self):
        """The browser title is the humanized route name plus the app suffix."""
        response = self.client.get(
            reverse("compose_page"),
            {
                "service": "signal",
                "identifier": "+15551230000",
            },
        )
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "<title>Compose Page · GIA</title>", html=False)

View File

@@ -0,0 +1,239 @@
from __future__ import annotations
from pathlib import Path
from django.test import TestCase, override_settings
from core.mcp.tools import execute_tool, tool_specs
from core.models import (
AIRequest,
MCPToolAuditLog,
MemoryItem,
TaskProject,
User,
WorkspaceConversation,
DerivedTask,
DerivedTaskEvent,
)
@override_settings(MEMORY_SEARCH_BACKEND="django")
class MCPToolTests(TestCase):
    """End-to-end coverage for the MCP tool registry: memory, tasks, wiki, docs.

    Uses the Django memory backend so no Manticore instance is required.
    """

    def setUp(self):
        # Superuser owning one conversation, one active memory, one project/task
        # with a "created" event — the fixtures every tool test below queries.
        self.user = User.objects.create_superuser(
            username="mcp-tools-user",
            email="mcp-tools@example.com",
            password="pw",
        )
        self.conversation = WorkspaceConversation.objects.create(
            user=self.user,
            platform_type="signal",
            title="MCP Memory Scope",
            platform_thread_id="mcp-thread-1",
        )
        request = AIRequest.objects.create(
            user=self.user,
            conversation=self.conversation,
            window_spec={},
            operation="memory_propose",
        )
        self.memory = MemoryItem.objects.create(
            user=self.user,
            conversation=self.conversation,
            memory_kind="fact",
            status="active",
            content={"text": "Prefers concise implementation notes."},
            source_request=request,
            confidence_score=0.8,
        )
        self.project = TaskProject.objects.create(user=self.user, name="MCP Project")
        self.task = DerivedTask.objects.create(
            user=self.user,
            project=self.project,
            title="Wire MCP server",
            source_service="signal",
            source_channel="team-chat",
            status_snapshot="open",
            immutable_payload={"scope": "memory"},
        )
        DerivedTaskEvent.objects.create(
            task=self.task,
            event_type="created",
            actor_identifier="agent",
            payload={"note": "task created"},
        )

    def test_tool_specs_include_memory_task_wiki_tools(self):
        """The advertised tool list includes every tool family."""
        names = {item["name"] for item in tool_specs()}
        self.assertIn("manticore.status", names)
        self.assertIn("memory.propose", names)
        self.assertIn("tasks.link_artifact", names)
        self.assertIn("wiki.create_article", names)
        self.assertIn("project.get_runbook", names)

    def test_manticore_query_and_tasks_tools(self):
        """Memory search plus task list/search/events round-trip via execute_tool."""
        memory_payload = execute_tool(
            "manticore.query",
            {"user_id": self.user.id, "query": "concise implementation"},
        )
        self.assertGreaterEqual(int(memory_payload.get("count") or 0), 1)
        first_hit = (memory_payload.get("hits") or [{}])[0]
        self.assertEqual(str(self.memory.id), str(first_hit.get("memory_id")))
        list_payload = execute_tool("tasks.list", {"user_id": self.user.id, "limit": 10})
        self.assertEqual(1, int(list_payload.get("count") or 0))
        self.assertEqual(str(self.task.id), str((list_payload.get("items") or [{}])[0].get("id")))
        search_payload = execute_tool(
            "tasks.search",
            {"user_id": self.user.id, "query": "wire"},
        )
        self.assertEqual(1, int(search_payload.get("count") or 0))
        events_payload = execute_tool("tasks.events", {"task_id": str(self.task.id), "limit": 5})
        self.assertEqual(1, int(events_payload.get("count") or 0))
        self.assertEqual("created", str((events_payload.get("items") or [{}])[0].get("event_type")))

    def test_memory_proposal_review_flow(self):
        """propose -> pending -> approve activates the memory and makes it listable."""
        propose_payload = execute_tool(
            "memory.propose",
            {
                "user_id": self.user.id,
                "conversation_id": str(self.conversation.id),
                "memory_kind": "fact",
                "content": {"field": "likes", "text": "short status bullets"},
                "reason": "Operator memory capture",
                "requested_by_identifier": "unit-test",
            },
        )
        request_id = str((propose_payload.get("request") or {}).get("id") or "")
        self.assertTrue(request_id)
        pending_payload = execute_tool("memory.pending", {"user_id": self.user.id})
        self.assertGreaterEqual(int(pending_payload.get("count") or 0), 1)
        review_payload = execute_tool(
            "memory.review",
            {
                "user_id": self.user.id,
                "request_id": request_id,
                "decision": "approve",
                "reviewer_identifier": "admin",
            },
        )
        memory_data = review_payload.get("memory") or {}
        self.assertEqual("active", str(memory_data.get("status") or ""))
        list_payload = execute_tool(
            "memory.list",
            {"user_id": self.user.id, "query": "status bullets"},
        )
        self.assertGreaterEqual(int(list_payload.get("count") or 0), 1)

    def test_wiki_and_task_artifact_tools(self):
        """Wiki create/update/list/get plus task notes and artifact links."""
        article_create = execute_tool(
            "wiki.create_article",
            {
                "user_id": self.user.id,
                "title": "MCP Integration Notes",
                "markdown": "Initial setup steps.",
                "related_task_id": str(self.task.id),
                "tags": ["mcp", "memory"],
                "status": "published",
            },
        )
        article = article_create.get("article") or {}
        self.assertEqual("mcp-integration-notes", str(article.get("slug") or ""))
        article_update = execute_tool(
            "wiki.update_article",
            {
                "user_id": self.user.id,
                "article_id": str(article.get("id") or ""),
                "markdown": "Updated setup steps.",
                "approve_overwrite": True,
                "summary": "Expanded usage instructions.",
            },
        )
        revision = article_update.get("revision") or {}
        # The update above creates revision 2 (the create was revision 1).
        self.assertEqual(2, int(revision.get("revision") or 0))
        list_payload = execute_tool(
            "wiki.list",
            {"user_id": self.user.id, "query": "integration"},
        )
        self.assertEqual(1, int(list_payload.get("count") or 0))
        get_payload = execute_tool(
            "wiki.get",
            {
                "user_id": self.user.id,
                "article_id": str(article.get("id") or ""),
                "include_revisions": True,
            },
        )
        self.assertGreaterEqual(len(get_payload.get("revisions") or []), 2)
        note_payload = execute_tool(
            "tasks.create_note",
            {
                "task_id": str(self.task.id),
                "user_id": self.user.id,
                "note": "Implemented wiki tooling.",
            },
        )
        self.assertEqual("progress", str((note_payload.get("event") or {}).get("event_type")))
        artifact_payload = execute_tool(
            "tasks.link_artifact",
            {
                "task_id": str(self.task.id),
                "user_id": self.user.id,
                "kind": "wiki",
                "path": "artifacts/wiki/mcp-integration-notes.md",
                "summary": "Reference docs",
            },
        )
        self.assertTrue(str((artifact_payload.get("artifact") or {}).get("id") or ""))
        task_payload = execute_tool(
            "tasks.get",
            {"task_id": str(self.task.id), "user_id": self.user.id},
        )
        self.assertGreaterEqual(len(task_payload.get("artifact_links") or []), 1)
        self.assertGreaterEqual(len(task_payload.get("knowledge_articles") or []), 1)

    def test_docs_append_run_note_writes_file(self):
        """docs.append_run_note writes the titled note to the requested path."""
        target = Path("/tmp/gia-mcp-test-notes.md")
        if target.exists():
            target.unlink()
        payload = execute_tool(
            "docs.append_run_note",
            {
                "title": "MCP Integration",
                "content": "Connected manticore memory tools.",
                "task_id": str(self.task.id),
                "path": str(target),
            },
        )
        self.assertTrue(payload.get("ok"))
        content = target.read_text(encoding="utf-8")
        self.assertIn("MCP Integration", content)
        self.assertIn("Connected manticore memory tools.", content)
        target.unlink()

    def test_audit_logs_record_success_and_failures(self):
        """Every tool call is audited, including calls that raise."""
        execute_tool("tasks.list", {"user_id": self.user.id})
        # tasks.search without a query raises; the failure must still be logged.
        with self.assertRaises(ValueError):
            execute_tool("tasks.search", {"user_id": self.user.id})
        success_row = MCPToolAuditLog.objects.filter(
            tool_name="tasks.list",
            ok=True,
        ).first()
        self.assertIsNotNone(success_row)
        failure_row = MCPToolAuditLog.objects.filter(
            tool_name="tasks.search",
            ok=False,
        ).first()
        self.assertIsNotNone(failure_row)

View File

@@ -0,0 +1,97 @@
from __future__ import annotations
from datetime import timedelta
from io import StringIO
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from core.models import MemoryChangeRequest, MemoryItem, MessageEvent, User, WorkspaceConversation
class MemoryPipelineCommandTests(TestCase):
    """Management-command coverage for the memory suggestion + hygiene pipeline."""

    def setUp(self):
        # One user with one conversation to scope all memory rows below.
        self.user = User.objects.create_user(
            username="memory-pipeline-user",
            email="memory-pipeline@example.com",
            password="pw",
        )
        self.conversation = WorkspaceConversation.objects.create(
            user=self.user,
            platform_type="signal",
            title="Pipeline Scope",
            platform_thread_id="pipeline-thread-1",
        )

    def test_memory_suggest_from_messages_creates_pending_request(self):
        """An inbound message yields a proposed memory plus a pending change request."""
        MessageEvent.objects.create(
            user=self.user,
            conversation=self.conversation,
            ts=1700000000000,
            direction="in",
            text="I prefer short status updates and bullet points.",
            source_system="signal",
        )
        out = StringIO()
        call_command(
            "memory_suggest_from_messages",
            user_id=str(self.user.id),
            limit_messages=50,
            max_items=10,
            stdout=out,
        )
        rendered = out.getvalue()
        self.assertIn("memory-suggest-from-messages", rendered)
        self.assertGreaterEqual(MemoryItem.objects.filter(user=self.user).count(), 1)
        self.assertGreaterEqual(
            MemoryChangeRequest.objects.filter(user=self.user, status="pending").count(),
            1,
        )

    def test_memory_hygiene_expires_and_detects_contradictions(self):
        """Hygiene deprecates expired items and queues contradiction reviews."""
        expired = MemoryItem.objects.create(
            user=self.user,
            conversation=self.conversation,
            memory_kind="fact",
            status="active",
            content={"field": "likes", "text": "calls in the evening"},
            confidence_score=0.6,
            expires_at=timezone.now() - timedelta(days=1),
        )
        # Two conflicting facts for the same field ("timezone") should be
        # flagged as a contradiction by the hygiene pass.
        MemoryItem.objects.create(
            user=self.user,
            conversation=self.conversation,
            memory_kind="fact",
            status="active",
            content={"field": "timezone", "text": "UTC+1"},
            confidence_score=0.7,
        )
        MemoryItem.objects.create(
            user=self.user,
            conversation=self.conversation,
            memory_kind="fact",
            status="active",
            content={"field": "timezone", "text": "UTC-5"},
            confidence_score=0.7,
        )
        out = StringIO()
        call_command(
            "memory_hygiene",
            user_id=str(self.user.id),
            stdout=out,
        )
        rendered = out.getvalue()
        self.assertIn("memory-hygiene", rendered)
        expired.refresh_from_db()
        self.assertEqual("deprecated", expired.status)
        contradiction_requests = MemoryChangeRequest.objects.filter(
            user=self.user,
            status="pending",
            action="update",
            reason__icontains="Contradiction",
        ).count()
        self.assertGreaterEqual(contradiction_requests, 1)

View File

@@ -5,7 +5,7 @@ from django.test import TestCase
from core.models import Person, PersonIdentifier, User from core.models import Person, PersonIdentifier, User
from core.presence import AvailabilitySignal, latest_state_for_people, record_native_signal from core.presence import AvailabilitySignal, latest_state_for_people, record_native_signal
from core.presence.inference import now_ms from core.presence.inference import now_ms
from core.views.compose import _context_base from core.views.compose import _compose_availability_payload, _context_base
class PresenceQueryAndComposeContextTests(TestCase): class PresenceQueryAndComposeContextTests(TestCase):
@@ -48,3 +48,30 @@ class PresenceQueryAndComposeContextTests(TestCase):
) )
self.assertIsNotNone(base["person_identifier"]) self.assertIsNotNone(base["person_identifier"])
self.assertEqual(str(self.person.id), str(base["person"].id)) self.assertEqual(str(self.person.id), str(base["person"].id))
def test_compose_availability_payload_falls_back_to_cross_service(self):
    """A signal from another service still feeds the compose availability lane.

    Records a WhatsApp availability signal, then asks the payload builder for
    Signal: the cross-service fallback must surface the WhatsApp slice and
    mark the summary accordingly.
    """
    ts_value = now_ms()
    record_native_signal(
        AvailabilitySignal(
            user=self.user,
            person=self.person,
            person_identifier=self.identifier,
            service="whatsapp",
            source_kind="message_in",
            availability_state="available",
            confidence=0.9,
            ts=ts_value,
        )
    )
    # Query a one-minute window either side of the recorded signal.
    enabled, slices, summary = _compose_availability_payload(
        user=self.user,
        person=self.person,
        service="signal",
        range_start=ts_value - 60000,
        range_end=ts_value + 60000,
    )
    self.assertTrue(enabled)
    self.assertGreaterEqual(len(slices), 1)
    self.assertEqual("whatsapp", str(slices[0].get("service")))
    self.assertEqual("available", str(summary.get("state")))
    self.assertTrue(bool(summary.get("is_cross_service")))

View File

@@ -124,3 +124,66 @@ class ReactionNormalizationTests(TestCase):
self.assertEqual("❤️", serialized["text"]) self.assertEqual("❤️", serialized["text"])
self.assertEqual([], list(serialized.get("reactions") or [])) self.assertEqual([], list(serialized.get("reactions") or []))
self.assertEqual(str(anchor.id), serialized["reply_to_id"]) self.assertEqual(str(anchor.id), serialized["reply_to_id"])
def test_apply_message_edit_tracks_history_and_updates_text(self):
    """Editing a message rewrites its text and appends an edit_history entry."""
    message = Message.objects.create(
        user=self.user,
        session=self.session,
        ts=1700000004000,
        sender_uuid="author-3",
        text="before",
        source_service="signal",
        source_message_id="1700000004000",
    )
    updated = async_to_sync(history.apply_message_edit)(
        self.user,
        self.identifier,
        target_ts=1700000004000,
        new_text="after",
        source_service="signal",
        actor="+15550000000",
        payload={"origin": "test"},
    )
    self.assertIsNotNone(updated)
    message.refresh_from_db()
    self.assertEqual("after", str(message.text or ""))
    # The edit trail is stored inside receipt_payload alongside receipt data.
    edit_history = list((message.receipt_payload or {}).get("edit_history") or [])
    self.assertEqual(1, len(edit_history))
    self.assertEqual("before", str(edit_history[0].get("previous_text") or ""))
    self.assertEqual("after", str(edit_history[0].get("new_text") or ""))
def test_serialize_message_marks_deleted_and_preserves_edit_history(self):
    """A tombstoned message serializes with placeholder text and its edit trail."""
    message = Message.objects.create(
        user=self.user,
        session=self.session,
        ts=1700000005000,
        sender_uuid="author-4",
        text="keep me",
        source_service="signal",
        source_message_id="1700000005000",
        # Pre-seeded payload: one prior edit plus a delete tombstone.
        receipt_payload={
            "edit_history": [
                {
                    "edited_ts": 1700000005100,
                    "source_service": "signal",
                    "actor": "+15550000000",
                    "previous_text": "old",
                    "new_text": "keep me",
                }
            ],
            "is_deleted": True,
            "deleted": {
                "deleted_ts": 1700000005200,
                "source_service": "signal",
                "actor": "+15550000000",
            },
        },
    )
    serialized = _serialize_message(message)
    self.assertTrue(bool(serialized.get("is_deleted")))
    self.assertEqual("(message deleted)", str(serialized.get("display_text") or ""))
    self.assertEqual(1, int(serialized.get("edit_count") or 0))
    self.assertEqual(1, len(list(serialized.get("edit_history") or [])))

View File

@@ -267,6 +267,76 @@ class SignalInboundReplyLinkTests(TransactionTestCase):
"Expected sync reaction to be applied via destination-number fallback resolution.", "Expected sync reaction to be applied via destination-number fallback resolution.",
) )
def test_process_raw_inbound_event_applies_edit(self):
    """An inbound Signal editMessage envelope updates the anchor text and history."""
    fake_ur = Mock()
    fake_ur.message_received = AsyncMock(return_value=None)
    fake_ur.xmpp = Mock()
    fake_ur.xmpp.client = Mock()
    fake_ur.xmpp.client.apply_external_reaction = AsyncMock(return_value=None)
    # Build the client via __new__ so no real transport/socket is created.
    client = SignalClient.__new__(SignalClient)
    client.service = "signal"
    client.ur = fake_ur
    client.log = Mock()
    client.client = Mock()
    client.client.bot_uuid = ""
    client.client.phone_number = ""
    client._resolve_signal_identifiers = AsyncMock(return_value=[self.identifier])
    client._auto_link_single_user_signal_identifier = AsyncMock(return_value=[])
    # targetSentTimestamp matches self.anchor's original send time.
    payload = {
        "envelope": {
            "sourceNumber": "+15550002000",
            "sourceUuid": "756078fd-d447-426d-a620-581a86d64f51",
            "timestamp": 1772545466000,
            "dataMessage": {
                "editMessage": {
                    "targetSentTimestamp": 1772545458187,
                    "dataMessage": {"message": "anchor edited"},
                }
            },
        }
    }
    async_to_sync(client._process_raw_inbound_event)(json.dumps(payload))
    self.anchor.refresh_from_db()
    self.assertEqual("anchor edited", str(self.anchor.text or ""))
    edits = list((self.anchor.receipt_payload or {}).get("edit_history") or [])
    self.assertEqual(1, len(edits))
def test_process_raw_inbound_event_applies_delete_tombstone_flag(self):
    """An inbound Signal delete envelope tombstones the anchor message."""
    fake_ur = Mock()
    fake_ur.message_received = AsyncMock(return_value=None)
    fake_ur.xmpp = Mock()
    fake_ur.xmpp.client = Mock()
    fake_ur.xmpp.client.apply_external_reaction = AsyncMock(return_value=None)
    # Build the client via __new__ so no real transport/socket is created.
    client = SignalClient.__new__(SignalClient)
    client.service = "signal"
    client.ur = fake_ur
    client.log = Mock()
    client.client = Mock()
    client.client.bot_uuid = ""
    client.client.phone_number = ""
    client._resolve_signal_identifiers = AsyncMock(return_value=[self.identifier])
    client._auto_link_single_user_signal_identifier = AsyncMock(return_value=[])
    # targetSentTimestamp matches self.anchor's original send time.
    payload = {
        "envelope": {
            "sourceNumber": "+15550002000",
            "sourceUuid": "756078fd-d447-426d-a620-581a86d64f51",
            "timestamp": 1772545467000,
            "dataMessage": {
                "delete": {
                    "targetSentTimestamp": 1772545458187,
                }
            },
        }
    }
    async_to_sync(client._process_raw_inbound_event)(json.dumps(payload))
    self.anchor.refresh_from_db()
    self.assertTrue(bool((self.anchor.receipt_payload or {}).get("is_deleted")))
    self.assertTrue(bool((self.anchor.receipt_payload or {}).get("deleted") or {}))
class SignalRuntimeCommandWritebackTests(TestCase): class SignalRuntimeCommandWritebackTests(TestCase):
def setUp(self): def setUp(self):

View File

@@ -1,5 +1,7 @@
from asgiref.sync import async_to_sync
from django.test import SimpleTestCase from django.test import SimpleTestCase
from core.clients import transport
from core.transports.capabilities import capability_snapshot, supports, unsupported_reason from core.transports.capabilities import capability_snapshot, supports, unsupported_reason
@@ -15,3 +17,11 @@ class TransportCapabilitiesTests(SimpleTestCase):
snapshot = capability_snapshot() snapshot = capability_snapshot()
self.assertIn("schema_version", snapshot) self.assertIn("schema_version", snapshot)
self.assertIn("services", snapshot) self.assertIn("services", snapshot)
def test_transport_send_fails_fast_when_unsupported(self):
    """send_message_raw returns False for a service with no send capability."""
    result = async_to_sync(transport.send_message_raw)(
        "xmpp",
        "person@example.com",
        text="hello",
    )
    self.assertFalse(result)

View File

@@ -52,7 +52,7 @@ from core.models import (
WorkspaceConversation, WorkspaceConversation,
) )
from core.presence import get_settings as get_availability_settings from core.presence import get_settings as get_availability_settings
from core.presence import spans_for_range from core.presence import latest_state_for_people, spans_for_range
from core.realtime.typing_state import get_person_typing_state from core.realtime.typing_state import get_person_typing_state
from core.transports.capabilities import supports, unsupported_reason from core.transports.capabilities import supports, unsupported_reason
from core.translation.engine import process_inbound_translation from core.translation.engine import process_inbound_translation
@@ -190,6 +190,92 @@ def _serialize_availability_spans(spans):
return rows return rows
def _availability_summary_for_person(*, user, person: Person, service: str) -> dict:
    """Build a compact availability summary for one person.

    Looks up the latest availability state for ``service``; when that yields
    nothing and a service was requested, retries across all services and
    flags the result as a cross-service fallback. Returns {} when no state
    is recorded at all.
    """
    person_key = str(person.id)
    service_key = str(service or "").strip().lower()
    row = latest_state_for_people(
        user=user,
        person_ids=[person_key],
        service=service_key,
    ).get(person_key)
    cross_service = False
    if row is None and service_key:
        # Cross-service fallback: any service counts when the selected one is silent.
        row = latest_state_for_people(
            user=user,
            person_ids=[person_key],
            service="",
        ).get(person_key)
        cross_service = row is not None
    if row is None:
        return {}
    ts_value = int(row.get("ts") or 0)
    state_value = str(row.get("state") or "unknown").strip().lower() or "unknown"
    return {
        "state": state_value,
        "state_label": state_value.title(),
        "service": str(row.get("service") or service_key or "").strip().lower(),
        "confidence": float(row.get("confidence") or 0.0),
        "source_kind": str(row.get("source_kind") or "").strip(),
        "ts": ts_value,
        "ts_label": _format_ts_label(ts_value) if ts_value > 0 else "",
        "is_cross_service": bool(cross_service),
    }
def _compose_availability_payload(
    *,
    user,
    person: Person | None,
    service: str,
    range_start: int,
    range_end: int,
) -> tuple[bool, list[dict], dict]:
    """Collect availability slices plus a summary for the compose page.

    Returns ``(enabled, slices, summary)``. Disabled — ``(False, [], {})`` —
    when no person is resolved or the user's availability settings hide the
    in-chat lane. Falls back to cross-service spans when the selected service
    has none, marking the summary accordingly.
    """
    settings_row = get_availability_settings(user)
    lane_enabled = (
        person is not None
        and settings_row.enabled
        and settings_row.show_in_chat
    )
    if not lane_enabled:
        return False, [], {}
    service_key = str(service or "").strip().lower()

    def _rows_for(service_filter):
        # Serialize spans for the requested window under one service filter.
        return _serialize_availability_spans(
            spans_for_range(
                user=user,
                person=person,
                start_ts=int(range_start or 0),
                end_ts=int(range_end or 0),
                service=service_filter,
                limit=200,
            )
        )

    rows = _rows_for(service_key)
    used_cross_service = False
    if not rows and service_key:
        rows = _rows_for("")
        used_cross_service = bool(rows)
    summary = _availability_summary_for_person(
        user=user,
        person=person,
        service=service_key,
    )
    if used_cross_service and summary:
        summary["is_cross_service"] = True
    return True, rows, summary
def _is_outgoing(msg: Message) -> bool: def _is_outgoing(msg: Message) -> bool:
is_outgoing = str(msg.custom_author or "").upper() in {"USER", "BOT"} is_outgoing = str(msg.custom_author or "").upper() in {"USER", "BOT"}
if not is_outgoing: if not is_outgoing:
@@ -507,6 +593,66 @@ def _serialize_message(msg: Message) -> dict:
) )
# Receipt payload and metadata # Receipt payload and metadata
receipt_payload = msg.receipt_payload or {} receipt_payload = msg.receipt_payload or {}
deleted_payload = dict((receipt_payload or {}).get("deleted") or {})
is_deleted = bool(
(receipt_payload or {}).get("is_deleted")
or deleted_payload
or (receipt_payload or {}).get("delete_events")
)
deleted_ts = 0
for candidate in (
deleted_payload.get("deleted_ts"),
deleted_payload.get("updated_at"),
deleted_payload.get("ts"),
):
try:
deleted_ts = int(candidate or 0)
except Exception:
deleted_ts = 0
if deleted_ts > 0:
break
deleted_display = _format_ts_label(deleted_ts) if deleted_ts > 0 else ""
deleted_actor = str(deleted_payload.get("actor") or "").strip()
deleted_source_service = str(deleted_payload.get("source_service") or "").strip()
edit_history_rows = []
for row in list((receipt_payload or {}).get("edit_history") or []):
item = dict(row or {})
edited_ts = 0
for candidate in (
item.get("edited_ts"),
item.get("updated_at"),
item.get("ts"),
):
try:
edited_ts = int(candidate or 0)
except Exception:
edited_ts = 0
if edited_ts > 0:
break
previous_text = str(item.get("previous_text") or "")
new_text = str(item.get("new_text") or "")
edit_history_rows.append(
{
"edited_ts": edited_ts,
"edited_display": _format_ts_label(edited_ts) if edited_ts > 0 else "",
"source_service": str(item.get("source_service") or "").strip().lower(),
"actor": str(item.get("actor") or "").strip(),
"previous_text": previous_text,
"new_text": new_text,
}
)
edit_history_rows.sort(key=lambda row: int(row.get("edited_ts") or 0))
edit_count = len(edit_history_rows)
last_edit_ts = int(edit_history_rows[-1].get("edited_ts") or 0) if edit_count else 0
last_edit_display = _format_ts_label(last_edit_ts) if last_edit_ts > 0 else ""
if is_deleted:
display_text = "(message deleted)"
image_urls = []
image_url = ""
hide_text = False
read_source_service = str(msg.read_source_service or "").strip() read_source_service = str(msg.read_source_service or "").strip()
read_by_identifier = str(msg.read_by_identifier or "").strip() read_by_identifier = str(msg.read_by_identifier or "").strip()
reaction_rows = [] reaction_rows = []
@@ -570,6 +716,17 @@ def _serialize_message(msg: Message) -> dict:
"receipt_payload": receipt_payload, "receipt_payload": receipt_payload,
"read_source_service": read_source_service, "read_source_service": read_source_service,
"read_by_identifier": read_by_identifier, "read_by_identifier": read_by_identifier,
"is_deleted": is_deleted,
"deleted_ts": deleted_ts,
"deleted_display": deleted_display,
"deleted_actor": deleted_actor,
"deleted_source_service": deleted_source_service,
"edit_history": edit_history_rows,
"edit_history_json": json.dumps(edit_history_rows),
"edit_count": edit_count,
"is_edited": bool(edit_count),
"last_edit_ts": last_edit_ts,
"last_edit_display": last_edit_display,
"reactions": reaction_rows, "reactions": reaction_rows,
"source_message_id": str(getattr(msg, "source_message_id", "") or ""), "source_message_id": str(getattr(msg, "source_message_id", "") or ""),
"reply_to_id": str(getattr(msg, "reply_to_id", "") or ""), "reply_to_id": str(getattr(msg, "reply_to_id", "") or ""),
@@ -2694,35 +2851,27 @@ def _panel_context(
counterpart_identifiers=counterpart_identifiers, counterpart_identifiers=counterpart_identifiers,
conversation=conversation, conversation=conversation,
) )
availability_slices = [] range_start = (
availability_enabled = False int(session_bundle["messages"][0].ts or 0) if session_bundle["messages"] else 0
availability_settings = get_availability_settings(request.user) )
if ( range_end = (
base["person"] is not None int(session_bundle["messages"][-1].ts or 0) if session_bundle["messages"] else 0
and availability_settings.enabled )
and availability_settings.show_in_chat if range_start <= 0 or range_end <= 0:
): now_ts = int(time.time() * 1000)
range_start = ( range_start = now_ts - (24 * 60 * 60 * 1000)
int(session_bundle["messages"][0].ts or 0) if session_bundle["messages"] else 0 range_end = now_ts
) (
range_end = ( availability_enabled,
int(session_bundle["messages"][-1].ts or 0) if session_bundle["messages"] else 0 availability_slices,
) availability_summary,
if range_start <= 0 or range_end <= 0: ) = _compose_availability_payload(
now_ts = int(time.time() * 1000) user=request.user,
range_start = now_ts - (24 * 60 * 60 * 1000) person=base["person"],
range_end = now_ts service=base["service"],
availability_enabled = True range_start=range_start,
availability_slices = _serialize_availability_spans( range_end=range_end,
spans_for_range( )
user=request.user,
person=base["person"],
start_ts=range_start,
end_ts=range_end,
service=base["service"],
limit=200,
)
)
glance_items = _build_glance_items( glance_items = _build_glance_items(
serialized_messages, serialized_messages,
person_id=(base["person"].id if base["person"] else None), person_id=(base["person"].id if base["person"] else None),
@@ -2923,9 +3072,15 @@ def _panel_context(
"manual_icon_class": "fa-solid fa-paper-plane", "manual_icon_class": "fa-solid fa-paper-plane",
"panel_id": f"compose-panel-{unique}", "panel_id": f"compose-panel-{unique}",
"typing_state_json": json.dumps(typing_state), "typing_state_json": json.dumps(typing_state),
"capability_send": supports(base["service"], "send"),
"capability_send_reason": unsupported_reason(base["service"], "send"),
"capability_reactions": supports(base["service"], "reactions"),
"capability_reactions_reason": unsupported_reason(base["service"], "reactions"),
"availability_enabled": availability_enabled, "availability_enabled": availability_enabled,
"availability_slices": availability_slices, "availability_slices": availability_slices,
"availability_slices_json": json.dumps(availability_slices), "availability_slices_json": json.dumps(availability_slices),
"availability_summary": availability_summary,
"availability_summary_json": json.dumps(availability_summary),
"command_options": command_options, "command_options": command_options,
"bp_binding_summary": bp_binding_summary, "bp_binding_summary": bp_binding_summary,
"platform_options": platform_options, "platform_options": platform_options,
@@ -3383,31 +3538,23 @@ class ComposeThread(LoginRequiredMixin, View):
counterpart_identifiers = _counterpart_identifiers_for_person( counterpart_identifiers = _counterpart_identifiers_for_person(
request.user, base["person"] request.user, base["person"]
) )
availability_slices = [] range_start = int(messages[0].ts or 0) if messages else max(0, int(after_ts or 0))
availability_settings = get_availability_settings(request.user) range_end = int(latest_ts or 0)
if ( if range_start <= 0 or range_end <= 0:
base["person"] is not None now_ts = int(time.time() * 1000)
and availability_settings.enabled range_start = now_ts - (24 * 60 * 60 * 1000)
and availability_settings.show_in_chat range_end = now_ts
): (
range_start = ( _availability_enabled,
int(messages[0].ts or 0) if messages else max(0, int(after_ts or 0)) availability_slices,
) availability_summary,
range_end = int(latest_ts or 0) ) = _compose_availability_payload(
if range_start <= 0 or range_end <= 0: user=request.user,
now_ts = int(time.time() * 1000) person=base["person"],
range_start = now_ts - (24 * 60 * 60 * 1000) service=base["service"],
range_end = now_ts range_start=range_start,
availability_slices = _serialize_availability_spans( range_end=range_end,
spans_for_range( )
user=request.user,
person=base["person"],
start_ts=range_start,
end_ts=range_end,
service=base["service"],
limit=200,
)
)
payload = { payload = {
"messages": _serialize_messages_with_artifacts( "messages": _serialize_messages_with_artifacts(
messages, messages,
@@ -3417,6 +3564,7 @@ class ComposeThread(LoginRequiredMixin, View):
), ),
"last_ts": latest_ts, "last_ts": latest_ts,
"availability_slices": availability_slices, "availability_slices": availability_slices,
"availability_summary": availability_summary,
"typing": get_person_typing_state( "typing": get_person_typing_state(
user_id=request.user.id, user_id=request.user.id,
person_id=base["person"].id if base["person"] else None, person_id=base["person"].id if base["person"] else None,
@@ -4459,6 +4607,18 @@ class ComposeSend(LoginRequiredMixin, View):
log_prefix = ( log_prefix = (
f"[ComposeSend] service={base['service']} identifier={base['identifier']}" f"[ComposeSend] service={base['service']} identifier={base['identifier']}"
) )
if bool(getattr(settings, "CAPABILITY_ENFORCEMENT_ENABLED", True)) and not supports(
str(base["service"] or "").strip().lower(),
"send",
):
reason = unsupported_reason(str(base["service"] or "").strip().lower(), "send")
return self._response(
request,
ok=False,
message=f"Send not supported: {reason}",
level="warning",
panel_id=panel_id,
)
logger.debug(f"{log_prefix} text_len={len(text)} attempting send") logger.debug(f"{log_prefix} text_len={len(text)} attempting send")
# If runtime is out-of-process, enqueue command and return immediately (non-blocking). # If runtime is out-of-process, enqueue command and return immediately (non-blocking).

88
core/views/prosody.py Normal file
View File

@@ -0,0 +1,88 @@
from __future__ import annotations
import base64
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.http import HttpRequest, HttpResponse
from django.views import View
class ProsodyAuthBridge(View):
    """
    Minimal external-auth bridge for Prosody.

    Implements the mod_auth_external line protocol
    (``command:username:domain[:password]``) over HTTP and returns plain text
    "1" (allow) or "0" (deny) per that protocol.  Requests are accepted only
    from loopback and must present the shared secret from
    ``settings.XMPP_SECRET``.
    """

    http_method_names = ["get", "post"]

    def _denied(self) -> HttpResponse:
        # Single deny response used by every rejection path.
        return HttpResponse("0\n", content_type="text/plain")

    def _b64url_decode(self, value: str) -> str:
        """Decode a base64url string (padding optional); return "" on failure."""
        raw = str(value or "").strip()
        if not raw:
            return ""
        # Re-pad to a multiple of 4 and map the URL-safe alphabet back.
        padded = raw + "=" * (-len(raw) % 4)
        padded = padded.replace("-", "+").replace("_", "/")
        try:
            return base64.b64decode(padded.encode("ascii")).decode(
                "utf-8", errors="ignore"
            )
        except Exception:
            return ""

    def _extract_line(self, request: HttpRequest) -> str:
        """Return the protocol line: base64 query param, raw body, or form field."""
        line_b64 = str(request.GET.get("line_b64") or "").strip()
        if line_b64:
            return self._b64url_decode(line_b64)
        body = (request.body or b"").decode("utf-8", errors="ignore").strip()
        if body:
            return body
        return str(request.POST.get("line") or "").strip()

    def post(self, request: HttpRequest) -> HttpResponse:
        import hmac  # stdlib; local import keeps the fix self-contained

        # Only loopback callers (the co-located Prosody process) are allowed.
        remote_addr = str(request.META.get("REMOTE_ADDR") or "").strip()
        if remote_addr not in {"127.0.0.1", "::1"}:
            return self._denied()
        expected_secret = str(getattr(settings, "XMPP_SECRET", "") or "").strip()
        supplied_secret = str(request.headers.get("X-Prosody-Secret") or "").strip()
        if not supplied_secret:
            supplied_secret = str(request.GET.get("secret") or "").strip()
        secret_b64 = str(request.GET.get("secret_b64") or "").strip()
        if not supplied_secret and secret_b64:
            supplied_secret = self._b64url_decode(secret_b64)
        # compare_digest is constant-time, so the comparison does not leak the
        # secret through response-timing differences (original used `!=`).
        if not expected_secret or not hmac.compare_digest(
            supplied_secret, expected_secret
        ):
            return self._denied()
        line = self._extract_line(request)
        if not line:
            return self._denied()
        parts = line.split(":")
        if len(parts) < 3:
            return self._denied()
        command, username, _domain = parts[:3]
        # Passwords may themselves contain ":", so rejoin the remainder.
        password = ":".join(parts[3:]) if len(parts) > 3 else None
        if command == "auth":
            if not password:
                return self._denied()
            user = authenticate(username=username, password=password)
            ok = bool(user is not None and getattr(user, "is_active", False))
            return HttpResponse("1\n" if ok else "0\n", content_type="text/plain")
        if command == "isuser":
            User = get_user_model()
            exists = bool(User.objects.filter(username=username).exists())
            return HttpResponse("1\n" if exists else "0\n", content_type="text/plain")
        if command == "setpass":
            # Password changes are managed in Django, never via Prosody.
            return self._denied()
        return self._denied()

    def get(self, request: HttpRequest) -> HttpResponse:
        # Prosody's HTTP backend may use GET; handle identically to POST.
        return self.post(request)

View File

@@ -14,6 +14,7 @@ from django.views import View
from core.forms import AIWorkspaceWindowForm from core.forms import AIWorkspaceWindowForm
from core.lib.notify import raw_sendmsg from core.lib.notify import raw_sendmsg
from core.memory.retrieval import retrieve_memories_for_prompt
from core.messaging import ai as ai_runner from core.messaging import ai as ai_runner
from core.messaging.utils import messages_to_string from core.messaging.utils import messages_to_string
from core.models import ( from core.models import (
@@ -3936,8 +3937,27 @@ class AIWorkspaceRunOperation(LoginRequiredMixin, View):
) )
return rows return rows
def _build_prompt(self, operation, owner_name, person, transcript, user_notes): def _build_prompt(
self,
operation,
owner_name,
person,
transcript,
user_notes,
memory_context,
):
notes = (user_notes or "").strip() notes = (user_notes or "").strip()
memory_lines = []
for index, item in enumerate(memory_context or [], start=1):
content = item.get("content") or {}
text = str(content.get("text") or "").strip()
if not text:
text = str(content).strip()
if not text:
continue
kind = str(item.get("memory_kind") or "fact")
memory_lines.append(f"{index}. [{kind}] {text}")
memory_text = "\n".join(memory_lines) if memory_lines else "None"
if operation == "draft_reply": if operation == "draft_reply":
instruction = ( instruction = (
"Generate 3 concise reply options in different tones: soft, neutral, firm. " "Generate 3 concise reply options in different tones: soft, neutral, firm. "
@@ -3965,6 +3985,8 @@ class AIWorkspaceRunOperation(LoginRequiredMixin, View):
f"Owner: {owner_name}\n" f"Owner: {owner_name}\n"
f"Person: {person.name}\n" f"Person: {person.name}\n"
f"Notes: {notes or 'None'}\n\n" f"Notes: {notes or 'None'}\n\n"
"Approved Memory Context:\n"
f"{memory_text}\n\n"
f"Conversation:\n{transcript}" f"Conversation:\n{transcript}"
), ),
}, },
@@ -4111,12 +4133,20 @@ class AIWorkspaceRunOperation(LoginRequiredMixin, View):
) )
try: try:
memory_context = retrieve_memories_for_prompt(
user_id=request.user.id,
person_id=str(person.id),
conversation_id=str(conversation.id),
statuses=("active",),
limit=12,
)
prompt = self._build_prompt( prompt = self._build_prompt(
operation=operation, operation=operation,
owner_name=owner_name, owner_name=owner_name,
person=person, person=person,
transcript=transcript, transcript=transcript,
user_notes=user_notes, user_notes=user_notes,
memory_context=memory_context,
) )
result_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj) result_text = async_to_sync(ai_runner.run_prompt)(prompt, ai_obj)
draft_options = ( draft_options = (

View File

@@ -26,4 +26,4 @@ py-autoreload=1
# In the container the repository is mounted at /code # In the container the repository is mounted at /code
# point autoreload at the actual in-container paths # point autoreload at the actual in-container paths
py-autoreload-on-edit=/code/core py-autoreload-on-edit=/code/core
py-autoreload-on-edit=/code/app py-autoreload-on-edit=/code/app

634
rust/manticore-mcp-worker/Cargo.lock generated Normal file
View File

@@ -0,0 +1,634 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "cc"
version = "1.2.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2"
dependencies = [
"find-msvc-tools",
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "displaydoc"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "find-msvc-tools"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
[[package]]
name = "form_urlencoded"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
dependencies = [
"percent-encoding",
]
[[package]]
name = "getrandom"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "icu_collections"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
dependencies = [
"displaydoc",
"potential_utf",
"yoke",
"zerofrom",
"zerovec",
]
[[package]]
name = "icu_locale_core"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
dependencies = [
"displaydoc",
"litemap",
"tinystr",
"writeable",
"zerovec",
]
[[package]]
name = "icu_normalizer"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
dependencies = [
"icu_collections",
"icu_normalizer_data",
"icu_properties",
"icu_provider",
"smallvec",
"zerovec",
]
[[package]]
name = "icu_normalizer_data"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
[[package]]
name = "icu_properties"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
dependencies = [
"icu_collections",
"icu_locale_core",
"icu_properties_data",
"icu_provider",
"zerotrie",
"zerovec",
]
[[package]]
name = "icu_properties_data"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
[[package]]
name = "icu_provider"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
dependencies = [
"displaydoc",
"icu_locale_core",
"writeable",
"yoke",
"zerofrom",
"zerotrie",
"zerovec",
]
[[package]]
name = "idna"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de"
dependencies = [
"idna_adapter",
"smallvec",
"utf8_iter",
]
[[package]]
name = "idna_adapter"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344"
dependencies = [
"icu_normalizer",
"icu_properties",
]
[[package]]
name = "itoa"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "libc"
version = "0.2.182"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112"
[[package]]
name = "litemap"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
[[package]]
name = "log"
version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "manticore-mcp-worker"
version = "0.1.0"
dependencies = [
"serde",
"serde_json",
"ureq",
"urlencoding",
]
[[package]]
name = "memchr"
version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "percent-encoding"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "potential_utf"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
dependencies = [
"zerovec",
]
[[package]]
name = "proc-macro2"
version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924"
dependencies = [
"proc-macro2",
]
[[package]]
name = "ring"
version = "0.17.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
dependencies = [
"cc",
"cfg-if",
"getrandom",
"libc",
"untrusted",
"windows-sys",
]
[[package]]
name = "rustls"
version = "0.23.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
dependencies = [
"log",
"once_cell",
"ring",
"rustls-pki-types",
"rustls-webpki",
"subtle",
"zeroize",
]
[[package]]
name = "rustls-pki-types"
version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd"
dependencies = [
"zeroize",
]
[[package]]
name = "rustls-webpki"
version = "0.103.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
dependencies = [
"ring",
"rustls-pki-types",
"untrusted",
]
[[package]]
name = "serde"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
dependencies = [
"serde_core",
"serde_derive",
]
[[package]]
name = "serde_core"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.149"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86"
dependencies = [
"itoa",
"memchr",
"serde",
"serde_core",
"zmij",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "smallvec"
version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "stable_deref_trait"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
[[package]]
name = "subtle"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synstructure"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinystr"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
dependencies = [
"displaydoc",
"zerovec",
]
[[package]]
name = "unicode-ident"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
[[package]]
name = "untrusted"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "ureq"
version = "2.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d"
dependencies = [
"base64",
"log",
"once_cell",
"rustls",
"rustls-pki-types",
"url",
"webpki-roots 0.26.11",
]
[[package]]
name = "url"
version = "2.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed"
dependencies = [
"form_urlencoded",
"idna",
"percent-encoding",
"serde",
]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]]
name = "utf8_iter"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "webpki-roots"
version = "0.26.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
dependencies = [
"webpki-roots 1.0.6",
]
[[package]]
name = "webpki-roots"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "writeable"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
[[package]]
name = "yoke"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
dependencies = [
"stable_deref_trait",
"yoke-derive",
"zerofrom",
]
[[package]]
name = "yoke-derive"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
]
[[package]]
name = "zerofrom"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
dependencies = [
"zerofrom-derive",
]
[[package]]
name = "zerofrom-derive"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
]
[[package]]
name = "zeroize"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
[[package]]
name = "zerotrie"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
dependencies = [
"displaydoc",
"yoke",
"zerofrom",
]
[[package]]
name = "zerovec"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
dependencies = [
"yoke",
"zerofrom",
"zerovec-derive",
]
[[package]]
name = "zerovec-derive"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zmij"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa"

View File

@@ -0,0 +1,10 @@
[package]
name = "manticore-mcp-worker"
version = "0.1.0"
edition = "2021"
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
ureq = { version = "2", default-features = false, features = ["tls"] }
urlencoding = "2"

View File

@@ -0,0 +1,26 @@
# Manticore MCP Rust Worker
Lightweight stdio MCP server for fast Manticore memory operations.
## Build
```bash
cd /code/xf/GIA/rust/manticore-mcp-worker
cargo build --release
```
Binary output:
`/code/xf/GIA/rust/manticore-mcp-worker/target/release/manticore-mcp-worker`
## Environment
- `MANTICORE_HTTP_URL` (default: `http://127.0.0.1:9308`)
- `MANTICORE_MEMORY_TABLE` (default: `gia_memory_items`)
- `MANTICORE_HTTP_TIMEOUT` (default: `5`)
## Tools
- `manticore.status`
- `manticore.query`
- `manticore.reindex` (maintenance: `FLUSH RAMCHUNK`, `OPTIMIZE TABLE`)

View File

@@ -0,0 +1,387 @@
use serde_json::{json, Value};
use std::collections::HashMap;
use std::env;
use std::io::{self, BufRead, Read, Write};
use std::time::{SystemTime, UNIX_EPOCH};
/// Current wall-clock time in milliseconds since the Unix epoch.
/// Returns 0 when the system clock reports a time before the epoch.
fn now_ms() -> i64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as i64,
        Err(_) => 0,
    }
}
/// Escape a string for safe embedding inside a single-quoted Manticore SQL
/// literal: backslashes and single quotes are backslash-escaped.
fn sql_escape(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        match ch {
            '\\' => escaped.push_str("\\\\"),
            '\'' => escaped.push_str("\\'"),
            other => escaped.push(other),
        }
    }
    escaped
}
/// Normalize a JSON value into a list of non-empty, trimmed strings.
/// Accepts either an array of strings or a single comma-separated string;
/// any other JSON shape yields an empty vector.
fn to_string_vec(value: &Value) -> Vec<String> {
    if let Value::Array(items) = value {
        return items
            .iter()
            .filter_map(Value::as_str)
            .map(str::trim)
            .filter(|part| !part.is_empty())
            .map(String::from)
            .collect();
    }
    if let Value::String(raw) = value {
        return raw
            .split(',')
            .map(str::trim)
            .filter(|part| !part.is_empty())
            .map(String::from)
            .collect();
    }
    Vec::new()
}
/// Minimal HTTP client configuration for Manticore's `/sql` endpoint.
struct ManticoreClient {
    base_url: String, // Manticore HTTP base URL, trailing slash stripped
    table: String, // memory table name targeted by every tool
    timeout_secs: u64, // per-request HTTP timeout in seconds
}
impl ManticoreClient {
    /// Build a client from environment variables, falling back to defaults:
    /// `MANTICORE_HTTP_URL`, `MANTICORE_MEMORY_TABLE`, `MANTICORE_HTTP_TIMEOUT`.
    fn from_env() -> Self {
        let base_url = {
            let raw = env::var("MANTICORE_HTTP_URL")
                .unwrap_or_else(|_| "http://127.0.0.1:9308".to_string());
            raw.trim_end_matches('/').to_string()
        };
        let table = {
            let raw = env::var("MANTICORE_MEMORY_TABLE")
                .unwrap_or_else(|_| "gia_memory_items".to_string());
            raw.trim().to_string()
        };
        // Any missing or unparsable timeout falls back to 5 seconds.
        let timeout_secs = match env::var("MANTICORE_HTTP_TIMEOUT") {
            Ok(raw) => raw.parse::<u64>().unwrap_or(5),
            Err(_) => 5,
        };
        Self {
            base_url,
            table,
            timeout_secs,
        }
    }
    /// POST one SQL statement to Manticore's `/sql` endpoint (raw mode) and
    /// parse the JSON response body. Errors are stringified for JSON-RPC use.
    fn sql(&self, query: &str) -> Result<Value, String> {
        let endpoint = format!("{}/sql", self.base_url);
        let form_body = format!(
            "mode=raw&query={}",
            urlencoding::encode(query).into_owned()
        );
        let http_response = ureq::post(&endpoint)
            .timeout(std::time::Duration::from_secs(self.timeout_secs))
            .set("Content-Type", "application/x-www-form-urlencoded")
            .send_string(&form_body)
            .map_err(|err| err.to_string())?;
        let body = match http_response.into_string() {
            Ok(text) => text,
            Err(err) => return Err(format!("manticore response read failed: {err}")),
        };
        match serde_json::from_str::<Value>(&body) {
            Ok(parsed) => Ok(parsed),
            Err(err) => Err(format!("manticore response parse failed: {err}")),
        }
    }
    /// Create the memory table when it does not exist yet, so a fresh
    /// Manticore instance never fails queries with a missing-table error.
    fn ensure_table(&self) -> Result<(), String> {
        let ddl = format!(
            "CREATE TABLE IF NOT EXISTS {} (id BIGINT,memory_uuid STRING,user_id BIGINT,conversation_id STRING,memory_kind STRING,status STRING,updated_ts BIGINT,summary TEXT,body TEXT)",
            self.table
        );
        self.sql(&ddl)?;
        Ok(())
    }
}
/// JSON descriptions of the MCP tools this worker exposes, in the shape
/// returned by the MCP `tools/list` response (name, description, inputSchema).
fn tool_specs() -> Value {
    json!([
        // Connectivity / table-presence probe; takes no arguments.
        {
            "name": "manticore.status",
            "description": "Report Manticore connectivity and table status.",
            "inputSchema": {
                "type": "object",
                "additionalProperties": false,
                "properties": {}
            }
        },
        // Full-text retrieval with optional user/conversation/status filters.
        {
            "name": "manticore.query",
            "description": "Run fast full-text retrieval against the Manticore memory table.",
            "inputSchema": {
                "type": "object",
                "additionalProperties": false,
                "required": ["query"],
                "properties": {
                    "query": {"type": "string"},
                    "user_id": {"type": "integer"},
                    "conversation_id": {"type": "string"},
                    // Accepts either a single comma-separated string or an
                    // array of strings (normalized by to_string_vec).
                    "statuses": {
                        "oneOf": [
                            {"type": "string"},
                            {"type": "array", "items": {"type": "string"}}
                        ]
                    },
                    "limit": {"type": "integer", "minimum": 1, "maximum": 100}
                }
            }
        },
        // Table maintenance; both flags default to true in call_tool.
        {
            "name": "manticore.reindex",
            "description": "Run table maintenance operations for fast reads (flush + optimize).",
            "inputSchema": {
                "type": "object",
                "additionalProperties": false,
                "properties": {
                    "flush_ramchunk": {"type": "boolean"},
                    "optimize": {"type": "boolean"}
                }
            }
        }
    ])
}
/// Dispatch one MCP tool invocation by name.
///
/// Every tool first runs `ensure_table` so queries against a fresh
/// Manticore instance cannot fail on a missing table. Returns the tool's
/// JSON payload, or a human-readable error string for unknown tools and
/// transport/validation failures.
fn call_tool(client: &ManticoreClient, name: &str, arguments: &Value) -> Result<Value, String> {
    match name {
        // Connectivity probe: checks table presence and echoes config.
        "manticore.status" => {
            client.ensure_table()?;
            let table_check = client.sql(&format!(
                "SHOW TABLES LIKE '{}'",
                sql_escape(&client.table)
            ))?;
            Ok(json!({
                "backend": "manticore",
                "ok": true,
                "manticore_http_url": client.base_url,
                "manticore_table": client.table,
                "table_check": table_check,
                "ts_ms": now_ms()
            }))
        }
        // Full-text search with optional user/conversation/status filters.
        "manticore.query" => {
            client.ensure_table()?;
            let query = arguments
                .get("query")
                .and_then(Value::as_str)
                .unwrap_or("")
                .trim()
                .to_string();
            if query.is_empty() {
                return Err("query is required".to_string());
            }
            // Cap result size to the schema's advertised 1..=100 range.
            let limit = arguments
                .get("limit")
                .and_then(Value::as_i64)
                .unwrap_or(20)
                .clamp(1, 100);
            // WHERE clauses are accumulated and AND-joined; all string
            // values pass through sql_escape before interpolation.
            let mut where_parts = vec![format!("MATCH('{}')", sql_escape(&query))];
            if let Some(user_id) = arguments.get("user_id").and_then(Value::as_i64) {
                where_parts.push(format!("user_id={user_id}"));
            }
            let conversation_id = arguments
                .get("conversation_id")
                .and_then(Value::as_str)
                .unwrap_or("")
                .trim()
                .to_string();
            if !conversation_id.is_empty() {
                where_parts.push(format!(
                    "conversation_id='{}'",
                    sql_escape(&conversation_id)
                ));
            }
            let statuses = to_string_vec(arguments.get("statuses").unwrap_or(&Value::Null));
            if !statuses.is_empty() {
                let joined = statuses
                    .iter()
                    .map(|s| format!("'{}'", sql_escape(s)))
                    .collect::<Vec<_>>()
                    .join(",");
                where_parts.push(format!("status IN ({joined})"));
            }
            let sql = format!(
                "SELECT memory_uuid,memory_kind,status,conversation_id,updated_ts,summary,WEIGHT() AS score FROM {} WHERE {} ORDER BY score DESC LIMIT {}",
                client.table,
                where_parts.join(" AND "),
                limit
            );
            let payload = client.sql(&sql)?;
            // Manticore raw mode returns rows under "data"; count them for
            // the summary while passing the raw payload through untouched.
            let count = payload
                .get("data")
                .and_then(Value::as_array)
                .map(|rows| rows.len())
                .unwrap_or(0);
            Ok(json!({
                "backend": "manticore",
                "query": query,
                "count": count,
                "hits": payload.get("data").cloned().unwrap_or_else(|| json!([])),
                "raw": payload
            }))
        }
        // Maintenance: both actions default to true when omitted.
        "manticore.reindex" => {
            client.ensure_table()?;
            let flush = arguments
                .get("flush_ramchunk")
                .and_then(Value::as_bool)
                .unwrap_or(true);
            let optimize = arguments
                .get("optimize")
                .and_then(Value::as_bool)
                .unwrap_or(true);
            let mut actions: Vec<Value> = Vec::new();
            if flush {
                let sql = format!("FLUSH RAMCHUNK {}", client.table);
                let payload = client.sql(&sql)?;
                actions.push(json!({"sql": sql, "result": payload}));
            }
            if optimize {
                let sql = format!("OPTIMIZE TABLE {}", client.table);
                let payload = client.sql(&sql)?;
                actions.push(json!({"sql": sql, "result": payload}));
            }
            Ok(json!({
                "ok": true,
                "actions": actions,
                "ts_ms": now_ms()
            }))
        }
        _ => Err(format!("Unknown tool: {name}")),
    }
}
/// Wrap a successful JSON-RPC 2.0 result for the given request id.
fn response(id: Value, result: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "result": result
    })
}
/// Build a JSON-RPC 2.0 error object for the given request id.
fn error(id: Value, code: i32, message: &str) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "error": {
            "code": code,
            "message": message
        }
    })
}
/// Serialize one JSON-RPC payload to stdout.
///
/// Two framings are supported:
/// - compat newline mode: one JSON document per line (for peers that speak
///   line-delimited JSON);
/// - LSP-style framing: a `Content-Length` header block followed by the raw
///   body bytes.
///
/// Returns a stringified serialization or I/O error on failure.
fn write_message(payload: &Value, compat_newline_mode: bool) -> Result<(), String> {
    let raw = serde_json::to_vec(payload).map_err(|e| e.to_string())?;
    let mut stdout = io::stdout();
    if compat_newline_mode {
        // serde_json output is always valid UTF-8, so the bytes can be
        // written directly followed by a newline — no lossy String
        // round-trip or extra allocation needed.
        stdout.write_all(&raw).map_err(|e| e.to_string())?;
        stdout.write_all(b"\n").map_err(|e| e.to_string())?;
    } else {
        stdout
            .write_all(format!("Content-Length: {}\r\n\r\n", raw.len()).as_bytes())
            .map_err(|e| e.to_string())?;
        stdout.write_all(&raw).map_err(|e| e.to_string())?;
    }
    stdout.flush().map_err(|e| e.to_string())
}
/// Read one JSON-RPC message from stdin, auto-detecting the framing.
///
/// If the very first non-whitespace byte of a fresh message is `{` or `[`
/// before any header line has been seen, the peer is assumed to speak
/// line-delimited JSON: `compat_newline_mode` is latched to true and the
/// line is parsed directly. Otherwise LSP-style `Header: value` lines are
/// collected until a blank line, then `Content-Length` bytes of body are
/// read and parsed.
///
/// Returns Ok(None) on EOF. NOTE(review): Ok(None) is also returned when
/// the header block carries no usable Content-Length (missing header or
/// length 0), which terminates the caller's main loop — presumably
/// intentional for a stdio worker; confirm.
fn read_message(
    stdin: &mut io::StdinLock<'_>,
    compat_newline_mode: &mut bool,
) -> Result<Option<Value>, String> {
    let mut headers: HashMap<String, String> = HashMap::new();
    let mut pending_body: Vec<u8> = Vec::new();
    loop {
        let mut line: Vec<u8> = Vec::new();
        let bytes = stdin.read_until(b'\n', &mut line).map_err(|e| e.to_string())?;
        if bytes == 0 {
            // EOF: peer closed stdin.
            return Ok(None);
        }
        // Strip leading whitespace only for the framing sniff below; the
        // original bytes are kept for header parsing.
        let trimmed = line
            .iter()
            .copied()
            .skip_while(|b| b.is_ascii_whitespace())
            .collect::<Vec<u8>>();
        if headers.is_empty() && (trimmed.starts_with(b"{") || trimmed.starts_with(b"[")) {
            // Line-delimited JSON detected; latch compat mode for replies.
            *compat_newline_mode = true;
            let raw = String::from_utf8_lossy(&line).trim().to_string();
            let parsed = serde_json::from_str::<Value>(&raw).map_err(|e| e.to_string())?;
            return Ok(Some(parsed));
        }
        if line == b"\r\n" || line == b"\n" {
            // Blank line ends the header block.
            break;
        }
        // Header lines are case-insensitive on the key.
        let decoded = String::from_utf8_lossy(&line).trim().to_string();
        if let Some((k, v)) = decoded.split_once(':') {
            headers.insert(k.trim().to_lowercase(), v.trim().to_string());
        }
    }
    if let Some(length_raw) = headers.get("content-length") {
        let length = length_raw
            .parse::<usize>()
            .map_err(|_| "invalid content-length".to_string())?;
        if length > 0 {
            // Read exactly `length` body bytes, then parse as JSON.
            pending_body.resize(length, 0);
            stdin.read_exact(&mut pending_body).map_err(|e| e.to_string())?;
            let parsed =
                serde_json::from_slice::<Value>(&pending_body).map_err(|e| e.to_string())?;
            return Ok(Some(parsed));
        }
        return Ok(None);
    }
    Ok(None)
}
/// Stdio JSON-RPC loop for the MCP worker.
///
/// Reads one message at a time (framing auto-detected by `read_message`),
/// dispatches the handful of MCP methods this worker supports, and writes
/// each reply in the same framing the peer used. The process exits on EOF,
/// on a read error, or when a reply can no longer be written.
fn main() {
    let client = ManticoreClient::from_env();
    let stdin = io::stdin();
    let mut locked = stdin.lock();
    // Latched to true by read_message once line-delimited JSON is seen.
    let mut compat_newline_mode = false;
    loop {
        let message = match read_message(&mut locked, &mut compat_newline_mode) {
            Ok(Some(value)) => value,
            Ok(None) => return,
            Err(err) => {
                // Best-effort error report before shutting down.
                let _ = write_message(
                    &error(Value::Null, -32000, &format!("read failed: {err}")),
                    compat_newline_mode,
                );
                return;
            }
        };
        let id = message.get("id").cloned().unwrap_or(Value::Null);
        let method = message
            .get("method")
            .and_then(Value::as_str)
            .unwrap_or("")
            .to_string();
        let params = message.get("params").cloned().unwrap_or_else(|| json!({}));
        // None means "no reply" (notifications); Some(payload) is written back.
        let response_payload = match method.as_str() {
            "notifications/initialized" => None,
            "initialize" => Some(response(
                id,
                json!({
                    "protocolVersion":"2025-06-18",
                    "serverInfo":{"name":"gia-manticore-mcp-rust","version":"0.1.0"},
                    "capabilities":{"tools":{}}
                }),
            )),
            "ping" => Some(response(id, json!({}))),
            "tools/list" => Some(response(id, json!({"tools": tool_specs()}))),
            "tools/call" => {
                let name = params
                    .get("name")
                    .and_then(Value::as_str)
                    .unwrap_or("")
                    .trim()
                    .to_string();
                let args = params
                    .get("arguments")
                    .cloned()
                    .unwrap_or_else(|| json!({}));
                // Tool results and tool errors are both delivered as MCP
                // content blocks; isError distinguishes them.
                match call_tool(&client, &name, &args) {
                    Ok(payload) => Some(response(
                        id,
                        json!({"isError":false,"content":[{"type":"text","text":payload.to_string()}]}),
                    )),
                    Err(err) => Some(response(
                        id,
                        json!({"isError":true,"content":[{"type":"text","text":json!({"error":err}).to_string()}]}),
                    )),
                }
            }
            _ => Some(error(id, -32601, &format!("Method not found: {method}"))),
        };
        if let Some(payload) = response_payload {
            if write_message(&payload, compat_newline_mode).is_err() {
                return;
            }
        }
    }
}

View File

@@ -36,6 +36,13 @@ PROSODY_CONFIG_FILE="${QUADLET_PROSODY_CONFIG_FILE:-$ROOT_DIR/utilities/prosody/
PROSODY_CERTS_DIR="${QUADLET_PROSODY_CERTS_DIR:-$ROOT_DIR/.podman/gia_prosody_certs}" PROSODY_CERTS_DIR="${QUADLET_PROSODY_CERTS_DIR:-$ROOT_DIR/.podman/gia_prosody_certs}"
PROSODY_DATA_DIR="${QUADLET_PROSODY_DATA_DIR:-$ROOT_DIR/.podman/gia_prosody_data}" PROSODY_DATA_DIR="${QUADLET_PROSODY_DATA_DIR:-$ROOT_DIR/.podman/gia_prosody_data}"
PROSODY_LOGS_DIR="${QUADLET_PROSODY_LOGS_DIR:-$ROOT_DIR/.podman/gia_prosody_logs}" PROSODY_LOGS_DIR="${QUADLET_PROSODY_LOGS_DIR:-$ROOT_DIR/.podman/gia_prosody_logs}"
PROSODY_IMAGE="${PROSODY_IMAGE:-docker.io/prosody/prosody:latest}"
if id code >/dev/null 2>&1; then
PROSODY_RUN_USER_DEFAULT="$(id -u code):$(id -g code)"
else
PROSODY_RUN_USER_DEFAULT="$(id -u):$(id -g)"
fi
PROSODY_RUN_USER="${PROSODY_RUN_USER:-$PROSODY_RUN_USER_DEFAULT}"
PROSODY_ENABLED="${PROSODY_ENABLED:-false}" PROSODY_ENABLED="${PROSODY_ENABLED:-false}"
ENSURE_XMPP_SECRET_SCRIPT="$ROOT_DIR/utilities/prosody/ensure_xmpp_secret.sh" ENSURE_XMPP_SECRET_SCRIPT="$ROOT_DIR/utilities/prosody/ensure_xmpp_secret.sh"
if [[ -n "${STACK_ID}" ]]; then if [[ -n "${STACK_ID}" ]]; then
@@ -232,12 +239,15 @@ start_stack() {
--name "$PROSODY_CONTAINER" \ --name "$PROSODY_CONTAINER" \
--pod "$POD_NAME" \ --pod "$POD_NAME" \
--env-file "$STACK_ENV" \ --env-file "$STACK_ENV" \
--user "$PROSODY_RUN_USER" \
--entrypoint prosody \
-v "$PROSODY_CONFIG_FILE:/etc/prosody/prosody.cfg.lua:ro" \ -v "$PROSODY_CONFIG_FILE:/etc/prosody/prosody.cfg.lua:ro" \
-v "$PROSODY_CERTS_DIR:/etc/prosody/certs" \ -v "$PROSODY_CERTS_DIR:/etc/prosody/certs" \
-v "$PROSODY_DATA_DIR:/var/lib/prosody" \ -v "$PROSODY_DATA_DIR:/var/lib/prosody" \
-v "$PROSODY_LOGS_DIR:/var/log/prosody" \ -v "$PROSODY_LOGS_DIR:/var/log/prosody" \
-v "$REPO_DIR:/code" \ -v "$REPO_DIR:/code" \
docker.io/prosody/prosody:0.12 >/dev/null "$PROSODY_IMAGE" \
-F >/dev/null
fi fi
wait_for_redis_socket wait_for_redis_socket
@@ -288,7 +298,7 @@ case "${1:-}" in
if is_remote; then if is_remote; then
podman logs -f "$APP_CONTAINER" podman logs -f "$APP_CONTAINER"
else else
local log_targets=("$APP_CONTAINER" "$ASGI_CONTAINER" "$UR_CONTAINER" "$SCHED_CONTAINER" "$CODEX_WORKER_CONTAINER" "$REDIS_CONTAINER" "$SIGNAL_CONTAINER") log_targets=("$APP_CONTAINER" "$ASGI_CONTAINER" "$UR_CONTAINER" "$SCHED_CONTAINER" "$CODEX_WORKER_CONTAINER" "$REDIS_CONTAINER" "$SIGNAL_CONTAINER")
if [[ "$PROSODY_ENABLED" == "true" ]]; then if [[ "$PROSODY_ENABLED" == "true" ]]; then
log_targets+=("$PROSODY_CONTAINER") log_targets+=("$PROSODY_CONTAINER")
fi fi

View File

@@ -30,11 +30,14 @@ EVENT_PRIMARY_WRITE_PATH=false
# XMPP component bridge (UR -> Prosody) # XMPP component bridge (UR -> Prosody)
XMPP_ADDRESS=127.0.0.1 XMPP_ADDRESS=127.0.0.1
XMPP_JID=jews.example.com XMPP_JID=jews.example.com
# End-user XMPP domain for client-facing JIDs (e.g. user@example.com).
XMPP_USER_DOMAIN=example.com
XMPP_PORT=8888 XMPP_PORT=8888
# Auto-generated if empty by Prosody startup helpers. # Auto-generated if empty by Prosody startup helpers.
XMPP_SECRET= XMPP_SECRET=
# Optional Prosody container storage/config paths used by utilities/prosody/manage_prosody_container.sh # Optional Prosody container storage/config paths used by utilities/prosody/manage_prosody_container.sh
PROSODY_IMAGE=docker.io/prosody/prosody:latest
QUADLET_PROSODY_CONFIG_FILE=./utilities/prosody/prosody.cfg.lua QUADLET_PROSODY_CONFIG_FILE=./utilities/prosody/prosody.cfg.lua
QUADLET_PROSODY_CERTS_DIR=./.podman/gia_prosody_certs QUADLET_PROSODY_CERTS_DIR=./.podman/gia_prosody_certs
QUADLET_PROSODY_DATA_DIR=./.podman/gia_prosody_data QUADLET_PROSODY_DATA_DIR=./.podman/gia_prosody_data

View File

@@ -1,26 +1,56 @@
#!/bin/sh #!/bin/sh
set -eu set -eu
AUTH_PY_IN_CONTAINER="/code/utilities/prosody/auth_django.py" AUTH_ENDPOINT="${PROSODY_AUTH_ENDPOINT:-http://127.0.0.1:8000/internal/prosody/auth/}"
STACK_ID="${GIA_STACK_ID:-${STACK_ID:-}}" PROSODY_SECRET="${XMPP_SECRET:-}"
STACK_ID="$(echo "$STACK_ID" | tr -cs 'a-zA-Z0-9._-' '-' | sed 's/^-*//; s/-*$//')"
if [ -n "$STACK_ID" ]; then b64url() {
GIA_CONTAINER="gia_${STACK_ID}" printf '%s' "$1" | base64 | tr -d '\n=' | tr '+/' '-_'
else }
GIA_CONTAINER="gia"
fi http_get() {
url="$1"
if command -v wget >/dev/null 2>&1; then
wget -qO- -T 5 "$url" 2>/dev/null
return
fi
if command -v curl >/dev/null 2>&1; then
curl -fsS --max-time 5 "$url" 2>/dev/null
return
fi
if command -v lua >/dev/null 2>&1; then
lua - "$url" <<'LUA'
local http = require("socket.http")
local ltn12 = require("ltn12")
http.TIMEOUT = 5
local chunks = {}
local _, code = http.request({
url = arg[1],
sink = ltn12.sink.table(chunks),
})
if tonumber(code) and tonumber(code) >= 200 and tonumber(code) < 300 then
io.write(table.concat(chunks))
end
LUA
return
fi
return 1
}
# Prosody external auth uses line-oriented stdin/stdout.
# We execute one short-lived auth check per line to avoid stale stdin issues
# in long-lived `podman exec -i` sessions after disconnects/restarts.
while IFS= read -r line; do while IFS= read -r line; do
if [ -z "$line" ]; then if [ -z "$line" ] || [ -z "$PROSODY_SECRET" ]; then
printf '0\n' printf '0\n'
continue continue
fi fi
printf '%s\n' "$line" | podman exec -i "$GIA_CONTAINER" sh -lc ' secret_b64="$(b64url "$PROSODY_SECRET")"
cd /code && line_b64="$(b64url "$line")"
. /venv/bin/activate && result="$(http_get "$AUTH_ENDPOINT?secret_b64=$secret_b64&line_b64=$line_b64" || printf '0')"
exec python -u '"$AUTH_PY_IN_CONTAINER"' --once case "$result" in
' 1|1$'\n')
printf '1\n'
;;
*)
printf '0\n'
;;
esac
done done

View File

@@ -5,6 +5,19 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
STACK_ENV="${STACK_ENV:-$ROOT_DIR/stack.env}" STACK_ENV="${STACK_ENV:-$ROOT_DIR/stack.env}"
ENSURE_XMPP_SECRET_SCRIPT="$ROOT_DIR/utilities/prosody/ensure_xmpp_secret.sh" ENSURE_XMPP_SECRET_SCRIPT="$ROOT_DIR/utilities/prosody/ensure_xmpp_secret.sh"
podman_cmd() {
if [[ "$(id -u)" -eq 0 ]] && id code >/dev/null 2>&1; then
local quoted=()
local arg
for arg in "$@"; do
quoted+=("$(printf '%q' "$arg")")
done
su -s /bin/sh code -c "podman ${quoted[*]}"
return
fi
podman "$@"
}
if [[ -f "$STACK_ENV" ]]; then if [[ -f "$STACK_ENV" ]]; then
set -a set -a
. "$STACK_ENV" . "$STACK_ENV"
@@ -31,20 +44,105 @@ name_with_stack() {
POD_NAME="$(name_with_stack "gia")" POD_NAME="$(name_with_stack "gia")"
PROSODY_CONTAINER="$(name_with_stack "prosody_gia")" PROSODY_CONTAINER="$(name_with_stack "prosody_gia")"
resolve_runtime_names() {
local candidates
candidates="$(
podman_cmd pod ps --format '{{.Name}}' 2>/dev/null \
| grep -E '^((pod_)?gia)($|_[a-zA-Z0-9._-]+$)' || true
)"
local expected=()
if [[ -n "$STACK_ID" ]]; then
expected+=("gia_${STACK_ID}" "pod_gia_${STACK_ID}")
else
expected+=("gia" "pod_gia")
fi
local name
for name in "${expected[@]}"; do
if printf '%s\n' "$candidates" | grep -qx "$name"; then
POD_NAME="$name"
break
fi
done
if [[ "$POD_NAME" != "gia" ]] && [[ "$POD_NAME" != "pod_gia" ]] && ! podman_cmd pod exists "$POD_NAME" >/dev/null 2>&1; then
POD_NAME="$(name_with_stack "gia")"
fi
if podman_cmd pod exists "$POD_NAME" >/dev/null 2>&1; then
if [[ "$POD_NAME" == pod_gia* ]]; then
local suffix="${POD_NAME#pod_gia}"
suffix="${suffix#_}"
if [[ -n "$suffix" ]]; then
PROSODY_CONTAINER="prosody_gia_${suffix}"
else
PROSODY_CONTAINER="prosody_gia"
fi
elif [[ "$POD_NAME" == gia_* ]]; then
local suffix="${POD_NAME#gia_}"
PROSODY_CONTAINER="prosody_gia_${suffix}"
else
PROSODY_CONTAINER="prosody_gia"
fi
return
fi
# Fallback: if only one gia-like pod exists, use it.
local count
count="$(printf '%s\n' "$candidates" | sed '/^$/d' | wc -l | tr -d ' ')"
if [[ "$count" != "1" ]]; then
return
fi
local detected
detected="$(printf '%s\n' "$candidates" | sed '/^$/d' | head -n1)"
[[ -z "$detected" ]] && return
POD_NAME="$detected"
if [[ "$POD_NAME" == pod_gia_* ]]; then
local suffix="${POD_NAME#pod_gia_}"
PROSODY_CONTAINER="prosody_gia_${suffix}"
elif [[ "$POD_NAME" == pod_gia ]]; then
PROSODY_CONTAINER="prosody_gia"
elif [[ "$POD_NAME" == gia_* ]]; then
local suffix="${POD_NAME#gia_}"
PROSODY_CONTAINER="prosody_gia_${suffix}"
else
PROSODY_CONTAINER="prosody_gia"
fi
echo "Info: auto-detected pod '$POD_NAME' and Prosody container '$PROSODY_CONTAINER'." >&2
}
PROSODY_CONFIG_FILE="${QUADLET_PROSODY_CONFIG_FILE:-$ROOT_DIR/utilities/prosody/prosody.cfg.lua}" PROSODY_CONFIG_FILE="${QUADLET_PROSODY_CONFIG_FILE:-$ROOT_DIR/utilities/prosody/prosody.cfg.lua}"
PROSODY_CERTS_DIR="${QUADLET_PROSODY_CERTS_DIR:-$ROOT_DIR/.podman/gia_prosody_certs}" PROSODY_CERTS_DIR="${QUADLET_PROSODY_CERTS_DIR:-$ROOT_DIR/.podman/gia_prosody_certs}"
PROSODY_DATA_DIR="${QUADLET_PROSODY_DATA_DIR:-$ROOT_DIR/.podman/gia_prosody_data}" PROSODY_DATA_DIR="${QUADLET_PROSODY_DATA_DIR:-$ROOT_DIR/.podman/gia_prosody_data}"
PROSODY_LOGS_DIR="${QUADLET_PROSODY_LOGS_DIR:-$ROOT_DIR/.podman/gia_prosody_logs}" PROSODY_LOGS_DIR="${QUADLET_PROSODY_LOGS_DIR:-$ROOT_DIR/.podman/gia_prosody_logs}"
PROSODY_IMAGE="${PROSODY_IMAGE:-docker.io/prosody/prosody-alpine:latest}" PROSODY_IMAGE="${PROSODY_IMAGE:-docker.io/prosody/prosody:latest}"
if id code >/dev/null 2>&1; then
PROSODY_RUN_USER_DEFAULT="$(id -u code):$(id -g code)"
else
PROSODY_RUN_USER_DEFAULT="$(id -u):$(id -g)"
fi
PROSODY_RUN_USER="${PROSODY_RUN_USER:-$PROSODY_RUN_USER_DEFAULT}"
mkdir -p "$PROSODY_CERTS_DIR" "$PROSODY_DATA_DIR" "$PROSODY_LOGS_DIR" mkdir -p "$PROSODY_CERTS_DIR" "$PROSODY_DATA_DIR" "$PROSODY_LOGS_DIR"
up() { up() {
resolve_runtime_names
local run_args=() local run_args=()
local pod_state="" local pod_state=""
if podman pod exists "$POD_NAME"; then if podman_cmd pod exists "$POD_NAME"; then
pod_state="$(podman pod inspect "$POD_NAME" --format '{{.State}}' 2>/dev/null || true)" pod_state="$(podman_cmd pod inspect "$POD_NAME" --format '{{.State}}' 2>/dev/null || true)"
if [[ "$pod_state" == "Running" ]]; then if [[ -z "$pod_state" ]]; then
pod_state="$(podman_cmd pod ps --format '{{.Name}} {{.Status}}' 2>/dev/null | awk -v pod="$POD_NAME" '$1==pod {print $2}')"
fi
if [[ "$pod_state" != "Running" && "$pod_state" != "Degraded" ]]; then
podman_cmd pod start "$POD_NAME" >/dev/null 2>&1 || true
pod_state="$(podman_cmd pod inspect "$POD_NAME" --format '{{.State}}' 2>/dev/null || true)"
if [[ -z "$pod_state" ]]; then
pod_state="$(podman_cmd pod ps --format '{{.Name}} {{.Status}}' 2>/dev/null | awk -v pod="$POD_NAME" '$1==pod {print $2}')"
fi
fi
if [[ "$pod_state" == "Running" || "$pod_state" == "Degraded" ]]; then
run_args+=(--pod "$POD_NAME") run_args+=(--pod "$POD_NAME")
else else
echo "Warning: pod '$POD_NAME' state is '$pod_state'; starting $PROSODY_CONTAINER standalone with explicit ports." >&2 echo "Warning: pod '$POD_NAME' state is '$pod_state'; starting $PROSODY_CONTAINER standalone with explicit ports." >&2
@@ -54,17 +152,20 @@ up() {
echo "Warning: pod '$POD_NAME' not found; starting $PROSODY_CONTAINER standalone with explicit ports." >&2 echo "Warning: pod '$POD_NAME' not found; starting $PROSODY_CONTAINER standalone with explicit ports." >&2
run_args+=(-p 5222:5222 -p 5269:5269 -p 5280:5280 -p 8888:8888) run_args+=(-p 5222:5222 -p 5269:5269 -p 5280:5280 -p 8888:8888)
fi fi
podman run -d \ podman_cmd run -d \
--replace \ --replace \
--name "$PROSODY_CONTAINER" \ --name "$PROSODY_CONTAINER" \
"${run_args[@]}" \ "${run_args[@]}" \
--env-file "$STACK_ENV" \ --env-file "$STACK_ENV" \
--user "$PROSODY_RUN_USER" \
--entrypoint prosody \
-v "$PROSODY_CONFIG_FILE:/etc/prosody/prosody.cfg.lua:ro" \ -v "$PROSODY_CONFIG_FILE:/etc/prosody/prosody.cfg.lua:ro" \
-v "$PROSODY_CERTS_DIR:/etc/prosody/certs" \ -v "$PROSODY_CERTS_DIR:/etc/prosody/certs" \
-v "$PROSODY_DATA_DIR:/var/lib/prosody" \ -v "$PROSODY_DATA_DIR:/var/lib/prosody" \
-v "$PROSODY_LOGS_DIR:/var/log/prosody" \ -v "$PROSODY_LOGS_DIR:/var/log/prosody" \
-v "$ROOT_DIR:/code" \ -v "$ROOT_DIR:/code" \
"$PROSODY_IMAGE" >/dev/null "$PROSODY_IMAGE" \
-F >/dev/null
if [[ " ${run_args[*]} " == *" --pod "* ]]; then if [[ " ${run_args[*]} " == *" --pod "* ]]; then
echo "Started $PROSODY_CONTAINER in pod $POD_NAME" echo "Started $PROSODY_CONTAINER in pod $POD_NAME"
else else
@@ -73,16 +174,19 @@ up() {
} }
down() { down() {
podman rm -f "$PROSODY_CONTAINER" >/dev/null 2>&1 || true resolve_runtime_names
podman_cmd rm -f "$PROSODY_CONTAINER" >/dev/null 2>&1 || true
echo "Stopped $PROSODY_CONTAINER" echo "Stopped $PROSODY_CONTAINER"
} }
status() { status() {
podman ps --format "table {{.Names}}\t{{.Status}}" | grep -E "^$PROSODY_CONTAINER\b" || true resolve_runtime_names
podman_cmd ps --format "table {{.Names}}\t{{.Status}}" | grep -E "^$PROSODY_CONTAINER\b" || true
} }
logs() { logs() {
podman logs -f "$PROSODY_CONTAINER" resolve_runtime_names
podman_cmd logs -f "$PROSODY_CONTAINER"
} }
case "${1:-}" in case "${1:-}" in

View File

@@ -0,0 +1,84 @@
-- GIA auth provider for legacy Prosody builds that do not ship
-- external auth modules. Delegates auth checks to external_auth_command.
local new_sasl = require "util.sasl".new;
local host = module.host;
local log = module._log;
-- Path of the external checker; overridable via the
-- `external_auth_command` option in prosody.cfg.lua.
local auth_cmd = module:get_option_string(
    "external_auth_command",
    "/code/utilities/prosody/auth_django.sh"
);
local provider = {};
-- Wrap a value in single quotes for POSIX shell, escaping embedded quotes.
local function shell_quote(value)
    return "'" .. tostring(value or ""):gsub("'", "'\\''") .. "'";
end
-- Pipe one protocol line into the external command and treat a bare "1"
-- on stdout as success. Returns false on any spawn/read failure.
-- NOTE(review): the auth line (which includes the password for "auth:"
-- checks) travels through a shell pipeline; it is quoted against
-- injection, but may be visible in the process list — confirm this is
-- acceptable for the deployment.
local function run_external(line)
    local cmd = "printf %s\\\\n " .. shell_quote(line) .. " | " .. shell_quote(auth_cmd);
    local handle = io.popen(cmd, "r");
    if not handle then
        return false;
    end
    local output = handle:read("*a") or "";
    handle:close();
    -- Strip all whitespace so "1\n" also counts as success.
    output = output:gsub("%s+", "");
    return output == "1";
end
-- Verify username/password via the external checker ("auth:" line).
function provider.test_password(username, password)
    if not username or username == "" then
        return nil, "Auth failed. Invalid username.";
    end
    if not password or password == "" then
        return nil, "Auth failed. Invalid password.";
    end
    local ok = run_external("auth:" .. username .. ":" .. host .. ":" .. password);
    if ok then
        return true;
    end
    return nil, "Auth failed. Invalid username or password.";
end
-- Check account existence via the external checker ("isuser:" line).
function provider.user_exists(username)
    if not username or username == "" then
        return nil, "Auth failed. Invalid username.";
    end
    if run_external("isuser:" .. username .. ":" .. host) then
        return true;
    end
    return nil, "Auth failed. Invalid username.";
end
-- Account management is delegated entirely to the external system, so
-- the mutation methods are deliberately unimplemented.
function provider.set_password()
    return nil, "method not implemented";
end
-- User enumeration is not supported; return an empty iterator.
function provider.users()
    return function() return nil end;
end
function provider.create_user()
    return nil, "method not implemented";
end
function provider.delete_user()
    return nil, "method not implemented";
end
-- SASL handler supporting PLAIN only, backed by test_password.
function provider.get_sasl_handler()
    return new_sasl(host, {
        plain_test = function(_, username, password)
            local ok = provider.test_password(username, password);
            if ok then
                return true, true;
            end
            return false, nil;
        end
    });
end
log("debug", "initializing GIA auth provider for host '%s'", host);
module:provides("auth", provider);

View File

@@ -1,19 +1,17 @@
local env = os.getenv local env = os.getenv
local domain = env("DOMAIN") or "example.com"
local xmpp_component = env("XMPP_JID") or ("jews." .. domain)
local share_host = env("XMPP_SHARE_HOST") or ("share." .. domain)
local xmpp_secret = env("XMPP_SECRET") or "" local xmpp_secret = env("XMPP_SECRET") or ""
if xmpp_secret == "" then if xmpp_secret == "" then
error("XMPP_SECRET is required for Prosody component authentication") error("XMPP_SECRET is required for Prosody component authentication")
end end
sasl_mechanisms = { "PLAIN", "SCRAM-SHA-1", "SCRAM-SHA-256" } sasl_mechanisms = { "PLAIN" }
plugin_paths = { "/code/utilities/prosody/modules" }
daemonize = false daemonize = false
pidfile = "/run/prosody/prosody.pid" pidfile = "/tmp/prosody.pid"
admins = { env("XMPP_ADMIN_JID") or ("admin@" .. domain) } admins = { env("XMPP_ADMIN_JID") or "admin@example.com" }
modules_enabled = { modules_enabled = {
"disco"; "disco";
@@ -21,30 +19,20 @@ modules_enabled = {
"saslauth"; "saslauth";
"tls"; "tls";
"blocklist"; "blocklist";
"bookmarks";
"carbons"; "carbons";
"dialback"; "dialback";
"limits"; "limits";
"pep"; "pep";
"private"; "private";
"smacks";
"vcard4";
"vcard_legacy";
"cloud_notify";
"csi_simple"; "csi_simple";
"invites";
"invites_adhoc";
"invites_register";
"ping"; "ping";
"time"; "time";
"uptime"; "uptime";
"version"; "version";
"mam"; "vcard4";
"turn_external"; "vcard_legacy";
"admin_adhoc"; "admin_adhoc";
"admin_shell";
"announce"; "announce";
"auth_external_insecure";
"http"; "http";
} }
@@ -55,7 +43,7 @@ limits = {
s2sin = { rate = "100mb/s"; }; s2sin = { rate = "100mb/s"; };
} }
authentication = "external_insecure" authentication = "gia"
archive_expires_after = "1w" archive_expires_after = "1w"
log = { log = {
@@ -65,20 +53,28 @@ log = {
} }
certificates = "certs" certificates = "certs"
ssl = {
key = "/etc/prosody/certs/cert.pem";
certificate = "/etc/prosody/certs/cert.pem";
}
component_ports = { 8888 } component_ports = { 8888 }
component_interfaces = { "0.0.0.0" } component_interfaces = { "0.0.0.0" }
VirtualHost domain
authentication = "external_insecure"
external_auth_command = "/code/utilities/prosody/auth_django.sh"
certificate = "/etc/prosody/certs/cert.pem"
Component xmpp_component
component_secret = xmpp_secret
Component share_host "http_file_share"
http_ports = { 5280 } http_ports = { 5280 }
http_interfaces = { "0.0.0.0", "::" } http_interfaces = { "0.0.0.0", "::" }
http_external_url = "https://" .. share_host .. "/" http_external_url = "https://share.example.com/"
VirtualHost "example.com"
authentication = "gia"
external_auth_command = "/code/utilities/prosody/auth_django.sh"
ssl = {
key = "/etc/prosody/certs/cert.pem";
certificate = "/etc/prosody/certs/cert.pem";
}
Component "jews.example.com"
component_secret = xmpp_secret
ssl = {
key = "/etc/prosody/certs/cert.pem";
certificate = "/etc/prosody/certs/cert.pem";
}