Continue AI features and improve protocol support

This commit is contained in:
commit 85e97e895d (parent 2d3b8fdac6), authored 2026-02-15 16:57:32 +00:00
62 changed files with 5472 additions and 441 deletions

View File

@@ -1,19 +1,19 @@
import logging
import hashlib
import logging
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AbstractUser
from django.db import models
from core.clients import signalapi
from core.clients import transport
from core.lib.notify import raw_sendmsg
logger = logging.getLogger(__name__)
SERVICE_CHOICES = (
("signal", "Signal"),
("whatsapp", "WhatsApp"),
("xmpp", "XMPP"),
("instagram", "Instagram"),
)
@@ -61,7 +61,7 @@ def _attribute_display_id(kind, *parts):
n_letters //= 26
digits = int(digest[8:16], 16) % 10000
return f"{''.join(letters)}{digits:04d}"
return f"{''.join(letters)}{str(digits).zfill(4)}"
def get_default_workspace_user_pk():
@@ -157,20 +157,16 @@ class PersonIdentifier(models.Model):
def __str__(self):
    """Human-readable label: the person, then the service in parentheses."""
    return "{} ({})".format(self.person, self.service)
async def send(self, text, attachments=None):
    """
    Send this contact a text message over its service's transport.

    Args:
        text: Message body to deliver.
        attachments: Optional list of attachments. Defaults to None
            (not ``[]``) to avoid the shared mutable-default pitfall;
            an empty list is passed downstream when omitted.

    Returns:
        Whatever ``transport.send_message_raw`` returns for this
        service — presumably a send timestamp; confirm against the
        transport client.
    """
    # Dispatch on self.service is delegated to the transport layer
    # rather than branched per-service here; rejecting unsupported
    # services is the transport layer's responsibility.
    return await transport.send_message_raw(
        self.service,
        self.identifier,
        text=text,
        attachments=attachments or [],
    )
class ChatSession(models.Model):
@@ -214,6 +210,34 @@ class Message(models.Model):
text = models.TextField(blank=True, null=True)
custom_author = models.CharField(max_length=255, blank=True, null=True)
delivered_ts = models.BigIntegerField(
null=True,
blank=True,
help_text="Delivery timestamp (unix ms) when known.",
)
read_ts = models.BigIntegerField(
null=True,
blank=True,
help_text="Read timestamp (unix ms) when known.",
)
read_source_service = models.CharField(
max_length=255,
choices=SERVICE_CHOICES,
null=True,
blank=True,
help_text="Service that reported the read receipt.",
)
read_by_identifier = models.CharField(
max_length=255,
blank=True,
null=True,
help_text="Identifier that read this message (service-native value).",
)
receipt_payload = models.JSONField(
default=dict,
blank=True,
help_text="Raw normalized delivery/read receipt metadata.",
)
class Meta:
ordering = ["ts"]
@@ -471,14 +495,130 @@ class WorkspaceConversation(models.Model):
return self.title or f"{self.platform_type}:{self.id}"
class WorkspaceMetricSnapshot(models.Model):
"""
Historical snapshots of workspace metrics for trend visualisation.

Each row freezes the stability/commitment scores (and the component
sub-scores they were derived from) computed for one
WorkspaceConversation at a point in time, so trends can be charted
without recomputing past windows.
"""
# Owning conversation; snapshots are removed with it (CASCADE).
conversation = models.ForeignKey(
WorkspaceConversation,
on_delete=models.CASCADE,
related_name="metric_snapshots",
help_text="Workspace conversation this metric snapshot belongs to.",
)
# When the snapshot row was written; indexed for time-range queries.
computed_at = models.DateTimeField(
auto_now_add=True,
db_index=True,
help_text="When this snapshot was persisted.",
)
# Unix-ms timestamp of the newest message that fed the computation.
source_event_ts = models.BigIntegerField(
null=True,
blank=True,
help_text="Latest message timestamp used during this metric computation.",
)
# --- stability metrics -------------------------------------------------
stability_state = models.CharField(
max_length=32,
choices=WorkspaceConversation.StabilityState.choices,
default=WorkspaceConversation.StabilityState.CALIBRATING,
help_text="Stability state at computation time.",
)
stability_score = models.FloatField(
null=True,
blank=True,
help_text="Stability score (0-100).",
)
stability_confidence = models.FloatField(
default=0.0,
help_text="Confidence in stability score (0.0-1.0).",
)
# Size of the sampled window the stability numbers were derived from.
stability_sample_messages = models.PositiveIntegerField(
default=0,
help_text="How many messages were in the sampled window.",
)
stability_sample_days = models.PositiveIntegerField(
default=0,
help_text="How many days were in the sampled window.",
)
# --- commitment metrics (directional) ----------------------------------
commitment_inbound_score = models.FloatField(
null=True,
blank=True,
help_text="Commitment estimate counterpart -> user (0-100).",
)
commitment_outbound_score = models.FloatField(
null=True,
blank=True,
help_text="Commitment estimate user -> counterpart (0-100).",
)
commitment_confidence = models.FloatField(
default=0.0,
help_text="Confidence in commitment scores (0.0-1.0).",
)
# --- raw activity counts for the sampled window ------------------------
inbound_messages = models.PositiveIntegerField(
default=0,
help_text="Inbound message count in the sampled window.",
)
outbound_messages = models.PositiveIntegerField(
default=0,
help_text="Outbound message count in the sampled window.",
)
# --- stability component sub-scores ------------------------------------
reciprocity_score = models.FloatField(
null=True,
blank=True,
help_text="Balance component used for stability.",
)
continuity_score = models.FloatField(
null=True,
blank=True,
help_text="Continuity component used for stability.",
)
response_score = models.FloatField(
null=True,
blank=True,
help_text="Response-time component used for stability.",
)
volatility_score = models.FloatField(
null=True,
blank=True,
help_text="Volatility component used for stability.",
)
# --- commitment component sub-scores -----------------------------------
inbound_response_score = models.FloatField(
null=True,
blank=True,
help_text="Inbound response-lag score used for commitment.",
)
outbound_response_score = models.FloatField(
null=True,
blank=True,
help_text="Outbound response-lag score used for commitment.",
)
balance_inbound_score = models.FloatField(
null=True,
blank=True,
help_text="Inbound balance score used for commitment.",
)
balance_outbound_score = models.FloatField(
null=True,
blank=True,
help_text="Outbound balance score used for commitment.",
)
class Meta:
# Newest snapshot first; the composite index serves
# per-conversation, time-ordered queries.
ordering = ("-computed_at",)
indexes = [
models.Index(fields=["conversation", "computed_at"]),
]
def __str__(self):
return f"Metrics {self.conversation_id} @ {self.computed_at.isoformat()}"
class MessageEvent(models.Model):
"""
Normalized message event used by workspace timeline and AI selection windows.
"""
SOURCE_SYSTEM_CHOICES = (
("signal", "Signal"),
("xmpp", "XMPP"),
*SERVICE_CHOICES,
("workspace", "Workspace"),
("ai", "AI"),
)
@@ -499,7 +639,10 @@ class MessageEvent(models.Model):
on_delete=models.CASCADE,
related_name="workspace_message_events",
default=get_default_workspace_user_pk,
help_text="Owner of this message event row (required for restricted CRUD filtering).",
help_text=(
"Owner of this message event row "
"(required for restricted CRUD filtering)."
),
)
conversation = models.ForeignKey(
WorkspaceConversation,
@@ -679,7 +822,9 @@ class AIResult(models.Model):
on_delete=models.CASCADE,
related_name="workspace_ai_results",
default=get_default_workspace_user_pk,
help_text="Owner of this AI result row (required for restricted CRUD filtering).",
help_text=(
"Owner of this AI result row " "(required for restricted CRUD filtering)."
),
)
ai_request = models.OneToOneField(
AIRequest,
@@ -702,7 +847,8 @@ class AIResult(models.Model):
blank=True,
help_text=(
"Structured positive/neutral/risk signals inferred for this run. "
"Example item: {'label':'repair_attempt','valence':'positive','message_event_ids':[...]}."
"Example item: {'label':'repair_attempt','valence':'positive',"
"'message_event_ids':[...]}."
),
)
memory_proposals = models.JSONField(
@@ -1089,7 +1235,8 @@ class PatternMitigationCorrection(models.Model):
default="",
help_text=(
"Joint clarification text intended to reduce interpretation drift. "
"Example: 'When you say \"you ignore me\", I hear fear of disconnection, not blame.'"
'Example: \'When you say "you ignore me", I hear fear of '
"disconnection, not blame.'"
),
)
source_phrase = models.TextField(
@@ -1097,7 +1244,8 @@ class PatternMitigationCorrection(models.Model):
default="",
help_text=(
"Situation/message fragment this correction responds to. "
"Example: 'she says: \"you never listen\"' or 'you say: \"you are dismissing me\"'."
"Example: 'she says: \"you never listen\"' or "
"'you say: \"you are dismissing me\"'."
),
)
perspective = models.CharField(
@@ -1106,14 +1254,18 @@ class PatternMitigationCorrection(models.Model):
default="third_person",
help_text=(
"Narrative perspective used when framing this correction. "
"Examples: third person ('she says'), second person ('you say'), first person ('I say')."
"Examples: third person ('she says'), second person ('you say'), "
"first person ('I say')."
),
)
share_target = models.CharField(
max_length=16,
choices=SHARE_TARGET_CHOICES,
default="both",
help_text="Who this insight is intended to be shared with. Example: self, other, or both.",
help_text=(
"Who this insight is intended to be shared with. "
"Example: self, other, or both."
),
)
language_style = models.CharField(
max_length=16,
@@ -1121,7 +1273,8 @@ class PatternMitigationCorrection(models.Model):
default="adapted",
help_text=(
"Whether to keep wording identical or adapt it per recipient. "
"Example: same text for both parties, or softened/adapted wording for recipient."
"Example: same text for both parties, or softened/adapted wording "
"for recipient."
),
)
enabled = models.BooleanField(