Continue AI features and improve protocol support
@@ -1,6 +1,6 @@
-from openai import AsyncOpenAI, OpenAI
+from openai import AsyncOpenAI
 
-from core.models import AI, ChatSession, Manipulation, Message, Person
+from core.models import AI
 
 
 async def run_prompt(
@@ -1,14 +1,7 @@
-import asyncio
-import json
-import random
-
-from asgiref.sync import sync_to_async
-from django.utils import timezone
 from openai import AsyncOpenAI
 
 from core.lib.prompts import bases
-from core.models import AI, ChatSession, Manipulation, Message, Person
-from core.util import logs
+from core.models import AI, Manipulation, Person
 
 
 def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str):
@@ -62,7 +55,7 @@ async def run_context_prompt(
 ):
     cast = {"api_key": ai.api_key}
     if ai.base_url is not None:
-        cast["api_key"] = ai.base_url
+        cast["base_url"] = ai.base_url
     client = AsyncOpenAI(**cast)
     response = await client.chat.completions.create(
         model=ai.model,
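The hunk above fixes a bug where a custom endpoint overwrote the API key instead of being passed as the base URL. A minimal sketch of the intended client construction, assuming an AI row with api_key, base_url, and model fields as shown above; everything else here is illustrative:

from openai import AsyncOpenAI

def build_client(ai):
    kwargs = {"api_key": ai.api_key}
    if ai.base_url is not None:
        # Before the fix, this assignment clobbered api_key.
        kwargs["base_url"] = ai.base_url
    return AsyncOpenAI(**kwargs)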
@@ -15,7 +15,11 @@ log = logs.get_logger("history")
 DEFAULT_PROMPT_HISTORY_MAX_MESSAGES = getattr(
     settings, "PROMPT_HISTORY_MAX_MESSAGES", 120
 )
-DEFAULT_PROMPT_HISTORY_MAX_CHARS = getattr(settings, "PROMPT_HISTORY_MAX_CHARS", 24000)
+DEFAULT_PROMPT_HISTORY_MAX_CHARS = getattr(
+    settings,
+    "PROMPT_HISTORY_MAX_CHARS",
+    24000,
+)
 DEFAULT_PROMPT_HISTORY_MIN_MESSAGES = getattr(
     settings, "PROMPT_HISTORY_MIN_MESSAGES", 24
 )
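These getattr lookups fall back to the defaults shown when a deployment does not set the corresponding Django settings. A hypothetical settings.py override, with made-up values; only the setting names come from the hunk above:

# settings.py (illustrative values)
PROMPT_HISTORY_MAX_MESSAGES = 200
PROMPT_HISTORY_MAX_CHARS = 16000
PROMPT_HISTORY_MIN_MESSAGES = 12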
@@ -40,7 +44,8 @@ def _build_recent_history(messages, max_chars):
     total_chars = 0
     # Recency-first packing, then reorder to chronological output later.
     for msg in reversed(messages):
-        line = f"[{msg.ts}] <{msg.custom_author if msg.custom_author else msg.session.identifier.person.name}> {msg.text}"
+        author = msg.custom_author or msg.session.identifier.person.name
+        line = f"[{msg.ts}] <{author}> {msg.text}"
         line_len = len(line) + 1
         # Keep at least one line even if it alone exceeds max_chars.
         if selected and (total_chars + line_len) > max_chars:
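The hunk only shows a slice of the loop, so here is a self-contained sketch of the recency-first packing it describes: walk newest-to-oldest, stop once the character budget is spent, then emit the kept lines in chronological order. The real helper works on Message rows; plain strings stand in for formatted lines here.

def pack_recent_lines(lines, max_chars):
    selected = []
    total_chars = 0
    for line in reversed(lines):
        line_len = len(line) + 1  # +1 for the joining newline
        # Keep at least one line even if it alone exceeds max_chars.
        if selected and (total_chars + line_len) > max_chars:
            break
        selected.append(line)
        total_chars += line_len
    selected.reverse()
    return "\n".join(selected)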
@@ -147,6 +152,7 @@ async def store_message(session, sender, text, ts, outgoing=False):
         sender_uuid=sender,
         text=text,
         ts=ts,
+        delivered_ts=ts,
         custom_author="USER" if outgoing else None,
     )
 
@@ -161,6 +167,7 @@ async def store_own_message(session, text, ts, manip=None, queue=False):
         "custom_author": "BOT",
         "text": text,
         "ts": ts,
+        "delivered_ts": ts,
     }
     if queue:
         msg_object = QueuedMessage
@@ -177,3 +184,62 @@ async def store_own_message(session, text, ts, manip=None, queue=False):
 
 async def delete_queryset(queryset):
     await sync_to_async(queryset.delete, thread_sensitive=True)()
+
+
+async def apply_read_receipts(
+    user,
+    identifier,
+    message_timestamps,
+    read_ts=None,
+    source_service="signal",
+    read_by_identifier="",
+    payload=None,
+):
+    """
+    Persist delivery/read metadata for one identifier's messages.
+    """
+    ts_values = []
+    for item in message_timestamps or []:
+        try:
+            ts_values.append(int(item))
+        except Exception:
+            continue
+    if not ts_values:
+        return 0
+
+    try:
+        read_at = int(read_ts) if read_ts else None
+    except Exception:
+        read_at = None
+
+    rows = await sync_to_async(list)(
+        Message.objects.filter(
+            user=user,
+            session__identifier=identifier,
+            ts__in=ts_values,
+        ).select_related("session")
+    )
+    updated = 0
+    for message in rows:
+        dirty = []
+        if message.delivered_ts is None:
+            message.delivered_ts = read_at or message.ts
+            dirty.append("delivered_ts")
+        if read_at and (message.read_ts is None or read_at > message.read_ts):
+            message.read_ts = read_at
+            dirty.append("read_ts")
+        if source_service and message.read_source_service != source_service:
+            message.read_source_service = source_service
+            dirty.append("read_source_service")
+        if read_by_identifier and message.read_by_identifier != read_by_identifier:
+            message.read_by_identifier = read_by_identifier
+            dirty.append("read_by_identifier")
+        if payload:
+            receipt_data = dict(message.receipt_payload or {})
+            receipt_data[str(source_service)] = payload
+            message.receipt_payload = receipt_data
+            dirty.append("receipt_payload")
+        if dirty:
+            await sync_to_async(message.save)(update_fields=dirty)
+            updated += 1
+    return updated
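A hypothetical caller for the new helper: a Signal receipt handler passing an envelope's timestamps through to apply_read_receipts. The handler name and the envelope payload shape are illustrative, not part of this diff.

async def on_read_receipt(user, identifier, envelope):
    # Forward the receipt envelope so delivery/read state and the raw payload
    # are recorded against the matching Message rows.
    return await apply_read_receipts(
        user,
        identifier,
        message_timestamps=envelope.get("timestamps", []),
        read_ts=envelope.get("when"),
        source_service="signal",
        read_by_identifier=envelope.get("sourceNumber", ""),
        payload=envelope,
    )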
core/messaging/media_bridge.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+import base64
+import hashlib
+import time
+
+from django.core.cache import cache
+
+
+DEFAULT_BLOB_TTL_SECONDS = 60 * 20
+
+
+def _blob_cache_key(service, digest):
+    return f"gia:media:{service}:{digest}"
+
+
+def put_blob(service, content, filename, content_type, ttl=DEFAULT_BLOB_TTL_SECONDS):
+    if not content:
+        return None
+    digest = hashlib.sha1(content).hexdigest()
+    key = _blob_cache_key(service, digest)
+    cache.set(
+        key,
+        {
+            "filename": filename or "attachment.bin",
+            "content_type": content_type or "application/octet-stream",
+            "content_b64": base64.b64encode(content).decode("utf-8"),
+            "size": len(content),
+            "stored_at": int(time.time()),
+        },
+        timeout=ttl,
+    )
+    return key
+
+
+def get_blob(key):
+    row = cache.get(key)
+    if not row:
+        return None
+    try:
+        content = base64.b64decode(row.get("content_b64", ""))
+    except Exception:
+        return None
+    return {
+        "content": content,
+        "filename": row.get("filename") or "attachment.bin",
+        "content_type": row.get("content_type") or "application/octet-stream",
+        "size": row.get("size") or len(content),
+        "stored_at": row.get("stored_at"),
+    }
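An illustrative round trip through the cache-backed blob bridge above; the bytes, filename, and service name are made up:

from core.messaging.media_bridge import get_blob, put_blob

key = put_blob("signal", b"\x89PNG fake bytes", "photo.png", "image/png")
if key:
    blob = get_blob(key)
    # blob["content"] returns the original bytes until the TTL (20 minutes)
    # expires; after that get_blob() returns None.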
@@ -1,5 +1,4 @@
 import asyncio
 import random
 
-
 async def natural_send_message(
@@ -11,7 +10,8 @@ async def natural_send_message(
     Args:
         chat_session: The active chat session.
        ts: Timestamp of the message.
-        c: The context or object with `.send()`, `.start_typing()`, and `.stop_typing()` methods.
+        c: The context or object with `.send()`, `.start_typing()`,
+            and `.stop_typing()` methods.
         text: A string containing multiple messages separated by double newlines (`\n\n`).
 
     Behavior:
@@ -34,7 +34,7 @@ async def natural_send_message(
         length_factor = len(message) / 25
-        # ~50 chars ≈ +1s processing
-        natural_delay = min(base_delay + length_factor, 10)  # Cap at 5s max
+        # ~25 chars ≈ +1s processing
+        natural_delay = min(base_delay + length_factor, 10)  # Cap at 10s max
 
         # Decide when to start thinking *before* typing
         if not skip_thinking:
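The corrected comments now match the pacing math: roughly one extra second per 25 characters on top of a base delay, capped at 10 seconds. A minimal sketch of that calculation; the base_delay default here is an assumption, not taken from the diff:

def typing_delay(message: str, base_delay: float = 1.5) -> float:
    # ~25 chars ≈ +1s processing, capped at 10s max.
    length_factor = len(message) / 25
    return min(base_delay + length_factor, 10)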
@@ -1,12 +1,6 @@
-import asyncio
-import json
-import random
-
-from asgiref.sync import sync_to_async
 from django.utils import timezone
 
-from core.lib.prompts import bases
-from core.models import AI, ChatSession, Manipulation, Message, Person
+from core.models import Manipulation, Person
 from core.util import logs
 
 log = logs.get_logger("replies")
@@ -2,13 +2,26 @@ from asgiref.sync import sync_to_async
 from django.utils import timezone
 
 
-def messages_to_string(messages: list):
+def messages_to_string(messages: list, author_rewrites: dict | None = None):
     """
     Converts message objects to a formatted string, showing custom_author if set.
     """
+    author_rewrites = {
+        str(key).strip().upper(): str(value)
+        for key, value in (author_rewrites or {}).items()
+    }
+
+    def _author_label(message):
+        author = (
+            message.custom_author
+            if message.custom_author
+            else message.session.identifier.person.name
+        )
+        mapped = author_rewrites.get(str(author).strip().upper())
+        return mapped if mapped else author
+
     message_texts = [
-        f"[{msg.ts}] <{msg.custom_author if msg.custom_author else msg.session.identifier.person.name}> {msg.text}"
-        for msg in messages
+        f"[{msg.ts}] <{_author_label(msg)}> {msg.text}" for msg in messages
     ]
     return "\n".join(message_texts)
 
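An illustrative call of the extended helper, using stand-in rows shaped like the attributes it reads (ts, text, custom_author, session.identifier.person.name); the names and mapping are made up:

from types import SimpleNamespace

person = SimpleNamespace(name="Charlie")
session = SimpleNamespace(identifier=SimpleNamespace(person=person))
messages = [
    SimpleNamespace(ts=1700000000, text="hi", custom_author="BOT", session=session),
    SimpleNamespace(ts=1700000001, text="hello", custom_author=None, session=session),
]
print(messages_to_string(messages, author_rewrites={"BOT": "Alice"}))
# [1700000000] <Alice> hi
# [1700000001] <Charlie> hello

The rewrite map is keyed case-insensitively, so stored labels like "BOT" can be swapped for display names when rendering history for a prompt.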