Refactor and implement queueing messages
core/lib/bot.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from signalbot import SignalBot
import aiohttp

from core.util import logs

log = logs.get_logger("signalbot")


class NewSignalBot(SignalBot):
    def __init__(self, config):
        super().__init__(config)
        self.bot_uuid = None  # Populated later by initialize_bot()

    async def get_own_uuid(self) -> str | None:
        """Fetch the bot's UUID by matching its number in the contacts endpoint."""
        async with aiohttp.ClientSession() as session:
            uri_contacts = f"http://{self._signal.signal_service}/v1/contacts/{self._signal.phone_number}"
            try:
                resp = await session.get(uri_contacts)
                if resp.status == 200:
                    contacts_data = await resp.json()
                    if isinstance(contacts_data, list):
                        for contact in contacts_data:
                            if contact.get("number") == self._signal.phone_number:
                                return contact.get("uuid")
            except Exception as e:
                log.error(f"Failed to get UUID from contacts: {e}")
        return None

    async def initialize_bot(self):
        """Fetch the bot's UUID and store it in self.bot_uuid."""
        try:
            self.bot_uuid = await self.get_own_uuid()
            if self.bot_uuid:
                log.info(f"Own UUID: {self.bot_uuid}")
            else:
                log.warning("Unable to fetch bot UUID.")
        except Exception as e:
            log.error(f"Failed to initialize bot UUID: {e}")

    def start(self):
        """Start the bot without blocking the event loop."""
        self._event_loop.create_task(self.initialize_bot())  # Fetch UUID first
        self._event_loop.create_task(self._detect_groups())  # Sync groups
        self._event_loop.create_task(self._produce_consume_messages())  # Process messages

        self.scheduler.start()  # Start async job scheduler
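For context, a minimal usage sketch. The config keys follow the signalbot library's convention; the host and number below are placeholders, not part of this commit, and the surrounding app is assumed to run the event loop:

# Hypothetical wiring -- values are placeholders.
from core.lib.bot import NewSignalBot

config = {
    "signal_service": "127.0.0.1:8080",  # signal-cli REST API host:port (assumed)
    "phone_number": "+10000000000",      # the bot's own Signal number (placeholder)
}

bot = NewSignalBot(config)
bot.start()  # schedules initialize_bot(), group sync, and the message loop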
core/lib/deferred.py (new file, 54 lines)
@@ -0,0 +1,54 @@
# Deferred processing library
from asgiref.sync import sync_to_async
from pydantic import BaseModel, ValidationError
from typing import Annotated
from uuid import UUID

from core.clients import signal
from core.models import QueuedMessage
from core.util import logs

log = logs.get_logger("deferred")


class DeferredRequest(BaseModel):
    type: str
    method: str
    user_id: int
    message_id: Annotated[str, UUID]


async def process_deferred(data: dict):
    try:
        validated_data = DeferredRequest(**data)
        log.info(f"Validated data: {validated_data}")
    except ValidationError as e:
        log.warning(f"Validation error: {e}")
        return

    method = validated_data.method
    user_id = validated_data.user_id
    message_id = validated_data.message_id

    try:
        message = await sync_to_async(QueuedMessage.objects.get)(
            user_id=user_id,
            id=message_id,
        )
        log.info(f"Got {message}")
    except QueuedMessage.DoesNotExist:
        log.info(f"No queued message found with id {message_id}")
        return

    if message.session.identifier.service == "signal":
        log.info("Message service is signal")
        if method == "accept_message":
            await signal.send_message(message)
        else:
            log.warning(f"Method not yet supported: {method}")
            return
    else:
        log.warning(f"Protocol not supported: {message.session.identifier.service}")
        return
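For illustration, a payload that would pass DeferredRequest validation; all values here are made up. Note that `Annotated[str, UUID]` leaves the field a plain str (the UUID annotation is metadata only), so the format of message_id is not actually enforced by pydantic here:

# Hypothetical example -- IDs are placeholders.
await process_deferred({
    "type": "moderation",        # free-form tag; only carried through validation
    "method": "accept_message",  # the only method handled so far
    "user_id": 1,
    "message_id": "3f2c9a6e-8d41-4e0b-9f1a-0c5d2b7e4a19",
})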
core/lib/prompts/functions.py
@@ -4,10 +4,9 @@ from asgiref.sync import sync_to_async
from core.models import Message, ChatSession, AI, Person, Manipulation
from core.util import logs
import json
import asyncio
from django.utils import timezone
import random

from core.messaging import ai
from core.messaging.utils import messages_to_string

SUMMARIZE_WHEN_EXCEEDING = 10
SUMMARIZE_BY = 5
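Concretely, with these thresholds: a session is left alone until it holds 10 messages; the oldest 5 are then collapsed into a single summary message, and once 3 summary messages accumulate they are consolidated into the session's stored summary (see truncate_and_summarize below).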
@@ -16,96 +15,13 @@ MAX_SUMMARIES = 3  # Keep last 3 summaries

log = logs.get_logger("prompts")

def gen_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str):
    """
    Generate a structured prompt using the attributes of the provided Person and Manipulation models.
    """

    now = timezone.now()
    persona = manip.persona

    system_message = (
        "You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n"

        "### Persona Profile ###\n"
        f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n"
        f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n"
        f"- **Core Values:** {persona.core_values}\n"
        f"- **Communication Style:** {persona.communication_style}\n"
        f"- **Flirting Style:** {persona.flirting_style}\n"
        f"- **Likes:** {persona.likes} | **Dislikes:** {persona.dislikes}\n"
        f"- **Response Tactics:** {persona.response_tactics}\n"
        f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n"
        f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n"

        "### Contact Information ###\n"
        f"- **Summary:** {person.summary or 'N/A'}\n"
        f"- **Profile:** {person.profile or 'N/A'}\n"
        f"- **Revealed Details:** {person.revealed or 'N/A'}\n"
        f"- **Sentiment Score:** {person.sentiment}\n"
        f"- **Timezone:** {person.timezone or 'N/A'}\n"
        f"- **Last Interaction:** {person.last_interaction or 'Never'}\n"
        f"- **Current Date/Time:** {now}\n\n"

        "### Conversation Context ###\n"
        f"{chat_history if chat_history else 'No prior chat history.'}\n\n"

        "### Response Guidelines ###\n"
        "- **Engagement**: Keep responses engaging, with a balance of wit, depth, and confidence.\n"
        "- **Flirting**: Be direct, playful, and, when appropriate, subtly provocative—without hesitation.\n"
        "- **Pauses**: Use double newlines (`\\n\\n`) to pause where it enhances realism.\n"
        "- **Flow Awareness**: Maintain continuity, avoid redundancy, and adjust response length based on interaction.\n"
    )

    user_message = f"[{msg['timestamp']}] <{person.name}> {msg['text']}"

    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_message},
    ]
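For reference, gen_prompt returns a standard OpenAI-style chat message list, roughly shaped like this (name and timestamp invented for illustration):

# Illustrative output shape only -- contents abbreviated, values are placeholders.
[
    {"role": "system", "content": "You are my digital persona, ... ### Persona Profile ### ..."},
    {"role": "user", "content": "[1712345678] <Alice> hey, how was your day?"},
]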
async def run_context_prompt(
    c,
    prompt: list[dict],
    ai: AI,
):
    cast = {"api_key": ai.api_key}
    if ai.base_url is not None:
        cast["base_url"] = ai.base_url
    client = AsyncOpenAI(**cast)
    await c.start_typing()
    response = await client.chat.completions.create(
        model=ai.model,
        messages=prompt,
    )
    await c.stop_typing()

    content = response.choices[0].message.content

    return content

async def run_prompt(
    prompt: list[dict],
    ai: AI,
):
    cast = {"api_key": ai.api_key}
    if ai.base_url is not None:
        cast["base_url"] = ai.base_url
    client = AsyncOpenAI(**cast)
    response = await client.chat.completions.create(
        model=ai.model,
        messages=prompt,
    )
    content = response.choices[0].message.content

    return content

async def delete_messages(queryset):
    await sync_to_async(queryset.delete, thread_sensitive=True)()
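A minimal sketch of how these helpers compose; the base_url branch lets the AI row target any OpenAI-compatible endpoint (the URL and model in the comment are placeholders, not from this commit):

# Hypothetical call chain -- msg, person, manip, and ai come from the app's models.
# e.g. ai.api_key="sk-...", ai.model="gpt-4o",
#      ai.base_url="http://localhost:11434/v1" (assumed local endpoint) or None
prompt = gen_prompt(msg, person, manip, chat_history="")
reply = await run_prompt(prompt, ai)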
async def truncate_and_summarize(
    chat_session: ChatSession,
-    ai: AI,
+    ai_obj: AI,
):
    """
    Summarizes messages in chunks to prevent unchecked growth.
@@ -123,7 +39,6 @@ async def truncate_and_summarize(
    )

    num_messages = len(messages)
    log.info(f"num_messages for {chat_session.id}: {num_messages}")

    if num_messages >= SUMMARIZE_WHEN_EXCEEDING:
        log.info(f"Summarizing {SUMMARIZE_BY} messages for session {chat_session.id}")
@@ -144,13 +59,12 @@ async def truncate_and_summarize(
        )

        # Delete old summaries if there are too many
        log.info(f"Summaries: {len(summary_messages)}")
        if len(summary_messages) >= MAX_SUMMARIES:
-            summary_text = await summarize_conversation(chat_session, summary_messages, ai, is_summary=True)
+            summary_text = await summarize_conversation(chat_session, summary_messages, ai_obj, is_summary=True)

            chat_session.summary = summary_text
            await sync_to_async(chat_session.save)()
-            log.info(f"Updated ChatSession summary with {len(summary_messages)} summarized summaries.")
+            log.info(f"Updated ChatSession summary with {len(summary_messages)} consolidated summaries.")

            num_to_delete = len(summary_messages) - MAX_SUMMARIES
            # await sync_to_async(
@@ -167,14 +81,13 @@ async def truncate_and_summarize(
            log.info(f"Deleted {num_to_delete} old summaries.")

        # 🔹 Summarize conversation chunk
-        summary_text = await summarize_conversation(chat_session, chunk_to_summarize, ai)
+        summary_text = await summarize_conversation(chat_session, chunk_to_summarize, ai_obj)

        # 🔹 Replace old messages with the summary
        # await sync_to_async(
        #     Message.objects.filter(session=chat_session, user=user, id__in=[msg.id for msg in chunk_to_summarize])
        #     .delete()
        # )()
        log.info("About to delete messages")
        await delete_messages(Message.objects.filter(session=chat_session, user=user, id__in=[msg.id for msg in chunk_to_summarize]))
        log.info(f"Deleted {len(chunk_to_summarize)} messages, replacing with summary.")
@@ -191,23 +104,13 @@ async def truncate_and_summarize(
        # chat_session.summary = summary_text
        # await sync_to_async(chat_session.save)()

    log.info("✅ Summarization cycle complete.")

def messages_to_string(messages: list):
    """
    Converts message objects to a formatted string, showing custom_author if set.
    """
    message_texts = [
        f"[{msg.ts}] <{msg.custom_author if msg.custom_author else msg.session.identifier.person.name}> {msg.text}"
        for msg in messages
    ]
    return "\n".join(message_texts)
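For example, two stored messages would render as lines like the following (author name and timestamps invented; "BOT" matches the custom_author set in natural_send_message below):

[1712345678] <Alice> hey, you around?
[1712345702] <BOT> yeah, just got back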

async def summarize_conversation(
    chat_session: ChatSession,
    messages: list[Message],
-    ai,
+    ai_obj,
    is_summary=False,
):
    """
@@ -236,62 +139,9 @@ async def summarize_conversation(
    ]

    # Generate AI-based summary
-    summary_text = await run_prompt(summary_prompt, ai)
+    summary_text = await ai.run_prompt(summary_prompt, ai_obj)
    # log.info(f"Generated Summary: {summary_text}")

    return f"Summary: {summary_text}"


async def natural_send_message(chat_session, ts, c, text):
    """
    Parses and sends messages with natural delays based on message length.

    Args:
        chat_session: The active chat session.
        ts: Timestamp of the message.
        c: The context or object with `.send()`, `.start_typing()`, and `.stop_typing()` methods.
        text: A string containing multiple messages separated by double newlines (`\n\n`).

    Behavior:
        - Short messages are sent quickly with minimal delay.
        - Longer messages include a "thinking" pause before typing.
        - Typing indicator (`c.start_typing()` / `c.stop_typing()`) is used dynamically.
    """

    await sync_to_async(Message.objects.create)(
        user=chat_session.user,
        session=chat_session,
        custom_author="BOT",
        text=text,
        ts=ts + 1,
    )

    parts = text.split("\n\n")  # Split into separate messages
    log.info(f"Processing messages: {parts}")

    for message in parts:
        message = message.strip()
        if not message:
            continue

        # Compute natural "thinking" delay based on message length
        base_delay = 0.8  # Minimum delay
        length_factor = len(message) / 25
-        # ~50 chars ≈ +1s processing
+        # ~25 chars ≈ +1s processing
        natural_delay = min(base_delay + length_factor, 10)  # Cap at 10s max

        # Decide when to start thinking *before* typing
        if natural_delay > 3.5:  # Only delay if response is long
            await asyncio.sleep(natural_delay - 3.5)  # "Thinking" pause before typing

        # Start typing
        await c.start_typing()
        await asyncio.sleep(min(natural_delay, 3.5))  # Finish the remaining delay while "typing"
        await c.stop_typing()

        # Send the message
        await c.send(message)

        # Optional: Small buffer between messages to prevent rapid-fire responses
        await asyncio.sleep(0.5)
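As a worked example of the pacing math: a 100-character part gives natural_delay = min(0.8 + 100/25, 10) = 4.8s, so the loop "thinks" for 1.3s, shows the typing indicator for 3.5s, then sends; a 20-character part gives 1.6s of typing with no thinking pause.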