Improve realistic message delays
@@ -4,70 +4,65 @@ from asgiref.sync import sync_to_async
 from core.models import Message, ChatSession, AI, Person, Manipulation
 from core.util import logs
 import json
+import asyncio
+from django.utils import timezone
+import random


 SUMMARIZE_WHEN_EXCEEDING = 10
 SUMMARIZE_BY = 5

 MAX_SUMMARIES = 3  # Keep last 3 summaries

 log = logs.get_logger("prompts")


-def gen_prompt(
-    msg: str,
-    person: Person,
-    manip: Manipulation,
-    chat_history: str,
-):
+def gen_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str):
     """
     Generate a structured prompt using the attributes of the provided Person and Manipulation models.
     """
-    #log.info(f"CHAT HISTORY {json.dumps(chat_history, indent=2)}")
-    prompt = []

-    # System message defining AI behavior based on persona
+    now = timezone.now()
     persona = manip.persona
-    prompt.append({
-        "role": "system",
-        "content": (
-            "You are impersonating me. This person is messaging me. Respond as me, ensuring your replies align with my personality and preferences. "
-            f"Your MBTI is {persona.mbti} with an identity balance of {persona.mbti_identity}. "
-            f"You prefer a {persona.tone} conversational tone. Your humor style is {persona.humor_style}. "
-            f"Your core values include: {persona.core_values}. "
-            f"Your communication style is: {persona.communication_style}. "
-            f"Your flirting style is: {persona.flirting_style}. "
-            f"You enjoy discussing: {persona.likes}, but dislike: {persona.dislikes}. "
-            f"Your response tactics include: {persona.response_tactics}. "
-            f"Your persuasion tactics include: {persona.persuasion_tactics}. "
-            f"Your boundaries: {persona.boundaries}. "
-            f"Your adaptability is {persona.adaptability}%. "

-            "### Contact Information ### "
-            f"Their summary: {person.summary}. "
-            f"Their profile: {person.profile}. "
-            f"Their revealed details: {person.revealed}. "
-            f"Their sentiment score: {person.sentiment}. "
-            f"Their timezone: {person.timezone}. "
-            f"Last interaction was at: {person.last_interaction}. "
+    system_message = (
+        "You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n"

-            "### Conversation Context ### "
-            f"Chat history: {chat_history} "
+        "### Persona Profile ###\n"
+        f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n"
+        f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n"
+        f"- **Core Values:** {persona.core_values}\n"
+        f"- **Communication Style:** {persona.communication_style}\n"
+        f"- **Flirting Style:** {persona.flirting_style}\n"
+        f"- **Likes:** {persona.likes} | **Dislikes:** {persona.dislikes}\n"
+        f"- **Response Tactics:** {persona.response_tactics}\n"
+        f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n"
+        f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n"

-            "### Natural Message Streaming System ### "
-            "You can send messages sequentially in a natural way. "
-            "For responses greater than 1 sentence, separate them with a newline. "
-            "Then, place a number to indicate the amount of time to wait before sending the next message. "
-            "After another newline, place any additional messages. "
-        )
-    })
+        "### Contact Information ###\n"
+        f"- **Summary:** {person.summary or 'N/A'}\n"
+        f"- **Profile:** {person.profile or 'N/A'}\n"
+        f"- **Revealed Details:** {person.revealed or 'N/A'}\n"
+        f"- **Sentiment Score:** {person.sentiment}\n"
+        f"- **Timezone:** {person.timezone or 'N/A'}\n"
+        f"- **Last Interaction:** {person.last_interaction or 'Never'}\n"
+        f"- **Current Date/Time:** {now}\n\n"

+        "### Conversation Context ###\n"
+        f"{chat_history if chat_history else 'No prior chat history.'}\n\n"

-    # User message
-    prompt.append({
-        "role": "user",
-        "content": f"{msg}"
-    })
+        "### Response Guidelines ###\n"
+        "- **Engagement**: Keep responses engaging, with a balance of wit, depth, and confidence.\n"
+        "- **Flirting**: Be direct, playful, and, when appropriate, subtly provocative—without hesitation.\n"
+        "- **Pauses**: Use double newlines (`\\n\\n`) to pause where it enhances realism.\n"
+        "- **Flow Awareness**: Maintain continuity, avoid redundancy, and adjust response length based on interaction.\n"
+    )

-    return prompt
+    user_message = f"[{msg['timestamp']}] <{person.name}> {msg['text']}"

+    return [
+        {"role": "system", "content": system_message},
+        {"role": "user", "content": user_message},
+    ]


 async def run_context_prompt(
     c,
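
For orientation, the reworked gen_prompt now takes msg as a dict rather than a string and returns a two-item, chat-completion-style message list instead of building one via prompt.append. A minimal sketch of the expected input and output shapes (the names and values below are illustrative placeholders, not taken from the source):

    # Illustrative only: gen_prompt(msg, person, manip, chat_history) consumes a
    # dict with "timestamp" and "text" keys...
    msg = {"timestamp": "2025-02-08 01:05", "text": "Hey, how was your day?"}

    # ...and returns a list shaped like this (system content abridged):
    prompt = [
        {"role": "system", "content": "You are my digital persona, ...\n\n### Persona Profile ###\n..."},
        {"role": "user", "content": "[2025-02-08 01:05] <Alice> Hey, how was your day?"},
    ]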
@@ -247,5 +242,56 @@ async def summarize_conversation(
     return f"Summary: {summary_text}"


-async def natural_send_message(c, text):
-    await c.send(text)
+async def natural_send_message(chat_session, ts, c, text):
+    """
+    Parses and sends messages with natural delays based on message length.
+
+    Args:
+        chat_session: The active chat session.
+        ts: Timestamp of the message.
+        c: The context or object with `.send()`, `.start_typing()`, and `.stop_typing()` methods.
+        text: A string containing multiple messages separated by double newlines (`\n\n`).
+
+    Behavior:
+        - Short messages are sent quickly with minimal delay.
+        - Longer messages include a "thinking" pause before typing.
+        - Typing indicator (`c.start_typing() / c.stop_typing()`) is used dynamically.
+    """
+    await sync_to_async(Message.objects.create)(
+        user=chat_session.user,
+        session=chat_session,
+        custom_author="BOT",
+        text=text,
+        ts=ts + 1,
+    )
+
+    parts = text.split("\n\n")  # Split into separate messages
+    log.info(f"Processing messages: {parts}")
+
+    for message in parts:
+        message = message.strip()
+        if not message:
+            continue
+
+        # Compute natural "thinking" delay based on message length
+        base_delay = 0.8  # Minimum delay
+        length_factor = len(message) / 25
+        # ~25 chars ≈ +1s processing
+        natural_delay = min(base_delay + length_factor, 10)  # Cap at 10s max
+
+        # Decide when to start thinking *before* typing
+        if natural_delay > 3.5:  # Only delay if response is long
+            await asyncio.sleep(natural_delay - 3.5)  # "Thinking" pause before typing
+
+        # Start typing
+        await c.start_typing()
+        await asyncio.sleep(natural_delay)  # Type for the full computed delay
+        await c.stop_typing()
+
+        # Send the message
+        await c.send(message)
+
+        # Optional: Small buffer between messages to prevent rapid-fire responses
+        await asyncio.sleep(0.5)
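
To make the timing model concrete, below is a standalone sketch of the delay heuristic introduced above, with the Django persistence and logging stripped out so it can run on its own. DummyContext and demo_send are illustrative stand-ins, not part of the codebase; only the arithmetic mirrors the commit.

    import asyncio

    class DummyContext:
        """Stand-in for the real context object: prints instead of messaging."""
        async def send(self, text):
            print(f"SEND: {text}")

        async def start_typing(self):
            print("(typing...)")

        async def stop_typing(self):
            print("(stopped typing)")

    async def demo_send(c, text):
        # Same heuristic as natural_send_message: split on blank lines, then wait
        # roughly 0.8s plus 1s per 25 characters, capped at 10s per part.
        for message in (part.strip() for part in text.split("\n\n")):
            if not message:
                continue
            natural_delay = min(0.8 + len(message) / 25, 10)
            if natural_delay > 3.5:
                await asyncio.sleep(natural_delay - 3.5)  # "thinking" pause before typing
            await c.start_typing()
            await asyncio.sleep(natural_delay)
            await c.stop_typing()
            await c.send(message)
            await asyncio.sleep(0.5)  # small buffer between parts

    asyncio.run(demo_send(DummyContext(), "Hey!\n\nLong story, but the day turned out pretty great."))

With these numbers, "Hey!" is typed out in roughly a second, while the longer second part takes just under three seconds, which is the kind of pacing the commit title is aiming for.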
@@ -156,13 +156,13 @@ class HandleMessage(Command):
     custom_author="USER" if is_from_bot else None
 )

-# Manage truncation & summarization
-await truncate_and_summarize(chat_session, manip.ai)

 # Use chat session summary for context
+log.info("Fetching stored messages")
 stored_messages = await sync_to_async(list)(
-    Message.objects.filter(session=chat_session).order_by("ts")
+    Message.objects.filter(session=chat_session, user=chat_session.user).order_by("ts")
 )
+log.info("Fetched stored messages")

 # recent_chat_history = "\n".join(
 #     f"[{msg.ts}] {msg.text}" for msg in reversed(stored_messages)
 # )
@@ -178,8 +178,10 @@ class HandleMessage(Command):
 now = timezone.now()
 chat_session.identifier.person.last_interaction = now
 chat_session.last_interaction = now
+log.info("Updating time")
 await sync_to_async(chat_session.identifier.person.save)()
-await sync_to_async(chat_session)()
+await sync_to_async(chat_session.save)()
+log.info("Updated time")
 reply = True  # ✅ Bot replies

 # 🔵 CASE 2: Incoming message (Someone else messages the bot)
@@ -188,7 +190,7 @@ class HandleMessage(Command):
 chat_session.identifier.person.last_interaction = now
 chat_session.last_interaction = now
 await sync_to_async(chat_session.identifier.person.save)()
-await sync_to_async(chat_session)()
+await sync_to_async(chat_session.save)()
 reply = True  # ✅ Bot replies

 # 🔴 CASE 3: Outgoing message (Bot messages someone else)
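
The repeated change from sync_to_async(chat_session)() to sync_to_async(chat_session.save)() fixes a real bug: sync_to_async must wrap a callable, and a model instance is not one, so the old form fails with a TypeError instead of saving. A minimal sketch of the working pattern (the helper name is illustrative):

    from asgiref.sync import sync_to_async

    async def touch_session(chat_session, now):
        # Wrap the bound save() method and await the wrapped callable.
        # Wrapping the instance itself would fail, since a ChatSession
        # object is not callable.
        chat_session.last_interaction = now
        await sync_to_async(chat_session.save)()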
@@ -203,17 +205,18 @@ class HandleMessage(Command):
 if reply:
     if manip.send_enabled:
         prompt = gen_prompt(msg, person_identifier.person, manip, chat_history)
+        log.info("Running context prompt")
         result = await run_context_prompt(c, prompt, manip.ai)
         # Store bot's AI response with a +1s timestamp
-        await sync_to_async(Message.objects.create)(
-            user=chat_session.user,
-            session=chat_session,
-            custom_author="BOT",
-            text=result,
-            ts=ts + 1,
-        )
-        await natural_send_message(c, result)
+        log.info("Storing generated message")
+        log.info("Stored generated message")
+        await natural_send_message(chat_session, ts, c, result)
+        log.info("Sent message")
         #await c.send(result)

+        # Manage truncation & summarization
+        await truncate_and_summarize(chat_session, manip.ai)
 # END FOR

 try:

core/migrations/0012_alter_chatsession_last_interaction.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Generated by Django 5.1.5 on 2025-02-08 01:05
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0011_message_user'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='chatsession',
+            name='last_interaction',
+            field=models.DateTimeField(blank=True, null=True),
+        ),
+    ]
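
For context, the AlterField above makes last_interaction optional on ChatSession. The corresponding model definition presumably ends up roughly like this (a sketch; core/models.py is not part of this diff and the omitted fields are assumptions):

    from django.db import models

    class ChatSession(models.Model):
        # ...other fields (user, identifier, summary, ...) omitted...
        last_interaction = models.DateTimeField(blank=True, null=True)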