Implement AI workspace and mitigation workflow

2026-02-15 04:27:28 +00:00
parent de2b9a9bbb
commit 2d3b8fdac6
64 changed files with 7669 additions and 769 deletions

View File

@@ -1,14 +1,15 @@
from openai import AsyncOpenAI, OpenAI
from core.models import AI, ChatSession, Manipulation, Message, Person
async def run_prompt(
    prompt: list[str],
    ai: AI,
):
    cast = {"api_key": ai.api_key}
    if ai.base_url is not None:
        cast["base_url"] = ai.base_url
    client = AsyncOpenAI(**cast)
    response = await client.chat.completions.create(
        model=ai.model,
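# A minimal sketch of the client-construction pattern used above
# (build_client_kwargs is a hypothetical helper, not part of this module;
# it exists only to show why base_url must not overwrite api_key):
#
#     def build_client_kwargs(api_key, base_url=None):
#         # Pass base_url only when configured; otherwise the client
#         # falls back to the default OpenAI endpoint.
#         kwargs = {"api_key": api_key}
#         if base_url is not None:
#             kwargs["base_url"] = base_url
#         return kwargs
#
#     client = AsyncOpenAI(**build_client_kwargs(ai.api_key, ai.base_url))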

View File

@@ -1,13 +1,16 @@
import json
import random

from asgiref.sync import sync_to_async
from django.utils import timezone
from openai import AsyncOpenAI

from core.lib.prompts import bases
from core.models import AI, ChatSession, Manipulation, Message, Person
from core.util import logs

def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history: str):
    """
    Generate a structured prompt using the attributes of the provided Person and Manipulation models.
@@ -18,7 +21,6 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history
    system_message = (
        "You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n"
        "### Persona Profile ###\n"
        f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n"
        f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n"
@@ -29,7 +31,6 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history
f"- **Response Tactics:** {persona.response_tactics}\n"
f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n"
f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n"
"### Contact Information ###\n"
f"- **Summary:** {person.summary or 'N/A'}\n"
f"- **Profile:** {person.profile or 'N/A'}\n"
@@ -38,10 +39,8 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history
f"- **Timezone:** {person.timezone or 'N/A'}\n"
f"- **Last Interaction:** {person.last_interaction or 'Never'}\n"
f"- **Current Date/Time:** {now}\n\n"
"### Conversation Context ###\n"
f"{chat_history if chat_history else 'No prior chat history.'}\n\n"
"### Response Guidelines ###\n"
"- **Engagement**: Keep responses engaging, with a balance of wit, depth, and confidence.\n"
"- **Flirting**: Be direct, playful, and, when appropriate, subtly provocative—without hesitation.\n"
@@ -56,10 +55,11 @@ def generate_prompt(msg: dict, person: Person, manip: Manipulation, chat_history
{"role": "user", "content": user_message},
]
async def run_context_prompt(
prompt: list[str],
ai: AI,
):
prompt: list[str],
ai: AI,
):
cast = {"api_key": ai.api_key}
if ai.base_url is not None:
cast["api_key"] = ai.base_url
@@ -70,4 +70,4 @@ async def run_context_prompt(
    )
    content = response.choices[0].message.content
    return content
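# A minimal usage sketch tying this file together (msg, person, manip,
# chat_history, and ai are assumed to exist; note run_context_prompt's
# list[str] annotation is looser than the list of dicts generate_prompt returns):
#
#     prompt = generate_prompt(msg, person, manip, chat_history)
#     content = await run_context_prompt(prompt, ai=ai)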

View File

@@ -1,19 +1,136 @@
from asgiref.sync import sync_to_async
from django.conf import settings

from core.messaging.utils import messages_to_string
from core.models import ChatSession, Message, QueuedMessage
from core.util import logs

log = logs.get_logger("history")
# Prompt-window controls:
# - Full message history is always persisted in the database.
# - Only the prompt input window is reduced.
# - Max values are hard safety rails; runtime chooses a smaller adaptive subset.
# - Min value prevents overly aggressive clipping on very long average messages.
DEFAULT_PROMPT_HISTORY_MAX_MESSAGES = getattr(
    settings, "PROMPT_HISTORY_MAX_MESSAGES", 120
)
DEFAULT_PROMPT_HISTORY_MAX_CHARS = getattr(settings, "PROMPT_HISTORY_MAX_CHARS", 24000)
DEFAULT_PROMPT_HISTORY_MIN_MESSAGES = getattr(
    settings, "PROMPT_HISTORY_MIN_MESSAGES", 24
)
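# Because these rails read from Django settings via getattr, deployments can
# tune them without code changes. A sketch of the corresponding settings.py
# entries (values illustrative):
#
#     PROMPT_HISTORY_MAX_MESSAGES = 80    # hard cap on messages in the window
#     PROMPT_HISTORY_MAX_CHARS = 16000    # hard cap on rendered characters
#     PROMPT_HISTORY_MIN_MESSAGES = 12    # floor against aggressive clipping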

def _build_recent_history(messages, max_chars):
    """
    Build the final prompt transcript under a strict character budget.

    Method:
    1. Iterate messages from newest to oldest so recency is prioritized.
    2. For each message, estimate the rendered line length exactly as it will
       appear in the prompt transcript.
    3. Stop once adding another line would exceed `max_chars`, while still
       guaranteeing at least one message can be included.
    4. Reverse back to chronological order for readability in prompts.
    """
    if not messages:
        return ""

    selected = []
    total_chars = 0

    # Recency-first packing, then reorder to chronological output later.
    for msg in reversed(messages):
        line = f"[{msg.ts}] <{msg.custom_author if msg.custom_author else msg.session.identifier.person.name}> {msg.text}"
        line_len = len(line) + 1

        # Keep at least one line even if it alone exceeds max_chars.
        if selected and (total_chars + line_len) > max_chars:
            break

        selected.append(msg)
        total_chars += line_len

    selected.reverse()
    return messages_to_string(selected)
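# The packing loop above in isolation, as a self-contained sketch over plain
# strings (pack_newest_first is hypothetical; the real code walks Message rows):
#
#     def pack_newest_first(lines, max_chars):
#         selected, total = [], 0
#         for line in reversed(lines):
#             cost = len(line) + 1  # +1 for the joining newline
#             if selected and total + cost > max_chars:
#                 break
#             selected.append(line)
#             total += cost
#         selected.reverse()  # back to chronological order
#         return selected
#
#     # pack_newest_first(["old...", "mid...", "new..."], max_chars=12)
#     # -> ["new..."]: only the newest line fits the budget.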

def _compute_adaptive_message_limit(messages, max_messages, max_chars):
    """
    Derive how many messages to include before final char-budget packing.

    This function intentionally avoids hand-picked threshold buckets.
    Instead, it computes a budget-derived estimate:
    - Build a recent sample (up to 80 messages) representing current chat style.
    - Measure *rendered* line lengths (timestamp + author + text), not raw text.
    - Estimate average line length from that sample.
    - Convert char budget into message budget: floor(max_chars / avg_line_len).
    - Clamp to configured min/max rails.

    Why two stages:
    - Stage A (this function): estimate count from current message density.
    - Stage B (`_build_recent_history`): enforce exact char ceiling.
    This keeps behavior stable while guaranteeing hard prompt budget compliance.
    """
    if not messages:
        return DEFAULT_PROMPT_HISTORY_MIN_MESSAGES

    sample = messages[-min(len(messages), 80) :]
    rendered_lengths = []
    for msg in sample:
        author = (
            msg.custom_author
            if msg.custom_author
            else msg.session.identifier.person.name
        )
        text = msg.text or ""
        # Match the line shape used in _build_recent_history/messages_to_string.
        rendered_lengths.append(len(f"[{msg.ts}] <{author}> {text}") + 1)

    # Defensive denominator: never divide by zero.
    avg_line_len = (
        (sum(rendered_lengths) / len(rendered_lengths)) if rendered_lengths else 1.0
    )
    avg_line_len = max(avg_line_len, 1.0)

    budget_based = int(max_chars / avg_line_len)
    adaptive = max(DEFAULT_PROMPT_HISTORY_MIN_MESSAGES, budget_based)
    adaptive = min(max_messages, adaptive)
    return max(1, adaptive)
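# Worked example under the defaults (max_chars=24000, min=24, max=120):
# lines averaging 200 chars -> int(24000 / 200) = 120, kept at the 120 cap;
# lines averaging 600 chars -> int(24000 / 600) = 40 messages;
# lines averaging 2000 chars -> 12, raised back to the 24-message floor
# (Stage B still enforces the exact character ceiling afterwards).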

async def get_chat_history(
    session,
    max_messages=DEFAULT_PROMPT_HISTORY_MAX_MESSAGES,
    max_chars=DEFAULT_PROMPT_HISTORY_MAX_CHARS,
):
    """
    Return prompt-ready chat history with adaptive windowing and hard budget limits.

    Pipeline:
    1. Fetch a bounded recent slice from DB (performance guard).
    2. Estimate adaptive message count from observed rendered message density.
    3. Keep only the newest `adaptive_limit` messages.
    4. Pack those lines under `max_chars` exactly.
    """
    # Storage remains complete; only prompt context is reduced.
    fetch_limit = max(max_messages * 3, 200)
    fetch_limit = min(fetch_limit, 1000)

    stored_messages = await sync_to_async(list)(
        Message.objects.filter(session=session, user=session.user).order_by("-ts")[
            :fetch_limit
        ]
    )
    stored_messages.reverse()

    adaptive_limit = _compute_adaptive_message_limit(
        stored_messages,
        max_messages=max_messages,
        max_chars=max_chars,
    )
    selected_messages = stored_messages[-adaptive_limit:]

    recent_chat_history = _build_recent_history(selected_messages, max_chars=max_chars)
    chat_history = f"Recent Messages:\n{recent_chat_history}"
    return chat_history
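# Usage sketch (session is assumed to be a ChatSession; keyword overrides are
# optional and default to the module rails above):
#
#     history = await get_chat_history(session)
#     tight = await get_chat_history(session, max_messages=40, max_chars=8000)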

async def get_chat_session(user, identifier):
    chat_session, _ = await sync_to_async(ChatSession.objects.get_or_create)(
        identifier=identifier,
@@ -21,6 +138,7 @@ async def get_chat_session(user, identifier):
    )
    return chat_session


async def store_message(session, sender, text, ts, outgoing=False):
    log.info(f"STORE MESSAGE {text}")
    msg = await sync_to_async(Message.objects.create)(
@@ -29,11 +147,12 @@ async def store_message(session, sender, text, ts, outgoing=False):
        sender_uuid=sender,
        text=text,
        ts=ts,
        custom_author="USER" if outgoing else None,
    )
    return msg

async def store_own_message(session, text, ts, manip=None, queue=False):
    log.info(f"STORE OWN MESSAGE {text}")
    cast = {
@@ -53,4 +172,8 @@ async def store_own_message(session, text, ts, manip=None, queue=False):
        **cast,
    )
    return msg


async def delete_queryset(queryset):
    await sync_to_async(queryset.delete, thread_sensitive=True)()
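# Usage sketch for the helper above (the QueuedMessage filter is illustrative):
#
#     await delete_queryset(QueuedMessage.objects.filter(session=session))
#
# thread_sensitive=True runs the blocking .delete() in the thread Django
# expects, since the ORM must not be called directly from async code.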

View File

@@ -1,12 +1,10 @@
import asyncio
import random
async def natural_send_message(
    text, send, start_typing, stop_typing, skip_thinking=False
):
    """
    Parses and sends messages with natural delays based on message length.
@@ -41,7 +39,9 @@ async def natural_send_message(text,
    # Decide when to start thinking *before* typing
    if not skip_thinking:
        if natural_delay > 3.5:  # Only delay if response is long
            await asyncio.sleep(
                natural_delay - 3.5
            )  # "Thinking" pause before typing

    # Start typing
    await start_typing()
@@ -55,4 +55,4 @@ async def natural_send_message(text,
    # Optional: Small buffer between messages to prevent rapid-fire responses
    await asyncio.sleep(0.5)

    return ids
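# A minimal driver sketch (the three callbacks are assumed to be
# messenger-specific coroutines; real ones would presumably return message ids
# that natural_send_message collects into `ids`):
#
#     async def send(text):
#         print(f"SEND: {text}")
#
#     async def start_typing():
#         print("typing...")
#
#     async def stop_typing():
#         print("stopped")
#
#     ids = await natural_send_message(
#         "Sounds good, see you at 8!", send, start_typing, stop_typing
#     )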

View File

@@ -1,18 +1,21 @@
import json
import random

from asgiref.sync import sync_to_async
from django.utils import timezone

from core.lib.prompts import bases
from core.models import AI, ChatSession, Manipulation, Message, Person
from core.util import logs

log = logs.get_logger("replies")

def should_reply(
    reply_to_self,
    reply_to_others,
    is_outgoing_message,
):
    reply = False
    if reply_to_self:
@@ -26,7 +29,14 @@ def should_reply(
    return reply

def generate_mutate_reply_prompt(
    msg: dict,
    person: Person,
    manip: Manipulation,
    chat_history: str,
    mutate: bool = False,
):
    """
    Strictly rewrites the message in the persona's tone and style
    while keeping the original meaning. No added explanations.
@@ -66,16 +76,12 @@ def generate_mutate_reply_prompt(msg: dict, person: Person, manip: Manipulation,
f"- **Response Tactics:** {persona.response_tactics}\n"
f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n"
f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n"
"### STRICT RULES ###\n"
f"{strict_rules}\n\n"
"### TRANSFORMATION GUIDELINES ###\n"
f"{transformation_guidelines}\n\n"
"### Original Message ###\n"
f"{msg}\n\n"
"### Rewritten Message ###\n"
"(DO NOT include anything except the rewritten text. NO extra comments or formatting.)"
)
@@ -83,8 +89,13 @@ def generate_mutate_reply_prompt(msg: dict, person: Person, manip: Manipulation,
return [{"role": "system", "content": system_message}]

def generate_reply_prompt(
    msg: dict,
    person: Person,
    manip: Manipulation,
    chat_history: str,
    mutate: bool = False,
):
"""
Generate a structured prompt using the attributes of the provided Person and Manipulation models.
"""
@@ -108,7 +119,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h
"You are my digital persona, responding on my behalf while embodying my personality, preferences, and unique style.\n\n"
"You must strictly apply the following persona-based filtering rules when modifying the message:\n\n"
f"{filter_rules}\n\n"
"### Persona Profile ###\n"
f"- **MBTI:** {persona.mbti} ({persona.mbti_identity} balance)\n"
f"- **Tone:** {persona.tone} | **Humor:** {persona.humor_style}\n"
@@ -119,7 +129,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h
f"- **Response Tactics:** {persona.response_tactics}\n"
f"- **Persuasion Techniques:** {persona.persuasion_tactics}\n"
f"- **Boundaries:** {persona.boundaries} | **Adaptability:** {persona.adaptability}%\n\n"
"### Contact Information ###\n"
f"- **Summary:** {person.summary or 'N/A'}\n"
f"- **Profile:** {person.profile or 'N/A'}\n"
@@ -128,7 +137,6 @@ def generate_reply_prompt(msg: dict, person: Person, manip: Manipulation, chat_h
f"- **Timezone:** {person.timezone or 'N/A'}\n"
f"- **Last Interaction:** {person.last_interaction or 'Never'}\n"
f"- **Current Date/Time:** {now}\n\n"
"### Conversation Context ###\n"
f"{chat_history if chat_history else 'No prior chat history.'}\n\n"
)

View File

@@ -12,9 +12,10 @@ def messages_to_string(messages: list):
    ]
    return "\n".join(message_texts)

async def update_last_interaction(session):
    now = timezone.now()
    session.identifier.person.last_interaction = now
    session.last_interaction = now
    await sync_to_async(session.identifier.person.save)()
    await sync_to_async(session.save)()
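# Usage sketch (call site assumed): a single awaited call after each processed
# message persists both the Person and the ChatSession timestamps.
#
#     await update_last_interaction(session)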