Refactor and implement message queueing

This commit is contained in:
2025-02-12 18:45:21 +00:00
parent d5eb37d2b4
commit abcb038051
23 changed files with 804 additions and 338 deletions

19
core/messaging/ai.py Normal file
View File

@@ -0,0 +1,19 @@
from openai import AsyncOpenAI, OpenAI
from core.models import Message, ChatSession, AI, Person, Manipulation
async def run_prompt(
    prompt: list[str],
    ai: AI,
):
    """Send *prompt* to the AI's OpenAI-compatible endpoint and return the reply text.

    Args:
        prompt: Chat messages passed straight through to
            ``chat.completions.create``. NOTE(review): the annotation says
            ``list[str]`` but the OpenAI API expects a list of message dicts
            (``{"role": ..., "content": ...}``) — confirm against callers.
        ai: Model configuration providing ``api_key``, ``model``, and an
            optional ``base_url`` override for self-hosted endpoints.

    Returns:
        The text content of the first completion choice (may be ``None``
        if the API returns no content).
    """
    # Build the client kwargs; only pass base_url when this AI overrides it,
    # so the library default endpoint is used otherwise.
    client_kwargs = {"api_key": ai.api_key}
    if ai.base_url is not None:
        # BUG FIX: the original wrote ai.base_url into the "api_key" slot,
        # clobbering the credential and never applying the custom endpoint.
        client_kwargs["base_url"] = ai.base_url
    client = AsyncOpenAI(**client_kwargs)
    response = await client.chat.completions.create(
        model=ai.model,
        messages=prompt,
    )
    content = response.choices[0].message.content
    return content