Refactor and implement queueing messages
This commit is contained in:
19
core/messaging/ai.py
Normal file
19
core/messaging/ai.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from openai import AsyncOpenAI, OpenAI
|
||||
from core.models import Message, ChatSession, AI, Person, Manipulation
|
||||
|
||||
|
||||
async def run_prompt(
    prompt: list[str],
    ai: AI,
):
    """Send a chat prompt to the AI's OpenAI-compatible endpoint and return the reply text.

    Args:
        prompt: The messages payload forwarded to ``chat.completions.create``.
            NOTE(review): annotated ``list[str]`` but the OpenAI API expects a list of
            ``{"role": ..., "content": ...}`` dicts — confirm against callers.
        ai: Provider configuration supplying ``api_key``, optional ``base_url``,
            and ``model``.

    Returns:
        The text content of the first completion choice (may be ``None`` if the
        API returns no content).
    """
    client_kwargs = {"api_key": ai.api_key}
    if ai.base_url is not None:
        # BUG FIX: the original assigned ai.base_url to "api_key", clobbering the
        # key and never routing the client to the custom endpoint. Set "base_url"
        # so self-hosted / OpenAI-compatible servers are actually used.
        client_kwargs["base_url"] = ai.base_url
    client = AsyncOpenAI(**client_kwargs)
    response = await client.chat.completions.create(
        model=ai.model,
        messages=prompt,
    )
    return response.choices[0].message.content
|
||||
Reference in New Issue
Block a user