Files
vtb/server/agent_service.py
2026-03-05 18:45:04 +08:00

47 lines
1.5 KiB
Python

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import MultiModalMessage, TextMessage
from autogen_core import Image
from autogen_core.models import ModelFamily
from autogen_ext.models.ollama import OllamaChatCompletionClient
from . import config
from .mcp_tools import get_weather, web_search
class AvatarAgentService:
    """Conversational avatar backed by an AutoGen ``AssistantAgent`` on Ollama.

    The model client and agent are built once in ``__init__`` and reused for
    every :meth:`reply` call.
    """

    def __init__(self) -> None:
        # Ollama models do not advertise capabilities, so declare them
        # explicitly via model_info. NOTE(review): this asserts vision and
        # tool-calling support — confirm config.OLLAMA_MODEL actually has both.
        model_client = OllamaChatCompletionClient(
            model=config.OLLAMA_MODEL,
            model_info={
                "vision": True,
                "function_calling": True,
                "json_output": True,
                "family": ModelFamily.UNKNOWN,
                "structured_output": True,
            },
        )
        # MCP tools made available to the model for function calling.
        tools = [
            get_weather,
            web_search,
        ]
        self._agent = AssistantAgent(
            name="avatar",
            model_client=model_client,
            system_message=config.SYSTEM_MESSAGE,
            tools=tools,
        )

    async def reply(self, user_text: str, image_b64: str | None = None) -> str:
        """Return the agent's text reply to *user_text*.

        Args:
            user_text: The user's message.
            image_b64: Optional base64-encoded image to attach. When falsy the
                task is sent as plain text (generalized: the original required
                an image; passing one behaves exactly as before).

        Returns:
            The content of the last ``TextMessage`` emitted by the "avatar"
            agent, or ``""`` if the stream produced none.
        """
        if image_b64:
            user_image = Image.from_base64(image_b64)
            task = MultiModalMessage(source="user", content=[user_text, user_image])
        else:
            task = TextMessage(source="user", content=user_text)
        ai_response = ""
        # run_stream yields intermediate events (tool calls, model events) and
        # a final result; keep only the last TextMessage authored by our agent.
        async for message in self._agent.run_stream(task=task):
            if isinstance(message, TextMessage) and message.source == "avatar":
                ai_response = message.content
        return ai_response