Building a Simple Agent with LlamaIndex
1 Introduction
Agent construction is a clear trend, and LlamaIndex makes a simple Agent straightforward to build. This article relies mainly on LlamaIndex's FunctionTool and Workflow; a Workflow is implemented as a stream of events routed between steps.
2 Building a Shared Class
LlamaIndex's built-in OpenAI class cannot connect directly to Chinese domestic LLMs, so we subclass CustomLLM and wrap the openai SDK's OpenAI client in a CustomLikeOpenAI class.
from typing import Any

from llama_index.core.base.llms.types import CompletionResponse, CompletionResponseGen, LLMMetadata
from llama_index.core.llms import CustomLLM
from openai import OpenAI
from pydantic import Field


class CustomLikeOpenAI(CustomLLM):
    model: str = Field(description="Custom model name")
    api_key: str = Field(description="Custom API key")
    api_base: str = Field(description="Custom API base URL")
    context_window: int = Field(default=32768, description="Context window size")
    temperature: float = Field(ge=0, le=1, default=0.3, description="Sampling temperature; must be within [0, 1]")
    num_output: int = Field(default=8192, description="max_tokens for generation")

    def __init__(self, **data):
        # The parent initializer must run first so the pydantic fields exist
        super().__init__(**data)
        # Create the OpenAI-compatible client
        self._client = OpenAI(api_key=self.api_key, base_url=self.api_base)

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=self.num_output,
            model_name=self.model,
        )

    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Generate text.
        :param prompt: the prompt
        :param kwargs: other parameters
        :return: CompletionResponse
        """
        # Issue the completion request
        completion = self._client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=self.temperature,
            max_tokens=self.num_output,
        )
        # Return the generated text
        return CompletionResponse(text=completion.choices[0].message.content)

    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        """Generate text as a stream.
        :param prompt: the prompt
        :param kwargs: other parameters
        :return: a CompletionResponseGen iterator
        """
        # This method is optional; if you do not want to implement it, use:
        # raise NotImplementedError("Streaming not supported")
        # Open the stream
        stream = self._client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=self.temperature,
            max_tokens=self.num_output,
            stream=True,
        )
        # Walk the stream; by LlamaIndex convention, text carries the accumulated
        # output so far and delta carries only the new chunk
        full_text = ""
        for chunk in stream:
            # Get the incremental text
            delta = chunk.choices[0].delta
            # Yield only when there is content
            if delta.content:
                full_text += delta.content
                yield CompletionResponse(text=full_text, delta=delta.content)
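Before wiring the class into an agent, it helps to smoke-test it on its own. A minimal sketch, assuming the file lives at my_custom_rag/custom_like_openai.py as imported in the later examples; the model, api_base, and api_key values are placeholders for your provider's OpenAI-compatible endpoint:

# Quick smoke test for CustomLikeOpenAI (model/api_base/api_key are placeholders)
from my_custom_rag.custom_like_openai import CustomLikeOpenAI

llm = CustomLikeOpenAI(model="XXXX", api_base="XXXX", api_key="XXXX")

# Blocking completion
print(llm.complete("Say hello in one sentence.").text)

# Streaming completion: each chunk carries the new delta plus the accumulated text
for chunk in llm.stream_complete("Count from 1 to 5."):
    print(chunk.delta, end="", flush=True)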
3 Implementing an Agent with Function Calling
Source code
# Implement an Agent using LlamaIndex's ReActAgent together with function calling.
# ReActAgent is built on AgentRunner/AgentWorker. LlamaIndex plans to retire this
# approach; AgentWorkflow and Workflows are the recommended replacements
# (a sketch follows after the screenshot below).
# Required packages:
"""
pip install llama-index
pip install llama-index-llms-openai-like
"""
import asyncio

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool

from my_custom_rag.custom_like_openai import CustomLikeOpenAI


def add(x: int, y: int) -> int:
    """Useful function to add two numbers."""
    return x + y


def multiply(x: int, y: int) -> int:
    """Useful function to multiply two numbers."""
    return x * y


async def main():
    # Wrap the functions as tools
    tools = [
        FunctionTool.from_defaults(fn=add, description="Adds two numbers"),
        FunctionTool.from_defaults(fn=multiply, description="Multiplies two numbers"),
    ]
    # LlamaIndex's OpenAILike kept failing with parameter errors against Qwen and Kimi,
    # so the custom class above is used instead
    llm = CustomLikeOpenAI(
        model="XXXX",
        api_base="XXXX",
        api_key="XXXX",
    )
    # Build the agent
    agent = ReActAgent.from_tools(
        llm=llm,
        verbose=True,
        tools=tools,
    )
    ret = agent.chat(message="What is 1 times 10?")
    print(ret)


if __name__ == "__main__":
    asyncio.run(main())
Screenshot
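Since ReActAgent.from_tools is slated for removal, the sketch below shows the recommended AgentWorkflow route instead. It assumes a recent llama-index release that ships llama_index.core.agent.workflow, and it reuses CustomLikeOpenAI plus the same add and multiply functions:

# A sketch of the AgentWorkflow replacement (assumes a recent llama-index release)
import asyncio

from llama_index.core.agent.workflow import AgentWorkflow

from my_custom_rag.custom_like_openai import CustomLikeOpenAI


def add(x: int, y: int) -> int:
    """Useful function to add two numbers."""
    return x + y


def multiply(x: int, y: int) -> int:
    """Useful function to multiply two numbers."""
    return x * y


async def main():
    llm = CustomLikeOpenAI(model="XXXX", api_base="XXXX", api_key="XXXX")
    # Plain functions are wrapped into tools automatically; since CustomLikeOpenAI
    # does not advertise function calling in its metadata, AgentWorkflow should
    # fall back to a ReAct-style agent internally
    agent = AgentWorkflow.from_tools_or_functions(
        [add, multiply],
        llm=llm,
        system_prompt="You are a calculator assistant.",
    )
    response = await agent.run(user_msg="What is 1 times 10?")
    print(response)


if __name__ == "__main__":
    asyncio.run(main())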
4 Implementing an Agent with Workflow
Execution flow
flowchart TD
    subgraph Explanation
        A1["(1) Find the entry method: the one that takes a StartEvent parameter"] --> B1
        B1["(2) Run the first step: the method matched by the entry method's output event"] --> C1
        C1["(3) Keep running steps: each next method is matched by the previous method's output"] --> D1
        D1["(4) Execution ends once a method returns a StopEvent"]
    end
    subgraph Functions
        A2["(1) call_tool"] --> B2
        B2["(2) generate_joke"] --> C2
        C2["(3) ..."] --> D2
        D2["(4) critique_joke"]
    end
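To make the routing rule above concrete before the full example, here is a minimal self-contained skeleton; TwoStepFlow and MidEvent are illustrative names, separate from the joke example that follows:

# Minimal illustration of event-type routing (TwoStepFlow/MidEvent are made-up names)
import asyncio

from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow, step


class MidEvent(Event):
    payload: str


class TwoStepFlow(Workflow):
    @step
    async def begin(self, ev: StartEvent) -> MidEvent:
        # (1) picked as the entry point because its parameter is a StartEvent
        return MidEvent(payload=str(ev.topic))

    @step
    async def finish(self, ev: MidEvent) -> StopEvent:
        # (2) picked next because it accepts the MidEvent produced above;
        # returning StopEvent ends the run
        return StopEvent(result=f"done: {ev.payload}")


async def main():
    result = await TwoStepFlow().run(topic="demo")
    print(result)  # done: demo


if __name__ == "__main__":
    asyncio.run(main())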
Code
# Required packages:
"""
pip install llama-index
pip install llama-index-llms-openai-like
"""
import asyncio

from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)

from my_custom_rag.custom_like_openai import CustomLikeOpenAI


class ToolCallEvent(Event):
    """Carries the data returned by the tool-call step."""
    data: str


class JokeEvent(Event):
    """Joke event."""
    joke: str


class JokeFlow(Workflow):
    # LlamaIndex's OpenAILike kept failing with parameter errors against Qwen and Kimi,
    # so the custom class is used instead
    llm = CustomLikeOpenAI(
        model="XXXX",
        api_base="XXXX",
        api_key="XXXX",
    )

    @step
    async def call_tool(self, ev: StartEvent) -> ToolCallEvent:
        """Expand the topic.
        :param ev: the start event
        :return: a tool-call event
        """
        topic = ev.topic
        prompt = f"Expand the theme {topic} into related topics."
        response = await self.llm.acomplete(prompt)
        print("Generated related topics:", response)
        return ToolCallEvent(data=str(response))

    @step
    async def generate_joke(self, ev: ToolCallEvent) -> JokeEvent:
        """Generate a joke.
        :param ev: the tool-call event
        :return: a joke event
        """
        prompt = f"Write a joke about {ev.data}."
        response = await self.llm.acomplete(prompt)
        print("Generated joke:", response)
        return JokeEvent(joke=str(response))

    @step
    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
        """Critique the joke.
        :param ev: the joke event
        :return: the stop event
        """
        joke = ev.joke
        prompt = f"Please analyze and critique this joke: {joke}"
        response = await self.llm.acomplete(prompt)
        return StopEvent(result=str(response))


async def main():
    w = JokeFlow(timeout=60, verbose=True)
    result = await w.run(topic="fruit")
    print(str(result))


if __name__ == "__main__":
    asyncio.run(main())
Screenshot
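To watch a run as it progresses instead of only collecting the final result, Workflow.run can be called without awaiting it immediately: it returns a handler whose event stream can be iterated. A minimal sketch against the JokeFlow above; note that intermediate events generally appear only if a step writes them out explicitly (for example via ctx.write_event_to_stream), while the final result always arrives:

# A sketch of consuming the event stream while JokeFlow runs
import asyncio


async def run_with_streaming():
    w = JokeFlow(timeout=60, verbose=False)
    handler = w.run(topic="fruit")  # not awaited yet: this returns a handler
    # By default only events a step writes with ctx.write_event_to_stream(...)
    # show up here, followed by the final StopEvent
    async for ev in handler.stream_events():
        print("event:", type(ev).__name__)
    result = await handler  # the StopEvent's result
    print(str(result))


if __name__ == "__main__":
    asyncio.run(run_with_streaming())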