LangChain series
If we want the choice of chat model or LLM to be configurable at runtime:
Pass a different model value in the config when calling the configurable_chain object; that value decides which model is ultimately invoked. In the example, two OpenAI models are called: the instruct model and the chat model.
from langchain_core.runnables import ConfigurableField, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from dotenv import load_dotenv

load_dotenv()

from langchain.globals import set_debug

set_debug(True)

prompt = ChatPromptTemplate.from_template(
    "Tell me a short joke about {topic}"
)
output_parser = StrOutputParser()
model = ChatOpenAI(model="gpt-3.5-turbo")
llm = OpenAI(model="gpt-3.5-turbo-instruct")

# Register the instruct LLM as an alternative to the chat model; the
# alternative is selected at runtime via the "model" configurable field.
configurable_model = model.configurable_alternatives(
    ConfigurableField(id="model"),
    default_key="chat_openai",
    openai=llm,
)
configurable_chain = (
    {"topic": RunnablePassthrough()}
    | prompt
    | configurable_model
    | output_parser
)

configurable_chain.invoke(
    "ice cream",
    config={"model": "openai"}
)
stream = configurable_chain.stream(
    "spaghetti",
    config={"model": "chat_openai"}
)
for chunk in stream:
    print(chunk, end="", flush=True)

# response = configurable_chain.batch(["ice cream", "spaghetti", "dumplings"])
response = configurable_chain.batch(["ice cream", "spaghetti"])
print('ice cream, spaghetti, dumplings, response >> ', response)

# await configurable_chain.ainvoke("ice cream")
# async def main():
#     await configurable_chain.ainvoke("ice cream")
# import asyncio
# asyncio.run(main())
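Note: depending on the LangChain version, the flat config={"model": "openai"} form above may be silently ignored, in which case the default chat_openai alternative is used (in the trace below, the first "ice cream" call is in fact handled by ChatOpenAI). The configurable field usually needs to be nested under the configurable key, or bound with with_config. A minimal sketch of both forms, assuming the configurable_chain defined above:

# Sketch: select the "openai" (instruct) alternative explicitly.
# The field id "model" matches ConfigurableField(id="model") above.
joke = configurable_chain.invoke(
    "ice cream",
    config={"configurable": {"model": "openai"}},
)
print(joke)

# Equivalent: bind the choice up front and reuse the configured chain.
instruct_chain = configurable_chain.with_config(configurable={"model": "openai"})
print(instruct_chain.invoke("ice cream"))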
Run output (with set_debug(True)):
⚡ python LCEL/runtime_lcel.py 1 ↵ [chain/start] [1:chain:RunnableSequence] Entering Chain run with input: { "input": "ice cream" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel] Entering Chain run with input: { "input": "ice cream" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] Entering Chain run with input: { "input": "ice cream" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] [3ms] Exiting Chain run with output: { "output": "ice cream" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel] [12ms] Exiting Chain run with output: { "topic": "ice cream" } [chain/start] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] Entering Prompt run with input: { "topic": "ice cream" } [chain/end] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] [5ms] Exiting Prompt run with output: { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "ChatPromptValue" ], "kwargs": { "messages": [ { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "HumanMessage" ], "kwargs": { "content": "Tell me a short joke about ice cream", "additional_kwargs": {} } } ] } } [llm/start] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] Entering LLM run with input: { "prompts": [ "Human: Tell me a short joke about ice cream" ] } [llm/end] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] [1.98s] Exiting LLM run with output: { "generations": [ [ { "text": "Why did the ice cream go to school?\n\nBecause it wanted to get a little \"sundae\" education!", "generation_info": { "finish_reason": "stop", "logprobs": null }, "type": "ChatGeneration", "message": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "AIMessage" ], "kwargs": { "content": "Why did the ice cream go to school?\n\nBecause it wanted to get a little \"sundae\" education!", "additional_kwargs": {} } } } ] ], "llm_output": { "token_usage": { "completion_tokens": 23, "prompt_tokens": 15, "total_tokens": 38 }, "model_name": "gpt-3.5-turbo", "system_fingerprint": null }, "run": null } [chain/start] [1:chain:RunnableSequence > 6:parser:StrOutputParser] Entering Parser run with input: [inputs] [chain/end] [1:chain:RunnableSequence > 6:parser:StrOutputParser] [1ms] Exiting Parser run with output: { "output": "Why did the ice cream go to school?\n\nBecause it wanted to get a little \"sundae\" education!" } [chain/end] [1:chain:RunnableSequence] [2.01s] Exiting Chain run with output: { "output": "Why did the ice cream go to school?\n\nBecause it wanted to get a little \"sundae\" education!" 
} [chain/start] [1:chain:RunnableSequence] Entering Chain run with input: { "input": "" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel] Entering Chain run with input: { "input": "" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] Entering Chain run with input: { "input": "" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] [8ms] Exiting Chain run with output: { "output": "spaghetti" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel] [10ms] Exiting Chain run with output: { "topic": "spaghetti" } [chain/start] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] Entering Prompt run with input: { "topic": "spaghetti" } [chain/end] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] [5ms] Exiting Prompt run with output: { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "ChatPromptValue" ], "kwargs": { "messages": [ { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "HumanMessage" ], "kwargs": { "content": "Tell me a short joke about spaghetti", "additional_kwargs": {} } } ] } } [llm/start] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] Entering LLM run with input: { "prompts": [ "Human: Tell me a short joke about spaghetti" ] } [chain/start] [1:chain:RunnableSequence > 6:parser:StrOutputParser] Entering Parser run with input: { "input": "" } Why did the spaghetti go to the party? Because it heard it was pasta-tively awesome![llm/end] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] [1.47s] Exiting LLM run with output: { "generations": [ [ { "text": "Why did the spaghetti go to the party?\n\nBecause it heard it was pasta-tively awesome!", "generation_info": { "finish_reason": "stop" }, "type": "ChatGenerationChunk", "message": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "AIMessageChunk" ], "kwargs": { "example": false, "content": "Why did the spaghetti go to the party?\n\nBecause it heard it was pasta-tively awesome!", "additional_kwargs": {} } } } ] ], "llm_output": null, "run": null } [chain/end] [1:chain:RunnableSequence > 6:parser:StrOutputParser] [575ms] Exiting Parser run with output: { "output": "Why did the spaghetti go to the party?\n\nBecause it heard it was pasta-tively awesome!" } [chain/end] [1:chain:RunnableSequence] [1.56s] Exiting Chain run with output: { "output": "Why did the spaghetti go to the party?\n\nBecause it heard it was pasta-tively awesome!" 
} [chain/start] [1:chain:RunnableSequence] Entering Chain run with input: { "input": "ice cream" } [chain/start] [1:chain:RunnableSequence] Entering Chain run with input: { "input": "spaghetti" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel] Entering Chain run with input: { "input": "ice cream" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel] Entering Chain run with input: { "input": "spaghetti" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] Entering Chain run with input: { "input": "ice cream" } [chain/start] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] Entering Chain run with input: { "input": "spaghetti" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] [5ms] Exiting Chain run with output: { "output": "ice cream" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel > 3:chain:RunnablePassthrough] [4ms] Exiting Chain run with output: { "output": "spaghetti" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel] [10ms] Exiting Chain run with output: { "topic": "ice cream" } [chain/end] [1:chain:RunnableSequence > 2:chain:RunnableParallel] [14ms] Exiting Chain run with output: { "topic": "spaghetti" } [chain/start] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] Entering Prompt run with input: { "topic": "ice cream" } [chain/start] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] Entering Prompt run with input: { "topic": "spaghetti" } [chain/end] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output: { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "ChatPromptValue" ], "kwargs": { "messages": [ { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "HumanMessage" ], "kwargs": { "content": "Tell me a short joke about ice cream", "additional_kwargs": {} } } ] } } [chain/end] [1:chain:RunnableSequence > 4:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output: { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "ChatPromptValue" ], "kwargs": { "messages": [ { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "HumanMessage" ], "kwargs": { "content": "Tell me a short joke about spaghetti", "additional_kwargs": {} } } ] } } [llm/start] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] Entering LLM run with input: { "prompts": [ "Human: Tell me a short joke about ice cream" ] } [llm/start] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] Entering LLM run with input: { "prompts": [ "Human: Tell me a short joke about spaghetti" ] } [llm/end] [1:chain:RunnableSequence > 5:llm:ChatOpenAI] [1.06s] Exiting LLM run with output: { "generations": [ [ { "text": "Why did the tomato turn red?\n\nBecause it saw the spaghetti sauce!", "generation_info": { "finish_reason": "stop", "logprobs": null }, "type": "ChatGeneration", "message": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "AIMessage" ], "kwargs": { "content": "Why did the tomato turn red?\n\nBecause it saw the spaghetti sauce!", "additional_kwargs": {} } } } ] ], "llm_output": { "token_usage": { "completion_tokens": 14, "prompt_tokens": 14, "total_tokens": 28 }, "model_name": "gpt-3.5-turbo", "system_fingerprint": null }, "run": null
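The commented-out lines at the end of the script hint at async usage. Every Runnable also exposes ainvoke/astream/abatch, so the same configurable chain can be awaited without any extra code; a minimal sketch, assuming the configurable_chain defined above:

import asyncio

async def main():
    # Same chain, same config mechanism, just awaited.
    joke = await configurable_chain.ainvoke("ice cream")
    jokes = await configurable_chain.abatch(["ice cream", "spaghetti"])
    print(joke)
    print(jokes)

asyncio.run(main())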
The traditional approach, by contrast, is to define a string parameter and dispatch to the different models with if/else branches.
from typing import Iterator, List

def invoke_configurable_chain(
    topic: str, *, model: str = "chat_openai"
) -> str:
    if model == "chat_openai":
        return invoke_chain(topic)
    elif model == "openai":
        return invoke_llm_chain(topic)
    elif model == "anthropic":
        return invoke_anthropic_chain(topic)
    else:
        raise ValueError(
            f"Received invalid model '{model}'."
            " Expected one of chat_openai, openai, anthropic"
        )

def stream_configurable_chain(
    topic: str, *, model: str = "chat_openai"
) -> Iterator[str]:
    if model == "chat_openai":
        return stream_chain(topic)
    elif model == "openai":
        # Note we haven't implemented this yet.
        return stream_llm_chain(topic)
    elif model == "anthropic":
        # Note we haven't implemented this yet.
        return stream_anthropic_chain(topic)
    else:
        raise ValueError(
            f"Received invalid model '{model}'."
            " Expected one of chat_openai, openai, anthropic"
        )

def batch_configurable_chain(
    topics: List[str], *, model: str = "chat_openai"
) -> List[str]:
    # You get the idea
    ...

async def abatch_configurable_chain(
    topics: List[str], *, model: str = "chat_openai"
) -> List[str]:
    ...

invoke_configurable_chain("ice cream", model="openai")
stream = stream_configurable_chain(
    "ice cream", model="anthropic"
)
for chunk in stream:
    print(chunk, end="", flush=True)

# batch_configurable_chain(["ice cream", "spaghetti", "dumplings"])
# await ainvoke_configurable_chain("ice cream")
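For comparison, covering the anthropic branch above with LCEL is a single extra keyword argument to configurable_alternatives, rather than another elif in every function. A sketch, assuming the Anthropic integration package and API key are available (the model name is illustrative):

from langchain.chat_models import ChatAnthropic  # requires the anthropic package

configurable_model = model.configurable_alternatives(
    ConfigurableField(id="model"),
    default_key="chat_openai",
    openai=llm,
    anthropic=ChatAnthropic(model="claude-2"),  # illustrative model name
)
# invoke/stream/batch/ainvoke on the chain all pick up the new alternative, e.g.
# configurable_chain.invoke("ice cream", config={"configurable": {"model": "anthropic"}})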
Code: https://github.com/zgpeace/pets-name-langchain/tree/develop
Reference: https://python.langchain.com/docs/expression_language/why