系列文章索引
J-LangChain 入门
介绍
j-langchain是一个Java版的LangChain开发框架,具有灵活编排和流式执行能力,旨在简化和加速各类大模型应用在Java平台的落地开发。它提供了一组实用的工具和类,使得开发人员能够更轻松地构建类似于LangChain的Java应用程序。
github: https://github.com/flower-trees/j-langchain
依赖
Maven
<dependency>
<groupId>io.github.flower-trees</groupId>
<artifactId>j-langchain</artifactId>
<version>1.0.4-preview</version>
</dependency>
Gradle
implementation 'io.github.flower-trees:j-langchain:1.0.4-preview'
配置
// Register J-LangChain's beans (e.g. ChainActor) by importing its Spring configuration.
@Import(JLangchainConfig.class)
public class YourApplication {
    // Standard Spring Boot entry point.
    public static void main(String[] args) {
        SpringApplication.run(YourApplication.class, args);
    }
}
export ALIYUN_KEY=xxx-xxx-xxx-xxx
export CHATGPT_KEY=xxx-xxx-xxx-xxx
export DOUBAO_KEY=xxx-xxx-xxx-xxx
export MOONSHOT_KEY=xxx-xxx-xxx-xxx
💡 Notes:
- 系统基于 salt-function-flow 流程编排框架开发,具体语法可参考该框架的官方文档。
如何构建智能链
1、顺序调用
LangChain实现
# Sequential LCEL chain: prompt | model | parser.
from langchain_ollama import OllamaLLM
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Local Ollama model; any LangChain-compatible LLM works here.
model = OllamaLLM(model="qwen2.5:0.5b")
# {topic} is filled from the dict passed to invoke().
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")

# The | operator composes Runnables left-to-right.
chain = prompt | model | StrOutputParser()

result = chain.invoke({"topic": "bears"})
print(result)
J-LangChain实现
@Component
public class ChainBuildDemo {

    @Autowired
    ChainActor chainActor;

    /**
     * Sequential chain demo: prompt -> model -> parser, mirroring the LangChain
     * example {@code prompt | model | StrOutputParser()}.
     */
    public void SimpleDemo() {
        // ${topic} is j-langchain's placeholder syntax (LangChain uses {topic}).
        BaseRunnable<StringPromptValue, ?> prompt = PromptTemplate.fromTemplate("tell me a joke about ${topic}");
        ChatOpenAI chatOpenAI = ChatOpenAI.builder().model("gpt-4").build();

        // BUG FIX: the original passed an undefined variable `oll` as the second
        // step; the model built above (chatOpenAI) is the intended step.
        FlowInstance chain = chainActor.builder().next(prompt).next(chatOpenAI).next(new StrOutputParser()).build();

        ChatGeneration result = chainActor.invoke(chain, Map.of("topic", "bears"));
        System.out.println(result);
    }
}
2、分支路由
根据 chain 的输入参数 vendor,判断使用 llama3、gpt-4,还是回复"无法回答"。
LangChain实现
from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# BUG FIX: the original template used "${topic}", but from_template expects
# str.format-style "{topic}" (see the other examples); "${topic}" would leave
# a literal "$" in the rendered prompt.
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")

def route(info):
    # Pick a model-specific sub-chain based on the `vendor` input key.
    if "ollama" in info["vendor"]:
        return prompt | OllamaLLM(model="qwen2.5:0.5b")
    elif "chatgpt" in info["vendor"]:
        return prompt | ChatOpenAI(model="gpt-4")
    else:
        return prompt | RunnableLambda(lambda x: "sorry, I don't know how to do that")

# Piping a plain function into a Runnable coerces it into a RunnableLambda.
chain = route | StrOutputParser()

result = chain.invoke({"topic": "bears", "vendor": "ollama"})
print(result)
J-LangChain实现
/**
 * Branch-routing demo: picks llama3 or gpt-4 based on the `vendor` input,
 * or answers with a fixed apology, then parses the output to a string.
 */
public void SwitchDemo() {
    BaseRunnable<StringPromptValue, ?> jokePrompt = PromptTemplate.fromTemplate("tell me a joke about ${topic}");

    // Candidate models for the two routable vendors.
    ChatOllama ollamaModel = ChatOllama.builder().model("llama3:8b").build();
    ChatOpenAI openAiModel = ChatOpenAI.builder().model("gpt-4").build();

    // Info.c(condition, node): the first condition matching the flow input wins;
    // the last, condition-less entry is the fallback.
    FlowInstance routedChain = chainActor.builder()
        .next(jokePrompt)
        .next(
            Info.c("vendor == 'ollama'", ollamaModel),
            Info.c("vendor == 'chatgpt'", openAiModel),
            Info.c(input -> "sorry, I don't know how to do that")
        )
        .next(new StrOutputParser())
        .build();

    Generation answer = chainActor.invoke(routedChain, Map.of("topic", "bears", "vendor", "ollama"));
    System.out.println(answer);
}
3、组合嵌套
主chain 调用 子chain 生成一个笑话,并对笑话是否可笑进行评价。
LangChain实现
# Nested composition: the previous `chain` (joke generator) feeds its output
# into an analysis prompt under the "joke" key.
analysis_prompt = ChatPromptTemplate.from_template("is this a funny joke? {joke}")
# A dict literal in a pipeline becomes a parallel step that builds the input mapping.
composed_chain = {"joke": chain} | analysis_prompt | model | StrOutputParser()

result = composed_chain.invoke({"topic": "bears"})
print(result)
J-LangChain实现
// Composition demo: a sub-chain generates a joke, then the outer chain asks the
// same model whether the joke is funny (mirrors LangChain's composed_chain).
public void ComposeDemo() {
    ChatOllama llm = ChatOllama.builder().model("llama3:8b").build();
    StrOutputParser parser = new StrOutputParser();

    // Sub-chain: topic -> joke text.
    BaseRunnable<StringPromptValue, ?> prompt = PromptTemplate.fromTemplate("tell me a joke about ${topic}");
    FlowInstance chain = chainActor.builder().next(prompt).next(llm).next(parser).build();

    BaseRunnable<StringPromptValue, ?> analysisPrompt = PromptTemplate.fromTemplate("is this a funny joke? ${joke}");

    FlowInstance analysisChain = chainActor.builder()
        .next(chain)
        // Adapt the sub-chain's Generation output into the ${joke} template variable.
        .next(input -> Map.of("joke", ((Generation)input).getText()))
        .next(analysisPrompt)
        .next(llm)
        .next(parser).build();

    ChatGeneration result = chainActor.invoke(analysisChain, Map.of("topic", "bears"));
    System.out.println(result);
}
4、并行执行
主chain 并行执行 joke_chain 和 poem_chain,并合并输出答案。
LangChain实现
from langchain_core.runnables import RunnableParallel

# Two independent chains over the same input...
joke_chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
poem_chain = ChatPromptTemplate.from_template("write a 2-line poem about {topic}") | model

# ...executed concurrently; the result is a dict with keys "joke" and "poem".
parallel_chain = RunnableParallel(joke=joke_chain, poem=poem_chain)

result = parallel_chain.invoke({"topic": "bear"})
print(result)
J-LangChain实现
// Parallel demo: run jokeChain and poemChain concurrently, then merge the two
// results into one map via the IResult callback (LangChain's RunnableParallel).
public void ParallelDemo() {
    ChatOllama llm = ChatOllama.builder().model("llama3:8b").build();

    BaseRunnable<StringPromptValue, ?> joke = PromptTemplate.fromTemplate("tell me a joke about ${topic}");
    BaseRunnable<StringPromptValue, ?> poem = PromptTemplate.fromTemplate("write a 2-line poem about ${topic}");

    FlowInstance jokeChain = chainActor.builder().next(joke).next(llm).build();
    FlowInstance poemChain = chainActor.builder().next(poem).next(llm).build();

    // concurrent(...) runs both sub-chains; the callback reads each branch's
    // result off the context bus by flow id and merges them.
    // NOTE(review): isTimeout is ignored here — presumably a timed-out branch
    // would yield a null result; verify against salt-function-flow's contract.
    FlowInstance chain = chainActor.builder().concurrent(
        (IResult<Map<String, String>>) (iContextBus, isTimeout) -> {
            AIMessage jokeResult = iContextBus.getResult(jokeChain.getFlowId());
            AIMessage poemResult = iContextBus.getResult(poemChain.getFlowId());
            return Map.of("joke", jokeResult.getContent(), "poem", poemResult.getContent());
        }, jokeChain, poemChain).build();

    Map<String, String> result = chainActor.invoke(chain, Map.of("topic", "bears"));
    System.out.println(JsonUtil.toJson(result));
}
5、动态路由
chain 1 总结用户问题得到 topic,主chain 根据 topic 动态路由执行 langchain_chain、anthropic_chain,或者 general_chain。
LangChain实现
通过 RunnableLambda
实现动态路由:
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda

# Classifier chain: labels the question as LangChain / Anthropic / Other
# with a single word, which the router keys on.
chain = (
    PromptTemplate.from_template(
        """Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.
Do not respond with more than one word.
<question>
{question}
</question>
Classification:"""
    )
    | OllamaLLM(model="qwen2.5:0.5b")
    | StrOutputParser()
)

# Expert sub-chains, each with a vendor-specific persona.
langchain_chain = PromptTemplate.from_template(
    """You are an expert in langchain. \
Always answer questions starting with "As Harrison Chase told me". \
Respond to the following question:
Question: {question}
Answer:"""
) | OllamaLLM(model="qwen2.5:0.5b")
anthropic_chain = PromptTemplate.from_template(
    """You are an expert in anthropic. \
Always answer questions starting with "As Dario Amodei told me". \
Respond to the following question:
Question: {question}
Answer:"""
) | OllamaLLM(model="qwen2.5:0.5b")
# Fallback chain with no persona.
general_chain = PromptTemplate.from_template(
    """Respond to the following question:
Question: {question}
Answer:"""
) | OllamaLLM(model="qwen2.5:0.5b")

def route(info):
    # info["topic"] is the classifier's one-word answer.
    if "anthropic" in info["topic"].lower():
        return anthropic_chain
    elif "langchain" in info["topic"].lower():
        return langchain_chain
    else:
        return general_chain

# The dict builds {"topic": <classification>, "question": <original question>};
# RunnableLambda(route) then picks the sub-chain at runtime.
full_chain = {"topic": chain, "question": lambda x: x["question"]} | RunnableLambda(route)
result = full_chain.invoke({"question": "how do I use LangChain?"})
print(result)
# NOTE(review): this block repeats the `route` / `full_chain` definitions
# above verbatim — presumably an accidental duplication in the article;
# confirm and drop one copy before publishing.
def route(info):
    if "anthropic" in info["topic"].lower():
        return anthropic_chain
    elif "langchain" in info["topic"].lower():
        return langchain_chain
    else:
        return general_chain

from langchain_core.runnables import RunnableLambda
full_chain = {"topic": chain, "question": lambda x: x["question"]} | RunnableLambda(route)
result = full_chain.invoke({"question": "how do I use LangChain?"})
print(result)
J-LangChain实现
// Dynamic-routing demo: a classifier chain labels the question as
// LangChain / Anthropic / Other, then the full chain routes to the matching
// expert sub-chain (mirrors LangChain's RunnableLambda routing example).
public void RouteDemo() {
    ChatOllama llm = ChatOllama.builder().model("llama3:8b").build();

    // Classifier prompt: answer with a single word, used as the routing key.
    BaseRunnable<StringPromptValue, Object> prompt = PromptTemplate.fromTemplate(
        """
        Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.
        Do not respond with more than one word.
        <question>
        ${question}
        </question>
        Classification:
        """
    );
    // chain 1: classify the question into a topic string.
    FlowInstance chain = chainActor.builder().next(prompt).next(llm).next(new StrOutputParser()).build();

    // Expert sub-chain for LangChain questions.
    FlowInstance langchainChain = chainActor.builder().next(PromptTemplate.fromTemplate(
        """
        You are an expert in langchain. \
        Always answer questions starting with "As Harrison Chase told me". \
        Respond to the following question:
        Question: ${question}
        Answer:
        """
    )).next(ChatOllama.builder().model("llama3:8b").build()).build();

    // Expert sub-chain for Anthropic questions.
    FlowInstance anthropicChain = chainActor.builder().next(PromptTemplate.fromTemplate(
        """
        You are an expert in anthropic. \
        Always answer questions starting with "As Dario Amodei told me". \
        Respond to the following question:
        Question: ${question}
        Answer:
        """
    )).next(ChatOllama.builder().model("llama3:8b").build()).build();

    // Fallback sub-chain with no persona.
    FlowInstance generalChain = chainActor.builder().next(PromptTemplate.fromTemplate(
        """
        Respond to the following question:
        Question: ${question}
        Answer:
        """
    )).next(ChatOllama.builder().model("llama3:8b").build()).build();

    FlowInstance fullChain = chainActor.builder()
        .next(chain)
        // Rebuild the routing input: the classification plus the original
        // question, re-read from the flow's initial parameters.
        .next(input -> Map.of("topic", input, "question", ((Map<?, ?>)ContextBus.get().getFlowParam()).get("question")))
        .next(
            Info.c("topic == 'anthropic'", anthropicChain),
            Info.c("topic == 'langchain'", langchainChain),
            Info.c(generalChain)
        ).build();

    AIMessage result = chainActor.invoke(fullChain, Map.of("question", "how do I use Anthropic?"));
    System.out.println(result.getContent());
}
动态构建
主chain 调用 子chain 1 获取加工后的用户问题;子chain 1 根据用户输入是否带 对话历史,判断是调用 子chain 2 根据历史改写用户问题,还是直接透传用户问题;主chain 根据最终问题,并添加 system 内容后,给出答案。
LangChain实现
from langchain_core.runnables import chain, RunnablePassthrough

llm = OllamaLLM(model="qwen2.5:0.5b")

# Rewrite the latest question into a standalone one, given the chat history.
contextualize_instructions = """Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text)."""
contextualize_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_instructions),
        ("placeholder", "{chat_history}"),
        ("human", "{question}"),
    ]
)
contextualize_question = contextualize_prompt | llm | StrOutputParser()

# Dynamic construction: the @chain-decorated function returns a *Runnable*,
# chosen at runtime depending on whether chat_history is present.
@chain
def contextualize_if_needed(input_: dict):
    if input_.get("chat_history"):
        return contextualize_question
    else:
        # No history: pass the question through unchanged.
        return RunnablePassthrough() | itemgetter("question")

# Stand-in for a real retriever; returns a fixed context string.
@chain
def fake_retriever(input_: dict):
    return "egypt's population in 2024 is about 111 million"

qa_instructions = (
    """Answer the user question given the following context:\n\n{context}."""
)
qa_prompt = ChatPromptTemplate.from_messages(
    [("system", qa_instructions), ("human", "{question}")]
)

# assign() augments the input dict with the rewritten question and the
# retrieved context before handing it to the QA prompt.
full_chain = (
    RunnablePassthrough.assign(question=contextualize_if_needed).assign(
        context=fake_retriever
    )
    | qa_prompt
    | llm
    | StrOutputParser()
)

result = full_chain.invoke({
    "question": "what about egypt",
    "chat_history": [
        ("human", "what's the population of indonesia"),
        ("ai", "about 276 million"),
    ],
})
print(result)
J-LangChain实现
// Dynamic-construction demo: conditionally rewrite the question using the chat
// history, fetch a (fake) context in parallel, then answer with a QA prompt.
public void DynamicDemo() {
    ChatOllama llm = ChatOllama.builder().model("llama3:8b").build();

    String contextualizeInstructions = """
        Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).""";
    BaseRunnable<ChatPromptValue, Object> contextualizePrompt = ChatPromptTemplate.fromMessages(
        List.of(
            Pair.of("system", contextualizeInstructions),
            // "placeholder" expands the ${chatHistory} list into messages.
            Pair.of("placeholder", "${chatHistory}"),
            Pair.of("human", "${question}")
        )
    );
    // Sub-chain 2: history-aware question rewriting.
    FlowInstance contextualizeQuestion = chainActor.builder()
        .next(contextualizePrompt)
        .next(llm)
        .next(new StrOutputParser())
        .build();

    // Sub-chain 1: rewrite only when a chat history is present; otherwise
    // pass the original question through unchanged.
    FlowInstance contextualizeIfNeeded = chainActor.builder().next(
        Info.c("chatHistory != null", contextualizeQuestion),
        Info.c(input -> Map.of("question", ((Map<String, String>)input).get("question")))
    ).build();

    String qaInstructions =
        """
        Answer the user question given the following context:\n\n${context}.
        """;
    BaseRunnable<ChatPromptValue, Object> qaPrompt = ChatPromptTemplate.fromMessages(
        List.of(
            Pair.of("system", qaInstructions),
            Pair.of("human", "${question}")
        )
    );

    // all(...) runs both branches and merges: the rewritten question plus a
    // fixed fake-retriever context, keyed by the alias "fakeRetriever".
    FlowInstance fullChain = chainActor.builder()
        .all(
            (iContextBus, isTimeout) -> Map.of(
                "question", iContextBus.getResult(contextualizeIfNeeded.getFlowId()).toString(),
                "context", iContextBus.getResult("fakeRetriever")),
            Info.c(contextualizeIfNeeded),
            Info.c(input -> "egypt's population in 2024 is about 111 million").cAlias("fakeRetriever")
        )
        .next(qaPrompt)
        // Debug tap: print the fully-rendered prompt before it reaches the model.
        .next(input -> {System.out.println(JsonUtil.toJson(input)); return input;})
        .next(llm)
        .next(new StrOutputParser())
        .build();

    ChatGeneration result = chainActor.invoke(fullChain,
        Map.of(
            "question", "what about egypt",
            "chatHistory",
            List.of(
                Pair.of("human", "what's the population of indonesia"),
                Pair.of("ai", "about 276 million")
            )
        )
    );
    System.out.println(result);
}