Data preparation
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]
# Load each page and flatten the per-page lists into one list of documents
docs = [WebBaseLoader(url).load() for url in urls]
docs_list = [item for sublist in docs for item in sublist]
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=250, chunk_overlap=0
)
doc_splits = text_splitter.split_documents(docs_list)
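A quick, optional sanity check (my addition, not in the original tutorial): print how many chunks were produced and preview one of them before indexing.

# Optional sanity check on the chunks (not part of the original tutorial)
print(len(doc_splits))
print(doc_splits[0].page_content[:200])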
from langchain_community.embeddings import ZhipuAIEmbeddings
embed = ZhipuAIEmbeddings(
model="Embedding-3",
api_key="your api key",
)
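Before embedding hundreds of chunks, it is worth verifying the API key works. A minimal check (my addition); embed_query is the standard LangChain embeddings method, and the printed dimension depends on the model:

# Optional: confirm the embedding endpoint responds before bulk indexing
vec = embed.embed_query("hello world")
print(len(vec))  # embedding dimension; model-dependent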
# Add to vectorDB
batch_size = 10
for i in range(0, len(doc_splits), batch_size):
    # Python slicing is bounds-safe, so no explicit min() guard is needed
    batch = doc_splits[i : i + batch_size]
    # Re-using the same collection_name and persist_directory appends each
    # batch to one persisted Chroma collection
    vectorstore = Chroma.from_documents(
        documents=batch,
        collection_name="rag-chroma",
        embedding=embed,
        persist_directory="./chroma_db",
    )
retriever = vectorstore.as_retriever()
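By default the retriever returns the top 4 chunks. If you want that cutoff to be explicit, search_kwargs is the standard Chroma retriever option (a sketch; k=4 just makes the default visible):

# Optional: make the number of retrieved chunks explicit
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})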
The retrieval_grader LLM
### Retrieval Grader
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
# Data model
class GradeDocuments(BaseModel):
"""Binary score for relevance check on retrieved documents."""
binary_score: str = Field(
description="Documents are relevant to the question, 'yes' or 'no'"
)
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4-plus",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
)
structured_llm_grader = llm.with_structured_output(GradeDocuments)
# Prompt
system = """You are a grader assessing relevance of a retrieved document to a user question. \n
If the document contains keyword(s) or semantic meaning related to the question, grade it as relevant. \n
Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question."""
grade_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "Retrieved document: \n\n {document} \n\n User question: {question}"),
    ]
)
retrieval_grader = grade_prompt | structured_llm_grader
Test:
question = "agent memory"
docs = retriever.invoke(question)
doc_txt = docs[1].page_content
print(retrieval_grader.invoke({"question": question, "document": doc_txt}))
binary_score='yes'
The generation LLM
### Generate
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
# Prompt
prompt = hub.pull("rlm/rag-prompt")
# LLM
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4-plus",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
)
# Post-processing
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
# Chain
rag_chain = prompt | llm | StrOutputParser()
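Note that hub.pull needs network access to the LangChain prompt hub. If that is inconvenient, a local prompt in the same spirit works as a drop-in; the wording below only approximates rlm/rag-prompt and is not an exact copy:

# Fallback: a local approximation of rlm/rag-prompt (an assumption, not an exact copy)
local_rag_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            "You are an assistant for question-answering tasks. "
            "Use the following pieces of retrieved context to answer the question. "
            "If you don't know the answer, just say that you don't know. "
            "Use three sentences maximum and keep the answer concise.\n"
            "Question: {question}\nContext: {context}\nAnswer:",
        ),
    ]
)
# rag_chain = local_rag_prompt | llm | StrOutputParser()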
Test:
generation = rag_chain.invoke({"context": docs, "question": question})
print(generation)
In a LLM-powered autonomous agent system, memory is a key component, encompassing various types of memory and utilizing techniques like Maximum Inner Product Search (MIPS) to enhance the agent's functionality. This memory component complements the LLM, which acts as the agent's brain, enabling it to perform complex tasks and problem-solving. The integration of memory is crucial for the agent's ability to learn, adapt, and make informed decisions.
The question_rewriter LLM
### Question Re-writer
# LLM
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4-plus",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
)
# Prompt
system = """You a question re-writer that converts an input question to a better version that is optimized \n
for web search. Look at the input and try to reason about the underlying semantic intent / meaning."""
re_write_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        (
            "human",
            "Here is the initial question: \n\n {question} \n Formulate an improved question.",
        ),
    ]
)
class Question(BaseModel):
"""an improved question"""
question: str = Field(
description="Improved Question"
)
question_rewriter = re_write_prompt | llm.with_structured_output(Question)
Test:
qq = question_rewriter.invoke({"question": question})
print(qq)
question='What is an agent memory in the context of artificial intelligence and machine learning?'
The search tool
### Search
import os
from langchain_community.tools.tavily_search import TavilySearchResults
os.environ["TAVILY_API_KEY"] = "your api key"
web_search_tool = TavilySearchResults(k=3)
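The tool can be tested on its own (my addition); in the version used here, invoke returns a list of dicts with a "content" field, though newer releases may differ:

# Optional: exercise the search tool directly
results = web_search_tool.invoke({"query": "What is prompt engineering?"})
print(results[0]["content"][:200])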
Defining the graph State
from typing import List
from typing_extensions import TypedDict
class GraphState(TypedDict):
"""
Represents the state of our graph.
Attributes:
question: question
generation: LLM generation
web_search: whether to add search
documents: list of documents
"""
question: str
generation: str
web_search: str
documents: List[str]
from langchain.schema import Document
def retrieve(state):
"""
Retrieve documents
Args:
state (dict): The current graph state
Returns:
state (dict): New key added to state, documents, that contains retrieved documents
"""
print("---RETRIEVE---")
question = state["question"]
# Retrieval
documents = retriever.invoke(question)
return {"documents": documents, "question": question}
def generate(state):
"""
Generate answer
Args:
state (dict): The current graph state
Returns:
state (dict): New key added to state, generation, that contains LLM generation
"""
print("---GENERATE---")
question = state["question"]
documents = state["documents"]
# RAG generation
generation = rag_chain.invoke({"context": documents, "question": question})
return {"documents": documents, "question": question, "generation": generation}
def grade_documents(state):
"""
Determines whether the retrieved documents are relevant to the question.
Args:
state (dict): The current graph state
Returns:
state (dict): Updates documents key with only filtered relevant documents
"""
print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
question = state["question"]
documents = state["documents"]
# Score each doc
filtered_docs = []
web_search = "No"
for d in documents:
score = retrieval_grader.invoke(
{"question": question, "document": d.page_content}
)
grade = score.binary_score
if grade == "yes":
print("---GRADE: DOCUMENT RELEVANT---")
filtered_docs.append(d)
else:
print("---GRADE: DOCUMENT NOT RELEVANT---")
web_search = "Yes"
continue
return {"documents": filtered_docs, "question": question, "web_search": web_search}
def transform_query(state):
"""
Transform the query to produce a better question.
Args:
state (dict): The current graph state
Returns:
state (dict): Updates question key with a re-phrased question
"""
print("---TRANSFORM QUERY---")
question = state["question"]
documents = state["documents"]
# Re-write question
better_question = question_rewriter.invoke({"question": question})
print(better_question)
return {"documents": documents, "question": better_question.question}
def web_search(state):
"""
Web search based on the re-phrased question.
Args:
state (dict): The current graph state
Returns:
state (dict): Updates documents key with appended web results
"""
print("---WEB SEARCH---")
question = state["question"]
documents = state["documents"]
# Web search
docs = web_search_tool.invoke({"query": question})
web_results = "\n".join([d["content"] for d in docs])
web_results = Document(page_content=web_results)
documents.append(web_results)
return {"documents": documents, "question": question}
### Edges
def decide_to_generate(state):
"""
Determines whether to generate an answer, or re-generate a question.
Args:
state (dict): The current graph state
Returns:
str: Binary decision for next node to call
"""
print("---ASSESS GRADED DOCUMENTS---")
state["question"]
web_search = state["web_search"]
state["documents"]
if web_search == "Yes":
# All documents have been filtered check_relevance
# We will re-generate a new query
print(
"---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---"
)
return "transform_query"
else:
# We have relevant documents, so generate answer
print("---DECISION: GENERATE---")
return "generate"
Building the graph
from langgraph.graph import END, StateGraph, START
workflow = StateGraph(GraphState)
# Define the nodes
workflow.add_node("retrieve", retrieve) # retrieve
workflow.add_node("grade_documents", grade_documents) # grade documents
workflow.add_node("generate", generate) # generatae
workflow.add_node("transform_query", transform_query) # transform_query
workflow.add_node("web_search_node", web_search) # web search
# Build graph
workflow.add_edge(START, "retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
"grade_documents",
decide_to_generate,
{
"transform_query": "transform_query",
"generate": "generate",
},
)
workflow.add_edge("transform_query", "web_search_node")
workflow.add_edge("web_search_node", "generate")
workflow.add_edge("generate", END)
# Compile
app = workflow.compile()
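Besides streaming node-by-node (shown below), the compiled graph can also be run in one call with invoke, which returns the final state:

# Optional: run the whole graph in one call instead of streaming
final_state = app.invoke({"question": "What is the Chain of thought?"})
print(final_state["generation"])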
Visualizing the graph
from IPython.display import Image, display
try:
    display(Image(app.get_graph(xray=True).draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
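If the PNG renderer's extra dependencies are missing, the Mermaid source can be printed instead and pasted into any Mermaid viewer:

# Fallback: emit the Mermaid definition as text
print(app.get_graph(xray=True).draw_mermaid())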
Query with relevant documents
from pprint import pprint
# Run
inputs = {"question": "What is the Chain of thought?"}
for output in app.stream(inputs):
    for key, value in output.items():
        # Node
        pprint(f"Node '{key}':")
    pprint("\n---\n")

# Final generation
pprint(value["generation"])
---RETRIEVE---
"Node 'retrieve':"
'\n---\n'
---CHECK DOCUMENT RELEVANCE TO QUESTION---
---GRADE: DOCUMENT RELEVANT---
---GRADE: DOCUMENT RELEVANT---
---GRADE: DOCUMENT RELEVANT---
---GRADE: DOCUMENT RELEVANT---
---ASSESS GRADED DOCUMENTS---
---DECISION: GENERATE---
"Node 'grade_documents':"
'\n---\n'
---GENERATE---
"Node 'generate':"
'\n---\n'
('The Chain of Thought (CoT) is a reasoning method used in language models to '
'improve their problem-solving abilities by breaking down complex tasks into '
'a series of logical steps. It involves generating intermediate reasoning '
'steps before arriving at a final answer, enhancing transparency and '
'accuracy. This technique is particularly useful in tasks requiring '
'multi-step reasoning and is often employed in conjunction with prompt '
"engineering to guide the model's behavior effectively.")
Query with irrelevant documents (web-search fallback)
from pprint import pprint
# Run
inputs = {"question": "How does the AlphaCodium paper work?"}
for output in app.stream(inputs):
    for key, value in output.items():
        # Node
        pprint(f"Node '{key}':")
    pprint("\n---\n")

# Final generation
pprint(value["generation"])
---RETRIEVE---
"Node 'retrieve':"
'\n---\n'
---CHECK DOCUMENT RELEVANCE TO QUESTION---
---GRADE: DOCUMENT NOT RELEVANT---
---GRADE: DOCUMENT NOT RELEVANT---
---GRADE: DOCUMENT NOT RELEVANT---
---GRADE: DOCUMENT NOT RELEVANT---
---ASSESS GRADED DOCUMENTS---
---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---
"Node 'grade_documents':"
'\n---\n'
---TRANSFORM QUERY---
question="What is the mechanism behind AlphaCodium's functionality as described in the research paper?"
"Node 'transform_query':"
'\n---\n'
---WEB SEARCH---
"Node 'web_search_node':"
'\n---\n'
---GENERATE---
"Node 'generate':"
'\n---\n'
('AlphaCodium functions through a test-based, multi-stage, code-oriented '
'iterative flow, repeatedly running and fixing generated code against '
'input-output tests. It enhances the process by generating additional data '
'like problem reflection and test reasoning to aid iterations. This approach '
'significantly improves the performance of LLMs on code problems, as '
'demonstrated on the CodeContests dataset.')
Notes:
The original code in the official tutorial only asks the model for a plain string, which often fails with the ChatGLM models:
question_rewriter = re_write_prompt | llm | StrOutputParser()
Change the re-writer to return a typed, structured output instead:
class Question(BaseModel):
"""an improved question"""
question: str = Field(
description="Improved Question"
)
question_rewriter = re_write_prompt | llm.with_structured_output(Question)
and, in the transform_query function, change better_question to better_question.question:
def transform_query(state):
"""
Transform the query to produce a better question.
Args:
state (dict): The current graph state
Returns:
state (dict): Updates question key with a re-phrased question
"""
print("---TRANSFORM QUERY---")
question = state["question"]
documents = state["documents"]
# Re-write question
better_question = question_rewriter.invoke({"question": question})
print(better_question)
return {"documents": documents, "question": better_question.question}
With these two changes, the pipeline runs successfully.
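Optionally, for extra robustness, you can keep the original question whenever structured-output parsing still fails; the try/except below is my own addition, not part of the official tutorial:

# Optional hardening: fall back to the original question on parse failures
def transform_query(state):
    print("---TRANSFORM QUERY---")
    question = state["question"]
    documents = state["documents"]
    try:
        better_question = question_rewriter.invoke({"question": question}).question
    except Exception:
        better_question = question  # fall back if the rewrite fails
    return {"documents": documents, "question": better_question}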
Reference: https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_crag/#llms
If you have any questions, feel free to ask in the comments.