Mirror of https://github.com/The-Art-of-Hacking/h4cker, synced 2024-11-21 18:33:03 +00:00
Create rag_basic_example.py
This commit is contained in:
parent b413d5bac2
commit ef3e6a99d1
1 changed file with 54 additions and 0 deletions
ai_research/LangChain/rag_basic_example.py | 54 (Normal file)
@@ -0,0 +1,54 @@
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough

# Step 1: Load documents
loader = WebBaseLoader("https://example.com")
documents = loader.load()

# Step 2: Split the documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
chunks = text_splitter.split_documents(documents)

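# Note (an assumption, not in the original commit): chunk_size is
# measured in characters, and chunk_overlap repeats a small amount of
# text across adjacent chunks so an answer that spans a boundary still
# lands intact in at least one retrieved chunk.
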
# Step 3: Create the embedding model
embedding_model = OpenAIEmbeddings()

# Step 4: Embed the chunks and store them in a FAISS vector store
vector_store = FAISS.from_documents(chunks, embedding_model)

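# Optional (a sketch, not part of the original commit): the index can
# be persisted so later runs skip re-embedding:
#   vector_store.save_local("faiss_index")
#   vector_store = FAISS.load_local("faiss_index", embedding_model)
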
# Step 5: Create a retriever over the vector store
retriever = vector_store.as_retriever()

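# Note (an assumption, not in the original commit): as_retriever()
# accepts search parameters, e.g. how many chunks to return per query:
#   retriever = vector_store.as_retriever(search_kwargs={"k": 4})
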
# Step 6: Define the prompt template
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Step 7: Create the language model
model = ChatOpenAI()

# Step 8: Define the output parser
output_parser = StrOutputParser()

# Step 9: Define the RAG pipeline; the retriever fills {context}
# while the question passes through unchanged
pipeline = {
    "context": retriever,
    "question": RunnablePassthrough(),
} | prompt | model | output_parser

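# Optional refinement (a common LCEL pattern, not part of the original
# commit): the retriever returns Document objects, so {context} is
# filled with their list repr; joining the page contents into plain
# text usually gives the model cleaner input:
#
#   def format_docs(docs):
#       return "\n\n".join(doc.page_content for doc in docs)
#
#   pipeline = {
#       "context": retriever | format_docs,
#       "question": RunnablePassthrough(),
#   } | prompt | model | output_parser
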
# Step 10: Invoke the RAG pipeline with a question; the bare string
# is routed to both the retriever and RunnablePassthrough
question = "What is the capital of France?"
answer = pipeline.invoke(question)

# Step 11: Print the answer
print(answer)
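
# Usage notes (a sketch, not part of the original commit): the script
# expects OPENAI_API_KEY in the environment before OpenAIEmbeddings and
# ChatOpenAI are constructed, e.g.
#   export OPENAI_API_KEY=sk-...   # hypothetical placeholder
# Every LCEL chain also supports token streaming:
#   for chunk in pipeline.stream(question):
#       print(chunk, end="", flush=True)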