1) Environment setup

!pip install sentence-transformers transformers faiss-gpu  # use faiss-cpu on machines without a CUDA GPU

2) Implementation

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# 1. Load a SentenceTransformer model for generating embeddings
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Sample documents
documents = [
    "Milvus is an open-source vector database",
    "RAG is a technique to enhance generation using retrieval",
    "Python is a popular programming language",
    "Machine learning enables models to learn from data",
    "Faiss is a library for efficient similarity search and clustering of dense vectors"
]

# 2. Generate embedding vectors for the documents
embeddings = model.encode(documents)
embeddings = np.array(embeddings).astype('float32')

# 3. Build a Faiss index (L2 distance)
dimension = embeddings.shape[1]  # dimensionality of the embedding vectors
index = faiss.IndexFlatL2(dimension)
index.add(embeddings)
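
IndexFlatL2 performs an exact brute-force search, which is fine for a handful of documents but scales linearly with corpus size. For larger corpora, Faiss offers approximate indexes such as IVF. Below is a minimal, self-contained sketch with random vectors (the five sample documents above are too few to train a clustered index, and the nlist/nprobe values are illustrative):

# Sketch: approximate nearest-neighbor search with an IVF index.
import numpy as np
import faiss

d = 384                                   # paraphrase-MiniLM-L6-v2 also outputs 384-dim vectors
xb = np.random.rand(10000, d).astype('float32')

nlist = 100                               # number of clusters to partition the vectors into
quantizer = faiss.IndexFlatL2(d)          # coarse quantizer that assigns vectors to clusters
ivf_index = faiss.IndexIVFFlat(quantizer, d, nlist)
ivf_index.train(xb)                       # IVF indexes must be trained before adding data
ivf_index.add(xb)
ivf_index.nprobe = 10                     # clusters scanned per query; higher = more accurate, slower

D, I = ivf_index.search(xb[:1], 3)        # returns distances and indices, like IndexFlatL2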

# 4. Encode the query into a vector
query = "What is Faiss used for?"
query_embedding = model.encode([query]).astype('float32')

# 5. Retrieve the most similar documents from Faiss
k = 3  # number of top matches to retrieve
distances, indices = index.search(query_embedding, k)

# 6. Show the most similar documents
print("Top results:")
for i, idx in enumerate(indices[0]):
    print(f"Document {idx}: {documents[idx]}, Score: {distances[0][i]}")

# 7. Load GPT-2 for retrieval-augmented generation
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")

# Pass the retrieved documents to GPT-2 as context for generation
context = " ".join([documents[idx] for idx in indices[0]])
input_text = f"Context: {context}\nQuestion: {query}"

inputs = tokenizer.encode(input_text, return_tensors="pt")
output = gpt2_model.generate(
    inputs,
    max_new_tokens=50,  # token budget for the answer, independent of prompt length
    num_return_sequences=1,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; this silences the warning
)

generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print("\nGenerated Response:", generated_text)

3) Reading PDF and DOCX files

!pip install PyMuPDF python-docx

import fitz  # PyMuPDF
import docx

# Read a PDF file
def read_pdf(file_path):
    pdf_text = ""
    with fitz.open(file_path) as doc:
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            pdf_text += page.get_text("text")
    return pdf_text

# Read a DOCX file
def read_docx(file_path):
    doc = docx.Document(file_path)
    doc_text = "\n".join([para.text for para in doc.paragraphs])
    return doc_text
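
A whole PDF or DOCX is usually far too long to embed as a single vector; in practice the text is split into overlapping chunks before embedding, so retrieval can return the relevant passage rather than the whole file. Here is a minimal character-based chunker (chunk_text is a hypothetical helper; the sizes are illustrative):

# Sketch: split long text into overlapping character windows for embedding.
def chunk_text(text, chunk_size=500, overlap=50):
    chunks = []
    step = chunk_size - overlap
    for start in range(0, len(text), step):
        chunk = text[start:start + chunk_size]
        if chunk.strip():                  # skip whitespace-only tails
            chunks.append(chunk)
    return chunks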

# File paths used in this example
pdf_file = "/content/cgft-llm/llama-index/pdf/Subclass 500 Student visa.pdf"  # replace with your own PDF path
docx_file = "/content/cgft-llm/llama-index/docs/3.98万L4无人车来了!卷出行业新低,1小时卖掉半年产能.txt"  # replace with your own DOCX path; this sample actually points at a .txt file, hence read_docx stays commented out below

# Read the file contents
pdf_text = read_pdf(pdf_file)
# docx_text = read_docx(docx_file)

# Use the loaded text as the documents list
# documents = [pdf_text, docx_text]

# Print the loaded content (here only a snippet)
print("PDF content:", pdf_text[:200])  # first 200 characters
# print("DOCX content:", docx_text[:200])  # first 200 characters

4) Reading TXT files

# Read a TXT file
def read_txt(file_path):
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()

# Path to the TXT file; adjust to your own
txt_file = "/content/cgft-llm/llama-index/docs/3.98万L4无人车来了!卷出行业新低,1小时卖掉半年产能.txt"

# Read the file contents
txt_text = read_txt(txt_file)

# Print the loaded content (here only a snippet)
print("TXT content:", txt_text[:200])  # first 200 characters
