使用few-shot Prompt template让大模型更懂你
这里写几个例子,实现了:长度示例选择器、相关度示例选择器、相似性示例选择器三种:
创建一个使用少量示例的提示模板(Prompt template)。少量示例的提示模板可以从一组示例(examples)或一个示例选择器(ExampleSelector)对象构建
一.使用相似性示例选择器,选择小样本示例内容:
本示例实现了生成单词反义词的功能
直接记录代码:
# Example 1: semantic-similarity example selector for a few-shot antonym prompt.
# The selector embeds the user's input and picks the k most similar examples
# from the pool, so the model sees only the most relevant demonstrations.
from langchain_community.vectorstores import Chroma
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, \
    HumanMessagePromptTemplate, PromptTemplate, FewShotPromptTemplate
from langchain_huggingface import HuggingFaceEmbeddings
from Ai.llmqianfan import createLLM

# Qianfan LLM wrapper created in the previous article.
llm = createLLM()

# Candidate few-shot examples: word -> antonym pairs.
examples = [
    {"question": "大", "answer": "小"},
    {"question": "黑", "answer": "白"},
    {"question": "高", "answer": "矮"},
    {"question": "胖", "answer": "瘦"},
    {"question": "高兴", "answer": "伤心"},
    {"question": "漂亮", "answer": "丑陋"}
]

# Template used to render each selected example into the prompt.
template = """
单词:{question}
反义词:{answer}
"""
prompt = PromptTemplate(input_variables=["question", "answer"], template=template)

# Local HuggingFace embedding model (BAAI bge-small-zh-v1.5) used for the
# similarity search over the example pool.
embedding = HuggingFaceEmbeddings(model_name='../models/BAAI_bge-small-zh-v1.5')

example_selector = SemanticSimilarityExampleSelector.from_examples(
    # pool of candidate examples
    examples,
    # embedding model that powers the similarity search
    embedding,
    # vector store class to index the examples in (FAISS also works)
    Chroma,
    # number of examples to select
    k=1,
)

few_shot_prompt = FewShotPromptTemplate(
    # examples=examples,
    example_selector=example_selector,  # select the closest example(s) instead of using all
    example_prompt=prompt,
    prefix="请参考下面的事例,按事例格式,给出用户输入的单词的反义词:<example>",
    # NOTE: "\n" (real newline), not "\\n" — the original "\\n" injected a
    # literal backslash-n into the prompt text instead of a line break.
    suffix="</example>\n单词:{input}\n反义词:",
    input_variables=["input"],
    example_separator="\n"
)

p = few_shot_prompt.format(input="black")
res = llm.invoke(p)
print(res.content)
运行结果:
单词:black
反义词:white
进程已结束,退出代码为 0
代码中使用上节创建的千帆大模型接口
二.使用长度示例选择器实现:
# Example 2: length-based example selector — includes examples in order until
# their combined rendered length would exceed max_length, so short inputs get
# more demonstrations and long inputs get fewer.
from qifan_ai import CreateMyLLM
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
from langchain.chains import LLMChain

# Pool of word -> antonym demonstration pairs.
antonym_pairs = [
    {"input": "白色", "output": "黑色"},
    {"input": "高兴", "output": "悲伤"},
    {"input": "高", "output": "矮"},
    {"input": "上", "output": "下"},
    {"input": "快", "output": "慢"},
    {"input": "开始", "output": "结束"},
    {"input": "真实", "output": "虚假"},
    {"input": "兴高采烈", "output": "垂头丧气"},
    {"input": "言犹未尽", "output": "义不容辞"}
]

# Template that renders one demonstration pair.
pair_template = PromptTemplate(
    template="原词:{input}\n反义词:{output}",
    input_variables=["input", "output"],
)

# Length-based selector: len() measures each rendered example against the
# max_length budget of 30.
length_selector = LengthBasedExampleSelector(
    examples=antonym_pairs,
    example_prompt=pair_template,
    max_length=30,
    length_function=len,
)

antonym_prompt = FewShotPromptTemplate(
    example_selector=length_selector,
    example_prompt=pair_template,
    prefix="给出每个输入词的反义词:\n<example>",
    suffix="</example>\n原词:{my_input}\n反义词:",
    input_variables=["my_input"],
    example_separator="\n",
)

model = CreateMyLLM()
antonym_chain = LLMChain(llm=model, prompt=antonym_prompt, verbose=True)
print(antonym_chain.run("飞速前进"))
三.使用相关度示例选择器:(MMR)
# Example 3: max-marginal-relevance (MMR) example selector.
# MMR balances similarity to the input against diversity among the selected
# examples, avoiding k near-duplicate demonstrations.
# NOTE: the original snippet had this entire script collapsed onto a single
# line (invalid Python); restored here to proper multi-line formatting.
from langchain_community.embeddings import HuggingFaceEmbeddings
from qifan_ai import CreateMyLLM
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.prompts.example_selector import MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector
from langchain_community.vectorstores import Chroma

# Pool of word -> antonym demonstration pairs.
example = [
    {"input": "白色", "output": "黑色"},
    {"input": "高兴", "output": "悲伤"},
    {"input": "高", "output": "矮"},
    {"input": "上", "output": "下"},
    {"input": "快", "output": "慢"},
    {"input": "开始", "output": "结束"},
    {"input": "真实", "output": "虚假"},
    {"input": "兴高采烈", "output": "垂头丧气"},
    {"input": "言犹未尽", "output": "义不容辞"}
]

# Template that renders one demonstration pair.
prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="原词:{input}\n反义词:{output}"
)

# Local HuggingFace embedding model used for the relevance search.
embedding = HuggingFaceEmbeddings(model_name='../models/BAAI_bge-small-zh-v1.5')

# MMR (max-marginal-relevance) selector: pick k=2 examples that are relevant
# to the input yet diverse from each other.
example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples=example, embeddings=embedding, vectorstore_cls=Chroma, k=2
)
# Cosine-similarity alternative: swapping in this selector turns the demo into
# a pure similarity search (no diversity term).
# sim_example_selector = SemanticSimilarityExampleSelector.from_examples(
#     examples=example, embeddings=embedding, vectorstore_cls=Chroma, k=3
# )

few_shot_prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=prompt,
    prefix="给出每个输入词的反义词:\n<example>",
    suffix="</example>\n原词:{my_input}\n反义词:",
    input_variables=["my_input"],
    example_separator="\n"
)

llm = CreateMyLLM()
chain = LLMChain(llm=llm, prompt=few_shot_prompt, verbose=True)
res = chain.run("十分高兴")
print(res)