-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathllm.py
82 lines (67 loc) · 3.14 KB
/
llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# This file takes in a question and the clip results, and fetches the most plausible action
import langchain
import os
from langchain.llms import OpenAI
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from api import *
class LLM:
    """Answers a question with a LangChain LLM, optionally grounding the
    prompt in a CLIP scene description and/or constraining the reply to a
    fixed set of answer choices."""

    def __init__(self, model_name):
        # TODO: Add a "model verbosity" thing that can include an
        # "explain your reasoning" in the prompt
        if model_name != "openai":
            # Unknown backend: leave the model unset.
            self.model = None
        else:
            # NOTE(review): the key comes from api.openai_key_ronak and is
            # pushed into the process environment — confirm this is not a
            # checked-in secret and consider loading it from config instead.
            os.environ["OPENAI_API_KEY"] = openai_key_ronak
            self.model = OpenAI(model_name="text-davinci-003")
        # Query state; populated later via initialize().
        self.question = None
        self.clip_result = None
        self.answer_choices = None

    def initialize(self, question, clip_result, answer_choices):
        """Store the question, CLIP scene description, and answer choices
        that subsequent answer() calls will use."""
        self.question = question
        self.clip_result = clip_result
        self.answer_choices = answer_choices

    def build_template(self, show_choices, use_clip):
        """Assemble the prompt template for the current query.

        Returns a (template, input_variables, chain_dict) triple: the
        template string with {placeholders}, the placeholder names in order,
        and the mapping from placeholder name to stored value.
        """
        pieces = []
        variables = []
        values = {}
        if use_clip:
            pieces.append(
                "I am currently looking at a scene in which {clip_result}. Answer the following question: "
            )
            values["clip_result"] = self.clip_result
            variables.append("clip_result")
        else:
            pieces.append("Answer the following question: ")
        pieces.append("{question}? ")
        values["question"] = self.question
        variables.append("question")
        if show_choices:
            pieces.append(
                "Your answer choices are:{answer_choices}.Your answer should only include the answer choice.Even if you are unsure, use your best judgement and only respond with one of the given answer choices. "
            )
            values["answer_choices"] = self.answer_choices
            variables.append("answer_choices")
        else:
            pieces.append("Your answer is:")
        return "".join(pieces), variables, values

    def answer(self, show_choices, use_clip):
        """Build the prompt and run it through the LLM, returning the raw
        model completion."""
        template, variables, values = self.build_template(show_choices, use_clip)
        prompt = PromptTemplate(input_variables=variables, template=template)
        return LLMChain(llm=self.model, prompt=prompt).run(values)
if __name__ == "__main__":
    # Smoke test: exercise every (show_choices, use_clip) combination
    # against a sample question and CLIP scene description.
    llm = LLM(model_name="openai")
    llm.initialize(
        "how many people am I talking with",
        "a dog eating a human",
        ["zero", "one", "two", "three", "four"],
    )
    for show_choices, use_clip in [(True, True), (True, False), (False, True), (False, False)]:
        chosen_answer = llm.answer(show_choices=show_choices, use_clip=use_clip)
        # print(chosen_answer)