Fixed OpenAI calls
parent fe05a71d3e
commit 716567c50b
4  reverie/backend_server/input.txt  Normal file

@@ -0,0 +1,4 @@
+base_the_ville_isabella_maria_klaus
+test-simulation
+run 1
+fin
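The four added lines script the interactive startup prompts of reverie.py: the base environment to fork (base_the_ville_isabella_maria_klaus), a name for the new simulation (test-simulation), a one-step run (run 1), and fin to save and exit. Presumably the file is meant to be fed to the server over stdin, e.g. python reverie.py < input.txt, so a smoke test needs no typing.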
BIN  reverie/backend_server/output.txt  Normal file

Binary file not shown.
reverie/backend_server/persona/prompt_template/run_gpt_prompt.py

@@ -66,7 +66,7 @@ def run_gpt_prompt_wake_up_hour(persona, test_input=None, verbose=False):
    fs = 8
    return fs

-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 5,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 5,
               "temperature": 0.8, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  prompt_template = "persona/prompt_template/v2/wake_up_hour_v1.txt"
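Every hunk in this file follows the same one-line pattern: only the "engine" value of a gpt_param dict changes, replacing the retired text-davinci-002/003 completion models with their drop-in successor gpt-3.5-turbo-instruct. The dicts are consumed by the completion helper (GPT_request in gpt_structure.py upstream), which forwards each key to the completions endpoint, so no other call-site change is needed. A minimal sketch of that consumer, assuming the pre-1.0 openai client this codebase uses:

import openai

def GPT_request(prompt, gpt_parameter):
    # Each gpt_param dict in the hunks below is unpacked key for key,
    # so swapping the "engine" value is the entire migration.
    try:
        response = openai.Completion.create(
            model=gpt_parameter["engine"],
            prompt=prompt,
            temperature=gpt_parameter["temperature"],
            max_tokens=gpt_parameter["max_tokens"],
            top_p=gpt_parameter["top_p"],
            frequency_penalty=gpt_parameter["frequency_penalty"],
            presence_penalty=gpt_parameter["presence_penalty"],
            stream=gpt_parameter["stream"],
            stop=gpt_parameter["stop"])
        return response.choices[0].text
    except Exception:
        print("TOKEN LIMIT EXCEEDED")
        return "TOKEN LIMIT EXCEEDED"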
@@ -138,7 +138,7 @@ def run_gpt_prompt_daily_plan(persona,



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 500,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 500,
               "temperature": 1, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/daily_planning_v6.txt"

@@ -265,7 +265,7 @@ def run_gpt_prompt_generate_hourly_schedule(persona,
  # # ChatGPT Plugin ===========================================================


-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0.5, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  prompt_template = "persona/prompt_template/v2/generate_hourly_schedule_v2.txt"

@@ -426,7 +426,7 @@ def run_gpt_prompt_task_decomp(persona,
    fs = ["asleep"]
    return fs

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 1000,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/task_decomp_v3.txt"

@@ -602,7 +602,7 @@ def run_gpt_prompt_action_sector(action_description,



-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v1/action_location_sector_v1.txt"

@@ -699,7 +699,7 @@ def run_gpt_prompt_action_arena(action_description,
    fs = ("kitchen")
    return fs

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v1/action_location_object_vMar11.txt"
@@ -755,7 +755,7 @@ def run_gpt_prompt_action_game_object(action_description,
    fs = ("bed")
    return fs

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v1/action_object_v2.txt"

@@ -825,7 +825,7 @@ def run_gpt_prompt_pronunciatio(action_description, persona, verbose=False):
    return True

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 4") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/generate_pronunciatio_v1.txt" ########

@@ -844,7 +844,7 @@ def run_gpt_prompt_pronunciatio(action_description, persona, verbose=False):



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 15,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  # prompt_template = "persona/prompt_template/v2/generate_pronunciatio_v1.txt"

@@ -933,7 +933,7 @@ def run_gpt_prompt_event_triple(action_description, persona, verbose=False):



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 30,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"

@@ -1001,7 +1001,7 @@ def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=Fals
    return True

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 6") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/generate_obj_event_v1.txt" ########

@@ -1018,7 +1018,7 @@ def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=Fals



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 30,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  # prompt_template = "persona/prompt_template/v2/generate_obj_event_v1.txt"

@@ -1066,7 +1066,7 @@ def run_gpt_prompt_act_obj_event_triple(act_game_object, act_obj_desc, persona,
    fs = (act_game_object, "is", "idle")
    return fs

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 30,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 30,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]}
  prompt_template = "persona/prompt_template/v2/generate_event_triple_v1.txt"
@@ -1206,7 +1206,7 @@ def run_gpt_prompt_new_decomp_schedule(persona,

    return ret

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 1000,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/new_decomp_schedule_v1.txt"

@@ -1320,7 +1320,7 @@ def run_gpt_prompt_decide_to_talk(persona, target_persona, retrieved,test_input=



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 20,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/decide_to_talk_v2.txt"

@@ -1418,7 +1418,7 @@ def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input
    return fs


-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 20,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 20,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/decide_to_react_v1.txt"

@@ -1561,7 +1561,7 @@ def run_gpt_prompt_create_conversation(persona, target_persona, curr_loc,
    return convo


-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 1000,
               "temperature": 0.7, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/create_conversation_v2.txt"

@@ -1626,7 +1626,7 @@ def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None


  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 11") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_conversation_v1.txt" ########

@@ -1642,7 +1642,7 @@ def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None
  # ChatGPT Plugin ===========================================================


-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/summarize_conversation_v1.txt"
@@ -1696,7 +1696,7 @@ def run_gpt_prompt_extract_keywords(persona, description, test_input=None, verbo
  def get_fail_safe():
    return []

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/get_keywords_v1.txt"

@@ -1741,7 +1741,7 @@ def run_gpt_prompt_keyword_to_thoughts(persona, keyword, concept_summary, test_i
  def get_fail_safe():
    return ""

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 40,
               "temperature": 0.7, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/keyword_to_thoughts_v1.txt"

@@ -1796,7 +1796,7 @@ def run_gpt_prompt_convo_to_thoughts(persona,
  def get_fail_safe():
    return ""

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 40,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 40,
               "temperature": 0.7, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/convo_to_thoughts_v1.txt"

@@ -1879,7 +1879,7 @@ def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None,
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 7") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_event_v1.txt" ########

@@ -1897,7 +1897,7 @@ def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None,



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 3,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/poignancy_event_v1.txt"

@@ -1950,7 +1950,7 @@ def run_gpt_prompt_thought_poignancy(persona, event_description, test_input=None
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 8") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_thought_v1.txt" ########

@@ -1967,7 +1967,7 @@ def run_gpt_prompt_thought_poignancy(persona, event_description, test_input=None



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 3,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/poignancy_thought_v1.txt"
@@ -2022,7 +2022,7 @@ def run_gpt_prompt_chat_poignancy(persona, event_description, test_input=None, v
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 9") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/poignancy_chat_v1.txt" ########

@@ -2040,7 +2040,7 @@ def run_gpt_prompt_chat_poignancy(persona, event_description, test_input=None, v



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 3,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 3,
  #              "temperature": 0, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/poignancy_chat_v1.txt"

@@ -2098,7 +2098,7 @@ def run_gpt_prompt_focal_pt(persona, statements, n, test_input=None, verbose=Fal


  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 12") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/generate_focal_pt_v1.txt" ########

@@ -2118,7 +2118,7 @@ def run_gpt_prompt_focal_pt(persona, statements, n, test_input=None, verbose=Fal



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 150,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/generate_focal_pt_v1.txt"

@@ -2169,7 +2169,7 @@ def run_gpt_prompt_insight_and_guidance(persona, statements, n, test_input=None,



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 150,
               "temperature": 0.5, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/insight_and_evidence_v1.txt"

@@ -2225,7 +2225,7 @@ def run_gpt_prompt_agent_chat_summarize_ideas(persona, target_persona, statement
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 17") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_chat_ideas_v1.txt" ########

@@ -2242,7 +2242,7 @@ def run_gpt_prompt_agent_chat_summarize_ideas(persona, target_persona, statement



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 150,
  #              "temperature": 0.5, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/summarize_chat_ideas_v1.txt"
@@ -2293,7 +2293,7 @@ def run_gpt_prompt_agent_chat_summarize_relationship(persona, target_persona, st
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 18") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_chat_relationship_v2.txt" ########

@@ -2309,7 +2309,7 @@ def run_gpt_prompt_agent_chat_summarize_relationship(persona, target_persona, st
  # ChatGPT Plugin ===========================================================


-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 150,
  #              "temperature": 0.5, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/summarize_chat_relationship_v1.txt"

@@ -2421,7 +2421,7 @@ def run_gpt_prompt_agent_chat(maze, persona, target_persona,


  # print ("HERE JULY 23 -- ----- ") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/agent_chat_v1.txt" ########

@@ -2442,7 +2442,7 @@ def run_gpt_prompt_agent_chat(maze, persona, target_persona,



-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 2000,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 2000,
  #              "temperature": 0.7, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/agent_chat_v1.txt"

@@ -2502,7 +2502,7 @@ def run_gpt_prompt_summarize_ideas(persona, statements, question, test_input=Non
    return False

  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 16") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/summarize_ideas_v1.txt" ########

@@ -2518,7 +2518,7 @@ def run_gpt_prompt_summarize_ideas(persona, statements, question, test_input=Non
  # ChatGPT Plugin ===========================================================


-  # gpt_param = {"engine": "text-davinci-003", "max_tokens": 150,
+  # gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 150,
  #              "temperature": 0.5, "top_p": 1, "stream": False,
  #              "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  # prompt_template = "persona/prompt_template/v2/summarize_ideas_v1.txt"
@@ -2593,7 +2593,7 @@ def run_gpt_prompt_generate_next_convo_line(persona, interlocutor_desc, prev_con



-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 250,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 250,
               "temperature": 1, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/generate_next_convo_line_v1.txt"

@@ -2633,7 +2633,7 @@ def run_gpt_prompt_generate_whisper_inner_thought(persona, whisper, test_input=N
  def get_fail_safe():
    return "..."

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/whisper_inner_thought_v1.txt"

@@ -2670,7 +2670,7 @@ def run_gpt_prompt_planning_thought_on_convo(persona, all_utt, test_input=None,
  def get_fail_safe():
    return "..."

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/planning_thought_on_convo_v1.txt"

@@ -2721,7 +2721,7 @@ def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=Fals


  print ("asdhfapsh8p9hfaiafdsi;ldfj as DEBUG 15") ########
-  gpt_param = {"engine": "text-davinci-002", "max_tokens": 15,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 15,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v3_ChatGPT/memo_on_convo_v1.txt" ########

@@ -2736,7 +2736,7 @@ def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=Fals
    return output, [output, prompt, gpt_param, prompt_input, fail_safe]
  # ChatGPT Plugin ===========================================================

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  prompt_template = "persona/prompt_template/v2/memo_on_convo_v1.txt"
@@ -2790,7 +2790,7 @@ def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=Fal
                                        __chat_func_validate, __chat_func_clean_up, verbose)
  print (output)

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  return output, [output, prompt, gpt_param, prompt_input, fail_safe]

@@ -2905,7 +2905,7 @@ def run_gpt_generate_iterative_chat_utt(maze, init_persona, target_persona, retr
                                        __chat_func_validate, __chat_func_clean_up, verbose)
  print (output)

-  gpt_param = {"engine": "text-davinci-003", "max_tokens": 50,
+  gpt_param = {"engine": "gpt-3.5-turbo-instruct", "max_tokens": 50,
               "temperature": 0, "top_p": 1, "stream": False,
               "frequency_penalty": 0, "presence_penalty": 0, "stop": None}
  return output, [output, prompt, gpt_param, prompt_input, fail_safe]
reverie/backend_server/test.py

@@ -9,10 +9,15 @@ import random
import openai
import time

+from gpt4all import GPT4All
from utils import *
-openai.api_key = openai_api_key

-def ChatGPT_request(prompt):
+if use_openai:
+  openai.api_key = openai_api_key
+else:
+  model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf", device="gpu", n_threads=4)
+
+def GPT_request(prompt):
  """
  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
  server and returns the response.
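Both use_openai and openai_api_key reach this file through the star import from utils; that module is user-supplied (it is gitignored upstream), so its exact contents here are an assumption, roughly:

# utils.py (assumed contents; supplied by the user, not committed)
openai_api_key = "<your OpenAI API key>"
use_openai = False  # False routes every request to the local GPT4All model

When the flag is false, the GPT4All constructor fetches mistral-7b-instruct-v0.1.Q4_0.gguf on first use; device="gpu" and n_threads are standard options of the gpt4all Python bindings.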
@@ -25,16 +30,20 @@ def ChatGPT_request(prompt):
    a str of GPT-3's response.
  """
  # temp_sleep()
-  try:
-    completion = openai.ChatCompletion.create(
-    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": prompt}]
-    )
-    return completion["choices"][0]["message"]["content"]
+  if use_openai:
+    try:
+      completion = openai.ChatCompletion.create(
+      model="gpt-3.5-turbo",
+      messages=[{"role": "user", "content": prompt}]
+      )
+      return completion["choices"][0]["message"]["content"]

-  except:
-    print ("ChatGPT ERROR")
-    return "ChatGPT ERROR"
+    except:
+      print ("ChatGPT ERROR")
+      return "ChatGPT ERROR"
+  else:
+    response = model.generate(prompt)
+    return response

prompt = """
---
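Applied together, the two hunks leave the renamed helper reading roughly like the sketch below; model is the module-level GPT4All instance, created only when use_openai is false, and the bare except is kept from the original code:

def GPT_request(prompt):
    if use_openai:
        try:
            completion = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": prompt}])
            return completion["choices"][0]["message"]["content"]
        except:
            print("ChatGPT ERROR")
            return "ChatGPT ERROR"
    else:
        # gpt4all's generate() runs the local Mistral model loaded at import time
        return model.generate(prompt)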
@@ -61,7 +70,7 @@ Example output json:
{"output": "[["Jane Doe", "Hi!"], ["John Doe", "Hello there!"] ... ]"}
"""

-print (ChatGPT_request(prompt))
+print (GPT_request(prompt))