Added GPT4All Mistral
parent 716567c50b
commit 56ad3a50a1
BIN reverie/backend_server/output2.txt (new file)
Binary file not shown.
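In short: with this commit, gpt_structure.py branches on a `use_openai` flag imported from utils. When the flag is off, chat requests and embeddings are served by a local GPT4All Mistral model instead of the OpenAI API. For orientation, a minimal sketch of the gpt4all calls the new code path relies on (model file, `device`, and `n_threads` are taken from the diff below; the prompts are invented):

```python
from gpt4all import GPT4All, Embed4All

# Local Mistral model, loaded once at module import (as in the diff below).
model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf", device="gpu", n_threads=4)
embedder = Embed4All()

# Chat-style request: chat_session() wraps the prompt in the model's chat template.
with model.chat_session():
  reply = model.generate("Say hello in one sentence.")  # returns a str

vector = embedder.embed("text to embed")  # returns a list of floats
```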
@@ -423,13 +423,13 @@ def revise_identity(persona):
   plan_prompt += f" *{persona.scratch.curr_time.strftime('%A %B %d')}*? "
   plan_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n"
   plan_prompt += f"Write the response from {p_name}'s perspective."
-  plan_note = ChatGPT_single_request(plan_prompt)
+  plan_note = GPT_single_request(plan_prompt)
   # print (plan_note)
 
   thought_prompt = statements + "\n"
   thought_prompt += f"Given the statements above, how might we summarize {p_name}'s feelings about their days up to now?\n\n"
   thought_prompt += f"Write the response from {p_name}'s perspective."
-  thought_note = ChatGPT_single_request(thought_prompt)
+  thought_note = GPT_single_request(thought_prompt)
   # print (thought_note)
 
   currently_prompt = f"{p_name}'s status from {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
@@ -441,7 +441,7 @@ def revise_identity(persona):
   currently_prompt += "Follow this format below:\nStatus: <new status>"
   # print ("DEBUG ;adjhfno;asdjao;asdfsidfjo;af", p_name)
   # print (currently_prompt)
-  new_currently = ChatGPT_single_request(currently_prompt)
+  new_currently = GPT_single_request(currently_prompt)
   # print (new_currently)
   # print (new_currently[10:])
 
@@ -452,7 +452,7 @@ def revise_identity(persona):
   daily_req_prompt += f"Follow this format (the list should have 4~6 items but no more):\n"
   daily_req_prompt += f"1. wake up and complete the morning routine at <time>, 2. ..."
 
-  new_daily_req = ChatGPT_single_request(daily_req_prompt)
+  new_daily_req = GPT_single_request(daily_req_prompt)
   new_daily_req = new_daily_req.replace('\n', ' ')
   print ("WE ARE HERE!!!", new_daily_req)
   persona.scratch.daily_plan_req = new_daily_req
@@ -1,123 +1,134 @@
 """
 Author: Joon Sung Park (joonspk@stanford.edu)
+Editor: Andreas Fruhwirt (afruhwirt@student.tugraz.at)
 
 File: gpt_structure.py
-Description: Wrapper functions for calling OpenAI APIs.
+Description: Wrapper functions for executing GPT requests
 """
 import json
 import random
 import openai
 import time
 
+from gpt4all import GPT4All, Embed4All
 from utils import *
 
-openai.api_key = openai_api_key
+if use_openai:
+  openai.api_key = openai_api_key
+else:
+  model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf", device="gpu", n_threads=4)
+  embedder = Embed4All()
 
 def temp_sleep(seconds=0.1):
   time.sleep(seconds)
 
-def ChatGPT_single_request(prompt):
+def GPT_single_request(prompt):
   temp_sleep()
 
-  completion = openai.ChatCompletion.create(
-    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": prompt}]
-  )
-  return completion["choices"][0]["message"]["content"]
+  if use_openai:
+    completion = openai.ChatCompletion.create(
+      model="gpt-3.5-turbo",
+      messages=[{"role": "user", "content": prompt}]
+    )
+    return completion["choices"][0]["message"]["content"]
+  else:
+    with model.chat_session():
+      response = model.generate(prompt)
+    return response
 
 
 # ============================================================================
 # #####################[SECTION 1: CHATGPT-3 STRUCTURE] ######################
 # ============================================================================
 
-def GPT4_request(prompt):
-  """
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
-  ARGS:
-    prompt: a str prompt
-    gpt_parameter: a python dictionary with the keys indicating the names of
-                   the parameter and the values indicating the parameter
-                   values.
-  RETURNS:
-    a str of GPT-3's response.
-  """
-  temp_sleep()
-
-  try:
-    completion = openai.ChatCompletion.create(
-      model="gpt-4",
-      messages=[{"role": "user", "content": prompt}]
-    )
-    return completion["choices"][0]["message"]["content"]
-
-  except:
-    print ("ChatGPT ERROR")
-    return "ChatGPT ERROR"
-
-def ChatGPT_request(prompt):
-  """
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
-  ARGS:
-    prompt: a str prompt
-    gpt_parameter: a python dictionary with the keys indicating the names of
-                   the parameter and the values indicating the parameter
-                   values.
-  RETURNS:
-    a str of GPT-3's response.
-  """
+#def GPT4_request(prompt):
+#  """
+#  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
+#  server and returns the response.
+#  ARGS:
+#    prompt: a str prompt
+#    gpt_parameter: a python dictionary with the keys indicating the names of
+#                   the parameter and the values indicating the parameter
+#                   values.
+#  RETURNS:
+#    a str of GPT-3's response.
+#  """
+#  temp_sleep()
+#
+#  try:
+#    completion = openai.ChatCompletion.create(
+#      model="gpt-4",
+#      messages=[{"role": "user", "content": prompt}]
+#    )
+#    return completion["choices"][0]["message"]["content"]
+#
+#  except:
+#    print ("ChatGPT ERROR")
+#    return "ChatGPT ERROR"
+
+
+def GPT_request(prompt):
+  """
+  Given a prompt and a dictionary of GPT parameters, fetch the response of the LLM
+  ARGS:
+    prompt: a str prompt
+  RETURNS:
+    a str of the LLM's response.
+  """
   # temp_sleep()
   try:
-    completion = openai.ChatCompletion.create(
-      model="gpt-3.5-turbo",
-      messages=[{"role": "user", "content": prompt}]
-    )
-    return completion["choices"][0]["message"]["content"]
+    if use_openai:
+      completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": prompt}]
+      )
+      return completion["choices"][0]["message"]["content"]
+    else:
+      with model.chat_session():
+        response = model.generate(prompt)
+      return response
   except:
-    print ("ChatGPT ERROR")
-    return "ChatGPT ERROR"
+    print ("GPT ERROR")
+    return "GPT ERROR"
 
 
-def GPT4_safe_generate_response(prompt,
-                                example_output,
-                                special_instruction,
-                                repeat=3,
-                                fail_safe_response="error",
-                                func_validate=None,
-                                func_clean_up=None,
-                                verbose=False):
-  prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
-  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
-  prompt += "Example output json:\n"
-  prompt += '{"output": "' + str(example_output) + '"}'
-
-  if verbose:
-    print ("CHAT GPT PROMPT")
-    print (prompt)
-
-  for i in range(repeat):
-
-    try:
-      curr_gpt_response = GPT4_request(prompt).strip()
-      end_index = curr_gpt_response.rfind('}') + 1
-      curr_gpt_response = curr_gpt_response[:end_index]
-      curr_gpt_response = json.loads(curr_gpt_response)["output"]
-
-      if func_validate(curr_gpt_response, prompt=prompt):
-        return func_clean_up(curr_gpt_response, prompt=prompt)
-
-      if verbose:
-        print ("---- repeat count: \n", i, curr_gpt_response)
-        print (curr_gpt_response)
-        print ("~~~~")
-
-    except:
-      pass
-
-  return False
+#def GPT4_safe_generate_response(prompt,
+#                                example_output,
+#                                special_instruction,
+#                                repeat=3,
+#                                fail_safe_response="error",
+#                                func_validate=None,
+#                                func_clean_up=None,
+#                                verbose=False):
+#  prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
+#  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
+#  prompt += "Example output json:\n"
+#  prompt += '{"output": "' + str(example_output) + '"}'
+#
+#  if verbose:
+#    print ("CHAT GPT PROMPT")
+#    print (prompt)
+#
+#  for i in range(repeat):
+#
+#    try:
+#      curr_gpt_response = GPT4_request(prompt).strip()
+#      end_index = curr_gpt_response.rfind('}') + 1
+#      curr_gpt_response = curr_gpt_response[:end_index]
+#      curr_gpt_response = json.loads(curr_gpt_response)["output"]
+#
+#      if func_validate(curr_gpt_response, prompt=prompt):
+#        return func_clean_up(curr_gpt_response, prompt=prompt)
+#
+#      if verbose:
+#        print ("---- repeat count: \n", i, curr_gpt_response)
+#        print (curr_gpt_response)
+#        print ("~~~~")
+#
+#    except:
+#      pass
+#
+#  return False
 
 
 def ChatGPT_safe_generate_response(prompt,
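Since the module does `from utils import *`, utils.py now has to define the backend switch alongside the API key. A hypothetical minimal snippet (the two variable names are the ones consumed above; the values are placeholders):

```python
# utils.py (sketch) -- consumed via `from utils import *`
use_openai = False  # False routes every request to the local GPT4All model
openai_api_key = "<your OpenAI key>"  # only read when use_openai is True
```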
@@ -127,7 +138,7 @@ def ChatGPT_safe_generate_response(prompt,
                                    fail_safe_response="error",
                                    func_validate=None,
                                    func_clean_up=None,
-                                   verbose=False):
+                                   verbose=True):
   # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
   prompt = '"""\n' + prompt + '\n"""\n'
   prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
@@ -141,7 +152,7 @@ def ChatGPT_safe_generate_response(prompt,
   for i in range(repeat):
 
     try:
-      curr_gpt_response = ChatGPT_request(prompt).strip()
+      curr_gpt_response = GPT_request(prompt).strip()
       end_index = curr_gpt_response.rfind('}') + 1
       curr_gpt_response = curr_gpt_response[:end_index]
       curr_gpt_response = json.loads(curr_gpt_response)["output"]
@@ -169,14 +180,14 @@ def ChatGPT_safe_generate_response_OLD(prompt,
                                        fail_safe_response="error",
                                        func_validate=None,
                                        func_clean_up=None,
-                                       verbose=False):
+                                       verbose=True):
   if verbose:
     print ("CHAT GPT PROMPT")
     print (prompt)
 
   for i in range(repeat):
     try:
-      curr_gpt_response = ChatGPT_request(prompt).strip()
+      curr_gpt_response = GPT_request(prompt).strip()
       if func_validate(curr_gpt_response, prompt=prompt):
         return func_clean_up(curr_gpt_response, prompt=prompt)
       if verbose:
@@ -207,7 +218,7 @@ def GPT_request(prompt, gpt_parameter):
     a str of GPT-3's response.
   """
   temp_sleep()
-  try:
+  if use_openai:
     response = openai.Completion.create(
                 model=gpt_parameter["engine"],
                 prompt=prompt,
@@ -219,10 +230,17 @@ def GPT_request(prompt, gpt_parameter):
                 stream=gpt_parameter["stream"],
                 stop=gpt_parameter["stop"],)
     return response.choices[0].text
-  except:
-    print ("TOKEN LIMIT EXCEEDED")
-    return "TOKEN LIMIT EXCEEDED"
+  else:
+    def callback(token_id, response):
+      if gpt_parameter["stop"] is None:
+        return True
+      for param in gpt_parameter["stop"]:
+        if param in response:
+          return False
+      return True
+    response = model.generate(prompt, max_tokens=gpt_parameter["max_tokens"], temp=gpt_parameter["temperature"], top_p=gpt_parameter["top_p"],
+                              streaming=gpt_parameter["stream"], callback=callback)
+    return response
 
 def generate_prompt(curr_input, prompt_lib_file):
   """
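The local backend has no server-side `stop` parameter, so the new `callback` approximates it: gpt4all invokes `callback(token_id, response)` for each streamed token, and returning False halts generation once any stop string appears (a stop word split across two tokens could slip through). A hypothetical call showing the keys this `GPT_request` reads (values invented; the OpenAI branch also reads engine/penalty keys that fall outside this hunk):

```python
gpt_parameter = {
  "engine": "text-davinci-003",  # OpenAI branch only
  "max_tokens": 50,
  "temperature": 0.5,
  "top_p": 1,
  "stream": False,
  "stop": ["\n"],  # emulated via the callback on the GPT4All branch
}
output = GPT_request("My morning routine:", gpt_parameter)
```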
@@ -258,7 +276,7 @@ def safe_generate_response(prompt,
                            fail_safe_response="error",
                            func_validate=None,
                            func_clean_up=None,
-                           verbose=False):
+                           verbose=True):
   if verbose:
     print (prompt)
 
@@ -277,8 +295,11 @@ def get_embedding(text, model="text-embedding-ada-002"):
   text = text.replace("\n", " ")
   if not text:
     text = "this is blank"
-  return openai.Embedding.create(
-          input=[text], model=model)['data'][0]['embedding']
+  if use_openai:
+    return openai.Embedding.create(
+            input=[text], model=model)['data'][0]['embedding']
+  else:
+    return embedder.embed(text)
 
 
 if __name__ == '__main__':
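One caveat on the embedding switch (an assumption about library defaults, not something stated in the diff): OpenAI's text-embedding-ada-002 returns 1536-dimensional vectors, while Embed4All's default local model returns 384-dimensional ones, so vectors produced under one backend cannot be compared against vectors stored under the other. A quick hypothetical check:

```python
vec = get_embedding("the cafe opens at 8am")
print(len(vec))  # 1536 with use_openai=True; 384 with the Embed4All default
```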
@@ -42,7 +42,16 @@ def GPT_request(prompt):
       print ("ChatGPT ERROR")
       return "ChatGPT ERROR"
   else:
-    response = model.generate(prompt)
+    with model.chat_session():
+      def callback(token_id, response):
+        print(response)
+        if "}" in response:
+          return False
+        return True
+      response = model.generate(prompt, callback=callback)
+      idx = response.find("}")
+      if idx != -1:
+        response = response[:idx+1]
     return response
 
 prompt = """
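The brace handling above forces the local model's reply to end at the first `}` so that JSON-expecting callers can parse it. A tiny self-contained illustration of the same truncation (input string invented):

```python
import json

raw = '{"output": "hi"} plus trailing chatter'
idx = raw.find("}")
if idx != -1:
  raw = raw[:idx + 1]
print(json.loads(raw)["output"])  # -> hi
```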