Added GPT4All Mistral
parent 716567c50b
commit 56ad3a50a1
BIN  reverie/backend_server/output2.txt  (new file, binary file not shown)
@@ -423,13 +423,13 @@ def revise_identity(persona):
   plan_prompt += f" *{persona.scratch.curr_time.strftime('%A %B %d')}*? "
   plan_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n"
   plan_prompt += f"Write the response from {p_name}'s perspective."
-  plan_note = ChatGPT_single_request(plan_prompt)
+  plan_note = GPT_single_request(plan_prompt)
   # print (plan_note)

   thought_prompt = statements + "\n"
   thought_prompt += f"Given the statements above, how might we summarize {p_name}'s feelings about their days up to now?\n\n"
   thought_prompt += f"Write the response from {p_name}'s perspective."
-  thought_note = ChatGPT_single_request(thought_prompt)
+  thought_note = GPT_single_request(thought_prompt)
   # print (thought_note)

   currently_prompt = f"{p_name}'s status from {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
@@ -441,7 +441,7 @@ def revise_identity(persona):
   currently_prompt += "Follow this format below:\nStatus: <new status>"
   # print ("DEBUG ;adjhfno;asdjao;asdfsidfjo;af", p_name)
   # print (currently_prompt)
-  new_currently = ChatGPT_single_request(currently_prompt)
+  new_currently = GPT_single_request(currently_prompt)
   # print (new_currently)
   # print (new_currently[10:])

@@ -452,7 +452,7 @@ def revise_identity(persona):
   daily_req_prompt += f"Follow this format (the list should have 4~6 items but no more):\n"
   daily_req_prompt += f"1. wake up and complete the morning routine at <time>, 2. ..."

-  new_daily_req = ChatGPT_single_request(daily_req_prompt)
+  new_daily_req = GPT_single_request(daily_req_prompt)
   new_daily_req = new_daily_req.replace('\n', ' ')
   print ("WE ARE HERE!!!", new_daily_req)
   persona.scratch.daily_plan_req = new_daily_req
@@ -1,123 +1,134 @@
 """
 Author: Joon Sung Park (joonspk@stanford.edu)
+Editor: Andreas Fruhwirt (afruhwirt@student.tugraz.at)

 File: gpt_structure.py
-Description: Wrapper functions for calling OpenAI APIs.
+Description: Wrapper functions for executing GPT requests
 """
 import json
 import random
 import openai
 import time

+from gpt4all import GPT4All, Embed4All
 from utils import *

-openai.api_key = openai_api_key
+if use_openai:
+  openai.api_key = openai_api_key
+else:
+  model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf", device="gpu", n_threads=4)
+  embedder = Embed4All()
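For orientation: the use_openai toggle and openai_api_key come in via the star import from utils, and utils.py itself is not part of this diff. The following is only a sketch of the flags it is assumed to define (values illustrative):

  # utils.py (assumed by this commit, not shown in the diff)
  use_openai = False                 # False routes every request to the local GPT4All model
  openai_api_key = "<your-api-key>"  # only read when use_openai is True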
 def temp_sleep(seconds=0.1):
   time.sleep(seconds)

-def ChatGPT_single_request(prompt):
+def GPT_single_request(prompt):
   temp_sleep()

+  if use_openai:
+    completion = openai.ChatCompletion.create(
+      model="gpt-3.5-turbo",
+      messages=[{"role": "user", "content": prompt}]
+    )
+    return completion["choices"][0]["message"]["content"]
+  else:
+    with model.chat_session():
+      response = model.generate(prompt)
+    return response
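A quick usage sketch of the renamed entry point (prompt text illustrative); the call site is identical whichever backend is active:

  # Works the same whether use_openai is True (gpt-3.5-turbo) or False (local Mistral).
  note = GPT_single_request("Summarize Klaus Mueller's plans for today in one sentence.")
  print(note)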
 # ============================================================================
 # #####################[SECTION 1: CHATGPT-3 STRUCTURE] ######################
 # ============================================================================

-def GPT4_request(prompt):
+#def GPT4_request(prompt):
+#  """
+#  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
+#  server and returns the response.
+#  ARGS:
+#    prompt: a str prompt
+#    gpt_parameter: a python dictionary with the keys indicating the names of
+#                   the parameter and the values indicating the parameter
+#                   values.
+#  RETURNS:
+#    a str of GPT-3's response.
+#  """
+#  temp_sleep()
+#
+#  try:
+#    completion = openai.ChatCompletion.create(
+#      model="gpt-4",
+#      messages=[{"role": "user", "content": prompt}]
+#    )
+#    return completion["choices"][0]["message"]["content"]
+#
+#  except:
+#    print ("ChatGPT ERROR")
+#    return "ChatGPT ERROR"
 def GPT_request(prompt):
   """
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
+  Given a prompt and a dictionary of GPT parameters, fetch the response of the LLM
   ARGS:
     prompt: a str prompt
     gpt_parameter: a python dictionary with the keys indicating the names of
                    the parameter and the values indicating the parameter
                    values.
   RETURNS:
     a str of GPT-3's response.
   """
   temp_sleep()

   try:
     completion = openai.ChatCompletion.create(
       model="gpt-4",
       messages=[{"role": "user", "content": prompt}]
     )
     return completion["choices"][0]["message"]["content"]

   except:
     print ("ChatGPT ERROR")
     return "ChatGPT ERROR"
 def ChatGPT_request(prompt):
   """
   Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
   server and returns the response.
   ARGS:
     prompt: a str prompt
     gpt_parameter: a python dictionary with the keys indicating the names of
                    the parameter and the values indicating the parameter
                    values.
   RETURNS:
-    a str of GPT-3's response.
+    a str of the LLM's response.
   """
   # temp_sleep()
   try:
+    if use_openai:
+      completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": prompt}]
+      )
+      return completion["choices"][0]["message"]["content"]
+
+    else:
+      with model.chat_session():
+        response = model.generate(prompt)
+      return response
   except:
-    print ("ChatGPT ERROR")
-    return "ChatGPT ERROR"
+    print ("GPT ERROR")
+    return "GPT ERROR"
-def GPT4_safe_generate_response(prompt,
-                                example_output,
-                                special_instruction,
-                                repeat=3,
-                                fail_safe_response="error",
-                                func_validate=None,
-                                func_clean_up=None,
-                                verbose=False):
-  prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
-  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
-  prompt += "Example output json:\n"
-  prompt += '{"output": "' + str(example_output) + '"}'
-
-  if verbose:
-    print ("CHAT GPT PROMPT")
-    print (prompt)
-
-  for i in range(repeat):
-
-    try:
-      curr_gpt_response = GPT4_request(prompt).strip()
-      end_index = curr_gpt_response.rfind('}') + 1
-      curr_gpt_response = curr_gpt_response[:end_index]
-      curr_gpt_response = json.loads(curr_gpt_response)["output"]
-
-      if func_validate(curr_gpt_response, prompt=prompt):
-        return func_clean_up(curr_gpt_response, prompt=prompt)
-
-      if verbose:
-        print ("---- repeat count: \n", i, curr_gpt_response)
-        print (curr_gpt_response)
-        print ("~~~~")
-
-    except:
-      pass
-
-  return False
+#def GPT4_safe_generate_response(prompt,
+#                                example_output,
+#                                special_instruction,
+#                                repeat=3,
+#                                fail_safe_response="error",
+#                                func_validate=None,
+#                                func_clean_up=None,
+#                                verbose=False):
+#  prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
+#  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
+#  prompt += "Example output json:\n"
+#  prompt += '{"output": "' + str(example_output) + '"}'
+#
+#  if verbose:
+#    print ("CHAT GPT PROMPT")
+#    print (prompt)
+#
+#  for i in range(repeat):
+#
+#    try:
+#      curr_gpt_response = GPT4_request(prompt).strip()
+#      end_index = curr_gpt_response.rfind('}') + 1
+#      curr_gpt_response = curr_gpt_response[:end_index]
+#      curr_gpt_response = json.loads(curr_gpt_response)["output"]
+#
+#      if func_validate(curr_gpt_response, prompt=prompt):
+#        return func_clean_up(curr_gpt_response, prompt=prompt)
+#
+#      if verbose:
+#        print ("---- repeat count: \n", i, curr_gpt_response)
+#        print (curr_gpt_response)
+#        print ("~~~~")
+#
+#    except:
+#      pass
+#
+#  return False
 def ChatGPT_safe_generate_response(prompt,
@@ -127,7 +138,7 @@ def ChatGPT_safe_generate_response(prompt,
                                    fail_safe_response="error",
                                    func_validate=None,
                                    func_clean_up=None,
-                                   verbose=False):
+                                   verbose=True):
   # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
   prompt = '"""\n' + prompt + '\n"""\n'
   prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
@@ -141,7 +152,7 @@ def ChatGPT_safe_generate_response(prompt,
   for i in range(repeat):

     try:
-      curr_gpt_response = ChatGPT_request(prompt).strip()
+      curr_gpt_response = GPT_request(prompt).strip()
       end_index = curr_gpt_response.rfind('}') + 1
       curr_gpt_response = curr_gpt_response[:end_index]
       curr_gpt_response = json.loads(curr_gpt_response)["output"]
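The safe-generate wrapper retries up to repeat times, parses the {"output": ...} JSON itself, and hands only the inner value to caller-supplied callbacks. A minimal sketch of what such callbacks look like (names and logic illustrative, not from this commit):

  # Hypothetical validate/clean-up pair for ChatGPT_safe_generate_response.
  def example_validate(gpt_response, prompt=""):
    return isinstance(gpt_response, str) and gpt_response.strip() != ""

  def example_clean_up(gpt_response, prompt=""):
    return gpt_response.strip()

  # result = ChatGPT_safe_generate_response(task_prompt, "wake up at 7am",
  #            "Respond with one short phrase.", 3, "error",
  #            example_validate, example_clean_up)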
@@ -169,14 +180,14 @@ def ChatGPT_safe_generate_response_OLD(prompt,
                                        fail_safe_response="error",
                                        func_validate=None,
                                        func_clean_up=None,
-                                       verbose=False):
+                                       verbose=True):
   if verbose:
     print ("CHAT GPT PROMPT")
     print (prompt)

   for i in range(repeat):
     try:
-      curr_gpt_response = ChatGPT_request(prompt).strip()
+      curr_gpt_response = GPT_request(prompt).strip()
       if func_validate(curr_gpt_response, prompt=prompt):
         return func_clean_up(curr_gpt_response, prompt=prompt)
       if verbose:
@@ -207,7 +218,7 @@ def GPT_request(prompt, gpt_parameter):
     a str of GPT-3's response.
   """
   temp_sleep()
-  try:
+  if use_openai:
+    try:
       response = openai.Completion.create(
         model=gpt_parameter["engine"],
         prompt=prompt,
@@ -219,10 +230,17 @@ def GPT_request(prompt, gpt_parameter):
         stream=gpt_parameter["stream"],
         stop=gpt_parameter["stop"],)
       return response.choices[0].text
     except:
       print ("TOKEN LIMIT EXCEEDED")
       return "TOKEN LIMIT EXCEEDED"
+  else:
+    def callback(token_id, response):
+      if gpt_parameter["stop"] is None:
+        return True
+      for param in gpt_parameter["stop"]:
+        if param in response:
+          return False
+      return True
+    response = model.generate(prompt, max_tokens=gpt_parameter["max_tokens"], temp=gpt_parameter["temperature"], top_p=gpt_parameter["top_p"],
+                              streaming=gpt_parameter["stream"], callback=callback)
+    return response

 def generate_prompt(curr_input, prompt_lib_file):
   """
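On the local path, OpenAI-style stop sequences are emulated through the streaming callback: returning False aborts generation once any stop string appears in the accumulated response. A hedged usage sketch (values illustrative; the OpenAI path also reads keys elided from this hunk):

  # Illustrative parameter dict matching the keys GPT_request reads above.
  gpt_parameter = {"engine": "text-davinci-003",  # used only on the OpenAI path
                   "max_tokens": 50, "temperature": 0.3, "top_p": 1,
                   "stream": False, "stop": ["\n"]}
  # output = GPT_request("List three breakfast foods:", gpt_parameter)

One caveat of this emulation: the callback can only halt after a stop string has already been emitted, so the stop text may remain at the end of the returned response rather than being cut before it, as the OpenAI API does.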
@@ -258,7 +276,7 @@ def safe_generate_response(prompt,
                            fail_safe_response="error",
                            func_validate=None,
                            func_clean_up=None,
-                           verbose=False):
+                           verbose=True):
   if verbose:
     print (prompt)
@@ -277,8 +295,11 @@ def get_embedding(text, model="text-embedding-ada-002"):
   text = text.replace("\n", " ")
   if not text:
     text = "this is blank"
-  return openai.Embedding.create(
-    input=[text], model=model)['data'][0]['embedding']
+  if use_openai:
+    return openai.Embedding.create(
+      input=[text], model=model)['data'][0]['embedding']
+  else:
+    return embedder.embed(text)


 if __name__ == '__main__':
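Both branches return a plain list of floats, so callers can treat the result uniformly; note, though, that the vector length differs between backends (1536 dimensions for text-embedding-ada-002 versus 384 for Embed4All's default MiniLM model), so embeddings produced by the two paths are not interchangeable. An illustrative call:

  # Illustrative; the embedding length depends on the active backend.
  vec = get_embedding("Isabella Rodriguez is planning a Valentine's Day party.")
  print(len(vec))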
@@ -42,7 +42,16 @@ def GPT_request(prompt):
     print ("ChatGPT ERROR")
     return "ChatGPT ERROR"
   else:
-    response = model.generate(prompt)
+    with model.chat_session():
+      def callback(token_id, response):
+        print(response)
+        if "}" in response:
+          return False
+        return True
+      response = model.generate(prompt, callback=callback)
+    idx = response.find("}")
+    if idx != -1:
+      response = response[:idx+1]
     return response

   prompt = """
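This test-side change streams tokens, aborts as soon as a "}" appears, then trims everything past the first closing brace, which keeps a chatty local model from rambling after it has finished a flat JSON object. A standalone sketch of the same truncation idea (strings illustrative):

  # Illustrative: trim a raw completion to its first complete (flat) JSON object.
  import json

  raw = '{"output": "wake up at 7am"} and then some trailing chatter'
  idx = raw.find("}")
  clean = raw[:idx + 1] if idx != -1 else raw
  print(json.loads(clean)["output"])   # -> wake up at 7am

This only works for flat, single-object outputs; nested JSON would close on an inner brace, which is why the safe-generate wrappers above use rfind('}') instead.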