import logging
import os
from time import time

import openai
import streamlit as st
openai.api_key = st.secrets['openai_API_key']
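# NOTE: this module targets the pre-1.0 openai Python SDK interface
# (openai.Completion / openai.ChatCompletion); the 1.x client exposes a different API.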
def open_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as file:
        return file.read()


def gpt35_rephrase(fact):
    # Dynamically generate the prompt to rephrase the fact as a PubMed query using GPT-3.5
    prompt = open_file('prompts/gpt35_rephrase.txt').replace('<<FACT>>', fact)
    try:
        response = openai.Completion.create(
            model='text-davinci-003',
            prompt=prompt,
            max_tokens=250,
            temperature=0
        )
        response = response['choices'][0]['text'].strip()
        filename = '%s_gpt3.txt' % time()
        # Create the logs folder if it does not exist
        if not os.path.exists('gpt3_rephrase_logs'):
            os.makedirs('gpt3_rephrase_logs')
        # Save the whole prompt and the response so that we can inspect them when necessary
        with open('gpt3_rephrase_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
            outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
        return response
    except Exception as e:
        # On failure the error is logged and the function implicitly returns None
        logging.error('Error communicating with OpenAI (rephrase)', exc_info=e)


def gpt35_check_fact(evidence, fact):
    # Dynamically generate the prompt to check the fact against the given PubMed article conclusion/abstract
    prompt = open_file('prompts/gpt35_fact_check.txt').replace('<<EVIDENCE>>', evidence).replace('<<HYPOTHESIS>>', fact)
    try:
        response = openai.Completion.create(
            model='text-davinci-003',
            prompt=prompt,
            max_tokens=3,  # No more tokens are needed for Entails/Contradicts/Undetermined
            temperature=0
        )
        response = response['choices'][0]['text'].strip()
        response = response.replace('.', '')
        filename = '%s_gpt3.txt' % time()
        # Create the logs folder if it does not exist
        if not os.path.exists('gpt3_factchecking_logs'):
            os.makedirs('gpt3_factchecking_logs')
        # Save the whole prompt and the response so that we can inspect them when necessary
        with open('gpt3_factchecking_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
            outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
        return response
    except Exception as e:
        # On failure the error is logged and the function implicitly returns None
        logging.error('Error communicating with OpenAI (check_fact)', exc_info=e)


def gpt35_turbo_rephrase(fact):
    # Dynamically generate the prompt to rephrase the fact as a PubMed query using GPT-3.5 Turbo (lower cost than text-davinci-003)
    prompt = open_file('prompts/gpt35_rephrase.txt').replace('<<FACT>>', fact)
    try:
        response = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=[
                {'role': 'user', 'content': prompt}
            ]
        )
        response = response['choices'][0]['message']['content'].strip()
        filename = '%s_gpt3.txt' % time()
        # Create the logs folder if it does not exist
        if not os.path.exists('gpt35_rephrase_logs'):
            os.makedirs('gpt35_rephrase_logs')
        # Save the whole prompt and the response so that we can inspect them when necessary
        with open('gpt35_rephrase_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
            outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
        return response
    except Exception as e:
        # On failure the error is logged and the function implicitly returns None
        logging.error('Error communicating with OpenAI (gpt35_turbo_rephrase)', exc_info=e)
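

# Minimal usage sketch: assumes the prompt templates under prompts/ exist and a valid
# 'openai_API_key' entry is available in the Streamlit secrets. The fact and evidence
# strings below are hypothetical placeholders, not data from the app.
if __name__ == '__main__':
    example_fact = 'Vitamin C prevents the common cold.'

    # Rephrase the claim as a PubMed-style query using the cheaper chat model
    query = gpt35_turbo_rephrase(example_fact)
    print('PubMed query:', query)

    # Check the claim against a piece of evidence, e.g. an abstract retrieved from PubMed
    example_evidence = ('Regular vitamin C supplementation did not reduce the incidence '
                        'of the common cold in the general population.')
    verdict = gpt35_check_fact(example_evidence, example_fact)
    print('Verdict:', verdict)  # One of Entails / Contradicts / Undetermined (or None on API error)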