Craig Pretzinger committed
Commit 0f0f232 · 1 Parent(s): 94cbad2

Fixed OpenAI API call and handled response

Files changed (1):
  app.py  +22 -17
app.py CHANGED
@@ -52,34 +52,39 @@ def handle_fda_query(query):
     logits = model(**inputs).logits
     return "FDA Query Processed: Contains regulatory info." if torch.argmax(logits, dim=1).item() == 1 else "FDA Query Processed: General."
 
-# Enhance via GPT-4o-mini
+# Function to enhance info via GPT-4o-mini
 def enhance_with_gpt4o(fda_response):
-    response = openai.ChatCompletion.create(
-        model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "You are an expert FDA assistant."}, {"role": "user", "content": f"Enhance this FDA info: {fda_response}"}],
-        max_tokens=150
-    )
-    return response['choices'][0]['message']['content']
-
-# Initialize Hugging Face Inference Client
-client = InferenceClient("gpt-4o-mini")  # Correct model, make sure to replace with your actual model
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-4o-mini",  # Correct model
+            messages=[{"role": "system", "content": "You are an expert FDA assistant."}, {"role": "user", "content": f"Enhance this FDA info: {fda_response}"}],
+            max_tokens=150
+        )
+        return response['choices'][0]['message']['content']
+    except Exception as e:
+        return f"Error: {str(e)}"
 
 def respond(message, system_message, max_tokens, temperature, top_p):
     try:
         # First retrieve info via PubMedBERT
         fda_response = handle_fda_query(message)
 
-        # Stream the enhanced response via GPT-4o-mini using the client
+        # Stream the enhanced response via GPT-4o-mini using the correct OpenAI API
+        response = openai.ChatCompletion.create(
+            model="gpt-4o-mini",
+            messages=[
+                {"role": "system", "content": "You are an expert FDA assistant."},
+                {"role": "user", "content": f"Enhance this FDA info: {fda_response}"}
+            ],
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p
+        )
+
         enhanced_response = ""
-<<<<<<< Updated upstream
         for chat_message in client.chat_completion(...):
             payload = json.loads(chat_message.lstrip("data:").rstrip("\n"))
             enhanced_response += payload["content"]  # Or however the payload structure works
-=======
-        for chat_message in client.chat_completion(...):  # Add params if needed
-            payload = clean_payload(chat_message)
-            enhanced_response += payload["content"]
->>>>>>> Stashed changes
 
         # Return both the PubMedBERT result and the enhanced version
         return f"Original Info from PubMedBERT: {fda_response}\n\nEnhanced Info via GPT-4o-mini: {enhanced_response}"