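"""DRF views for the prompt/recipe engine plus two vision-LLM endpoints.

Exposes two GET routes backed by the local ``engine`` module, and two
APIView classes that forward a base64-encoded image plus a prompt to
Mistral's Pixtral and NVIDIA's NIM chat-completions APIs, respectively.
"""
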
import json
import os

import requests
from mistralai import Mistral
from rest_framework import serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView

from .engine import execute_prompt, bundle_function, propose_recipes, compute_reduced_prices

class PromptSerializer(serializers.Serializer):
    prompt = serializers.CharField()

@api_view(['GET'])
def execute_prompt_route_get(request):
    prompt = request.query_params.get('prompt')
    if prompt is None:
        return Response({"error": "No prompt provided"}, status=400)
    is_local = False  # query the remote model rather than a local one
    result = execute_prompt(prompt, is_local)
    return Response(result)
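# Example request (hedged: the URL path depends on the project's urls.py,
# which is not part of this file):
#
#   GET /prompt/?prompt=Hello
#   -> 200 with the JSON value returned by engine.execute_prompt
#   -> 400 {"error": "No prompt provided"} when the query param is missing
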
@api_view(['GET'])
def recipe_generate_route(request):
    is_local = False  # query the remote model rather than a local one
    try:
        # compute_reduced_prices() returns a JSON string; parse it, bundle
        # the first ten articles, and ask the model for recipe proposals.
        json_objs = compute_reduced_prices()
        obj = json.loads(json_objs)
        bundle_articles = bundle_function(obj[:10])
        result = execute_prompt(propose_recipes(bundle_articles), is_local)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        return Response({'error': str(e)}, status=500)
    except Exception as e:
        print(f"An error occurred: {e}")
        return Response({'error': 'Something went wrong'}, status=500)
    return Response(result)
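# Example request (hedged: route path is an assumption):
#
#   GET /recipes/
#   -> 200 with recipes proposed for the first ten reduced-price articles
#   -> 500 {"error": ...} if the price data is missing or not valid JSON
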
class VisionLLMView(APIView):
    def post(self, request, format=None):
        data = request.data
        # Retrieve the API key from the environment
        # (raises KeyError if MISTRAL_API_KEY is not set).
        api_key = os.environ["MISTRAL_API_KEY"]
        model = "pixtral-12b-2409"
        client = Mistral(api_key=api_key)
        # The client sends the image already base64-encoded, so it is passed
        # straight through into a data URL without decoding.
        image_data = data['messages'][0]['image'][0]
        prompt = data['messages'][0]['prompt']
        # Define the messages for the chat
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{image_data}"
                    }
                ]
            }
        ]
        # Get the chat response and return its text content
        chat_response = client.chat.complete(
            model=model,
            messages=messages
        )
        content = chat_response.choices[0].message.content
        return Response({"response": content})
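# Expected POST body for both VisionLLMView (above) and NIMVisionLLMView
# (below), inferred from the field accesses in the two views:
#
#   {
#     "messages": [
#       {
#         "prompt": "Describe this picture",
#         "image": ["<base64-encoded image bytes>"]
#       }
#     ]
#   }
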
class NIMVisionLLMView(APIView):
    def post(self, request, format=None):
        try:
            invoke_url = "https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-11b-vision-instruct/chat/completions"
            stream = False
            api_key = os.environ["NIM_API_KEY"]
            data = request.data
            image_data = data['messages'][0]['image'][0]
            prompt = data['messages'][0]['prompt']
            headers = {
                "Authorization": f"Bearer {api_key}",
                "Accept": "text/event-stream" if stream else "application/json"
            }
            # NIM takes the image inline in the prompt as an HTML <img> tag.
            payload = {
                "model": 'meta/llama-3.2-11b-vision-instruct',
                "messages": [
                    {
                        "role": "user",
                        "content": f'{prompt} <img src="data:image/png;base64,{image_data}" />'
                    }
                ],
                "max_tokens": 512,
                "temperature": 1.00,
                "top_p": 1.00,
                "stream": stream
            }
            response = requests.post(invoke_url, headers=headers, json=payload, stream=stream)
            if stream:
                # Accumulate the delta chunks from the server-sent event
                # stream (assumes the OpenAI-style "data: {...}" framing
                # with a final "data: [DONE]" sentinel).
                content = ""
                for line in response.iter_lines():
                    if line:
                        chunk = line.decode("utf-8").removeprefix("data: ")
                        if chunk != "[DONE]":
                            content += json.loads(chunk)['choices'][0]['delta'].get('content', '')
            else:
                content = response.json()['choices'][0]['message']['content']
            return Response({"response": content})
        except Exception as e:
            # A DRF view must return a Response, not None.
            print(f"Error: {e}")
            return Response({'error': 'Something went wrong'}, status=500)
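# --- Hypothetical URL wiring (sketch) ---
# The project's urls.py is not part of this file; the paths below are
# assumptions chosen for illustration only.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path("prompt/", views.execute_prompt_route_get),
#     path("recipes/", views.recipe_generate_route),
#     path("vision/", views.VisionLLMView.as_view()),
#     path("nim-vision/", views.NIMVisionLLMView.as_view()),
# ]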