import os
import io
import base64

import requests
import yaml
from flask import Flask, request, jsonify
from dotenv import load_dotenv
from PIL import Image
from transformers import pipeline  # noqa: F401 -- kept; presumably intended for local model loading

# Load environment variables (Hugging Face API key and model identifier).
load_dotenv()
api_key = os.getenv('HF_API_KEY')
model_path = os.getenv('MODEL_PATH')

app = Flask(__name__)

# Load application configuration at startup.
with open('config.yaml', 'r') as file:
    config = yaml.safe_load(file)


def get_model_predictions(text):
    """Query the Hugging Face Inference API with `text` and return the parsed JSON reply.

    Raises:
        requests.RequestException: on network failure, timeout, or a non-2xx status.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    payload = {"inputs": text}
    # timeout prevents a hung upstream call from blocking this worker forever
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model_path}",
        headers=headers,
        json=payload,
        timeout=30,
    )
    # Fail loudly on an error status instead of trying to JSON-decode an HTML error page.
    response.raise_for_status()
    return response.json()


def process_image(image_file):
    """Open the uploaded image from a file-like object. Placeholder for real processing."""
    image = Image.open(image_file)
    # Implement image processing here
    return "Image processed"


@app.route('/predict', methods=['POST'])
def predict():
    """Accept a JSON body with either a 'text' field or a base64-encoded 'image' field."""
    # silent=True: a non-JSON body falls through to our own 400 instead of an opaque abort
    data = request.get_json(silent=True) or {}
    text = data.get('text')
    image_file = data.get('image')
    if text:
        try:
            prediction = get_model_predictions(text)
        except requests.RequestException as exc:
            return jsonify({"error": f"Inference API request failed: {exc}"}), 502
        return jsonify(prediction)
    elif image_file:
        # JSON cannot carry raw bytes, so clients must send base64 text.
        # (The original passed the str straight to BytesIO, which raises TypeError.)
        try:
            image = io.BytesIO(base64.b64decode(image_file, validate=True))
        except (ValueError, TypeError):
            return jsonify({"error": "Invalid base64 image data"}), 400
        result = process_image(image)
        return jsonify({"result": result})
    else:
        return jsonify({"error": "No input provided"}), 400


@app.route('/', methods=['GET'])
def index():
    """Landing page pointing callers at the /predict endpoint."""
    return "Welcome to My AI! Use /predict to interact."


if __name__ == '__main__':
    # NOTE(review): debug=True exposes the Werkzeug debugger -- must not ship to production.
    app.run(debug=True, use_reloader=False)