Anupam251272 committed on
Commit
6945642
·
verified ·
1 Parent(s): ff7bebd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +190 -80
app.py CHANGED
@@ -1,61 +1,81 @@
1
  import os
2
- import streamlit as st
3
  from huggingface_hub import login, hf_hub_download
4
- from datasets import load_dataset
 
5
  from llama_cpp import Llama
6
  import chromadb
7
- from chromadb.config import Settings # Added import for Settings
8
  from sentence_transformers import SentenceTransformer
 
 
 
9
 
10
- # Load Hugging Face token from environment variable
11
- hf_token = os.getenv("HF_TOKEN")
12
- if hf_token:
13
- login(token=hf_token)
14
- else:
15
- raise ValueError("HF_TOKEN is not set. Please add it to your Hugging Face Space secrets.")
16
-
17
- # Load dataset
18
- dataset = load_dataset("Maryem2025/final_dataset")
19
 
20
- # Initialize Llama model
21
- llm = Llama(
22
- model_path=hf_hub_download(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  repo_id="TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF",
24
  filename="capybarahermes-2.5-mistral-7b.Q2_K.gguf",
25
- token=hf_token # Ensure the token is passed for authentication
26
- ),
27
- n_ctx=2048,
28
- )
 
 
 
 
 
 
29
 
30
- # Initialize ChromaDB
31
  class VectorStore:
32
  def __init__(self, collection_name):
33
  self.embedding_model = SentenceTransformer('sentence-transformers/multi-qa-MiniLM-L6-cos-v1')
34
- # Use Settings to configure persistence
35
- self.chroma_client = chromadb.Client(Settings(
36
- chroma_db_impl="duckdb+parquet",
37
- persist_directory="./chroma_db" # Ensure persistence
38
- ))
39
- if collection_name in [c.name for c in self.chroma_client.list_collections()]:
40
- self.chroma_client.delete_collection(name=collection_name)
41
  self.collection = self.chroma_client.create_collection(name=collection_name)
42
 
43
- def populate_vectors(self, dataset):
44
- titles = dataset['train']['title'][:2000]
45
- servings = dataset['train']['servings'][:2000]
46
- total_times = dataset['train']['total_time'][:2000]
47
- courses = dataset['train']['course'][:2000]
48
- sections = dataset['train']['sections'][:2000]
49
- instructions = dataset['train']['instructions'][:2000]
50
- cuisines = dataset['train']['cuisine'][:2000]
51
- calories = dataset['train']['calories'][:2000]
 
 
 
 
 
 
 
 
 
52
 
53
  texts = [
54
- f"Title: {title}. Servings: {serving}. Total Time: {total_time} minutes. "
55
- f"Course: {course}. Sections: {section}. Instructions: {instruction}. "
56
- f"Cuisine: {cuisine}. Calories: {calorie}."
57
- for title, serving, total_time, course, section, instruction, cuisine, calorie
58
- in zip(titles, servings, total_times, courses, sections, instructions, cuisines, calories)
59
  ]
60
 
61
  for i, item in enumerate(texts):
@@ -65,48 +85,138 @@ class VectorStore:
65
  def search_context(self, query, n_results=1):
66
  query_embedding = self.embedding_model.encode([query]).tolist()
67
  results = self.collection.query(query_embeddings=query_embedding, n_results=n_results)
68
- return results['documents'][0] # Adjusted to access the correct document
69
-
70
- # Initialize and populate vector store
71
- vector_store = VectorStore("embedding_vector")
72
- vector_store.populate_vectors(dataset)
73
-
74
- # Define function for generating text
75
- def generate_text(message):
76
- context_results = vector_store.search_context(message, n_results=1)
77
- context = context_results[0] if context_results else ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
  prompt_template = (
80
- f"SYSTEM: You are a recipe generating bot.\n"
81
- f"SYSTEM: {context}\n"
82
- f"USER: {message}\n"
83
  f"ASSISTANT:\n"
84
  )
85
 
86
- output = llm(
87
- prompt_template,
88
- temperature=0.3,
89
- top_p=0.95,
90
- top_k=40,
91
- repeat_penalty=1.1,
92
- max_tokens=600,
93
- )
94
-
95
- input_string = output['choices'][0]['text'].strip()
96
- cleaned_text = input_string.strip("[]'").replace('\\n', '\n')
97
- return cleaned_text
98
-
99
- # Streamlit UI
100
- st.title("JOSHI’s AI Chef 🍽️")
101
- st.write("Generate recipes using AI powered by Hugging Face and ChromaDB!")
102
-
103
- user_input = st.text_area("Enter ingredients or ask for a recipe:", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- if st.button("Generate Recipe"):
106
- if user_input:
107
- with st.spinner("Generating recipe... 🍲"):
108
- response = generate_text(user_input)
109
- st.subheader("Generated Recipe:")
110
- st.write(response)
111
- else:
112
- st.warning("Please enter a message.")
 
1
  import os
 
2
  from huggingface_hub import login, hf_hub_download
3
+ import pandas as pd
4
+ import gradio as gr
5
  from llama_cpp import Llama
6
  import chromadb
 
7
  from sentence_transformers import SentenceTransformer
8
+ from deep_translator import GoogleTranslator # Changed from googletrans to deep_translator
9
+ import re
10
+ import requests # Import the requests library
11
 
 
 
 
 
 
 
 
 
 
12
 
13
# Load the Hugging Face token from the environment (Space secrets) and log in.
# Fail fast with a clear message when it is missing -- login(token=None) would
# otherwise produce a confusing error deep inside huggingface_hub.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("HF_TOKEN is not set. Please add it to your Hugging Face Space secrets.")
login(token=hf_token)

# Path to the recipe CSV. The default is a Colab-specific path; set the
# RECIPES_CSV environment variable to point elsewhere (e.g. inside a Space).
csv_file = os.getenv("RECIPES_CSV", "/content/indian_food (1).csv")

# df stays None when loading fails; the vector-store setup below catches the
# resulting error and generate_text then reports it to the user, which is
# friendlier than exit()-ing the whole process at import time.
df = None
try:
    df = pd.read_csv(csv_file)
    print("Dataset chargé avec succès depuis le fichier CSV local.")
except FileNotFoundError:
    print(f"Erreur: Fichier CSV non trouvé à l'emplacement: {csv_file}")
except Exception as e:
    print(f"Erreur lors du chargement du CSV: {e}")

# Initialise the Llama model. llm stays None on failure and generate_text
# returns an explanatory error instead of crashing at import time.
llm = None
try:
    # Download into /tmp, which is writable inside Hugging Face Spaces.
    model_path = hf_hub_download(
        repo_id="TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF",
        filename="capybarahermes-2.5-mistral-7b.Q2_K.gguf",
        cache_dir="/tmp",
    )

    llm = Llama(
        model_path=model_path,
        n_ctx=2048,
    )
    print("Llama model loaded successfully.")
except Exception as e:
    print(f"Error loading Llama model: {e}")
46
 
47
+ # Initialisation de ChromaDB Vector Store
48
  class VectorStore:
49
  def __init__(self, collection_name):
50
  self.embedding_model = SentenceTransformer('sentence-transformers/multi-qa-MiniLM-L6-cos-v1')
51
+ self.chroma_client = chromadb.Client()
52
+
53
+ if collection_name in self.chroma_client.list_collections():
54
+ self.chroma_client.delete_collection(collection_name)
 
 
 
55
  self.collection = self.chroma_client.create_collection(name=collection_name)
56
 
57
+ def populate_vectors(self, df):
58
+ titles = df['name'].tolist()
59
+ ingredients = df['ingredients'].tolist()
60
+ diets = df['diet'].tolist()
61
+ prep_times = df['prep_time'].tolist()
62
+
63
+ # Load nutritional information, handling potentially missing columns and types
64
+ calories = df['calories'].astype(str).tolist() if 'calories' in df else ['None'] * len(df)
65
+ sugar = df['sugar'].astype(str).tolist() if 'sugar' in df else ['None'] * len(df)
66
+ gluten = df['gluten'].astype(str).tolist() if 'gluten' in df else ['None'] * len(df)
67
+
68
+ titles = titles[:2000]
69
+ ingredients = ingredients[:2000]
70
+ diets = diets[:2000]
71
+ prep_times = prep_times[:2000]
72
+ calories = calories[:2000]
73
+ sugar = sugar[:2000]
74
+ gluten = gluten[:2000]
75
 
76
  texts = [
77
+ f"Recipe: {title}. Ingredients: {ingredient}. Diet: {diet}. Prep Time: {prep_time} minutes. Calories: {calorie}. Sugar: {sugar}. Gluten: {gluten}."
78
+ for title, ingredient, diet, prep_time, calorie, sugar, gluten in zip(titles, ingredients, diets, prep_times, calories, sugar, gluten)
 
 
 
79
  ]
80
 
81
  for i, item in enumerate(texts):
 
85
  def search_context(self, query, n_results=1):
86
  query_embedding = self.embedding_model.encode([query]).tolist()
87
  results = self.collection.query(query_embeddings=query_embedding, n_results=n_results)
88
+ return results['documents']
89
+
90
# Build the vector store and index the recipe dataframe at import time.
# vector_store stays None when setup fails; generate_text checks for that
# and returns an error message instead of crashing.
vector_store = None
try:
    vector_store = VectorStore("indian_food_embedding")
    vector_store.populate_vectors(df)
    print("Vector store initialized and populated.")
except Exception as e:
    print(f"Error initializing or populating vector store: {e}")
98
+
99
+
100
+ # Replace the translate_text function with this new version
101
def translate_text(text, target_language='en'):
    """Translate *text* into *target_language*, falling back to the input.

    Translating *to* English auto-detects the source language; any other
    target assumes the text is English. On any translator failure the
    original text is returned unchanged so callers never crash on a
    translation hiccup.
    """
    source_language = 'auto' if target_language == 'en' else 'en'
    try:
        translator = GoogleTranslator(source=source_language, target=target_language)
        return translator.translate(text)
    except Exception as e:
        print(f"Translation error: {e}")
        print(f"Detailed error: {type(e).__name__}, {e}")
        return text
115
+
116
def generate_text(message, max_tokens=600, temperature=0.3, top_p=0.95,
                  gluten_free=False, dairy_free=False, allergies="", input_language='en'):
    """Generate a recipe answer for *message* with RAG context and translation.

    Parameters mirror the Gradio inputs: sampling controls (max_tokens,
    temperature, top_p), dietary flags, a comma-separated allergy list, and
    the input/output language code ('en' or 'hi'). Returns the generated
    (and, if needed, back-translated) text, or an error string when a stage
    fails. Never raises: every failure path returns a message instead.
    """
    # Both globals are built at import time and may have failed to load.
    if llm is None:
        return "Error: Llama model could not be loaded. Please check the console for errors."

    if vector_store is None:
        return "Error: Vector store could not be initialized. Please check the console for errors."

    # Work in English internally; translate the user message if needed.
    message_en = message
    if input_language != 'en':
        try:
            message_en = translate_text(message, target_language='en')
        except Exception as e:
            print(f"Error translating input message: {e}")
            return "Error translating input. Please try again in English."

    # Fold the dietary constraints into the retrieval query so the nearest
    # neighbour reflects them.
    context = ""
    query = message_en
    if gluten_free:
        query += " gluten-free"
    if dairy_free:
        query += " dairy-free"
    if allergies:
        query += f" avoid ingredients: {allergies}"

    try:
        context_results = vector_store.search_context(query, n_results=1)
        if context_results and isinstance(context_results, list):
            first = context_results[0]
            # search_context may hand back either a flat list of document
            # strings or Chroma's per-query nesting (a list of lists).
            # Unwrap until we hold a plain string, otherwise the prompt
            # embeds a Python list repr and the gluten check below (which
            # requires a str) never fires.
            if isinstance(first, list):
                first = first[0] if first else ""
            context = first if isinstance(first, str) else ""
        else:
            context = ""
            print("Warning: No context found or invalid context format.")
    except Exception as e:
        return f"Error searching vector store: {e}"

    prompt_template = (
        f"SYSTEM: You are a helpful recipe generating bot specializing in Indian cuisine, assisting with dietary restrictions.\n"
        f"SYSTEM: Here is some context:\n{context}\n"
        f"USER: {message_en}\n"  # use the English (translated) message
        f"ASSISTANT:\n"
    )

    try:
        output = llm(
            prompt_template,
            temperature=temperature,
            top_p=top_p,
            top_k=40,
            repeat_penalty=1.1,
            max_tokens=max_tokens,
        )

        input_string = output['choices'][0]['text'].strip()
        # Drop stray bracket/quote wrappers and unescape literal "\n" runs.
        cleaned_text = input_string.strip("[]'").replace('\\n', '\n')
        continuous_text = '\n'.join(cleaned_text.split('\n'))

        # Translate the answer back into the user's language; on failure,
        # fall back to the English text with an explanatory prefix.
        output_text = continuous_text
        if input_language != 'en':
            try:
                output_text = translate_text(continuous_text, target_language=input_language)
            except Exception as e:
                print(f"Error translating output message: {e}")
                output_text = "Error translating output. Here is the English version:\n\n" + continuous_text

        # Surface the gluten flag recorded in the retrieved context text.
        if context and isinstance(context, str):
            context_lower = context.lower()
            if "gluten: yes" in context_lower:
                output_text += "\n\nWarning: This recipe contains gluten."
            elif "gluten: no" in context_lower:
                output_text += "\n\nGood news! This recipe is gluten-free."

        return output_text

    except Exception as e:
        return f"Error generating text: {e}"
198
+
199
# Gradio UI: one control per generate_text parameter, in signature order.
_interface_inputs = [
    gr.Textbox(lines=2, placeholder="Enter your message here...", label="Message"),
    gr.Slider(minimum=50, maximum=1000, value=600, step=50, label="Max Tokens"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.7, maximum=1.0, value=0.95, step=0.05, label="Top P"),
    gr.Checkbox(label="Gluten-Free"),
    gr.Checkbox(label="Dairy-Free"),
    gr.Textbox(lines=1, placeholder="e.g., peanuts, shellfish", label="Allergies (comma-separated)"),
    gr.Dropdown(choices=['en', 'hi'], value='en', label="Input Language (en=English, hi=Hindi/Hinglish)"),
]

# Pre-filled examples: one Hinglish gluten-free request, one English query.
_interface_examples = [
    ["mujhe chawal aur dal hai, main kya bana sakta hoon jo gluten-free ho?", 600, 0.3, 0.95, True, False, "", 'hi'],
    ["Suggest a vegetarian dish with spinach and no nuts.", 600, 0.3, 0.95, False, False, "nuts", 'en'],
]

demo = gr.Interface(
    fn=generate_text,
    inputs=_interface_inputs,
    outputs=gr.Textbox(label="Generated Text"),
    title="Indian Recipe Bot",
    description="Running LLM with context retrieval from ChromaDB. Supports dietary restrictions, allergies, and Hinglish input/output!",
    examples=_interface_examples,
    cache_examples=False,
)

if __name__ == "__main__":
    demo.launch()