Update app.py
app.py CHANGED
@@ -1,19 +1,22 @@
 import os
 import requests
+import gradio as gr
 
 api_token = os.environ.get("TOKEN")
 API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
 headers = {"Authorization": f"Bearer {api_token}"}
 
+
+
 def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
 
-def analyze_sentiment(
+def analyze_sentiment(text):
     output = query({
-        "inputs": f'''
-system
-You're going to deeply analyze the
+        "inputs": f'''<|begin_of_text|>
+<|start_header_id|>system<|end_header_id|>
+You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
 For posts that talk about chat models/LLM, return "Chatmodel/LLM"
 For posts that talk about image generation models, return "image_generation"
 For texts that ask for information from the community, return "questions"
@@ -24,14 +27,58 @@ For posts about tools and libraries, return "tools_libraries"
 For posts containing tutorials and guides, return "tutorials_guides"
 For posts about debugging and problem-solving, return "debugging"
 Respond only with the category name, without any additional explanation or text.
-
-
-
+<|eot_id|>
+<|start_header_id|>user<|end_header_id|>
+{text}
+<|eot_id|>
+<|start_header_id|>assistant<|end_header_id|>
 '''
     })
 
+
+
     if isinstance(output, list) and len(output) > 0:
+        response = output[0].get('generated_text', '').strip().lower()
+
+        questions = response.count('questions')
+        ChatmodelLLM = response.count('Chatmodel/LLM')
+        other = response.count('other')
+        image_generation = response.count("image_generation")
+        fine_tuning = response.count("fine_tuning")
+        ethics_bias = response.count("ethics_bias")
+        datasets = response.count("datasets")
+        tools_libraries = response.count("tools_libraries")
+        tutorials_guides = response.count("tutorials_guides")
+        debugging = response.count("debugging")
+
+        if questions == 2:
+            return 'questions'
+        elif ChatmodelLLM == 2:
+            return 'Chat Model/LLM'
+        elif other == 2:
+            return "Other"
+        elif image_generation == 2:
+            return "Image Generation"
+        elif fine_tuning == 2:
+            return "Fine-tuning"
+        elif ethics_bias == 2:
+            return "Ethics and Bias"
+        elif datasets == 2:
+            return "Datasets"
+        elif tools_libraries == 2:
+            return "Tools and Libraries"
+        elif tutorials_guides == 2:
+            return "Tutorials and Guides"
+        elif debugging == 2:
+            return "Debugging"
+        else:
+            return f"Error: ambiguous response - '{response}'"
+
 
-
+demo = gr.Interface(
+    fn=analyze_sentiment,
+    inputs="text",
+    outputs="text"
+)
 
-
+demo.launch()
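A note on the matching logic introduced above: because the reply is lowercased with .strip().lower(), response.count('Chatmodel/LLM') can never be non-zero, and the == 2 checks only pass when a category name appears once in the echoed prompt and once more in the model's answer. Below is a minimal, more defensive sketch, not the Space's actual code: it reuses the query() helper and category labels from app.py, build_prompt() is a hypothetical stand-in for the Llama-3 prompt string in the diff, and "return_full_text": False is the standard Inference API text-generation option for returning only the completion.

# Hypothetical helper: stands in for the Llama-3 chat prompt built in app.py
# (category instructions abbreviated here).
def build_prompt(text):
    return (
        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n"
        "...same category instructions as in app.py...\n"
        "<|eot_id|><|start_header_id|>user<|end_header_id|>\n"
        f"{text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"
    )

# Lowercased keys so matching stays case-insensitive after .lower();
# "other" is checked last because it is the least specific keyword.
CATEGORIES = {
    "chatmodel/llm": "Chat Model/LLM",
    "image_generation": "Image Generation",
    "questions": "questions",
    "fine_tuning": "Fine-tuning",
    "ethics_bias": "Ethics and Bias",
    "datasets": "Datasets",
    "tools_libraries": "Tools and Libraries",
    "tutorials_guides": "Tutorials and Guides",
    "debugging": "Debugging",
    "other": "Other",
}

def analyze_sentiment(text):
    # query() is the helper defined in app.py above.
    output = query({
        "inputs": build_prompt(text),
        # Assumed: return_full_text=False keeps the prompt out of the reply,
        # so only the model's answer is searched for a category name.
        "parameters": {"return_full_text": False},
    })
    if not (isinstance(output, list) and output):
        return f"Error: unexpected API response - {output}"
    answer = output[0].get("generated_text", "").strip().lower()
    for key, label in CATEGORIES.items():
        if key in answer:
            return label
    return f"Error: ambiguous response - '{answer}'"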