Spaces:
Build error
Build error
ahmeterdempmk
committed on
Commit
•
94d4e23
1
Parent(s):
05b3566
Upload Streamlit app file
Browse files
app.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import torch
|
3 |
+
from unsloth import FastLanguageModel
|
4 |
+
from transformers import TextStreamer
|
5 |
+
import json
|
6 |
+
|
7 |
+
@st.cache_resource
def load_model():
    """Load the fine-tuned Gemma2 e-commerce model plus its tokenizer.

    Decorated with ``st.cache_resource`` so the expensive download and
    4-bit quantized load run only once per Streamlit server process;
    every rerun of the script reuses the same objects.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference.
    """
    loaded_model, loaded_tokenizer = FastLanguageModel.from_pretrained(
        model_name="ahmeterdempmk/Gemma2-2b-E-Commerce-Tuned",
        max_seq_length=2048,
        dtype=None,          # let unsloth pick the dtype for this hardware
        load_in_4bit=True,   # quantized weights to fit small GPUs
    )
    # Switch unsloth's optimized inference mode on (disables training paths).
    FastLanguageModel.for_inference(loaded_model)
    return loaded_model, loaded_tokenizer
|
17 |
+
|
18 |
+
# Load the (cached) fine-tuned model once; reruns reuse the same instance.
model, tokenizer = load_model()

st.title(" E-Commerce Text Generation")

text = st.text_area("Enter product information:", placeholder="Example: Rosehip Marmalade, keep it cold")

if st.button("Apply"):
    if text:
        with st.spinner("Generating response..."):
            # Few-shot prompt asking the model for a flat JSON object with
            # exactly a "title" and a "description" key.
            prompt = f"""
You are extracting product title and description from given text and rewriting the description and enhancing it when necessary.
Always give response in the user's input language.
Always answer in the given json format. Do not use any other keywords. Do not make up anything.
Explanations should contain at least five sentences each.

Json Format:
{{
"title": "<title of the product>",
"description": "<description of the product>"
}}

Examples:

Product Information: Rosehip Marmalade, keep it cold
Answer: {{"title": "Rosehip Marmalade", "description": "You should store this delicisious roseship marmelade in cold conditions. You can use it in your breakfasts and meals."}}

Product Information: Blackberry jam spoils in the heat
Answer: {{"title": "Blackberry Jam", "description": "Please store it in cold conditions. Recommended to be consumed at breakfast. Very sweet."}}

Now answer this:
Product Information: {text}
"""
            # Move inputs to wherever the model actually lives instead of
            # hard-coding "cuda" — avoids a crash on CPU-only deployments.
            inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

            output = model.generate(**inputs, max_new_tokens=128)
            # decode() returns prompt + continuation; keep only the text
            # generated after the final instruction marker.
            response = tokenizer.decode(output[0], skip_special_tokens=True)
            answer_start = response.find("Now answer this:") + len("Now answer this:")
            answer = response[answer_start:].strip()

            # Extract the outermost {...} span. rfind tolerates multiline or
            # pretty-printed JSON (the first "}" is not necessarily the last).
            json_start = answer.find("{")
            json_end = answer.rfind("}") + 1

            if json_start == -1 or json_end == 0:
                # The model produced no JSON object at all.
                st.error("An error has occurred! Please try again.")
            else:
                json_response = answer[json_start:json_end].strip()

                st.subheader("JSON Format Answer:")
                st.text(f"{json_response}")

                try:
                    json_data = json.loads(json_response)
                    title = json_data["title"]
                    description = json_data["description"]

                    st.subheader("Product Title:")
                    st.text(title)

                    st.subheader("Product Description:")
                    st.text(description)

                # KeyError: valid JSON but missing an expected key —
                # previously this crashed the app instead of showing an error.
                except (json.JSONDecodeError, KeyError):
                    st.error("An error has occurred! Please try again.")
    else:
        st.warning("Please enter product information.")
|