onurnsfw committed on
Commit
35a07ed
1 Parent(s): 161ff34

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Load model directly
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import streamlit as st
4
+ import numpy as np
5
+ import torch
6
+
7
@st.cache_resource
def get_model():
    """Load and return the (tokenizer, model) pair for the fine-tuned classifier.

    Decorated with st.cache_resource so the large checkpoint is downloaded
    and materialized only once per server process, not on every Streamlit
    script rerun.
    """
    repo_id = "onurnsfw/Gemma2-9b-classifier"
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForCausalLM.from_pretrained(repo_id)
    return tokenizer, model
12
+
13
tokenizer, model = get_model()

# Simple single-page UI: a text box plus a submit button.
user_input = st.text_area('Enter Text to Analyze')
button = st.button("Analyze")

if user_input and button:
    # Alpaca-style prompt template the classifier was fine-tuned with:
    # instruction, input, and an empty response slot for the model to fill.
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

    # BUG FIX: the original passed the literal string "{user_input}" to
    # format(), so the model never received the user's text. Pass the
    # actual variable instead.
    prompt = alpaca_prompt.format(
        "Match the potential use case with the corresponding activity and emission values based on the provided context.",  # instruction
        user_input,  # the text to analyze
        "",          # response left blank for generation
    )

    # BUG FIX: "cuda" was hard-coded (crashes on CPU-only hosts) and the
    # model was never moved to the device, mismatching the inputs. Pick the
    # device at runtime and keep model and tensors on the same one.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    inputs = tokenizer([prompt], return_tensors="pt").to(device)

    # Inference only — disable autograd to save memory and time.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)

    # skip_special_tokens strips <bos>/<eos> markers from the displayed text.
    st.write("Prediction: ", tokenizer.batch_decode(outputs, skip_special_tokens=True))