Update app.py
app.py
CHANGED
@@ -18,6 +18,8 @@ import torch
 
 # Load the dataset containing PEC numbers and names
 def load_dataset(file_path='PEC_Numbers_and_Names.xlsx'):
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File not found: {file_path}")
     df = pd.read_excel(file_path)
     return df
 
@@ -38,17 +40,17 @@ def process_with_model(pec_number):
     inputs = tokenizer(pec_number, return_tensors="pt")
     with torch.no_grad():
         outputs = model(**inputs)
-    # Return a simple representation of the model's output
     return outputs.last_hidden_state.mean(dim=1).squeeze().tolist()
 
 # Combine both functions to create a prediction
 def predict(pec_number):
-
-
-
-
-
-
+    try:
+        df = load_dataset()
+        name = get_name(pec_number, df)
+        model_output = process_with_model(pec_number)
+        return f"Name: {name}\nModel Output: {model_output}"
+    except FileNotFoundError as e:
+        return str(e)
 
 # Build the Gradio interface
 iface = gr.Interface(
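For anyone who wants to try the updated app locally, below is a minimal, self-contained sketch of what app.py might look like with both hunks applied. Only load_dataset(), process_with_model(), and predict() are taken from the diff above; the model choice (bert-base-uncased), the get_name() lookup and its column names, and the gr.Interface() arguments are assumptions, since those parts of the file are not shown in this change.

# Sketch of app.py after this change. Parts marked "assumed" are not in the diff.
import os

import gradio as gr
import pandas as pd
import torch
from transformers import AutoModel, AutoTokenizer

# Assumed model/tokenizer; the actual checkpoint used by the Space is not shown here.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

# Load the dataset containing PEC numbers and names (from the diff)
def load_dataset(file_path='PEC_Numbers_and_Names.xlsx'):
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")
    df = pd.read_excel(file_path)
    return df

# Hypothetical lookup helper; the real get_name() is referenced but not shown in the diff.
# Column names 'PEC Number' and 'Name' are assumptions about the spreadsheet layout.
def get_name(pec_number, df):
    match = df.loc[df['PEC Number'].astype(str) == str(pec_number), 'Name']
    return match.iloc[0] if not match.empty else "Not found"

# Encode the PEC number and return the mean-pooled hidden state (from the diff)
def process_with_model(pec_number):
    inputs = tokenizer(pec_number, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1).squeeze().tolist()

# Combine both functions to create a prediction (from the diff)
def predict(pec_number):
    try:
        df = load_dataset()
        name = get_name(pec_number, df)
        model_output = process_with_model(pec_number)
        return f"Name: {name}\nModel Output: {model_output}"
    except FileNotFoundError as e:
        return str(e)

# Build the Gradio interface (arguments beyond fn=predict are assumed)
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

if __name__ == "__main__":
    iface.launch()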