Update app.py
app.py CHANGED
@@ -10,9 +10,10 @@ import requests
 import json
 import openai
 
-#
+# Initialize user input
 userinput = ""
-
+
+# Initialize session state for claims_extraction
 if 'claims_extraction' not in st.session_state:
     st.session_state.claims_extraction = ""
 
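Note: the session-state guard added above is Streamlit's standard initialization pattern. st.session_state survives script reruns, so creating the key only when it is absent keeps previously extracted claims from being wiped each time a widget fires. A minimal sketch of the pattern:

import streamlit as st

# Initializes the key only on the first run; later reruns leave it intact
if 'claims_extraction' not in st.session_state:
    st.session_state.claims_extraction = ""

# Handlers can then accumulate results without losing them between reruns
st.session_state.claims_extraction += "extracted claim text\n"  # hypothetical write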
@@ -56,17 +57,17 @@ if st.button('Start Transcription'):
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as audio_file:
         audio_file.write(audio_data)
         audio_file_path = audio_file.name
-
-
-
-
-
+        st.audio(audio_file_path, format="audio/wav")
+        st.info("Transcribing...")
+        st.success("Transcription complete")
+        result = model.transcribe(audio_file_path)
+        transcript = result['text']
 
-
-
+        with st.expander("See transcript"):
+            st.markdown(transcript)
 
-
-
+        # Update the user input field with the transcription
+        userinput = st.text_area("Input Text:", transcript)  # Moved up here
 
     # Model Selection Dropdown
     model_choice = st.selectbox(
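For context on the new transcription block: model.transcribe(audio_file_path) returning a dict read via result['text'] matches the openai-whisper API, so model is presumably a Whisper instance loaded earlier in the file (outside this hunk). A self-contained sketch under that assumption, with an illustrative checkpoint and uploader:

import tempfile

import streamlit as st
import whisper  # openai-whisper; assumed backend given the result['text'] usage

model = whisper.load_model("base")  # assumed checkpoint

uploaded = st.file_uploader("Upload audio", type=["wav"])
if uploaded is not None:
    # Write the upload to disk so the model gets a real file path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as audio_file:
        audio_file.write(uploaded.read())
        audio_file_path = audio_file.name

    st.audio(audio_file_path, format="audio/wav")
    result = model.transcribe(audio_file_path)  # dict with 'text', 'segments', 'language'
    st.markdown(result['text'])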
@@ -121,6 +122,10 @@ if userinput and api_key and st.button("Extract Claims", key="claims_extraction"
     # Display generated objectives for all chunks
     learning_status_placeholder.text(f"Patentable Claims Extracted!\n{all_extracted_claims.strip()}")
 
+
+# Get the extracted claims from Streamlit's session state
+claims_extracted = st.session_state.claims_extraction
+
 from transformers import AutoConfig, AutoTokenizer, AutoModel
 from summarizer import Summarizer
 
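The claims_extracted assignment added here is the read side of the session-state handoff: the extraction handler stores its output under the claims_extraction key, and the summarization stage below picks it back up on a later rerun. A sketch of both sides, with a hypothetical extractor result:

import streamlit as st

if 'claims_extraction' not in st.session_state:
    st.session_state.claims_extraction = ""

# Write side (inside the "Extract Claims" handler):
all_extracted_claims = "1. A method comprising a widget."  # hypothetical output
st.session_state.claims_extraction = all_extracted_claims.strip()

# Read side (module scope, as in this hunk):
claims_extracted = st.session_state.claims_extraction
if claims_extracted:
    st.write("Claims ready for summarization:", claims_extracted)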
@@ -153,11 +158,9 @@ for chunk in chunks:
     summary = bert_legal_model(chunk, min_length=8, ratio=0.05)
     summaries.append(summary)
 
-
 # Now you have a list of summaries for each chunk
 # You can access them using `summaries[0]`, `summaries[1]`, etc.
 # After generating summaries
 for i, summary in enumerate(summaries):
     st.write(f"### Summary {i+1}")
     st.write(summary)
-
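bert_legal_model is not defined within this hunk, but the AutoConfig/AutoTokenizer/AutoModel imports alongside summarizer point to bert-extractive-summarizer's Summarizer wrapped around a BERT encoder. A sketch under that assumption; the legal-domain checkpoint is an illustrative guess, not taken from this commit:

from transformers import AutoConfig, AutoModel, AutoTokenizer
from summarizer import Summarizer

checkpoint = "nlpaueb/legal-bert-base-uncased"  # assumed model; any BERT encoder works
config = AutoConfig.from_pretrained(checkpoint)
config.output_hidden_states = True  # the summarizer ranks sentences via hidden states
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint, config=config)

bert_legal_model = Summarizer(custom_model=model, custom_tokenizer=tokenizer)

chunk = "The claimed invention relates to a method for extracting patent claims."
# ratio=0.05 keeps roughly 5% of sentences; min_length drops very short ones
summary = bert_legal_model(chunk, min_length=8, ratio=0.05)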
|