Update app.py
app.py
CHANGED
@@ -1,5 +1,14 @@
-import streamlit as st
-import
+import streamlit as st
+import gradio as gr
+import numpy as np
+from audiorecorder import audiorecorder
+import whisper
+import os
+import streamlit.components.v1 as components
+import tempfile
+import io
+import requests
+import json

 def chunk_text(text, chunk_size=2000):
     chunks = []
@@ -21,6 +30,31 @@ st.title("Patent Claims Extraction")
 # API Key Input
 api_key = st.text_input("Enter your OpenAI API Key:", type="password")

+# Camera Input
+image = st.camera_input("Camera input")
+
+with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tf:
+    if image:
+        tf.write(image.read())
+        temp_image_path = tf.name
+    else:
+        temp_image_path = None
+
+# Audio Recording
+audio = st.audio_recorder("Click to record audio", "Click to stop recording")
+
+submit_button = st.button("Use this audio")
+
+if submit_button:
+    model = whisper.load_model("base")
+    result = model.transcribe(audio)
+    st.info("Transcribing...")
+    st.success("Transcription complete")
+    transcript = result['text']
+
+    with st.expander("See transcript"):
+        st.markdown(transcript)
+
 # Model Selection Dropdown
 model_choice = st.selectbox(
     "Select the model you want to use:",
@@ -28,7 +62,7 @@ model_choice = st.selectbox(
 )

 # Context, Subject, and Level
-context = "You are a patent claims identifier and extractor. You will freeform text, identify any claims contained therein that may be patentable. You identify
+context = "You are a patent claims identifier and extractor. You will freeform text, identify any claims contained therein that may be patentable. You identify, extract, print such claims, briefly explain why each claim is patentable."
 userinput = st.text_input("Input Text:", "Freeform text here!")

 # Initialize OpenAI API
@@ -42,10 +76,11 @@ claims_extraction = ""
 # Initialize status placeholder
 learning_status_placeholder = st.empty()
 disable_button_bool = False
+
 if userinput and api_key and st.button("Extract Claims", key="claims_extraction", disabled=disable_button_bool):
     # Split the user input into chunks
     input_chunks = chunk_text(userinput)
-
+
     # Initialize a variable to store the extracted claims
     all_extracted_claims = ""

@@ -57,7 +92,7 @@ if userinput and api_key and st.button("Extract Claims", key="claims_extraction"
         claims_extraction_response = openai.ChatCompletion.create(
             model=model_choice,
             messages=[
-                {"role": "user", "content": f"Extract any patentable claims from the following: \n {chunk}. \n
+                {"role": "user", "content": f"Extract any patentable claims from the following: \n {chunk}. \n Extract each claim. Briefly explain why you extracted this word phrase. Exclude any additional commentary."}
             ]
         )

@@ -73,46 +108,47 @@ if userinput and api_key and st.button("Extract Claims", key="claims_extraction"
     # Display generated objectives for all chunks
     learning_status_placeholder.text(f"Patentable Claims Extracted!\n{all_extracted_claims.strip()}")

-#
-if st.button("Extract Claims") and api_key:
-
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Claims Extraction
+if st.button("Extract Claims") and api_key and transcript:
+    # You should have 'transcript' available at this point
+    # Ensure 'transcript' is defined before this block.
+
+    # Split the user input into chunks
+    input_chunks = chunk_text(transcript)  # Use 'transcript' instead of 'userinput'
+
+    # Initialize a variable to store the extracted claims
+    all_extracted_claims = ""
+
+    for chunk in input_chunks:
+        # Display status message for the current chunk
+        learning_status_placeholder.text(f"Extracting Patentable Claims for chunk {input_chunks.index(chunk) + 1}...")
+
+        # API call to generate objectives for the current chunk
+        claims_extraction_response = openai.ChatCompletion.create(
+            model=model_choice,
+            messages=[
+                {"role": "user", "content": f"Extract any patentable claims from the following: \n {chunk}. \n Extract each claim. Briefly explain why you extracted this word phrase. Exclude any additional commentary."}
+            ]
+        )
+
+        # Extract the generated objectives from the API response
+        claims_extraction = claims_extraction_response['choices'][0]['message']['content']
+
+        # Append the extracted claims from the current chunk to the overall results
+        all_extracted_claims += claims_extraction.strip()
+
+    # Save the generated objectives to session state
+    st.session_state.claims_extraction = all_extracted_claims
+
+    # Display generated objectives for all chunks
+    learning_status_placeholder.text(f"Patentable Claims Extracted!\n{all_extracted_claims.strip()}")

 # Display status message
-lesson_plan=st.text("Extracting Patentable Claims...")
+lesson_plan = st.text("Extracting Patentable Claims...")

 # Extract and display
 assistant_reply = claims_extraction_response['choices'][0]['message']['content']
-claims_extraction=st.text(assistant_reply.strip())
+claims_extraction = st.text(assistant_reply.strip())

-# Citation
+# Citation
 st.markdown("<sub>This app was created by [Taylor Ennen](https://github.com/taylor-ennen/GPT-Streamlit-MVP) & [Tonic](https://huggingface.co/tonic)</sub>", unsafe_allow_html=True)
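Note on the new recording path: the diff calls st.audio_recorder(...) and then passes the raw recorder value straight to model.transcribe(audio). Whisper's transcribe() accepts a file path or an audio array, and the imported component (from audiorecorder import audiorecorder) exposes a plain audiorecorder(start_prompt, stop_prompt) function rather than an st.audio_recorder method, so the recording normally has to be written out before Whisper can read it. Below is a minimal sketch of that wiring, assuming the component returns a pydub AudioSegment; it is hypothetical glue code, not the committed app.py.

# Minimal sketch, not the committed code: capture audio with the imported
# audiorecorder component, write it to a temporary WAV file, and hand the
# file path to Whisper. Assumes audiorecorder() returns a pydub AudioSegment.
import tempfile

import streamlit as st
import whisper
from audiorecorder import audiorecorder

audio = audiorecorder("Click to record audio", "Click to stop recording")

if len(audio) > 0:  # a non-empty recording was captured
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tf:
        audio.export(tf.name, format="wav")  # AudioSegment -> WAV on disk
        audio_path = tf.name

    st.info("Transcribing...")
    model = whisper.load_model("base")     # same model size as in the diff
    result = model.transcribe(audio_path)  # transcribe() takes a path or numpy array
    transcript = result["text"]
    st.success("Transcription complete")

    with st.expander("See transcript"):
        st.markdown(transcript)

Whisper decodes the file through ffmpeg, so ffmpeg has to be available in the Space for the path-based call to work.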
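For reference, only the signature of chunk_text is visible in this diff; both extraction loops assume it returns a list of strings. A fixed-size splitter consistent with that signature could look like the following hypothetical sketch (not the actual body in app.py).

# Hypothetical sketch of a fixed-size chunker matching the
# chunk_text(text, chunk_size=2000) signature shown in the diff;
# the real implementation in app.py is not part of this commit.
def chunk_text(text, chunk_size=2000):
    chunks = []
    # Walk the string in chunk_size steps and collect the slices.
    for start in range(0, len(text), chunk_size):
        chunks.append(text[start:start + chunk_size])
    return chunks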