aliceblue11 committed
Update app.py

app.py
CHANGED
@@ -11,11 +11,14 @@ import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM
 import random
+import openai  # add the OpenAI API library
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 
+# Configure the OpenAI API client
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # Initialize Florence model
 device = "cuda" if torch.cuda.is_available() else "cpu"
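Review note: os.getenv("OPENAI_API_KEY") returns None when the secret is missing, so the assignment above succeeds and the failure only surfaces on the first API call. A minimal fail-fast sketch, assuming the same environment variable (the error message is illustrative):

    import os
    import openai

    # Resolve the key once at startup so a missing secret fails immediately.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is not set; add it to the Space secrets")
    openai.api_key = api_key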
@@ -177,6 +180,32 @@ class HuggingFaceInferenceNode:
     # Omitted features...
     pass
 
+# Prompt-generation helpers using gpt-4o-mini and Cohere Command R+
+def call_gpt4o_mini(content, system_message, max_tokens=1000, temperature=0.7, top_p=1):
+    response = openai.ChatCompletion.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "system", "content": system_message},
+            {"role": "user", "content": content},
+        ],
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+    )
+    return response.choices[0].message['content']
+
+def call_cohere(content, temperature=0.7, max_tokens=1000):
+    response = openai.ChatCompletion.create(
+        model="Cohere-Command-R+",
+        messages=[
+            {"role": "user", "content": content},
+        ],
+        max_tokens=max_tokens,
+        temperature=temperature,
+    )
+    return response.choices[0].message['content']
+
+
 # Gradio interface creation function
 def create_interface():
     prompt_generator = PromptGenerator()  # usable now that the PromptGenerator class is defined
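Review note: openai.ChatCompletion.create is the legacy interface that was removed in openai>=1.0, so on a current package both helpers raise at call time, which is one plausible cause of this Space's runtime error. "Cohere-Command-R+" is also not a model served by api.openai.com; reaching Command R+ would need Cohere's own SDK or an OpenAI-compatible proxy, not just a different model name. A minimal sketch of the gpt-4o-mini helper on the 1.x client, assuming openai>=1.0:

    import os
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    def call_gpt4o_mini(content, system_message, max_tokens=1000, temperature=0.7, top_p=1):
        # client.chat.completions.create replaces the removed openai.ChatCompletion.create
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": content},
            ],
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # In 1.x the message is an object, not a dict, so use attribute access.
        return response.choices[0].message.content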
@@ -238,7 +267,7 @@ def create_interface():
 
         with gr.Column(scale=2):
             with gr.Accordion("LLM을 사용한 프롬프트 생성", open=False):
-                model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="모델", value="Llama 3")
+                model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo", "gpt-4o-mini", "Cohere-Command-R+"], label="모델", value="Llama 3")
                 happy_talk = gr.Checkbox(label="행복한 대화", value=True)
                 compress = gr.Checkbox(label="압축", value=True)
                 compression_level = gr.Radio(["부드럽게", "중간", "강하게"], label="압축 레벨", value="강하게")
@@ -273,7 +302,7 @@ def create_interface():
     )
 
     generate_text_button.click(
-
+        lambda model, input_text, happy_talk, compress, compression_level, poster, custom_base_prompt: call_gpt4o_mini(input_text, custom_base_prompt) if model == "gpt-4o-mini" else call_cohere(input_text),
         inputs=[model, output, happy_talk, compress, compression_level, poster, custom_base_prompt],
         outputs=text_output
     )
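Review note: the lambda wired into generate_text_button.click sends every selection other than "gpt-4o-mini", including Mixtral, Mistral, Llama 3 and Mistral-Nemo, to call_cohere, and it accepts but ignores happy_talk, compress, compression_level and poster. A sketch of an explicit dispatcher that surfaces unhandled models instead (the generate_prompt name is hypothetical; call_gpt4o_mini and call_cohere are the helpers from this diff):

    # Hypothetical dispatcher for the click handler above.
    def generate_prompt(model, input_text, happy_talk, compress,
                        compression_level, poster, custom_base_prompt):
        # happy_talk, compress, compression_level and poster are accepted but,
        # as in the diff, not yet used by either backend.
        if model == "gpt-4o-mini":
            return call_gpt4o_mini(input_text, custom_base_prompt)
        if model == "Cohere-Command-R+":
            return call_cohere(input_text)
        # The four Hugging Face models still need a backend; raising makes the
        # gap visible in the UI instead of silently routing them to Cohere.
        raise gr.Error(f"No backend wired for model: {model}")

    generate_text_button.click(
        generate_prompt,
        inputs=[model, output, happy_talk, compress, compression_level, poster, custom_base_prompt],
        outputs=text_output,
    )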