Kvikontent committed on
Commit 93a61c7 · verified · 1 Parent(s): 4c03b99

Update app.py

Files changed (1):
  1. app.py +25 -29
app.py CHANGED
@@ -1,33 +1,29 @@
  import gradio as gr
- from diffusers import DiffusionPipeline

- # Load the DiffusionPipeline and LORA weights
- pipeline = DiffusionPipeline.from_pretrained("stablediffusionapi/juggernaut-xl-v5")
- pipeline.load_lora_weights("Kvikontent/kviimager2.0")

- # Define the function to handle user input and generate the image
- def generate_image(prompt):
-     image = pipeline(prompt)
-     return image

- # Create a Gradio interface
- iface = gr.Interface(
-     fn=generate_image,
-     inputs="text",
-     outputs="image",
-     title="KVIImager 2.0 Demo",
-     description="Enter a prompt to generate an image"
- )
-
- # Add examples for the user input
- examples = [
-     "Sunset over the ocean",
-     "Starry night sky"
- ]
-
- iface.set_config(
-     examples=examples
- )
-
- # Launch the Gradio interface
- iface.launch()

+ import requests
+ import base64
+ import io
+ from PIL import Image
  import gradio as gr

+ API_TOKEN = os.environ["API_TOKEN"] # replace with your own API Token here
+ API_URL = "https://api-inference.huggingface.co/models/Kvikontent/kviimager2.0"
+ HEADERS = {
+     'Content-Type': 'application/json',
+     'Authorization': f'{API_TOKEN}'
+ }

+ def predictor(prompt):
+     payload = {'inputs': prompt}
+
+     try:
+         response = requests.request("POST", url=API_URL, data=payload, headers=HEADERS)
+
+         if not (response is None or response.status_code != 200):
+             result = response.text[7:-5]
+
+             return Image.open(io.BytesIO(base64.b64decode(result)))
+
+     except Exception as e:
+         print('Error processing request')
+         raise ValueError(e)

+ iface = gr.Interface(fn=predictor, inputs="textbox", outputs='image').launch()
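
As committed, app.py reads os.environ["API_TOKEN"] without importing os, posts the payload as form data while declaring a JSON Content-Type, and slices a base64 string out of response.text. A minimal sketch of the same request, assuming the Inference API expects an Authorization: Bearer token, accepts a JSON body, and returns the generated image as raw bytes:

import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/Kvikontent/kviimager2.0"
# Assumption: the token lives in the API_TOKEN environment variable and is sent as a Bearer credential.
HEADERS = {"Authorization": f"Bearer {os.environ['API_TOKEN']}"}

def predictor(prompt):
    # Send the prompt as a JSON body; requests sets the Content-Type header itself.
    response = requests.post(API_URL, json={"inputs": prompt}, headers=HEADERS, timeout=120)
    response.raise_for_status()
    # Assumption: a successful response carries the generated image as raw bytes.
    return Image.open(io.BytesIO(response.content))

The Gradio wiring from the commit, gr.Interface(fn=predictor, inputs="textbox", outputs='image').launch(), works unchanged against this version of predictor.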