Update app.py
app.py CHANGED
@@ -15,8 +15,6 @@ def predict_step(image_paths, model):
         i_image = Image.open(image_path)
         if i_image.mode != "RGB":
             i_image = i_image.convert(mode="RGB")
-
-        #i_image.resize((640, 480))
 
         images.append(i_image)
 
@@ -69,9 +67,9 @@ def predict_step_pixel(dataset_pixel_values, model):
     """
 def load_image2txt_model(image_model_name):
     model = VisionEncoderDecoderModel.from_pretrained(image_model_name)
-    feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-large-patch4-window7-224")
+    feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-large-patch4-window7-224", use_auth_token=auth_token)
 
-    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+    tokenizer = GPT2Tokenizer.from_pretrained("gpt2", use_auth_token=auth_token)
     tokenizer.pad_token = tokenizer.eos_token
 
     model = model.to(device)
@@ -81,7 +79,6 @@ def inference_image_pipe(image_input):
     image_model_name = "./checkpoint-21000"
 
     tokenizer, feature_extractor, image_model = load_image2txt_model(image_model_name)
-    #with autocast('cpu'):
     text = predict_step_single_image(image_input, tokenizer, feature_extractor, image_model)[0]
     return text
 
@@ -89,7 +86,7 @@ with gr.Interface(fn=inference_image_pipe,
                   inputs=gr.Image(shape=(256, 256)),
                   outputs="text",
                   examples=["3212210S4492629-1.png", "3216497S4499373-1.png"]) as demo:
-    gr.Markdown("POC
+    gr.Markdown("POC XRaySwinGen - Automatic Medical Report")
 
 
 if __name__ == "__main__":
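For reference, a minimal sketch of the updated load_image2txt_model after this commit, assembled from the context and added lines in the diff. The return order is inferred from the call site (tokenizer, feature_extractor, image_model = load_image2txt_model(...)); the auth_token source and the device setup are assumptions not shown in the diff.

# Sketch only: reconstructs load_image2txt_model as the diff implies it.
# auth_token and device definitions are assumed, not taken from the diff.
import os
import torch
from transformers import VisionEncoderDecoderModel, AutoFeatureExtractor, GPT2Tokenizer

auth_token = os.environ.get("HF_TOKEN")  # assumed: token provided as a Space secret
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed device setup

def load_image2txt_model(image_model_name):
    # Vision encoder-decoder checkpoint, e.g. the local "./checkpoint-21000" used above
    model = VisionEncoderDecoderModel.from_pretrained(image_model_name)
    # Both downloads now pass use_auth_token, as added in this commit
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        "microsoft/swin-large-patch4-window7-224", use_auth_token=auth_token
    )
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2", use_auth_token=auth_token)
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
    model = model.to(device)
    return tokenizer, feature_extractor, model  # order matches the call site in inference_image_pipe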