wjbmattingly committed on
Commit
3f06e75
1 Parent(s): 6748646

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -6
app.py CHANGED
@@ -8,17 +8,40 @@ import matplotlib.patches as patches
8
  import numpy as np
9
  import requests
10
  from io import BytesIO
 
11
 
12
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
13
 
14
- config = AutoConfig.from_pretrained('medieval-data/florence2-medieval-bbox-line-detection', trust_remote_code=True)
 
 
 
 
15
  # Modify the vision configuration
16
- if hasattr(config, 'vision_config'):
17
- config.vision_config.model_type = 'davit'
18
 
19
- model = AutoModelForCausalLM.from_pretrained('medieval-data/florence2-medieval-bbox-line-detection', trust_remote_code=True).eval()
20
- processor = AutoProcessor.from_pretrained('medieval-data/florence2-medieval-bbox-line-detection', trust_remote_code=True)
21
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  TITLE = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
23
  DESCRIPTION = "The demo for Florence-2 fine-tuned on DocVQA dataset. You can find the notebook [here](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing). Read more about Florence-2 fine-tuning [here](finetune-florence2)."
24
 
 
8
  import numpy as np
9
  import requests
10
  from io import BytesIO
11
+ from unittest.mock import patch
12
 
13
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
14
 
15
+ model_dir = 'medieval-data/florence2-medieval-bbox-line-detection'
16
+ with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
17
+ # Load the configuration
18
+ config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
19
+
20
  # Modify the vision configuration
21
+ if hasattr(config, 'vision_config'):
22
+ config.vision_config.model_type = 'davit'
23
 
24
+ print("Modified vision configuration:")
25
+ print(config.vision_config)
26
+
27
+ # Try to load the model with the modified configuration
28
+ try:
29
+ model = AutoModelForCausalLM.from_pretrained(
30
+ model_dir,
31
+ config=config,
32
+ trust_remote_code=True
33
+ )
34
+ print("Model loaded successfully!")
35
+ except Exception as e:
36
+ print(f"Failed to load model: {str(e)}")
37
+
38
+ # Load the processor
39
+ processor = AutoProcessor.from_pretrained(
40
+ model_dir,
41
+ trust_remote_code=True,
42
+ revision='refs/pr/6'
43
+ )
44
+ print("Processor loaded successfully!")
45
  TITLE = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
46
  DESCRIPTION = "The demo for Florence-2 fine-tuned on DocVQA dataset. You can find the notebook [here](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing). Read more about Florence-2 fine-tuning [here](finetune-florence2)."
47