khulnasoft committed · verified
Commit 6fd481b · 1 Parent(s): cb45dd9

Update README.md
Files changed (1):
  1. README.md (+63, -2)

README.md CHANGED
@@ -1,5 +1,66 @@
  ---
  license: other
- license_name: deepcode-license
+ license_name: deepcode-ai
  license_link: LICENSE
- ---
+ ---
+
+ ## 3. Quick Start
+ ### Installation
+ In a `Python >= 3.8` environment, install the necessary dependencies by running the following commands:
+ ```shell
+ git clone https://github.com/deepcode-ai/DeepCode-VL-VL
+ cd DeepCode-VL-VL
+ pip install -e .
+ ```
+ ### Simple Inference Example
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM
+ from deepcode_vl.models import VLChatProcessor, MultiModalityCausalLM
+ from deepcode_vl.utils.io import load_pil_images
+
+ # specify the path to the model
+ model_path = "deepcode-ai/deepcode-base"
+ vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
+ tokenizer = vl_chat_processor.tokenizer
+ vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+ vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
+
+ conversation = [
+     {
+         "role": "User",
+         "content": "<image_placeholder>Describe each stage of this image.",
+         "images": ["./images/training_pipelines.png"]
+     },
+     {
+         "role": "Assistant",
+         "content": ""
+     }
+ ]
+
+ # load images and prepare for inputs
+ pil_images = load_pil_images(conversation)
+ prepare_inputs = vl_chat_processor(
+     conversations=conversation,
+     images=pil_images,
+     force_batchify=True
+ ).to(vl_gpt.device)
+
+ # run image encoder to get the image embeddings
+ inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
+
+ # run the model to get the response
+ outputs = vl_gpt.language_model.generate(
+     inputs_embeds=inputs_embeds,
+     attention_mask=prepare_inputs.attention_mask,
+     pad_token_id=tokenizer.eos_token_id,
+     bos_token_id=tokenizer.bos_token_id,
+     eos_token_id=tokenizer.eos_token_id,
+     max_new_tokens=512,
+     do_sample=False,
+     use_cache=True
+ )
+
+ answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
+ print(f"{prepare_inputs['sft_format'][0]}", answer)
+ ```
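+ The `conversation` list is the prompt structure: each turn is a dict with a `role` ("User" or "Assistant"), a `content` string in which `<image_placeholder>` marks where an image is inserted, and an optional `images` list of file paths; the trailing empty Assistant turn is the slot the model fills. As a minimal sketch that is not part of the original README (it assumes the processor also accepts an earlier Assistant turn given as plain text), a follow-up question could be asked by appending the decoded answer and a new User turn, then repeating the same preparation and generation calls:
+ ```python
+ # Hypothetical follow-up turn, reusing the objects created in the example above.
+ # Assumption: the chat processor formats earlier Assistant turns from their "content" text.
+ conversation = [
+     conversation[0],                               # the original User turn with the image
+     {"role": "Assistant", "content": answer},      # the model's previous reply
+     {"role": "User", "content": "Summarize that in one sentence."},
+     {"role": "Assistant", "content": ""},          # empty slot for the new reply
+ ]
+ pil_images = load_pil_images(conversation)
+ prepare_inputs = vl_chat_processor(
+     conversations=conversation,
+     images=pil_images,
+     force_batchify=True
+ ).to(vl_gpt.device)
+ inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
+ outputs = vl_gpt.language_model.generate(
+     inputs_embeds=inputs_embeds,
+     attention_mask=prepare_inputs.attention_mask,
+     pad_token_id=tokenizer.eos_token_id,
+     bos_token_id=tokenizer.bos_token_id,
+     eos_token_id=tokenizer.eos_token_id,
+     max_new_tokens=128,
+     do_sample=False,
+     use_cache=True
+ )
+ print(tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True))
+ ```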
+ ### CLI Chat
+ ```bash
+ python cli_chat.py --model_path "deepcode-ai/deepcode-base"
+ # or local path
+ python cli_chat.py --model_path "local model path"
+ ```