Neo111x committed on
Commit
229f200
·
verified ·
1 Parent(s): 053d391

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +76 -0
README.md ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ### 1. Introduction of LLM4Decompile
3
+
4
+ Falcon3-decompiler-3b aims to decompile x86 assembly instructions into C.
5
+
6
+
7
+
8
+ ### 2. Evaluation Results
9
+
10
+
11
+
12
+ ### 3. How to Use
13
+ Here is an example of how to use our model.
14
+ Note: **Replace** asm_func with the function that you want to decompile
15
+
16
+ **Decompilation:** Use falcon3-decompiler-3b to translate ghidra decompilation output to more readable code:
17
+ ```python
18
+ from transformers import AutoTokenizer, AutoModelForCausalLM
19
+ import torch
20
+
21
+ model_path = 'Neo111x/falcon3-decompiler-3b'
22
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
23
+ model = AutoModelForCausalLM.from_pretrained(model_path,torch_dtype=torch.bfloat16).cuda()
24
+
25
+ from transformers import AutoTokenizer, AutoModelForCausalLM
26
+ import torch
27
+ import os
28
+
29
+ asm_func = """
30
+ char * func0(char **param_1,int param_2)
31
+
32
+ {
33
+ char **ppcVar1;
34
+ char *__s;
35
+ size_t sVar2;
36
+ int iVar3;
37
+ char *pcVar4;
38
+
39
+ pcVar4 = "";
40
+ if (0 < param_2) {
41
+ iVar3 = 0;
42
+ ppcVar1 = param_1 + (ulong)(param_2 - 1) + 1;
43
+ do {
44
+ __s = *param_1;
45
+ sVar2 = strlen(__s);
46
+ if (iVar3 < (int)sVar2) {
47
+ pcVar4 = __s;
48
+ iVar3 = (int)sVar2;
49
+ }
50
+ param_1 = param_1 + 1;
51
+ } while (param_1 != ppcVar1);
52
+ }
53
+ return pcVar4;
54
+ }
55
+ """
56
+
57
+ before = f"# This is the assembly code:\n"#prompt
58
+ after = "\n# What is the source code?\n"#prompt
59
+ asm_func = before+asm_func.strip()+after
60
+ model_path = "Neo111x/falcon3-decompiler-3b"
61
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
62
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype="auto", device_map="auto").to("cuda:0")
63
+
64
+ inputs = tokenizer(asm_func, return_tensors="pt").to("cuda:0")
65
+ with torch.no_grad():
66
+ outputs = model.generate(**inputs, max_new_tokens=2048)  # the model's max context length is 4096; keep max_new_tokens within that budget
67
+ c_func_decompile = tokenizer.decode(outputs[0][len(inputs[0]):-1])
68
+
69
+ # Note only decompile one function, where the original file may contain multiple functions
70
+ print(f'original function:\n{asm_func}')
71
+ print(f'decompiled function:\n{c_func_decompile}')
72
+ ```
73
+
74
+ ### 4. Contact
75
+
76
+ If you have any questions, please raise an issue.