inflaton committed
Commit 0b5e165 · 1 Parent(s): d6b30d7

best eval LF

llm_toolkit/eval_logical_reasoning.py CHANGED
@@ -24,6 +24,7 @@ data_path = os.getenv("LOGICAL_REASONING_DATA_PATH")
 results_path = os.getenv("LOGICAL_REASONING_RESULTS_PATH")
 use_english_datasets = os.getenv("USE_ENGLISH_DATASETS") == "true"
 using_p1 = os.getenv("USING_P1_PROMPT_TEMPLATE") == "true"
+using_llama_factory = os.getenv("USING_LLAMA_FACTORY") == "true"
 max_new_tokens = int(os.getenv("MAX_NEW_TOKENS", 16))
 repetition_penalty = float(os.getenv("REPETITION_PENALTY", 1.0))
 
@@ -43,6 +44,7 @@ model, tokenizer = load_model(
     model_name,
     load_in_4bit=load_in_4bit,
     adapter_name_or_path=adapter_name_or_path,
+    using_llama_factory=using_llama_factory,
     dtype=dtype,
 )
 
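Note: the two hunks above only thread the new USING_LLAMA_FACTORY flag from the environment into load_model(); the loader itself lives elsewhere in llm_toolkit and is not part of this commit. The sketch below shows one plausible shape for such a loader. Everything beyond the parameter names visible in the diff (the transformers/peft calls, the stubbed LLaMA-Factory branch) is an assumption for illustration, not the repo's actual implementation.

# Hypothetical sketch only: llm_toolkit's real load_model() is not shown in this
# diff, so the transformers/peft calls and the LLaMA-Factory branch below are
# assumptions for illustration.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer


def load_model(
    model_name,
    load_in_4bit=False,
    adapter_name_or_path=None,
    using_llama_factory=False,
    dtype=torch.bfloat16,
):
    if using_llama_factory:
        # With USING_LLAMA_FACTORY=true the toolkit presumably delegates loading
        # to LLaMA-Factory's own loader; that path is repo-specific and omitted.
        raise NotImplementedError("LLaMA-Factory loading path not sketched here")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=dtype,
        load_in_4bit=load_in_4bit,  # accepted by the pinned transformers==4.41.2
        trust_remote_code=True,
        device_map="auto",
    )
    if adapter_name_or_path:
        # Attach the LoRA adapter, e.g. inflaton-ai/InternLM_2_5-7b_LoRA-Adapter.
        model = PeftModel.from_pretrained(model, adapter_name_or_path)
    return model, tokenizer

Gating the LLaMA-Factory path behind an environment variable keeps eval_logical_reasoning.py's interface unchanged while letting scripts/eval-mgtv-best.sh (added below) opt in via export USING_LLAMA_FACTORY=true.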
results/mgtv-results_internlm_best.csv CHANGED
The diff for this file is too large to render. See raw diff
 
scripts/eval-mgtv-best.sh ADDED
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+BASEDIR=$(dirname "$0")
+cd $BASEDIR/..
+echo Current Directory:
+pwd
+
+BASEDIR=`pwd`
+
+nvidia-smi
+uname -a
+cat /etc/os-release
+lscpu
+grep MemTotal /proc/meminfo
+
+#pip install -r requirements.txt
+#cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]
+
+pip install transformers==4.41.2
+
+export USING_LLAMA_FACTORY=true
+
+export MODEL_NAME=internlm/internlm2_5-7b-chat-1m
+export ADAPTER_NAME_OR_PATH=inflaton-ai/InternLM_2_5-7b_LoRA-Adapter
+export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
+export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_internlm_best.csv
+
+echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
scripts/eval-mgtv.sh CHANGED
@@ -1 +1 @@
-eval-mgtv-glm-4-9b.sh
+eval-mgtv-best.sh