from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Tuple, Type
import logging
import json
import os
import datetime
import hashlib
import csv
import requests
import re
import html
import torch
from torch import cuda, bfloat16
import sys
import gc
from pygments.lexers import guess_lexer, ClassNotFound
import gradio as gr
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import datasets
from datasets import load_dataset
import evaluate
from transformers import LlamaForCausalLM, LlamaTokenizer
from setfit import SetFitModel, SetFitTrainer
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
for stop_word in stop_words:
if s.endswith(stop_word):
return True
for i in range(1, len(stop_word)):
if s.endswith(stop_word[:i]):
return True
return False
def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
#prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
prompt = "Das folgende ist eine Unterhaltung zwischen einem Menschen und einem KI-Assistenten, der Baize genannt wird. Baize ist ein open-source KI-Assistent, der von UCSD entwickelt wurde. Der Mensch und der KI-Assistent chatten abwechselnd miteinander in deutsch. Die Antworten des KI Assistenten sind immer so ausführlich wie möglich und in Markdown Schreibweise und in deutscher Sprache. Wenn nötig übersetzt er sie ins Deutsche. Die Antworten des KI-Assistenten vermeiden Themen und Antworten zu unethischen, kontroversen oder sensiblen Themen. Die Antworten sind immer sehr höflich formuliert..\n[|Human|]Hallo!\n[|AI|]Hi!"
history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0],x[1]) for x in history]
history.append("\n[|Human|]{}\n[|AI|]".format(text))
history_text = ""
flag = False
for x in history[::-1]:
if tokenizer(prompt+history_text+x, return_tensors="pt")['input_ids'].size(-1) <= max_length:
history_text = x + history_text
flag = True
else:
break
if flag:
return prompt+history_text,tokenizer(prompt+history_text, return_tensors="pt")
else:
return None
def load_tokenizer_and_model(base_model, load_8bit=False):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
#tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast = True, use_auth_token=True, bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast = True, use_auth_token=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
if device == "cuda":
model = AutoModelForCausalLM.from_pretrained(
base_model,
load_in_8bit=load_8bit,
torch_dtype=torch.float16,
device_map="auto",
use_auth_token=True,
)
else:
model = AutoModelForCausalLM.from_pretrained(
base_model, device_map={"": device}, low_cpu_mem_usage=True
)
return tokenizer,model, device
# note: here, however, chat data is loaded!
def load_tokenizer_and_model_Baize(base_model, load_8bit=True):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
tokenizer = LlamaTokenizer.from_pretrained(base_model, add_eos_token=True, use_auth_token=True)
    model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=load_8bit, device_map="auto")
return tokenizer,model, device
def load_model(base_model, load_8bit=False):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if device == "cuda":
model = AutoModelForCausalLM.from_pretrained(
base_model,
load_in_8bit=load_8bit,
torch_dtype=torch.float16,
device_map="auto",
use_auth_token=True
)
else:
model = AutoModelForCausalLM.from_pretrained(
base_model, device_map={"": device}, low_cpu_mem_usage=True, use_auth_token=True
)
#if not load_8bit:
#model.half() # seems to fix bugs for some users.
model.eval()
return model, device
def load_tokenizer(base_model):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast = True)
return tokenizer
# Streaming decoding: despite its name, greedy_search samples token by token with temperature and top-p
def greedy_search(input_ids: torch.Tensor,
model: torch.nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
stop_words: list,
max_length: int,
temperature: float = 1.0,
top_p: float = 1.0,
top_k: int = 25) -> Iterator[str]:
generated_tokens = []
past_key_values = None
current_length = 1
for i in range(max_length):
with torch.no_grad():
if past_key_values is None:
outputs = model(input_ids)
else:
outputs = model(input_ids[:, -1:], past_key_values=past_key_values)
logits = outputs.logits[:, -1, :]
past_key_values = outputs.past_key_values
# apply temperature
logits /= temperature
probs = torch.softmax(logits, dim=-1)
# apply top_p
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > top_p
probs_sort[mask] = 0.0
# apply top_k
#if top_k is not None:
# probs_sort1, _ = torch.topk(probs_sort, top_k)
# min_top_probs_sort = torch.min(probs_sort1, dim=-1, keepdim=True).values
# probs_sort = torch.where(probs_sort < min_top_probs_sort, torch.full_like(probs_sort, float(0.0)), probs_sort)
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
next_token = torch.gather(probs_idx, -1, next_token)
input_ids = torch.cat((input_ids, next_token), dim=-1)
generated_tokens.append(next_token[0].item())
text = tokenizer.decode(generated_tokens)
yield text
if any([x in text for x in stop_words]):
del past_key_values
del logits
del probs
del probs_sort
del probs_idx
del probs_sum
gc.collect()
return
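
# Hedged usage sketch (not part of the original file, never called): shows how the
# streaming generator above can be consumed; the prompt text and the generation
# parameters below are placeholder assumptions, not values taken from this app.
def _example_stream(model, tokenizer, device):
    input_ids = tokenizer("[|Human|]Hallo!\n[|AI|]", return_tensors="pt")["input_ids"].to(device)
    last = ""
    # each yielded value is the full text decoded so far, so we simply keep the last one
    for partial in greedy_search(input_ids, model, tokenizer,
                                 stop_words=["[|Human|]", "[|AI|]"],
                                 max_length=128, temperature=0.7, top_p=0.9):
        last = partial
    return last
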
########################################
#Predict
def predict(model,
tokenizer,
device,
text,
history,
top_p,
temperature,
max_length_tokens,
max_context_length_tokens,):
if text=="":
return "Leer"
try:
model
except:
return [[text,"No Model Found"]]
inputs = generate_prompt_with_history(text,history,tokenizer,max_length=max_context_length_tokens)
if inputs is None:
return "Too long"
else:
prompt,inputs=inputs
begin_length = len(prompt)
input_ids = inputs["input_ids"][:,-max_context_length_tokens:].to(device)
torch.cuda.empty_cache()
    # torch.no_grad() means that no gradients are computed for the tensors involved during backpropagation
    # the network is not supposed to be changed here anyway (backprop is not needed), since this is about inference prompts!
with torch.no_grad():
antwort=[[""],[""]]
        # all previous prompts are stored in history as ('Human', 'AI') tuples - these markers therefore also serve as the stop words that delimit the next entry
for x in greedy_search(input_ids,model,tokenizer,stop_words=["[|Human|]", "[|AI|]"],max_length=max_length_tokens,temperature=temperature,top_p=top_p):
if is_stop_word_or_prefix(x,["[|Human|]", "[|AI|]"]) is False:
if "[|Human|]" in x:
x = x[:x.index("[|Human|]")].strip()
if "[|AI|]" in x:
x = x[:x.index("[|AI|]")].strip()
x = x.strip()
a, b= [[y[0],convert_to_markdown(y[1])] for y in history]+[[text, convert_to_markdown(x)]],history + [[text,x]]
antwort = antwort + a
del input_ids
gc.collect()
torch.cuda.empty_cache()
    return antwort
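
# Hedged usage sketch (not part of the original file, never called): shows how predict()
# above could be invoked for a single turn; the question text and the sampling/length
# values are placeholder assumptions.
def _example_predict(model, tokenizer, device):
    return predict(model, tokenizer, device,
                   text="Hallo, wie geht es dir?", history=[],
                   top_p=0.9, temperature=0.7,
                   max_length_tokens=512, max_context_length_tokens=2048)
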
# Function the Trainer needs in order to evaluate the training - with a metric
def compute_metrics(eval_pred):
    # compute a metric so that the training can be measured - is it getting better?
    metric = evaluate.load("accuracy") # three supported kinds of metric here: f1, roc_auc, or accuracy
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
#Call compute on metric to calculate the accuracy of your predictions.
#Before passing your predictions to compute, you need to convert the predictions to logits (remember all Transformers models return logits):
return metric.compute(predictions=predictions, references=labels)
def compute_metrics2(p):
pred, labels = p
pred = np.argmax(pred, axis=1)
accuracy = accuracy_score(y_true=labels, y_pred=pred)
recall = recall_score(y_true=labels, y_pred=pred)
precision = precision_score(y_true=labels, y_pred=pred)
f1 = f1_score(y_true=labels, y_pred=pred)
return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
def convert_to_markdown(text):
text = text.replace("$","$")
def replace_leading_tabs_and_spaces(line):
new_line = []
for char in line:
if char == "\t":
new_line.append("	")
elif char == " ":
new_line.append(" ")
else:
break
return "".join(new_line) + line[len(new_line):]
markdown_text = ""
lines = text.split("\n")
in_code_block = False
for line in lines:
if in_code_block is False and line.startswith("```"):
in_code_block = True
markdown_text += f"{line}\n"
elif in_code_block is True and line.startswith("```"):
in_code_block = False
markdown_text += f"{line}\n"
elif in_code_block:
markdown_text += f"{line}\n"
else:
line = replace_leading_tabs_and_spaces(line)
line = re.sub(r"^(#)", r"\\\1", line)
markdown_text += f"{line} \n"
return markdown_text
# Encode datasets - into train and validation sets
class Dataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels=None):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
if self.labels:
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.encodings["input_ids"])
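
# Hedged example (not part of the original file, never called): shows how the Dataset
# wrapper above can be built from tokenizer output; the sample texts and labels are
# made-up placeholders.
def _example_build_dataset(tokenizer):
    texts = ["Hallo Welt", "Guten Tag"]  # placeholder texts
    labels = [0, 1]                       # placeholder labels
    encodings = tokenizer(texts, truncation=True, padding=True)
    return Dataset(encodings, labels)
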
#######################################################
#Fine-Tuning
#######################################################
#load Dataset
def daten_laden(name):
    return load_dataset("alexkueck/tis", delimiter=";", column_names=["id", "text"])
    #return load_dataset(name)
# Quantization - to speed up training
def bnb_config(load4Bit, double_quant):
bnb_config = BitsAndBytesConfig(
load_in_4bit= load4Bit,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=bfloat16,
bnb_4bit_use_double_quant=double_quant,
)
return bnb_config
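
# Hedged usage sketch (not part of the original file, never called): shows how the
# BitsAndBytesConfig built above could be passed to from_pretrained; the model name
# is a placeholder assumption.
def _example_load_quantized(base_model="meta-llama/Llama-2-7b-hf"):
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        quantization_config=bnb_config(load4Bit=True, double_quant=True),
        device_map="auto",
    )
    return model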