import re

import gradio as gr
import spaces
from markdownify import markdownify
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load both Reader-LM checkpoints once at startup so requests can switch
# between model sizes without reloading weights.
models = {
    "jinaai/reader-lm-0.5b": AutoModelForCausalLM.from_pretrained(
        "jinaai/reader-lm-0.5b", trust_remote_code=True
    ).eval().to("cuda"),
    "jinaai/reader-lm-1.5b": AutoModelForCausalLM.from_pretrained(
        "jinaai/reader-lm-1.5b", trust_remote_code=True
    ).eval().to("cuda"),
}

tokenizers = {
    "jinaai/reader-lm-0.5b": AutoTokenizer.from_pretrained(
        "jinaai/reader-lm-0.5b", trust_remote_code=True
    ),
    "jinaai/reader-lm-1.5b": AutoTokenizer.from_pretrained(
        "jinaai/reader-lm-1.5b", trust_remote_code=True
    ),
}


@spaces.GPU
def run_example(html_content, model_id="jinaai/reader-lm-1.5b"):
    print("Start Model Processing")
    model = models[model_id]
    tokenizer = tokenizers[model_id]

    # Reader-LM expects the raw HTML as a single user turn; append the
    # assistant prompt so generation starts directly on the response.
    messages = [{"role": "user", "content": html_content}]
    input_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer.encode(input_text, return_tensors="pt").to("cuda")

    # Greedy decoding; temperature is ignored when do_sample=False, so it is omitted.
    outputs = model.generate(
        inputs, max_new_tokens=1024, do_sample=False, repetition_penalty=1.08
    )

    # Extract only the assistant turn from the decoded sequence.
    pattern = r"<\|im_start\|>assistant(.*?)<\|im_end\|>"
    assistant_response = re.findall(pattern, tokenizer.decode(outputs[0]), re.DOTALL)
    # Guard against generations truncated before <|im_end|> (no regex match).
    reader_lm_output = assistant_response[0] if assistant_response else ""

    print("Start Markdownify Processing")
    markdownify_output = markdownify(html_content)
    return reader_lm_output, markdownify_output


css = """
#output {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

example_html = """