import streamlit as st
from transformers import AutoTokenizer, pipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
# auto_gptq appends the ".safetensors" extension itself when
# use_safetensors=True, so the basename must not include it.
model_basename = "Jackson2-4bit-128g-GPTQ"

use_strict = False
use_triton = False


# Cache the tokenizer, model, and pipeline so Streamlit does not reload
# the 13B model on every widget interaction.
@st.cache_resource
def load_pipeline():
    tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir, use_fast=True)

    quantize_config = BaseQuantizeConfig(
        bits=4,
        group_size=128,
        desc_act=False,
    )

    model = AutoGPTQForCausalLM.from_quantized(
        quantized_model_dir,
        use_safetensors=True,
        strict=use_strict,
        model_basename=model_basename,
        device="cuda:0",
        use_triton=use_triton,
        quantize_config=quantize_config,
    )

    return pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,
        temperature=0.1,
        top_p=0.95,
        repetition_penalty=1.15,
    )


pipe = load_pipeline()

user_input = st.text_input("Input a phrase")

# Vicuna-style prompt format expected by the model
prompt_template = f'''USER: {user_input}
ASSISTANT:'''

# Generate output when the "Generate" button is pressed
if st.button("Generate the prompt"):
    output = pipe(prompt_template)[0]["generated_text"]
    st.text_area("Prompt", value=output)
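# To launch the app locally (assuming this script is saved as app.py,
# which is a filename chosen here for illustration):
#   streamlit run app.py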