TheBloke committed
Commit bce43c2 · Parent: 0b402a5

Update README.md

Files changed (1): README.md (+4, -4)
README.md CHANGED
@@ -85,6 +85,10 @@ model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
         use_triton=use_triton,
         quantize_config=None)
 
+prompt = "Tell me about AI"
+prompt_template=f'''### Instruction: {prompt}
+### Response:'''
+
 print("\n\n*** Generate:")
 
 input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
@@ -96,10 +100,6 @@ print(tokenizer.decode(output[0]))
 # Prevent printing spurious transformers error when using pipeline with AutoGPTQ
 logging.set_verbosity(logging.CRITICAL)
 
-prompt = "Tell me about AI"
-prompt_template=f'''### Human: {prompt}
-### Assistant:'''
-
 print("*** Pipeline:")
 pipe = pipeline(
     "text-generation",