Files changed (1): app.py (+2 lines, -10 lines)
app.py CHANGED
@@ -4,19 +4,11 @@ from llama_cpp import Llama
4
# Load the GGML Alpaca model once at module import; the single Llama
# instance is reused for every request the UI makes.
llm = Llama(model_path="ggml-alpaca-7b-q4.bin")


def generate_text(input_text):
    """Answer *input_text* with the local Alpaca model.

    The prompt uses the "Q: ... A:" format the Alpaca fine-tune expects;
    generation stops at the next question marker or a newline.

    Returns the completion text (str) from the first choice.
    """
    # echo=False: return only the model's completion, not the prompt
    # itself (echo=True made the UI show "Q: ... A: ..." verbatim).
    output = llm(
        f"Q: {input_text} A:",
        max_tokens=256,
        stop=["Q:", "\n"],
        echo=False,
    )
    return output['choices'][0]['text']


# gr.inputs / gr.outputs were removed in Gradio 3 — use the top-level
# component classes instead.
input_text = gr.Textbox(lines=10, label="Enter your input text")
output_text = gr.Textbox(label="Output text")

description = "llama.cpp implementation in python [https://github.com/abetlen/llama-cpp-python]"

# Examples must match the interface's input components: one input here,
# so each example is a single question string (the previous
# [question, answer] rows had one value too many per row).
examples = [
    "What is the capital of France? ",
    "Who wrote the novel 'Pride and Prejudice'?",
    "What is the square root of 64?",
]

gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    title="Llama Language Model",
    description=description,
    examples=examples,
).launch()
 
 
4
# Load the GGML Alpaca model once at module import; the single Llama
# instance is reused for every request the UI makes.
llm = Llama(model_path="ggml-alpaca-7b-q4.bin")


def generate_text(input_text):
    """Answer *input_text* with the local Alpaca model.

    The prompt uses the "Q: ... A:" format the Alpaca fine-tune expects;
    generation stops at the next question marker, a newline, or a '#'.

    Returns the completion text (str) from the first choice.
    """
    # echo=False: return only the model's completion, not the prompt.
    output = llm(
        f"Q: {input_text} A:",
        max_tokens=256,
        stop=["Q:", "\n", "#"],
        echo=False,
    )
    return output['choices'][0]['text']


# gr.inputs / gr.outputs were removed in Gradio 3 — use the top-level
# component classes instead.
input_text = gr.Textbox(lines=10, label="Enter your input text")
output_text = gr.Textbox(label="Output text")

gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    title="Alpaca GGML",
).launch()
 
 
 
 
 
 
 
 
14