matthewfarant committed
Commit ed16986
1 Parent(s): b363574

Update app.py

Files changed (1)
  1. app.py +13 -8
app.py CHANGED
@@ -1,10 +1,12 @@
 from langchain.prompts import PromptTemplate
 from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
 from langchain_community.chat_models import ChatOllama
+from langchain_community.llms import HuggingFacePipeline
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
 from langchain_core.tools import Tool
 from langchain_google_community import GoogleSearchAPIWrapper
 from firecrawl import FirecrawlApp
+from transformers import pipeline
 
 import gradio as gr
 import os
@@ -21,14 +23,17 @@ os.environ["GOOGLE_CSE_ID"] = os.getenv('GOOGLE_CSE_ID')
 os.environ["GOOGLE_API_KEY"] = os.getenv('GOOGLE_API_KEY')
 
 # Llama Endpoint
-llm = HuggingFaceEndpoint(
-    # endpoint_url = os.getenv('HF_ENDPOINT'),
-    repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
-    task="text-generation",
-    max_new_tokens=4000,
-    do_sample=False,
-    repetition_penalty=1.03
-)
+# llm = HuggingFaceEndpoint(
+#     # endpoint_url = os.getenv('HF_ENDPOINT'),
+#     repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
+#     task="text-generation",
+#     max_new_tokens=4000,
+#     do_sample=False,
+#     repetition_penalty=1.03
+# )
+
+pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-8B-Instruct")
+llm = HuggingFacePipeline(pipeline=pipe)
 
 llama3 = ChatHuggingFace(llm=llm, temperature = 1)
 llama3_json = ChatHuggingFace(llm=llm, format = 'json', temperature = 0)
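For orientation, the change above drops the remote HuggingFaceEndpoint in favour of a transformers pipeline loaded locally and wrapped in HuggingFacePipeline. The sketch below (not part of the commit) shows one way such a pipeline-backed chat model is typically built and invoked; it imports HuggingFacePipeline from langchain_huggingface rather than langchain_community, and the max_new_tokens value, explicit model_id, and prompt are illustrative assumptions.

from transformers import pipeline
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline

# Meta-Llama-3.1-8B-Instruct is a gated repo, so this assumes an authorized
# Hugging Face token and enough memory to hold an 8B model locally.
pipe = pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    max_new_tokens=256,  # illustrative value, not taken from app.py
)

# Wrap the transformers pipeline as a LangChain LLM; passing model_id lets
# ChatHuggingFace load the matching tokenizer and chat template.
llm = HuggingFacePipeline(pipeline=pipe, model_id="meta-llama/Meta-Llama-3.1-8B-Instruct")
chat = ChatHuggingFace(llm=llm)

# ChatHuggingFace applies the model's chat template before generating.
print(chat.invoke("Give a one-sentence summary of LangChain.").content)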