matthewfarant committed
Commit f559c3d
1 Parent(s): a3f2e55

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -27,12 +27,10 @@ llm = HuggingFaceEndpoint(
     do_sample=False,
     repetition_penalty=1.03,
 )
-
 llama3 = ChatHuggingFace(llm=llm, temperature = 1)
 llama3_json = ChatHuggingFace(llm=llm, format = 'json', temperature = 0)
 
 google_search = GoogleSearchAPIWrapper()
-
 firecrawl_app = FirecrawlApp(api_key=os.getenv('FIRECRAWL_KEY'))
 
 
@@ -132,7 +130,16 @@ generate_chain = generate_prompt | llama3_json | JsonOutputParser()
 
 # Full Flow Function
 def fact_check_flow(user_question):
-    keyword = []
+    # Step 1 : Initializing
+    llm = HuggingFaceEndpoint(
+        repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
+        task="text-generation",
+        max_new_tokens=4000,
+        do_sample=False,
+        repetition_penalty=1.03,
+    )
+    llama3 = ChatHuggingFace(llm=llm, temperature = 1)
+    llama3_json = ChatHuggingFace(llm=llm, format = 'json', temperature = 0)
 
     # Step 2: Transform question into search query keyword
     keyword = query_chain.invoke({"question": user_question})["query"]
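
For context, a minimal sketch of how the top of fact_check_flow reads once this commit is applied. The langchain_huggingface import path is an assumption (app.py's import block is not part of the diff); the function body mirrors the added lines verbatim, and query_chain is defined earlier in app.py, outside this diff.

# Sketch only: the beginning of fact_check_flow after this commit.
# The import below is an assumption -- the diff does not show app.py's imports.
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

def fact_check_flow(user_question):
    # Step 1: Initializing -- fresh endpoint and chat models are built on every call,
    # replacing the old `keyword = []` placeholder (arguments copied verbatim from the diff).
    llm = HuggingFaceEndpoint(
        repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
        task="text-generation",
        max_new_tokens=4000,
        do_sample=False,
        repetition_penalty=1.03,
    )
    llama3 = ChatHuggingFace(llm=llm, temperature=1)
    llama3_json = ChatHuggingFace(llm=llm, format="json", temperature=0)

    # Step 2: Transform question into search query keyword (unchanged context line;
    # query_chain is defined earlier in app.py, outside this diff):
    #   keyword = query_chain.invoke({"question": user_question})["query"]

Note that these added lines duplicate the module-level llm / llama3 / llama3_json setup shown in the first hunk, so each call to fact_check_flow now constructs its own Hugging Face endpoint clients rather than reusing the globals.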