GenAICoder committed (verified)
Commit 4757e3a
1 Parent(s): 7c5299e

Update app.py

Files changed (1): app.py +6 -2
app.py CHANGED
@@ -15,9 +15,13 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
 #from transformers import pipeline
 # Load model directly
 #from transformers import AutoModelForCausalLM
+from getpass import getpass
 
+HUGGINGFACEHUB_API_TOKEN = getpass()
 
-access_token = os.getenv("HUGGINGFACE_API_KEY")
+
+os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
+#access_token = os.getenv("HUGGINGFACE_API_KEY")
 
 
 
@@ -89,7 +93,7 @@ def get_conversational_chain(retriever):
 #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
 #repo_id= 'nvidia/Llama3-ChatQA-1.5-8B'
 repo_id= 'google/gemma-1.1-2b-it'
-llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = access_token)
+llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = HUGGINGFACEHUB_API_TOKEN)
 #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
 #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
 #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)