"""Summarize a web page with a HuggingFace-hosted LLM via LangChain.

Fetches https://www.usinoip.com/, runs a LangChain "stuff" summarization
chain over the loaded documents, and prints the summary to stdout.

Configuration comes from the environment (loaded from a `.env` file by
python-dotenv):
    HUGGINGFACEHUB_API_TOKEN  -- HuggingFace Hub API token
    repo_id                   -- model repository id to use for inference
"""
import os
import sys

import requests  # noqa: F401  (kept: may be used elsewhere / by callers)
from bs4 import BeautifulSoup  # noqa: F401  (kept: WebBaseLoader dependency)
from dotenv import load_dotenv

# Populate os.environ from .env before reading any configuration.
load_dotenv()

from langchain import HuggingFaceHub
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI  # noqa: F401  (alternative backend)
from langchain.document_loaders import WebBaseLoader


def main():
    """Build the LLM, load the target page, and print its summary."""
    hf_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
    repo_id = os.environ.get('repo_id')

    # Fail fast with a clear message instead of an opaque API error later.
    if not hf_token or not repo_id:
        sys.exit("error: HUGGINGFACEHUB_API_TOKEN and repo_id must be set "
                 "in the environment or a .env file")

    # Generation parameters tuned for Llama2-style models.
    # NOTE(review): eos_token_id=49155 matches the StarChat tokenizer —
    # confirm it is correct for the configured repo_id.
    llm = HuggingFaceHub(
        repo_id=repo_id,
        huggingfacehub_api_token=hf_token,
        model_kwargs={
            "min_length": 1024,
            "max_new_tokens": 3072,
            "do_sample": True,
            "temperature": 0.1,
            "top_k": 50,
            "top_p": 0.95,
            "eos_token_id": 49155,
        },
    )

    # Load the page into LangChain Document objects and summarize them
    # with the simple "stuff" strategy (all docs in one prompt).
    loader = WebBaseLoader("https://www.usinoip.com/")
    docs = loader.load()
    chain = load_summarize_chain(llm, chain_type="stuff")
    result = chain.run(docs)
    print(result)


if __name__ == "__main__":
    main()