smakamali committed
Commit 610c69b
1 Parent(s): c077c1c

initial commit

Files changed (2)
  1. app.py +230 -0
  2. requirements.txt +282 -0
app.py ADDED
@@ -0,0 +1,230 @@
+ def transcribe_youtube_video(url, force_transcribe=False):
+
+     import pytube
+     from pytube import YouTube
+
+     text = ''
+     yt = YouTube(str(url))
+
+     # First try to fetch an existing transcript for the video.
+     try:
+         from youtube_transcript_api import YouTubeTranscriptApi
+
+         vid_id = pytube.extract.video_id(url)
+         temp = YouTubeTranscriptApi.get_transcript(vid_id)
+         for t in temp:
+             text += t['text'] + ' '
+     except Exception:
+         # No transcript available; fall back to Whisper below.
+         pass
+
+     # Transcribe the audio with Whisper if no transcript was found
+     # or if transcription was explicitly requested.
+     if text == '' or force_transcribe:
+         import torch
+         import transformers
+
+         save_dir = "./docs/youtube/"
+         audio = yt.streams.filter(only_audio=True).first()
+         out_file = audio.download(filename="audio.mp3", output_path=save_dir)
+
+         whisper_asr = transformers.pipeline(
+             "automatic-speech-recognition", model="openai/whisper-large", device_map='auto',
+         )
+
+         # Force English transcription.
+         whisper_asr.model.config.forced_decoder_ids = (
+             whisper_asr.tokenizer.get_decoder_prompt_ids(
+                 language="en",
+                 task="transcribe",
+             )
+         )
+         temp = whisper_asr(out_file, chunk_length_s=20)
+         text = temp['text']
+
+         del whisper_asr
+         torch.cuda.empty_cache()
+
+     return yt.title, text
+
+ def summarize_text(title, text):
+
+     import torch
+     import transformers
+     from transformers import BitsAndBytesConfig
+     from transformers import AutoTokenizer, AutoModelForCausalLM
+     from langchain import HuggingFacePipeline
+     from langchain.chains.llm import LLMChain
+     from langchain.prompts import PromptTemplate
+     from langchain.chains import ReduceDocumentsChain, MapReduceDocumentsChain
+     from langchain.chains.combine_documents.stuff import StuffDocumentsChain
+
+     # Load the summarization model in 4-bit to fit on a single GPU.
+     quantization_config = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_compute_dtype=torch.float16,
+         bnb_4bit_quant_type="nf4",
+         bnb_4bit_use_double_quant=True,
+     )
+
+     # model = "nomic-ai/gpt4all-falcon"
+     model = "tiiuae/falcon-7b-instruct"
+
+     tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)
+     model = AutoModelForCausalLM.from_pretrained(
+         model,
+         # trust_remote_code=True,
+         quantization_config=quantization_config,
+     )
+
+     # Generation pipeline for the map and collapse steps (shorter outputs).
+     pipeline = transformers.pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         max_new_tokens=150,
+         pad_token_id=tokenizer.eos_token_id,
+         # device=-1,
+     )
+
+     llm = HuggingFacePipeline(pipeline=pipeline)
+
+     # Generation pipeline for the final summary (longer output, less repetition).
+     pipeline2 = transformers.pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         max_new_tokens=250,
+         pad_token_id=tokenizer.eos_token_id,
+         repetition_penalty=2.0,
+         # device=-1,
+     )
+
+     llm2 = HuggingFacePipeline(pipeline=pipeline2)
+
+     # Map: summarize each transcript chunk.
+     map_template = """
+     Summarize the following text in a clear and concise way:
+     TITLE: `{title}`
+     TEXT: `{docs}`
+     Brief Summary:
+     """
+     map_prompt = PromptTemplate(template=map_template,
+                                 input_variables=['title', 'docs'])
+     map_chain = LLMChain(llm=llm, prompt=map_prompt)
+
+     # Reduce - Collapse: merge partial summaries when they exceed the context window.
+     reduce_template = """
+     The following is a set of partial summaries of a video titled {title}:
+     partial summaries: {doc_summaries}
+     Take these and distill them into a consolidated summary.
+     Summary:
+     """
+     reduce_prompt = PromptTemplate(template=reduce_template,
+                                    input_variables=['title', 'doc_summaries'])
+     reduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)
+
+     # Takes a list of documents, combines them into a single string, and passes this to an LLMChain.
+     collapse_documents_chain = StuffDocumentsChain(
+         llm_chain=reduce_chain, document_variable_name="doc_summaries"
+     )
+
+     # Final Reduce - Combine: produce the structured summary shown to the user.
+     final_reduce_template = """
+     The following is a set of partial summaries of a video titled '{title}':
+     partial summaries:
+
+     {doc_summaries}
+
+     Generate a summary of the whole text that includes `Video Subject`, and the `Key Highlights` as at most 10 bullet points listing the main facts, arguments, or points:
+     """
+     final_reduce_prompt = PromptTemplate(template=final_reduce_template,
+                                          input_variables=['title', 'doc_summaries'])
+     final_reduce_chain = LLMChain(llm=llm2, prompt=final_reduce_prompt)
+
+     # Takes a list of documents, combines them into a single string, and passes this to an LLMChain.
+     combine_documents_chain = StuffDocumentsChain(
+         llm_chain=final_reduce_chain, document_variable_name="doc_summaries"
+     )
+
+     # Combines and iteratively reduces the mapped documents.
+     reduce_documents_chain = ReduceDocumentsChain(
+         # This is the final chain that is called.
+         combine_documents_chain=combine_documents_chain,
+         # Used if documents exceed the context for `StuffDocumentsChain`.
+         collapse_documents_chain=collapse_documents_chain,
+         # The maximum number of tokens to group documents into.
+         token_max=500,
+     )
+
+     # Combine documents by mapping a chain over them, then combining the results.
+     map_reduce_chain = MapReduceDocumentsChain(
+         # Map chain
+         llm_chain=map_chain,
+         # Reduce chain
+         reduce_documents_chain=reduce_documents_chain,
+         # The variable name in the llm_chain to put the documents in
+         document_variable_name="docs",
+         # Do not return the results of the map steps in the output
+         return_intermediate_steps=False,
+     )
+
+     from langchain.document_loaders import TextLoader
+     from langchain.text_splitter import TokenTextSplitter
+
+     # Write the transcript to disk, reload it as a document, and split it
+     # into 500-token chunks for the map step.
+     with open('./transcript.txt', 'w') as f:
+         f.write(text)
+     loader = TextLoader("./transcript.txt")
+     doc = loader.load()
+     text_splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=0)
+     docs = text_splitter.split_documents(doc)
+
+     summary = map_reduce_chain.run({'input_documents': docs, 'title': title})
+
+     # Free GPU memory before returning.
+     del llm
+     del llm2
+     del model
+     del tokenizer
+     torch.cuda.empty_cache()
+
+     return summary
+
+ import gradio as gr
+ import pytube
+ from pytube import YouTube
+
+ def get_youtube_title(url):
+     yt = YouTube(str(url))
+     return yt.title
+
+ def get_video(url):
+     # Build an embeddable player for the requested video.
+     vid_id = pytube.extract.video_id(url)
+     embed_html = '<iframe width="100%" height="315" src="https://www.youtube.com/embed/{}" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'.format(vid_id)
+     return embed_html
+
+ def summarize_youtube_video(url, force_transcribe):
+     title, text = transcribe_youtube_video(url, force_transcribe)
+     summary = summarize_text(title, text)
+     return summary
+
+ # Placeholder player shown before a video is loaded.
+ html = '<iframe width="100%" height="315" src="https://www.youtube.com/embed/" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
+
+ with gr.Blocks() as demo:
+     # gr.Markdown("Transcribe a YouTube video using this demo.")
+     with gr.Row():
+         with gr.Column(scale=3):
+             url = gr.Textbox(label="Enter YouTube video URL here:", placeholder="https://www.youtube.com/watch?v=")
+             force_transcribe = gr.Checkbox(label="Transcribe even if transcription is available.")
+         with gr.Column(scale=1):
+             gr.Markdown("# Summarize a YouTube video using this demo!", scale=2)
+             sum_btn = gr.Button("Summarize!", scale=1)
+     title = gr.Textbox(label="Video Title", placeholder="title...")
+     with gr.Row():
+         video = gr.HTML(html)
+         output = gr.Textbox(label="Summary", placeholder="summary...")
+     sum_btn.click(fn=get_youtube_title, inputs=url, outputs=title, api_name="get_youtube_title")
+     sum_btn.click(fn=summarize_youtube_video, inputs=[url, force_transcribe], outputs=output, api_name="summarize_youtube_video", queue=True)
+     sum_btn.click(fn=get_video, inputs=url, outputs=video, api_name="get_youtube_video", queue=False)
+
+ demo.queue()
+ demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,282 @@
+ # This file may be used to create an environment using:
+ # $ conda create --name <env> --file <this file>
+ # platform: win-64
+ abseil-cpp=20211102.0=h0e60522_0
+ accelerate=0.22.0=pypi_0
+ aiofiles=23.2.1=pypi_0
+ aiohttp=3.8.3=py38h2bbff1b_0
+ aiosignal=1.3.1=pyhd8ed1ab_0
+ altair=5.1.1=pypi_0
+ anyio=3.7.1=pypi_0
+ arrow-cpp=11.0.0=ha81ea56_2
+ astroid=2.15.8=pypi_0
+ asttokens=2.2.1=pyhd8ed1ab_0
+ async-timeout=4.0.3=pyhd8ed1ab_0
+ attrs=23.1.0=pyh71513ae_1
+ audioread=3.0.1=pypi_0
+ aws-c-common=0.6.8=h2bbff1b_1
+ aws-c-event-stream=0.1.6=hd77b12b_6
+ aws-checksums=0.1.11=h2bbff1b_2
+ aws-sdk-cpp=1.8.185=hd77b12b_1
+ backcall=0.2.0=pyh9f0ad1d_0
+ backports=1.0=pyhd8ed1ab_3
+ backports.functools_lru_cache=1.6.5=pyhd8ed1ab_0
+ bitsandbytes=0.41.1=pypi_0
+ blas=1.0=mkl
+ boost-cpp=1.78.0=h5b4e17d_0
+ bottleneck=1.3.5=py38h080aedc_0
+ brotlipy=0.7.0=py38h2bbff1b_1003
+ bzip2=1.0.8=h8ffe710_4
+ c-ares=1.19.0=h2bbff1b_0
+ ca-certificates=2023.7.22=h56e8100_0
+ cachetools=5.3.1=pyhd8ed1ab_0
+ certifi=2023.7.22=pyhd8ed1ab_0
+ cffi=1.15.1=py38h2bbff1b_3
+ charset-normalizer=2.0.4=pyhd3eb1b0_0
+ click=8.1.7=win_pyh7428d3b_0
+ colorama=0.4.6=pyhd8ed1ab_0
+ comm=0.1.4=pyhd8ed1ab_0
+ contourpy=1.1.1=pypi_0
+ cryptography=41.0.2=py38hac1b9e3_0
+ cuda-cccl=12.2.140=0
+ cuda-cudart=11.8.89=0
+ cuda-cudart-dev=11.8.89=0
+ cuda-cupti=11.8.87=0
+ cuda-libraries=11.8.0=0
+ cuda-libraries-dev=11.8.0=0
+ cuda-nvrtc=11.8.89=0
+ cuda-nvrtc-dev=11.8.89=0
+ cuda-nvtx=11.8.86=0
+ cuda-profiler-api=12.2.140=0
+ cuda-runtime=11.8.0=0
+ cycler=0.12.0=pypi_0
+ dataclasses=0.8=pyhc8e2a94_3
+ dataclasses-json=0.5.7=pyhd8ed1ab_0
+ datasets=2.14.4=pyhd8ed1ab_0
+ debugpy=1.6.7=py38hd77b12b_0
+ decorator=5.1.1=pyhd8ed1ab_0
+ dill=0.3.7=pyhd8ed1ab_0
+ einops=0.6.1=pypi_0
+ exceptiongroup=1.1.3=pypi_0
+ executing=1.2.0=pyhd8ed1ab_0
+ faiss=1.7.4=py38h7a05997_0_cpu
+ fastapi=0.103.2=pypi_0
+ ffmpeg=4.3.1=ha925a31_0
+ ffmpeg-python=0.2.0=pypi_0
+ ffmpy=0.3.1=pypi_0
+ filelock=3.9.0=py38haa95532_0
+ fonttools=4.43.0=pypi_0
+ freetype=2.12.1=ha860e81_0
+ frozenlist=1.3.3=py38h2bbff1b_0
+ fsspec=2023.6.0=pyh1a96a4e_0
+ future=0.18.3=pypi_0
+ gflags=2.2.2=ha925a31_1004
+ giflib=5.2.1=h8cc25b3_3
+ glog=0.5.0=h4797de2_0
+ google-api-core=2.11.1=pyhd8ed1ab_0
+ google-auth=2.22.0=pyh1a96a4e_0
+ googleapis-common-protos=1.60.0=pyhd8ed1ab_0
+ gpt4all=1.0.9=pypi_0
+ gradio=3.45.2=pypi_0
+ gradio-client=0.5.3=pypi_0
+ greenlet=2.0.1=py38hd77b12b_0
+ grpc-cpp=1.48.2=hfe90ff0_1
+ h11=0.14.0=pypi_0
+ httpcore=0.18.0=pypi_0
+ httpx=0.25.0=pypi_0
+ huggingface_hub=0.16.4=pyhd8ed1ab_0
+ idna=3.4=py38haa95532_0
+ importlib-metadata=6.8.0=pyha770c72_0
+ importlib-resources=6.1.0=pypi_0
+ importlib_metadata=6.8.0=hd8ed1ab_0
+ intel-openmp=2023.1.0=h59b6b97_46319
+ ipykernel=6.25.1=pyh6817e22_0
+ ipython=8.12.0=pyh08f2357_0
+ isort=5.12.0=pypi_0
+ jedi=0.19.0=pyhd8ed1ab_0
+ jinja2=3.1.2=py38haa95532_0
+ joblib=1.3.2=pyhd8ed1ab_0
+ jpeg=9e=h2bbff1b_1
+ jsonschema=4.19.1=pypi_0
+ jsonschema-specifications=2023.7.1=pypi_0
+ jupyter_client=8.3.1=pyhd8ed1ab_0
+ jupyter_core=5.3.1=py38haa244fe_0
+ kiwisolver=1.4.5=pypi_0
+ langchain=0.0.277=pyhd8ed1ab_0
+ langsmith=0.0.27=pyhd8ed1ab_0
+ lazy-loader=0.3=pypi_0
+ lazy-object-proxy=1.9.0=pypi_0
+ lerc=3.0=hd77b12b_0
+ libblas=3.9.0=1_h8933c1f_netlib
+ libbrotlicommon=1.0.9=h8ffe710_7
+ libbrotlidec=1.0.9=h8ffe710_7
+ libbrotlienc=1.0.9=h8ffe710_7
+ libcublas=11.11.3.6=0
+ libcublas-dev=11.11.3.6=0
+ libcufft=10.9.0.58=0
+ libcufft-dev=10.9.0.58=0
+ libcurand=10.3.3.141=0
+ libcurand-dev=10.3.3.141=0
+ libcurl=8.1.1=h86230a5_0
+ libcusolver=11.4.1.48=0
+ libcusolver-dev=11.4.1.48=0
+ libcusparse=11.7.5.86=0
+ libcusparse-dev=11.7.5.86=0
+ libdeflate=1.17=h2bbff1b_0
+ libevent=2.1.10=h91dae50_4
+ libfaiss=1.7.4=hba6d9cf_0_cpu
+ libfaiss-avx2=1.7.4=h1234567_0_cpu
+ libffi=3.4.4=hd77b12b_0
+ liblapack=3.9.0=5_hd5c7e75_netlib
+ libnpp=11.8.0.86=0
+ libnpp-dev=11.8.0.86=0
+ libnvjpeg=11.9.0.86=0
+ libnvjpeg-dev=11.9.0.86=0
+ libpng=1.6.39=h8cc25b3_0
+ libprotobuf=3.20.3=h23ce68f_0
+ librosa=0.10.1=pypi_0
+ libsodium=1.0.18=h8d14728_1
+ libssh2=1.10.0=he2ea4bf_2
+ libthrift=0.15.0=h4364b78_2
+ libtiff=4.5.0=h6c2663c_2
+ libuv=1.44.2=h2bbff1b_0
+ libwebp=1.2.4=hbc33d0d_1
+ libwebp-base=1.2.4=h2bbff1b_1
+ llvmlite=0.41.0=pypi_0
+ lz4-c=1.9.4=h2bbff1b_0
+ m2w64-gcc-libgfortran=5.3.0=6
+ m2w64-gcc-libs=5.3.0=7
+ m2w64-gcc-libs-core=5.3.0=7
+ m2w64-gmp=6.1.0=2
+ m2w64-libwinpthread-git=5.0.0.4634.697f757=2
+ markupsafe=2.1.1=py38h2bbff1b_0
+ marshmallow=3.20.1=pyhd8ed1ab_0
+ marshmallow-enum=1.5.1=pyh9f0ad1d_3
+ matplotlib=3.7.3=pypi_0
+ matplotlib-inline=0.1.6=pyhd8ed1ab_0
+ mccabe=0.7.0=pypi_0
+ mkl=2023.1.0=h6b88ed4_46357
+ mkl-service=2.4.0=py38h2bbff1b_1
+ mkl_fft=1.3.6=py38hf11a4ad_1
+ mkl_random=1.2.2=py38hf11a4ad_1
+ mpmath=1.3.0=py38haa95532_0
+ msgpack=1.0.7=pypi_0
+ msys2-conda-epoch=20160418=1
+ multidict=6.0.2=py38h2bbff1b_0
+ multiprocess=0.70.15=py38haa95532_0
+ mypy_extensions=1.0.0=pyha770c72_0
+ nest-asyncio=1.5.6=pyhd8ed1ab_0
+ networkx=2.8.8=pyhd8ed1ab_0
+ nltk=3.8.1=pypi_0
+ numba=0.58.0=pypi_0
+ numexpr=2.8.4=py38h7b80656_1
+ numpy=1.24.3=py38h79a8e48_1
+ numpy-base=1.24.3=py38h8a87ada_1
+ openapi-schema-pydantic=1.2.4=pyhd8ed1ab_0
+ openssl=3.1.2=hcfcfb64_0
+ orc=1.7.4=h623e30f_1
+ orjson=3.9.7=pypi_0
+ packaging=23.1=pyhd8ed1ab_0
+ pandas=2.0.3=py38h4ed8f06_0
+ parso=0.8.3=pyhd8ed1ab_0
+ pickleshare=0.7.5=py_1003
+ pillow=9.4.0=py38hd77b12b_0
+ pip=23.2.1=py38haa95532_0
+ pkgutil-resolve-name=1.3.10=pypi_0
+ platformdirs=3.10.0=py38haa95532_0
+ pooch=1.7.0=pypi_0
+ prompt-toolkit=3.0.39=pyha770c72_0
+ prompt_toolkit=3.0.39=hd8ed1ab_0
+ protobuf=3.20.3=py38haa244fe_1
+ psutil=5.9.0=py38h2bbff1b_0
+ pure_eval=0.2.2=pyhd8ed1ab_0
+ pyarrow=11.0.0=py38h790e06d_1
+ pyasn1=0.4.8=py_0
+ pyasn1-modules=0.2.7=py_0
+ pycparser=2.21=pyhd3eb1b0_0
+ pydantic=1.10.8=py38h2bbff1b_0
+ pydub=0.25.1=pypi_0
+ pygments=2.16.1=pyhd8ed1ab_0
+ pylint=2.17.6=pypi_0
+ pyopenssl=23.2.0=py38haa95532_0
+ pyparsing=3.1.1=pypi_0
+ pysocks=1.7.1=py38haa95532_0
+ python=3.8.17=h1aa4202_0
+ python-dateutil=2.8.2=pyhd8ed1ab_0
+ python-multipart=0.0.6=pypi_0
+ python-tzdata=2023.3=pyhd8ed1ab_0
+ python-xxhash=2.0.2=py38h2bbff1b_1
+ python_abi=3.8=2_cp38
+ pytorch=2.0.1=py3.8_cuda11.8_cudnn8_0
+ pytorch-cuda=11.8=h24eeafa_5
+ pytorch-mutex=1.0=cuda
+ pytube=15.0.0=pypi_0
+ pytz=2023.3=pyhd8ed1ab_0
+ pyu2f=0.1.5=pyhd8ed1ab_0
+ pywin32=305=py38h2bbff1b_0
+ pyyaml=6.0=py38h294d835_4
+ pyzmq=23.2.1=py38h09162b1_0
+ re2=2022.04.01=h0e60522_0
+ referencing=0.30.2=pypi_0
+ regex=2022.7.9=py38h2bbff1b_0
+ requests=2.31.0=py38haa95532_0
+ rpds-py=0.10.3=pypi_0
+ rsa=4.9=pyhd8ed1ab_0
+ sacremoses=0.0.53=pyhd8ed1ab_0
+ safetensors=0.3.2=py38h062c2fa_0
+ scikit-learn=1.3.0=pypi_0
+ scipy=1.10.1=pypi_0
+ semantic-version=2.10.0=pypi_0
+ sentence-transformers=2.2.2=pypi_0
+ sentencepiece=0.1.99=pypi_0
+ setuptools=68.0.0=py38haa95532_0
+ six=1.16.0=pyh6c4a22f_0
+ snappy=1.1.9=h6c2663c_0
+ sniffio=1.3.0=pypi_0
+ soundfile=0.12.1=pypi_0
+ soxr=0.3.6=pypi_0
+ sqlalchemy=1.4.39=py38h2bbff1b_0
+ sqlite=3.41.2=h2bbff1b_0
+ stack_data=0.6.2=pyhd8ed1ab_0
+ starlette=0.27.0=pypi_0
+ stringcase=1.2.0=py_0
+ sympy=1.12=pyh04b8f61_3
+ tbb=2021.8.0=h59b6b97_0
+ tenacity=8.2.3=pyhd8ed1ab_0
+ threadpoolctl=3.2.0=pypi_0
+ tiktoken=0.4.0=pypi_0
+ tk=8.6.12=h2bbff1b_0
+ tokenizers=0.13.2=py38h49fca51_1
+ tomli=2.0.1=pypi_0
+ tomlkit=0.12.1=pypi_0
+ toolz=0.12.0=pypi_0
+ torchaudio=2.0.2=pypi_0
+ torchvision=0.15.2=pypi_0
+ tornado=6.2=py38h294d835_0
+ tqdm=4.66.1=pyhd8ed1ab_0
+ traitlets=5.9.0=pyhd8ed1ab_0
+ transformers=4.33.3=pypi_0
+ typing-extensions=4.7.1=py38haa95532_0
+ typing_extensions=4.7.1=py38haa95532_0
+ typing_inspect=0.9.0=pyhd8ed1ab_0
+ ucrt=10.0.22621.0=h57928b3_0
+ urllib3=1.26.16=py38haa95532_0
+ utf8proc=2.6.1=h2bbff1b_0
+ uvicorn=0.23.2=pypi_0
+ vc=14.2=h21ff451_1
+ vc14_runtime=14.36.32532=hfdfe4a8_17
+ vs2015_runtime=14.36.32532=h05e6639_17
+ wcwidth=0.2.6=pyhd8ed1ab_0
+ websockets=11.0.3=pypi_0
+ wheel=0.38.4=py38haa95532_0
+ win_inet_pton=1.1.0=py38haa95532_0
+ wrapt=1.15.0=pypi_0
+ xxhash=0.8.0=h2bbff1b_3
+ xz=5.4.2=h8cc25b3_0
+ yaml=0.2.5=h8ffe710_2
+ yarl=1.7.2=py38h294d835_2
+ youtube-transcript-api=0.6.1=pypi_0
+ zeromq=4.3.4=h0e60522_1
+ zipp=3.16.2=pyhd8ed1ab_0
+ zlib=1.2.13=h8cc25b3_0
+ zstd=1.5.5=hd43e919_0