awacke1 committed
Commit b517657
• 1 Parent(s): be12ecd

Create backup-1-app.py

Files changed (1)
  1. backup-1-app.py +297 -0
backup-1-app.py ADDED
@@ -0,0 +1,297 @@
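"""Streamlit app that bulk-downloads a web page and the files it links to
into a per-URL subdirectory, then offers in-browser editing, zipping, and
download links for the results."""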
import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json
import uuid
import glob
import zipfile

EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
URLS = {
    "Lumiere": "https://lumiere-video.github.io/",
    "National Library of Medicine": "https://www.nlm.nih.gov/",
    "World Health Organization": "https://www.who.int/",
    "UHCProvider - United Health and Optum": "https://www.uhcprovider.com/",
    "CMS - Centers for Medicare & Medicaid Services": "https://www.cms.gov/",
    "Mayo Clinic": "https://www.mayoclinic.org/",
    "WebMD": "https://www.webmd.com/",
    "MedlinePlus": "https://medlineplus.gov/",
    "Healthline": "https://www.healthline.com/",
    "CDC - Centers for Disease Control and Prevention": "https://www.cdc.gov/",
    "Johns Hopkins Medicine": "https://www.hopkinsmedicine.org/"
}

# Create the history file on first run so later reads never fail.
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)
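# history.json maps each downloaded URL to an md5-named subdirectory, e.g.
# (the hash below is illustrative, not a real digest):
#   {"https://www.cdc.gov/": "0b7d2f3e9a1c4d5e6f708192a3b4c5d6"}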


def zip_subdirs(start_dir):
    # Walk start_dir and produce one zip archive per subdirectory.
    for subdir, dirs, files in os.walk(start_dir):
        if subdir != start_dir:  # Skip the root directory
            zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
            all_file_summary = ""
            with zipfile.ZipFile(zip_filename, 'w') as zipf:
                for file in files:
                    file_path = os.path.join(subdir, file)
                    zipf.write(file_path, os.path.relpath(file_path, start_dir))
                    all_file_summary += f"Added: {file_path}\n"
            st.write(all_file_summary)
            yield zip_filename
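# Usage sketch: the generator yields one zip path per folder, so a caller can
# surface each archive as it is built, mirroring the Download All button below:
#   for zf in zip_subdirs('.'):
#       st.sidebar.markdown(get_zip_download_link(zf), unsafe_allow_html=True)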


@st.cache_resource
def create_zip_of_files(files):
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name


@st.cache_resource
def get_zip_download_link(zip_file):
    # Embed the zip as a base64 data URI so it can be served as an HTML link.
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    link_name = os.path.basename(zip_file)
    href = f'<a href="data:application/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
    return href
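# Note: base64 inflates the payload by roughly a third and keeps the whole
# archive inline in the page, so this link style suits small zips best.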


def download_file(url, local_filename):
    # Stream the response to disk in 8 KB chunks; non-HTTP(S) URLs are ignored.
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")
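# Example (illustrative URL): download_file("https://www.cdc.gov/index.html",
# "subdir/index.html") returns the local path on success, and None on an HTTP
# error or a non-HTTP(S) URL.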


def download_html_and_files(url, subdir):
    # Fetch the page, download every linked file into subdir, rewrite the
    # page's links to the local copies, and save the result as index.html.
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    # Reduce the URL to scheme://netloc, e.g. "https://www.cdc.gov/flu/" -> "https://www.cdc.gov".
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))

    for link in soup.find_all('a'):
        file_url = urllib.parse.urljoin(base_url, link.get('href'))
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])

        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)

    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))


def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]


def file_editor(file_path):
    st.write(f"Editing File: {os.path.basename(file_path)}")

    with open(file_path, "r") as f:
        file_content = f.read()

    file_content = st.text_area("Edit the file content:", value=file_content, height=250)

    if st.button("💾 Save"):
        with open(file_path, "w") as f:
            f.write(file_content)
        st.success(f"File '{os.path.basename(file_path)}' saved!")
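# file_editor is reached through a query parameter (?file_to_edit=<path>),
# handled at the top of main() below.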


def show_file_operations(file_path, sequence_number):
    # Hash the path so each file's widgets get stable, collision-free keys.
    unique_key = hashlib.md5(file_path.encode()).hexdigest()
    file_content = ""

    col01, col02, col1, col2, col3 = st.columns(5)
    with col01:
        st.write(os.path.basename(file_path))
    with col1:
        edit_key = f"edit_{unique_key}_{sequence_number}"
        if st.button("✏️ Edit", key=edit_key):
            with open(file_path, "r") as f:
                file_content = f.read()
            text_area_key = f"text_area_{unique_key}_{sequence_number}"
            file_content = st.text_area("Edit the file content:", value=file_content, height=250, key=text_area_key)

    with col2:
        save_key = f"save_{unique_key}_{sequence_number}"
        if st.button("💾 Save", key=save_key):
            if file_content:  # Ensure file_content is not empty
                with open(file_path, "w") as f:
                    f.write(file_content)
                st.success("File saved!")

    with col3:
        delete_key = f"delete_{unique_key}_{sequence_number}"
        if st.button("🗑️ Delete", key=delete_key):
            os.remove(file_path)
            st.markdown("File deleted!")


file_sequence_numbers = {}


def show_download_links(subdir):
    global file_sequence_numbers
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        if file_path not in file_sequence_numbers:
            file_sequence_numbers[file_path] = 1
        else:
            file_sequence_numbers[file_path] += 1
        sequence_number = file_sequence_numbers[file_path]

        if os.path.isfile(file_path):
            # Showing the plain path is faster than wrapping each file in a
            # base64 download link (see get_download_link below).
            st.markdown(file_path, unsafe_allow_html=True)
            show_file_operations(file_path, sequence_number)
        else:
            st.write(f"File not found: {file}")


def get_download_link(file):
    with open(file, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:file/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Download: {os.path.basename(file)}</a>'
    return href
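# Alternative sketch, not wired in above: Streamlit's built-in download widget
# does the same job without a hand-built data URI.
#
#   with open(file, "rb") as f:
#       st.download_button(label=f"Download: {os.path.basename(file)}",
#                          data=f.read(),
#                          file_name=os.path.basename(file))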


def main():
    st.sidebar.title('🌐 Web Datasets Bulk Downloader')

    # Check the query parameters for a file-editing request.
    query_params = st.experimental_get_query_params()
    file_to_edit = query_params.get('file_to_edit', [None])[0]

    if file_to_edit and os.path.exists(file_to_edit):
        file_editor(file_to_edit)
    else:
        # Selecting URL input method
        url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"], index=1)
        url = ""
        if url_input_method == "Enter URL":
            url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
        else:
            selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()), index=0)
            url = URLS[selected_site]

        # Reading or creating history.json
        if not os.path.exists("history.json"):
            with open("history.json", "w") as f:
                json.dump({}, f)

        history = {}
        with open("history.json", "r") as f:
            try:
                history = json.load(f)
            except json.JSONDecodeError:
                print('history.json is unreadable; starting with an empty history')

        # Handling URL submission: each URL gets a subdirectory named after its md5 hash.
        if url:
            subdir = hashlib.md5(url.encode()).hexdigest()
            if not os.path.exists(subdir):
                os.makedirs(subdir)
            if url not in history:
                history[url] = subdir
                with open("history.json", "w") as f:
                    json.dump(history, f)

        if st.sidebar.button('📥 Get All the Content', help="Download content from the selected URL"):
            download_html_and_files(url, history[url])
            show_download_links(history[url])

        if st.sidebar.button('📂 Show Download Links', help="Show all available download links"):
            for subdir in history.values():
                show_download_links(subdir)

        if st.sidebar.button("🗑 Delete All", help="Delete all downloaded content"):
            # Clear the history file
            with open("history.json", "w") as f:
                json.dump({}, f)

            # Delete all files in subdirectories
            for subdir in glob.glob('*'):
                if os.path.isdir(subdir) and subdir not in EXCLUDED_FILES:
                    for file in os.listdir(subdir):
                        file_path = os.path.join(subdir, file)
                        os.remove(file_path)
                        st.write(f"Deleted: {file_path}")
                    os.rmdir(subdir)  # Remove the now-empty directory

            st.experimental_rerun()

        if st.sidebar.button("⬇️ Download All", help="Download all files in a zip"):
            start_directory = '.'  # Current directory
            for zip_file in zip_subdirs(start_directory):
                st.sidebar.markdown(zip_file, unsafe_allow_html=True)
                st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

        # Expander for showing URL history and download links
        with st.expander("URL History and Downloaded Files"):
            for url, subdir in history.items():
                st.markdown(f"#### {url}")


if __name__ == "__main__":
    main()
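# Launch locally with:
#   streamlit run backup-1-app.py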