tktkdrrrrrrrrrrr committed on
Commit
152b0fe
·
verified ·
1 Parent(s): 6554632

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +22 -0
  2. main.py +525 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

WORKDIR /code

COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# CLI tools used at runtime: rclone (encrypted uploads), aria2 (downloads),
# zip (archiving). Use apt-get (the stable scripting interface, unlike `apt`),
# skip recommended packages, and drop the package lists to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    rclone \
    aria2 \
    zip \
    && rm -rf /var/lib/apt/lists/*

# Run as a non-root user (required by Hugging Face Spaces conventions).
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

COPY --chown=user . $HOME/app

# Port 7860 is the default port exposed by Hugging Face Spaces.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import base64

# --- Restore rclone.conf from the environment ---
# The rclone config (containing the crypt password) is passed in as a
# base64-encoded secret so it never lives in the repo. It must be written
# to disk before any rclone subprocess is invoked.
rclone_conf_base64 = os.environ.get("RCLONE_CONF_BASE64")
if rclone_conf_base64:
    conf_path = os.path.expanduser("~/.config/rclone/rclone.conf")
    os.makedirs(os.path.dirname(conf_path), exist_ok=True)
    with open(conf_path, "wb") as f:
        f.write(base64.b64decode(rclone_conf_base64))
    print(f"[INFO] Created rclone.conf at {conf_path}")
else:
    print("[WARN] RCLONE_CONF_BASE64 not found; rclone may fail.")
# ------------------------------

import asyncio
import datetime
import json
import logging
import re
import shutil
import subprocess
import time
import uuid
from typing import Optional

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from fastapi import FastAPI
from huggingface_hub import HfApi, hf_hub_download, login

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
39
class Config:
    """Configuration class.

    All values are evaluated once at import time; missing
    HUGGINGFACE_API_KEY / CIVITAI_API_TOKEN environment variables raise
    KeyError immediately, which fails the app fast on misconfiguration.
    """
    HUGGINGFACE_API_KEY = os.environ["HUGGINGFACE_API_KEY"]
    CIVITAI_API_TOKEN = os.environ["CIVITAI_API_TOKEN"]
    # Local filenames mirrored to/from the HF log repos.
    LOG_FILE = "civitai_backup.log"
    LIST_FILE = "model_list.log"
    # "current" is filled in at runtime from the second line of LOG_FILE.
    REPO_IDS = {
        "log": "ttttdiva/CivitAI_log_test",
        "model_list": "ttttdiva/CivitAI_model_info_test",
        "current": ""
    }
    # CivitAI API / page endpoints.
    URLS = {
        "latest": "https://civitai.com/api/v1/models?sort=Newest",
        "modelPage": "https://civitai.com/models/",
        "modelId": "https://civitai.com/api/v1/models/",
        "modelVersionId": "https://civitai.com/api/v1/model-versions/",
        "hash": "https://civitai.com/api/v1/model-versions/by-hash/"
    }
    # Japan Standard Time (UTC+9), used for the status timestamp.
    JST = datetime.timezone(datetime.timedelta(hours=9))
    # NOTE(review): UA.random is sampled once here, so every request shares
    # one User-Agent for the process lifetime — confirm that is intended.
    UA = UserAgent()
    HEADERS = {
        'Authorization': f'Bearer {CIVITAI_API_TOKEN}',
        'User-Agent': UA.random,
        "Content-Type": "application/json"
    }
64
+
65
+
66
class CivitAICrawler:
    """Downloads models from CivitAI and uploads them to Hugging Face.

    The FastAPI app it owns serves a single status page; the actual work
    happens in the background `crawl` task started on app startup.
    """

    def __init__(self, config: Config):
        self.config = config
        self.api = HfApi()
        self.app = FastAPI()
        # Mutable copy: "current" is rewritten at runtime (from the log file
        # and on repo rollover) without touching the class-level constant.
        self.repo_ids = self.config.REPO_IDS.copy()
        self.jst = self.config.JST
        self.setup_routes()

    def setup_routes(self):
        """Register the FastAPI routes."""
        @self.app.get("/")
        def read_root():
            now = str(datetime.datetime.now(self.jst))
            # User-facing status text (Japanese) — runtime string, unchanged.
            description = f"""
            CivitAIを定期的に周回し新規モデルを {self.repo_ids['current']} にバックアップするspaceです。
            モデル一覧は https://huggingface.co/{self.repo_ids['model_list']}/blob/main/model_list.log を参照してください。
            Status: {now} + currently running :D
            """
            return description

        @self.app.on_event("startup")
        async def startup_event():
            # Launch the crawl loop as a background task when the app starts.
            asyncio.create_task(self.crawl())

    @staticmethod
    def get_filename_from_cd(content_disposition: Optional[str], default_name: str) -> str:
        """Extract a filename from a Content-Disposition header.

        Falls back to `default_name` when the header is absent or carries
        no filename parameter.
        NOTE(review): `part.split("=")[1]` truncates filenames that contain
        an '=' character — confirm acceptable for CivitAI payloads.
        """
        if content_disposition:
            parts = content_disposition.split(';')
            for part in parts:
                if "filename=" in part:
                    return part.split("=")[1].strip().strip('"')
        return default_name

    def download_file(self, url: str, destination_folder: str, default_name: str) -> Optional[str]:
        """Download `url` into `destination_folder`; return the local path, or None on HTTP failure."""
        try:
            response = requests.get(url, headers=self.config.HEADERS, stream=True)
            response.raise_for_status()
        except requests.RequestException as e:
            logger.error(f"Failed to download file from {url}: {e}")
            return None

        # Prefer the server-supplied name; fall back to default_name.
        filename = self.get_filename_from_cd(response.headers.get('content-disposition'), default_name)
        file_path = os.path.join(destination_folder, filename)

        # Stream to disk in 8 KiB chunks to bound memory use.
        with open(file_path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        logger.info(f"Downloaded: {file_path}")
        return file_path

    def get_model_info(self, model_id: str) -> dict:
        """Fetch model metadata from the CivitAI API; return {} on failure."""
        try:
            response = requests.get(self.config.URLS["modelId"] + str(model_id), headers=self.config.HEADERS)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            logger.error(f"Failed to retrieve model info for ID {model_id}: {e}")
            return {}

    def download_model_files(self, model_versions: list, folder: str):
        """Download the files of every model version (latest and older) into `folder`."""
        for version in model_versions:
            files_info = version.get("files", [])
            for file_info in files_info:
                download_url = file_info["downloadUrl"]
                file_name = file_info["name"]
                login_detected_count = 0

                while login_detected_count < 5:
                    local_path = self.download_file(download_url, folder, file_name)
                    if local_path and "login" in os.listdir(folder):
                        # Retry when a stray "login" file shows up in the
                        # download folder (CivitAI served a login page
                        # instead of the file).
                        login_detected_count += 1
                        os.remove(os.path.join(folder, "login"))
                        logger.warning(f"Detected 'login' file, retrying download: {file_name} ({login_detected_count}/5)")
                    else:
                        break

                if login_detected_count >= 5:
                    # Leave a dummy marker file recording the failed download.
                    dummy_file_path = os.path.join(folder, f"{file_name}.download_failed")
                    try:
                        with open(dummy_file_path, "w") as f:
                            f.write("Download failed after 5 attempts.")
                        logger.error(f"Failed to download {file_name}. Created dummy file: {dummy_file_path}")
                    except Exception as e:
                        logger.error(f"Failed to create dummy file for {file_name}: {e}")

    def download_images(self, model_versions: list, folder: str):
        """Download all version preview images into an `images/` subfolder of `folder`."""
        images_folder = os.path.join(folder, "images")
        os.makedirs(images_folder, exist_ok=True)

        # Collect every image URL across all versions first.
        images = []
        for version in model_versions:
            for img in version.get("images", []):
                image_url = img["url"]
                images.append(image_url)

        for image_url in images:
            image_name = os.path.basename(image_url)  # filename part of the URL
            local_path = os.path.join(images_folder, image_name)
            try:
                resp = requests.get(image_url, stream=True)
                resp.raise_for_status()
                with open(local_path, 'wb') as imgf:
                    for chunk in resp.iter_content(chunk_size=8192):
                        imgf.write(chunk)
                logger.info(f"Downloaded image: {local_path}")
            except requests.RequestException as e:
                # Best-effort: a failed image does not abort the model backup.
                logger.error(f"Failed to download image {image_url}: {e}")

    def save_html_content(self, model_page_url: str, folder: str):
        """Save the model's HTML page into `folder` as page.html (best-effort)."""
        try:
            resp = requests.get(model_page_url)
            resp.raise_for_status()
            html_path = os.path.join(folder, "page.html")
            with open(html_path, 'w', encoding='utf-8') as f:
                f.write(resp.text)
            logger.info(f"Saved HTML: {html_path}")
        except Exception as e:
            logger.error(f"Error saving HTML content from {model_page_url}: {e}")

    def save_model_info_json(self, model_info: dict, folder: str):
        """Save the model metadata as model_info.json in `folder` (best-effort)."""
        info_path = os.path.join(folder, "model_info.json")
        try:
            with open(info_path, 'w', encoding='utf-8') as f:
                json.dump(model_info, f, indent=2)
            logger.info(f"Saved model_info.json: {info_path}")
        except Exception as e:
            logger.error(f"Failed to save model info JSON: {e}")

    def encrypt_and_upload_folder(self, local_folder: str):
        """
        1. Encrypt the whole folder with rclone (folder name included).
        2. Upload the encrypted folder to Hugging Face.
        3. Delete the local copies.
        """
        if not os.path.exists(local_folder):
            logger.error(f"encrypt_and_upload_folder: folder not found: {local_folder}")
            return

        # Base path under which the encrypted output appears (e.g. /app/encrypted).
        encrypted_base_dir = "/app/encrypted"
        os.makedirs(encrypted_base_dir, exist_ok=True)

        # Snapshot /app/encrypted before running rclone (to detect the new folder).
        before_set = set(os.listdir(encrypted_base_dir))

        # Copy the folder via rclone (encrypting both file and folder names).
        # "cryptLocal:" is expected to be defined in rclone.conf roughly as:
        #   [cryptLocal]
        #   type = crypt
        #   remote = /app/encrypted
        #   filename_encryption = standard
        #   password = ****
        try:
            subprocess.run(
                ["rclone", "copy", local_folder, "cryptLocal:"],
                check=True
            )
        except subprocess.CalledProcessError as e:
            logger.error(f"rclone copy failed: {e}")
            return

        # State of /app/encrypted after rclone ran.
        after_set = set(os.listdir(encrypted_base_dir))
        # Identify the newly created encrypted folder(s).
        new_folders = after_set - before_set
        if not new_folders:
            logger.error("No new encrypted folder found. Something went wrong.")
            return

        # Normally exactly one; if several, take the first.
        # NOTE(review): set order is arbitrary — "first" is nondeterministic
        # when more than one new folder exists.
        enc_folder_name = list(new_folders)[0]
        enc_folder_path = os.path.join(encrypted_base_dir, enc_folder_name)

        # Upload the encrypted folder as-is to Hugging Face
        # => the folder name also appears encrypted on the HF side.
        try:
            # Use the same encrypted name as path_in_repo.
            self.upload_folder(enc_folder_path, path_in_repo=enc_folder_name)
            logger.info(f"Uploaded encrypted folder to HF: {enc_folder_path}")
        except Exception as e:
            logger.error(f"Failed to upload encrypted folder {enc_folder_path}: {e}")

        # Remove the local folders (plaintext and encrypted).
        try:
            shutil.rmtree(local_folder)
            shutil.rmtree(enc_folder_path)
            logger.info(f"Removed local folder: {local_folder} and encrypted folder: {enc_folder_path}")
        except Exception as e:
            logger.error(f"Failed to remove local folders: {e}")

    def upload_file(self, file_path: str, repo_id: Optional[str] = None, path_in_repo: Optional[str] = None):
        """
        Upload a single file to a Hugging Face repo with retry handling.

        (Folder-level upload is the main path; this is used for the log
        files and other individual uploads.) Retries up to 5 times; on a
        100k-file-limit error a new repo is created and the counter resets;
        on a rate-limit error it sleeps an hour without consuming a retry.
        """
        if repo_id is None:
            repo_id = self.repo_ids['current']
        if path_in_repo is None:
            path_in_repo = os.path.basename(file_path)

        max_retries = 5
        attempt = 0
        while attempt < max_retries:
            try:
                self.api.upload_file(
                    path_or_fileobj=file_path,
                    repo_id=repo_id,
                    path_in_repo=path_in_repo
                )
                logger.info(f"Uploaded file: {file_path} to {repo_id} at {path_in_repo}")
                return
            except Exception as e:
                attempt += 1
                error_message = str(e)
                if "over the limit of 100000 files" in error_message:
                    logger.warning("File limit exceeded, creating a new repo.")
                    self.repo_ids['current'] = self.increment_repo_name(self.repo_ids['current'])
                    self.api.create_repo(repo_id=self.repo_ids['current'], private=True)
                    # NOTE(review): the local `repo_id` is not refreshed here,
                    # so the retry still targets the original repo — confirm
                    # this is intended for explicitly-passed repo_ids.
                    attempt = 0
                    continue
                elif "you can retry this action in about 1 hour" in error_message:
                    logger.warning("Rate limit hit. Waiting 1 hour...")
                    time.sleep(3600)
                    # Rate-limited attempts do not count against max_retries.
                    attempt -= 1
                else:
                    if attempt < max_retries:
                        logger.warning(f"Failed to upload {file_path}, retry {attempt}/{max_retries}")
                    else:
                        logger.error(f"Failed after {max_retries} attempts: {e}")
                        raise

    def upload_folder(self, folder_path: str, path_in_repo: Optional[str] = None):
        """
        Upload a folder to the current Hugging Face repo in one call.

        Same retry policy as `upload_file`: 5 attempts, new repo + counter
        reset on the 100k-file limit, hour-long sleep on rate limiting.
        """
        if path_in_repo is None:
            path_in_repo = os.path.basename(folder_path)

        max_retries = 5
        attempt = 0
        while attempt < max_retries:
            try:
                self.api.upload_folder(
                    folder_path=folder_path,
                    repo_id=self.repo_ids['current'],
                    path_in_repo=path_in_repo
                )
                logger.info(f"Uploaded folder: {folder_path} to {self.repo_ids['current']} at {path_in_repo}")
                return
            except Exception as e:
                attempt += 1
                error_message = str(e)
                if "over the limit of 100000 files" in error_message:
                    logger.warning("File limit exceeded, creating a new repo.")
                    self.repo_ids['current'] = self.increment_repo_name(self.repo_ids['current'])
                    self.api.create_repo(repo_id=self.repo_ids['current'], private=True)
                    attempt = 0
                    continue
                elif "you can retry this action in about 1 hour" in error_message:
                    logger.warning("Rate limit hit. Waiting 1 hour...")
                    time.sleep(3600)
                    # Rate-limited attempts do not count against max_retries.
                    attempt -= 1
                else:
                    if attempt < max_retries:
                        logger.warning(f"Failed to upload folder {folder_path}, retry {attempt}/{max_retries}")
                    else:
                        logger.error(f"Failed after {max_retries} attempts: {e}")
                        raise

    @staticmethod
    def increment_repo_name(repo_id: str) -> str:
        """Increment the trailing number of a repo id (append "1" if none)."""
        match = re.search(r'(\d+)$', repo_id)
        if match:
            number = int(match.group(1)) + 1
            new_repo_id = re.sub(r'\d+$', str(number), repo_id)
        else:
            new_repo_id = f"{repo_id}1"
        return new_repo_id

    def read_model_list(self) -> dict:
        """Read the model list file; returns {hf_url: model_page_name}."""
        model_list = {}
        try:
            with open(self.config.LIST_FILE, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:
                        # Each line is "<model page name>: <HF url>".
                        parts = line.split(": ", 1)
                        if len(parts) == 2:
                            modelpage_name, model_hf_url = parts
                            model_list[model_hf_url] = modelpage_name
        except Exception as e:
            logger.error(f"Failed to read model list: {e}")
        return model_list

    def get_repo_info(self, repo_id):
        """Return the list of file paths in a repo, or [] on failure."""
        try:
            repo_info = self.api.repo_info(repo_id=repo_id, files_metadata=True)
            file_paths = [sibling.rfilename for sibling in repo_info.siblings]
            return file_paths
        except Exception as e:
            logger.error(f"Failed to get repo info for {repo_id}: {e}")
            return []

    def process_model(self, model_url: str):
        """Download one model, then encrypt & upload its whole folder."""
        try:
            # Extract the model_id (last path segment of the URL).
            model_id = model_url.rstrip("/").split("/")[-1]

            # Fetch the model metadata.
            model_info = self.get_model_info(model_id)
            if not model_info or "modelVersions" not in model_info:
                logger.error(f"No valid model info for ID {model_id}. Skipping.")
                return

            # Version list.
            versions = model_info["modelVersions"]
            if not versions:
                logger.warning(f"No modelVersions found for ID {model_id}.")
                return

            # Build a working-folder name from the model name,
            # with a short UUID suffix to avoid collisions.
            folder_name = model_info.get("name", "UnknownModel")
            folder_name = re.sub(r'[\\/*?:"<>|]', '_', folder_name)  # strip characters invalid on some filesystems
            folder_name += "_" + str(uuid.uuid4())[:8]
            os.makedirs(folder_name, exist_ok=True)

            # Download the model files.
            self.download_model_files(versions, folder_name)

            # Download preview images into images/.
            self.download_images(versions, folder_name)

            # Save the model page HTML.
            model_page_url = f"{self.config.URLS['modelPage']}{model_id}"
            self.save_html_content(model_page_url, folder_name)

            # Save model_info.json.
            self.save_model_info_json(model_info, folder_name)

            # Encrypt and upload the whole folder.
            self.encrypt_and_upload_folder(folder_name)

            # Append to model_list.log. The encrypted folder's direct URL is
            # unknowable here, so only the model name/ID and a placeholder
            # URL are recorded.
            hf_url_placeholder = f"https://huggingface.co/{self.repo_ids['current']}/tree/main/[ENCRYPTED_FOLDER]"
            with open(self.config.LIST_FILE, "a", encoding="utf-8") as f:
                f.write(f"{model_info.get('name', 'UnnamedModel')} (ID:{model_id}): {hf_url_placeholder}\n")

        except Exception as e:
            logger.error(f"Error in process_model ({model_url}): {e}")

    async def crawl(self):
        """Periodically poll CivitAI for new models and back them up.

        Loop protocol: the log file's first line is a JSON list of known
        model IDs, the second line is the current backup repo id. One new
        model is processed per iteration; any loop error backs off 5 min.
        """
        while True:
            try:
                login(token=self.config.HUGGINGFACE_API_KEY, add_to_git_credential=True)

                # Refresh model_list.log from the HF repo.
                model_list_path = hf_hub_download(
                    repo_id=self.repo_ids['model_list'],
                    filename=self.config.LIST_FILE
                )
                shutil.copyfile(model_list_path, f"./{self.config.LIST_FILE}")

                # Refresh the log file from the HF repo.
                local_file_path = hf_hub_download(
                    repo_id=self.repo_ids["log"],
                    filename=self.config.LOG_FILE
                )
                shutil.copyfile(local_file_path, f"./{self.config.LOG_FILE}")

                # Read the log: line 1 = known IDs (JSON), line 2 = current repo.
                with open(self.config.LOG_FILE, "r", encoding="utf-8") as file:
                    lines = file.read().splitlines()
                    old_models = json.loads(lines[0]) if len(lines) > 0 else []
                    self.repo_ids["current"] = lines[1] if len(lines) > 1 else ""

                # Check for newly published models.
                r = requests.get(self.config.URLS["latest"], headers=self.config.HEADERS)
                r.raise_for_status()
                latest_models = r.json().get("items", [])
                latest_model_ids = [m["id"] for m in latest_models if "id" in m]

                new_models = list(set(latest_model_ids) - set(old_models))
                if new_models:
                    logger.info(f"New model IDs found: {new_models}")
                    # Process only one new model per loop iteration.
                    model_id = new_models[0]

                    for attempt in range(1, 6):
                        try:
                            self.process_model(self.config.URLS["modelId"] + str(model_id))
                            break
                        except Exception as e:
                            logger.error(f"Failed to process model {model_id} (attempt {attempt}/5): {e}")
                            if attempt == 5:
                                logger.error(f"Skipping model {model_id} after 5 failures.")
                            else:
                                await asyncio.sleep(2)

                    # Record the model as seen and rewrite the log.
                    old_models.append(model_id)
                    with open(self.config.LOG_FILE, "w", encoding="utf-8") as f:
                        f.write(json.dumps(old_models) + "\n")
                        f.write(f"{self.repo_ids['current']}\n")
                    logger.info(f"Updated log with new model ID: {model_id}")

                    # Upload the log file & model_list.log.
                    self.upload_file(
                        file_path=self.config.LOG_FILE,
                        repo_id=self.repo_ids["log"],
                        path_in_repo=self.config.LOG_FILE
                    )
                    self.upload_file(
                        file_path=self.config.LIST_FILE,
                        repo_id=self.repo_ids["model_list"],
                        path_in_repo=self.config.LIST_FILE
                    )
                else:
                    # No new models -> refresh and upload the log, then wait.
                    with open(self.config.LOG_FILE, "w", encoding="utf-8") as f:
                        f.write(json.dumps(latest_model_ids) + "\n")
                        f.write(f"{self.repo_ids['current']}\n")
                    logger.info(f"No new models. Updated log: {self.config.LOG_FILE}")
                    self.upload_file(
                        file_path=self.config.LOG_FILE,
                        repo_id=self.repo_ids["log"],
                        path_in_repo=self.config.LOG_FILE
                    )
                    logger.info("Uploaded log file.")
                    await asyncio.sleep(60)
                    continue

            except Exception as e:
                # Catch-all so the crawl loop never dies; back off 5 minutes.
                logger.error(f"Error in crawl loop: {e}")
                await asyncio.sleep(300)
520
+
521
+
522
# FastAPI application — module-level `app` is what `uvicorn main:app` serves.
config = Config()
crawler = CivitAICrawler(config)
app = crawler.app
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Web framework + server for the status endpoint.
fastapi==0.74.*
# NOTE(review): sentencepiece/torch/transformers are not imported by main.py —
# confirm they are still needed before keeping them in the image.
sentencepiece==0.1.*
torch==1.11.*
transformers==4.*
uvicorn[standard]==0.17.*
# HTTP client used for CivitAI API / downloads.
requests==2.27.*
beautifulsoup4
# HF upload/download API client.
huggingface_hub
# Random User-Agent for request headers.
fake-useragent