shadowlilac committed
Commit 7cc2be8
1 Parent(s): 290eda0
Files changed (4)
  1. config.json +32 -0
  2. inference.ipynb +148 -0
  3. preprocessor_config.json +22 -0
  4. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "shadowlilac/anime-image-quality-v2",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1536,
+   "id2label": {
+     "0": "hq",
+     "1": "lq"
+   },
+   "image_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4192,
+   "label2id": {
+     "hq": "0",
+     "lq": "1"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 48,
+   "patch_size": 64,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.2"
+ }
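For orientation, a minimal sketch (not part of this commit) of loading the ViT classifier this config describes with `transformers`. The repo id is an assumption taken from `_name_or_path` above; note that the inference notebook below loads `shadowlilac/aesthetic-shadow` instead, so use whichever id the weights are actually published under.

```python
# Minimal sketch: load the ViT image classifier described by config.json.
# The repo id is assumed from "_name_or_path"; the notebook in this commit
# uses "shadowlilac/aesthetic-shadow" instead.
from transformers import ViTForImageClassification, ViTImageProcessor

repo_id = "shadowlilac/anime-image-quality-v2"  # assumed; swap for the published id
model = ViTForImageClassification.from_pretrained(repo_id)
processor = ViTImageProcessor.from_pretrained(repo_id)

print(model.config.id2label)  # expected: {0: 'hq', 1: 'lq'}
```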
inference.ipynb ADDED
@@ -0,0 +1,148 @@
+ {
+   "nbformat": 4,
+   "nbformat_minor": 0,
+   "metadata": {
+     "colab": {
+       "provenance": [],
+       "collapsed_sections": [
+         "3xnrF3UB6ev0"
+       ],
+       "gpuType": "T4"
+     },
+     "kernelspec": {
+       "name": "python3",
+       "display_name": "Python 3"
+     },
+     "language_info": {
+       "name": "python"
+     },
+     "accelerator": "GPU"
+   },
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "source": [
+         "# Model Inference"
+       ],
+       "metadata": {
+         "id": "33C47swS80_1"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "#@title Install Dependencies\n",
+         "!pip install transformers -q"
+       ],
+       "metadata": {
+         "cellView": "form",
+         "id": "noaoheUjvGbd"
+       },
+       "execution_count": 1,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "cellView": "form",
+         "id": "NZLqjuWEtCDy"
+       },
+       "outputs": [],
+       "source": [
+         "#@title Imports\n",
+         "import os\n",
+         "from transformers import pipeline\n",
+         "import shutil\n",
+         "from PIL import Image\n",
+         "import torch\n",
+         "pipe = pipeline(\"image-classification\", model=\"shadowlilac/aesthetic-shadow\", device=0)"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "#@title Inference\n",
+         "\n",
+         "# Input image file\n",
+         "single_image_file = \"image_1.png\" #@param {type:\"string\"}\n",
+         "\n",
+         "result = pipe(images=[single_image_file])\n",
+         "\n",
+         "prediction_single = result[0]\n",
+         "print(\"Prediction: \" + str(round([p for p in prediction_single if p['label'] == 'hq'][0]['score'] * 100, 2)) + \"% High Quality\")\n",
+         "Image.open(single_image_file)"
+       ],
+       "metadata": {
+         "cellView": "form",
+         "id": "r1R-L2r-0uo2"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "# Batch Mode"
+       ],
+       "metadata": {
+         "id": "3xnrF3UB6ev0"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "#@title Batch parameters\n",
+         "# Define the paths for the input folder and output folders\n",
+         "input_folder = \"input_folder\" #@param {type:\"string\"}\n",
+         "output_folder_hq = \"output_hq_folder\" #@param {type:\"string\"}\n",
+         "output_folder_lq = \"output_lq_folder\" #@param {type:\"string\"}\n",
+         "# Threshold\n",
+         "batch_hq_threshold = 0.5 #@param {type:\"number\"}\n",
+         "# Define the batch size\n",
+         "batch_size = 8 #@param {type:\"number\"}"
+       ],
+       "metadata": {
+         "cellView": "form",
+         "id": "VlPgrJf4wpHo"
+       },
+       "execution_count": 4,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "#@title Execute Batch Job\n",
+         "\n",
+         "# List all image files in the input folder\n",
+         "image_files = [os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]\n",
+         "\n",
+         "# Process images in batches\n",
+         "for i in range(0, len(image_files), batch_size):\n",
+         "    batch = image_files[i:i + batch_size]\n",
+         "\n",
+         "    # Perform classification for the batch\n",
+         "    results = pipe(images=batch)\n",
+         "\n",
+         "    for idx, result in enumerate(results):\n",
+         "        # Extract the prediction scores and labels\n",
+         "        predictions = result\n",
+         "        hq_score = [p for p in predictions if p['label'] == 'hq'][0]['score']\n",
+         "\n",
+         "        # Determine the destination folder based on the prediction and threshold\n",
+         "        destination_folder = output_folder_hq if hq_score >= batch_hq_threshold else output_folder_lq\n",
+         "\n",
+         "        # Copy the image to the appropriate folder\n",
+         "        shutil.copy(batch[idx], os.path.join(destination_folder, os.path.basename(batch[idx])))\n",
+         "\n",
+         "print(\"Classification and sorting complete.\")"
+       ],
+       "metadata": {
+         "cellView": "form",
+         "id": "RG01mcYf4DvK"
+       },
+       "execution_count": null,
+       "outputs": []
+     }
+   ]
+ }
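As a usage note (not part of the commit), the notebook's batch cells condensed into a standalone script. The `os.makedirs` calls are an addition of this sketch, since the notebook assumes the output folders already exist; folder names and threshold are the notebook's defaults.

```python
# Sketch: the notebook's batch classification loop as a standalone script.
# The os.makedirs calls are an addition; everything else mirrors the notebook.
import os
import shutil
from transformers import pipeline

pipe = pipeline("image-classification", model="shadowlilac/aesthetic-shadow", device=0)

input_folder, out_hq, out_lq = "input_folder", "output_hq_folder", "output_lq_folder"
threshold, batch_size = 0.5, 8

# Create the destination folders up front (the notebook expects them to exist).
os.makedirs(out_hq, exist_ok=True)
os.makedirs(out_lq, exist_ok=True)

files = [os.path.join(input_folder, f) for f in os.listdir(input_folder)
         if f.lower().endswith((".png", ".jpg", ".jpeg"))]

for i in range(0, len(files), batch_size):
    batch = files[i:i + batch_size]
    # pipe() on a list returns one list of {label, score} dicts per image.
    for path, preds in zip(batch, pipe(images=batch)):
        hq_score = next(p["score"] for p in preds if p["label"] == "hq")
        dest = out_hq if hq_score >= threshold else out_lq
        shutil.copy(path, os.path.join(dest, os.path.basename(path)))

print("Classification and sorting complete.")
```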
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTFeatureExtractor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 1024,
+     "width": 1024
+   }
+ }
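For reference, a hand-rolled sketch of what this preprocessor config amounts to when the feature extractor applies it: resize to 1024x1024 with bilinear resampling (`resample: 2`), rescale by 1/255, then normalize each channel with mean and std 0.5. The file name is reused from the notebook above purely for illustration; in practice `ViTFeatureExtractor`/`ViTImageProcessor` performs these steps from the JSON.

```python
# Sketch: the preprocessing steps encoded above, applied manually with PIL + torch.
import torch
from PIL import Image

def preprocess(path: str) -> torch.Tensor:
    # do_resize with resample=2 (bilinear) to 1024x1024
    img = Image.open(path).convert("RGB").resize((1024, 1024), Image.BILINEAR)
    x = torch.tensor(list(img.getdata()), dtype=torch.float32).reshape(1024, 1024, 3)
    x = x * 0.00392156862745098              # do_rescale: 1/255
    x = (x - 0.5) / 0.5                      # do_normalize: image_mean = image_std = 0.5
    return x.permute(2, 0, 1).unsqueeze(0)   # (1, 3, 1024, 1024), channels first

pixel_values = preprocess("image_1.png")     # file name reused from the notebook above
```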
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eac1cb6aa06d1a82fa162e124bfbcd6aaaa47dcfbcb8d1a628618e3c1d6f581
+ size 4365309073
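This entry is only a Git LFS pointer; the roughly 4.4 GB of weights live in LFS storage, so a plain clone without LFS yields just this stub. A sketch of fetching the actual file with `huggingface_hub`, assuming the repo id used in the notebook above:

```python
# Sketch: download the real ~4.4 GB weights file rather than the LFS pointer stub.
# Repo id is assumed from the inference notebook; adjust if the model lives elsewhere.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="shadowlilac/aesthetic-shadow",
                               filename="pytorch_model.bin")
print(weights_path)  # local cache path of the downloaded checkpoint
```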