Update artwork_for_sdxl.py

artwork_for_sdxl.py  CHANGED  (+10 -10)
@@ -39,7 +39,7 @@ _DESCRIPTION = """\
 Artwork Images, to predict the year of the artwork created.
 """
 
-_URL = "https://huggingface.co/datasets/
+_URL = "https://huggingface.co/datasets/wintercoming6/artwork_for_sdxl/resolve/main/metadata.jsonl"
 
 class Artwork(datasets.GeneratorBasedBuilder):
     """Artwork Images - a dataset of centuries of Images classes"""
@@ -49,20 +49,20 @@ class Artwork(datasets.GeneratorBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "
-                    "image_data":
+                    "prompt": str,
+                    "image_data": Image,
                 }
             ),
             supervised_keys=("label","image_data"),
             homepage=_HOMEPAGE,
             citation=_CITATION,
-            task_templates=[ImageClassification(image_column="image_data", label_column="
+            task_templates=[ImageClassification(image_column="image_data", label_column="prompt")],
         )
 
     def _split_generators(self, dl_manager):
         data_files = dl_manager.download_and_extract(_URL)
-        df = pd.
-
+        df = pd.read_json(data_files, lines=True)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -78,11 +78,11 @@ class Artwork(datasets.GeneratorBasedBuilder):
             print(cnt)
             cnt+=1
             print(path)
-            print(path.
-            print(type(path.
+            print(path.prompt)
+            print(type(path.prompt))
             print(path.image_data)
             print(type(path.image_data))
             yield {
-                "
-                "image_data": path.image_data,
+                "prompt": path.prompt,
+                "image_data": Image.open(path.image_data),
             }
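
The updated loader reads the JSONL metadata with pandas and opens each image with PIL while generating examples. Below is a minimal, self-contained sketch of that flow; the metadata.jsonl filename matches the file referenced by _URL, while the assumption that image_data holds a local image path is inferred from Image.open(path.image_data) in the diff and is not otherwise confirmed.

# Sketch of the generation flow the updated script follows (assumptions noted above).
import pandas as pd
from PIL import Image

df = pd.read_json("metadata.jsonl", lines=True)  # one JSON object per line
for cnt, row in enumerate(df.itertuples()):
    print(cnt, row.prompt)              # prompt text for this artwork
    image = Image.open(row.image_data)  # assumes image_data is a path to the image file
    print(image.size)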
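For consumers, a GeneratorBasedBuilder script like this is normally driven through datasets.load_dataset. A hedged usage sketch, assuming the Hub repo id taken from _URL and the single "train" split declared in _split_generators:

# Usage sketch; requires network access to the Hub repo referenced in the diff.
from datasets import load_dataset

ds = load_dataset("wintercoming6/artwork_for_sdxl", split="train")
example = ds[0]
print(example["prompt"])      # prompt string yielded by _generate_examples
print(example["image_data"])  # image data yielded alongside it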