Ziyuan111 committed
Commit
4e819e3
1 Parent(s): c9b831e

Upload durhamtrees.py

Files changed (1)
  1. durhamtrees.py +278 -0
durhamtrees.py ADDED
@@ -0,0 +1,278 @@
+ # -*- coding: utf-8 -*-
+ """DurhamTrees
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1czig7JIbqTKp9wNUIRcdMEDF3pFgtxKv
+ """
+ import pyarrow.parquet as pq
+ import pandas as pd
+ import geopandas as gpd
+ from datasets import (
+     GeneratorBasedBuilder, Version, SplitGenerator, Split,
+     Features, Value, BuilderConfig, DatasetInfo
+ )
+ import matplotlib.pyplot as plt
+ import matplotlib.image as mpimg
+ import seaborn as sns
+ import json
+ import base64
+ import io
+
+ # URL definitions for the remotely hosted source files
+ _URLS = {
+     "first_domain1": {
+         "csv_file": "https://drive.google.com/uc?export=download&id=18HmgMbtbntWsvAySoZr4nV1KNu-i7GCy",
+         "geojson_file": "https://drive.google.com/uc?export=download&id=1cbn7EY7RofXN7c6Ph0eIGFIZowPZ5lKE",
+         "parquet_file": "https://drive.google.com/uc?export=download&id=1RNDLJLoSSV9RJptVyfWFhPra0nh-i_CN",
+     },
+     "first_domain2": {
+         "csv_file2": "https://drive.google.com/uc?export=download&id=1RVdaI5dSTPStjhOHO40ypDv2cAQZpi_Y",
+     },
+ }
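+
+ # A quick sanity check of the class-2 source (an assumption: the Drive links
+ # remain publicly downloadable) can be done directly with pandas, e.g.:
+ #
+ #   pd.read_csv(_URLS["first_domain2"]["csv_file2"]).head()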
+
+
+ # Combined dataset builder
+ class DurhamTrees(GeneratorBasedBuilder):
+     VERSION = Version("1.0.0")
+
+     class MyConfig(BuilderConfig):
+         def __init__(self, **kwargs):
+             super().__init__(**kwargs)
+
+     BUILDER_CONFIGS = [
+         MyConfig(name="class1_domain1", description="combined CSV and GeoJSON data"),
+         MyConfig(name="class2_domain1", description="CSV data only"),
+     ]
+
+     def _info(self):
+         return DatasetInfo(
+             description="This dataset combines information from both classes, with additional processing for csv_file2.",
+             features=Features({
+                 "feature1_from_class1": Value("string"),
+                 "geometry": Value("string"),
+                 "OBJECTID": Value("int64"),
+                 "X": Value("float64"),
+                 "Y": Value("float64"),
+                 "image": Value("binary"),
+                 "label": Value("int64"),
+                 "feature1_from_class2": Value("string"),
+                 "streetaddress": Value("string"),
+                 "city": Value("string"),
+                 "facilityid": Value("int64"),
+                 "present": Value("string"),
+                 "genus": Value("string"),
+                 "species": Value("string"),
+                 "commonname": Value("string"),
+                 "diameterin": Value("float64"),
+                 "condition": Value("string"),
+                 "neighborhood": Value("string"),
+                 "program": Value("string"),
+                 "plantingw": Value("string"),
+                 "plantingcond": Value("string"),
+                 "underpwerlins": Value("string"),
+                 "GlobalID": Value("string"),
+                 "created_user": Value("string"),
+                 "last_edited_user": Value("string"),
+                 "isoprene": Value("float64"),
+                 "monoterpene": Value("float64"),
+                 "monoterpene_class2": Value("float64"),
+                 "vocs": Value("float64"),
+                 "coremoved_ozperyr": Value("float64"),
+                 "coremoved_dolperyr": Value("float64"),
+                 "o3removed_ozperyr": Value("float64"),
+                 "o3removed_dolperyr": Value("float64"),
+                 "no2removed_ozperyr": Value("float64"),
+                 "no2removed_dolperyr": Value("float64"),
+                 "so2removed_ozperyr": Value("float64"),
+                 "so2removed_dolperyr": Value("float64"),
+                 "pm10removed_ozperyr": Value("float64"),
+                 "pm10removed_dolperyr": Value("float64"),
+                 "pm25removed_ozperyr": Value("float64"),
+                 "o2production_lbperyr": Value("float64"),
+                 "replacevalue_dol": Value("float64"),
+                 "carbonstorage_lb": Value("float64"),
+                 "carbonstorage_dol": Value("float64"),
+                 "grosscarseq_lbperyr": Value("float64"),
+                 "grosscarseq_dolperyr": Value("float64"),
+                 "avoidrunoff_ft2peryr": Value("float64"),
+                 "avoidrunoff_dol2peryr": Value("float64"),
+                 "polremoved_ozperyr": Value("float64"),
+                 "polremoved_dolperyr": Value("float64"),
+                 "totannbenefits_dolperyr": Value("float64"),
+                 "leafarea_sqft": Value("float64"),
+                 "potevapotran_cuftperyr": Value("float64"),
+                 "evaporation_cuftperyr": Value("float64"),
+                 "transpiration_cuftperyr": Value("float64"),
+                 "h2ointercept_cuftperyr": Value("float64"),
+                 "carbonavoid_lbperyr": Value("float64"),
+                 "carbonavoid_dolperyr": Value("float64"),
+                 "heating_mbtuperyr": Value("float64"),
+                 "heating_dolperyrmbtu": Value("float64"),
+                 "heating_kwhperyr": Value("float64"),
+                 "heating_dolperyrmwh": Value("float64"),
+                 "cooling_kwhperyr": Value("float64"),
+                 "cooling_dolperyr": Value("float64"),
+                 "totalenerg_dolperyr": Value("float64"),
+             }),
+             supervised_keys=("image", "label"),
+             homepage="https://github.com/AuraMa111?tab=repositories",
+             citation="Citation for the combined dataset",
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             SplitGenerator(
+                 name=Split.TRAIN,
+                 gen_kwargs={
+                     "class1_data_file": downloaded_files["first_domain1"]["csv_file"],
+                     "class1_geojson_file": downloaded_files["first_domain1"]["geojson_file"],
+                     "class2_data_file": downloaded_files["first_domain2"]["csv_file2"],
+                     "parquet_file": downloaded_files["first_domain1"]["parquet_file"],
+                     "split": Split.TRAIN,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, class1_data_file, class1_geojson_file, class2_data_file, parquet_file, split):
+         # Each helper yields (key, example) pairs; keep only the example dicts
+         # so the combined stream can be re-keyed below.
+         class1_examples = [ex for _, ex in self._generate_examples_from_class1(class1_data_file, class1_geojson_file)]
+         class2_examples = [ex for _, ex in self._generate_examples_from_class2(class2_data_file)]
+
+         # Load the Parquet file and append its image examples
+         parquet_data = pq.read_table(parquet_file).to_pandas()
+         class1_examples += [ex for _, ex in self._generate_examples_from_parquet(parquet_data)]
+
+         # Re-enumerate so every example gets a unique key
+         for id_, example in enumerate(class1_examples + class2_examples):
+             yield id_, example
+
+     def _generate_examples_from_class1(self, csv_filepath, geojson_filepath):
+         columns_to_extract = ["OBJECTID", "X", "Y"]  # "geometry" is appended after the merge
+         csv_data = pd.read_csv(csv_filepath)
+
+         with open(geojson_filepath, 'r') as file:
+             geojson_dict = json.load(file)
+         gdf = gpd.GeoDataFrame.from_features(geojson_dict['features'], crs="EPSG:4326")  # CRS of the source data
+         merged_data = gdf.merge(csv_data, on='OBJECTID')
+         final_data = merged_data[columns_to_extract + ['geometry']]
+         for id_, row in final_data.iterrows():
+             example = row.to_dict()
+             # Serialize the shapely geometry to WKT so it matches the declared string feature
+             example["geometry"] = str(example["geometry"])
+             yield id_, example
+
+     def _generate_examples_from_class2(self, csv_filepath2):
+         csv_data2 = pd.read_csv(csv_filepath2)
+
+         columns_to_extract = [
+             "streetaddress", "city", "facilityid", "present", "genus", "species",
+             "commonname", "diameterin", "condition", "neighborhood", "program", "plantingw",
+             "plantingcond", "underpwerlins", "GlobalID", "created_user", "last_edited_user",
+             "isoprene", "monoterpene", "vocs", "coremoved_ozperyr", "coremoved_dolperyr",
+             "o3removed_ozperyr", "o3removed_dolperyr", "no2removed_ozperyr", "no2removed_dolperyr",
+             "so2removed_ozperyr", "so2removed_dolperyr", "pm10removed_ozperyr", "pm10removed_dolperyr",
+             "pm25removed_ozperyr", "o2production_lbperyr", "replacevalue_dol", "carbonstorage_lb",
+             "carbonstorage_dol", "grosscarseq_lbperyr", "grosscarseq_dolperyr", "polremoved_ozperyr",
+             "polremoved_dolperyr", "totannbenefits_dolperyr", "leafarea_sqft", "potevapotran_cuftperyr",
+             "evaporation_cuftperyr", "transpiration_cuftperyr", "h2ointercept_cuftperyr",
+             "carbonavoid_lbperyr", "carbonavoid_dolperyr", "heating_mbtuperyr",
+             "heating_dolperyrmbtu", "heating_kwhperyr", "heating_dolperyrmwh", "cooling_kwhperyr",
+             "cooling_dolperyr", "totalenerg_dolperyr",
+         ]
+
+         final_data = csv_data2[columns_to_extract]
+         for id_, row in final_data.iterrows():
+             example = row.to_dict()
+             # Drop NaN values so only populated fields are emitted
+             non_empty_example = {key: value for key, value in example.items() if pd.notna(value)}
+             yield id_, non_empty_example
+
+     def _generate_examples_from_parquet(self, parquet_data):
+         for id_, row in parquet_data.iterrows():
+             # Only keep rows whose "image" column carries encoded bytes
+             if "image" in row and "bytes" in row["image"]:
+                 # Decode the base64-encoded image bytes
+                 image_data = base64.b64decode(row["image"]["bytes"])
+                 example = {"image": image_data, "label": row["label"]}
+
+                 # Display the image (the source images are PNG, not JPG)
+                 image_bytes = example.get('image')
+                 if image_bytes:
+                     img = mpimg.imread(io.BytesIO(image_bytes), format='PNG')
+                     plt.imshow(img)
+                     plt.show()
+
+                 yield id_, example
+             else:
+                 print(f"Skipping example {id_} as it has missing or invalid image data")
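+
+     # Expected Parquet layout (an assumption inferred from the decoding logic
+     # above): each row's "image" cell is a mapping like
+     # {"bytes": <base64-encoded PNG>} alongside an integer "label" column, e.g.:
+     #
+     #   pq.read_table(parquet_file).to_pandas().iloc[0]["image"]["bytes"]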
+
+     def _correlation_analysis(self, df):
+         # numeric_only guards against non-numeric columns in the combined frame
+         correlation_matrix = df.corr(numeric_only=True)
+         sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', linewidths=.5)
+         plt.title("Correlation Analysis")
+         plt.show()
+
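+
+ # A minimal sketch of how _correlation_analysis might be invoked (an
+ # assumption: the class-2 CSV, reduced to its numeric columns, is the
+ # intended input):
+ #
+ #   df = pd.read_csv(_URLS["first_domain2"]["csv_file2"])
+ #   DurhamTrees(config_name="class2_domain1")._correlation_analysis(df.select_dtypes("number"))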
+
+
+ # Create an instance of the DurhamTrees builder
+ durham_trees_dataset = DurhamTrees(config_name='class1_domain1')
+
+ # Build the dataset
+ durham_trees_dataset.download_and_prepare()
+
+ # Access the dataset
+ dataset = durham_trees_dataset.as_dataset()
+
+ # Iterate through the dataset and display images; the "image" feature holds
+ # raw PNG bytes (already base64-decoded during generation)
+ for example in dataset['train']:
+     image_bytes = example.get("image")
+     if image_bytes:
+         img = mpimg.imread(io.BytesIO(image_bytes), format='PNG')
+         plt.imshow(img)
+         plt.show()
+
+ # Create an instance of the DurhamTrees builder for the other configuration
+ durham_trees_dataset_another = DurhamTrees(config_name='class2_domain1')
+
+ # Build the dataset for the new instance
+ durham_trees_dataset_another.download_and_prepare()
+
+ # Access the dataset for the new instance
+ dataset_another = durham_trees_dataset_another.as_dataset()
+
+ # Iterate through the new dataset and display any images it contains
+ for example in dataset_another['train']:
+     image_bytes = example.get("image")
+     if image_bytes:
+         img = mpimg.imread(io.BytesIO(image_bytes), format='PNG')
+         plt.imshow(img)
+         plt.show()
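+
+ # The builder can also be consumed through the standard `datasets` entry
+ # point. A minimal sketch (assumptions: this file is saved locally as
+ # durhamtrees.py and a recent `datasets` release is installed):
+ #
+ #   from datasets import load_dataset
+ #   ds = load_dataset("durhamtrees.py", "class1_domain1", trust_remote_code=True)
+ #   print(ds["train"][0])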