Ziyuan111 commited on
Commit
a2a3d92
1 Parent(s): e838a26

Upload durhamtrees.py

Browse files
Files changed (1) hide show
  1. durhamtrees.py +146 -0
durhamtrees.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """DurhamTrees
3
+
4
+ Automatically generated by Colaboratory.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1W5gDhKokcuqoA8AK4a6JR7PIeCUdmrTU
8
+ """
9
+
10
+ import datasets
11
+ import pandas as pd
12
+ import geopandas as gpd
13
+ from datasets import DatasetBuilder, DownloadManager, DatasetInfo, SplitGenerator, Split
14
+ from datasets.features import Features, Value, ClassLabel
15
+ import matplotlib.pyplot as plt
16
+ import csv
17
+ import json
18
+ import os
19
+ from typing import List
20
+ # URL definitions
21
+ _URLS = {
22
+ "first_domain1": {
23
+ "csv_file": "https://drive.google.com/uc?export=download&id=18HmgMbtbntWsvAySoZr4nV1KNu-i7GCy",
24
+ "geojson_file": "https://drive.google.com/uc?export=download&id=1cbn7EY7RofXN7c6Ph0eIGFIZowPZ5lKE",
25
+ },
26
+ "first_domain2": {
27
+ "csv_file2": "https://drive.google.com/uc?export=download&id=1RVdaI5dSTPStjhOHO40ypDv2cAQZpi_Y",
28
+ },
29
+ }
30
+
31
# Combined Dataset Class
class DurhamTrees(datasets.GeneratorBasedBuilder):
    """Builder that combines two Durham tree-inventory sources into one dataset.

    Source 1 ("class1") is a CSV joined with a GeoJSON file on ``OBJECTID``;
    source 2 ("class2") is a standalone CSV. Examples from both sources are
    emitted into a single TRAIN split. Columns a source does not provide are
    padded with ``None`` so every example matches the declared schema.
    """

    VERSION = datasets.Version("1.0.0")

    # One config per underlying source combination.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="class1_domain1", description="this is combined of csv and geojson"),
        datasets.BuilderConfig(name="class2_domain1", description="this is csv file"),
    ]

    # Columns extracted from each source (shared by _info and the generators
    # so the schema and the extraction lists cannot drift apart again).
    _CLASS1_COLUMNS = ["geometry", "OBJECTID", "X", "Y"]
    _CLASS2_COLUMNS = [
        "geometry", "OBJECTID", "streetaddress", "city", "facilityid",
        "present", "genus", "species", "commonname", "diameterin",
        "condition", "contractwork", "neighborhood", "program",
        "plantingw", "plantingcond", "underpwerlins", "GlobalID",
        "created_user", "last_edited_user", "isoprene", "monoterpene",
        "coremoved_ozperyr", "coremoved_dolperyr",
        "o3removed_ozperyr", "o3removed_dolperyr",
        "no2removed_ozperyr", "no2removed_dolperyr",
        "so2removed_ozperyr", "so2removed_dolperyr",
        "pm10removed_ozperyr", "pm10removed_dolperyr",
        "pm25removed_ozperyr", "o2production_lbperyr",
        "replacevalue_dol", "carbonstorage_lb", "carbonstorage_dol",
        "grosscarseq_lbperyr", "X", "Y",
    ]

    def _info(self):
        """Declare the combined schema.

        BUG FIX: the original Features literal declared "geometry" and
        "OBJECTID" twice (silent duplicate-key shadowing), and omitted the
        17 numeric pollutant/value columns that
        _generate_examples_from_class2 actually extracts — `datasets`
        rejects examples whose keys are not in the schema. Every extracted
        column is now declared exactly once.
        """
        return datasets.DatasetInfo(
            description="This dataset combines information from both classes.",
            features=datasets.Features({
                # Placeholder features carried over from the two original
                # classes; no generator currently fills them (padded None).
                "feature1_from_class1": Value("string"),
                "feature1_from_class2": Value("string"),
                # Columns shared by both sources.
                "geometry": Value("string"),
                "OBJECTID": Value("int64"),
                "X": Value("float64"),
                "Y": Value("float64"),
                # Columns from source 2 only.
                "streetaddress": Value("string"),
                "city": Value("string"),
                "facilityid": Value("int64"),
                "present": Value("string"),
                "genus": Value("string"),
                "species": Value("string"),
                "commonname": Value("string"),
                "diameterin": Value("float64"),
                "condition": Value("string"),
                "contractwork": Value("string"),
                "neighborhood": Value("string"),
                "program": Value("string"),
                "plantingw": Value("string"),
                "plantingcond": Value("string"),
                "underpwerlins": Value("string"),
                "GlobalID": Value("string"),
                "created_user": Value("string"),
                "last_edited_user": Value("string"),
                "isoprene": Value("float64"),
                "monoterpene": Value("float64"),
                # Numeric columns source 2 extracts that the original
                # schema omitted. Assumed float64 like the other
                # per-year measurements — TODO confirm against the CSV.
                "coremoved_ozperyr": Value("float64"),
                "coremoved_dolperyr": Value("float64"),
                "o3removed_ozperyr": Value("float64"),
                "o3removed_dolperyr": Value("float64"),
                "no2removed_ozperyr": Value("float64"),
                "no2removed_dolperyr": Value("float64"),
                "so2removed_ozperyr": Value("float64"),
                "so2removed_dolperyr": Value("float64"),
                "pm10removed_ozperyr": Value("float64"),
                "pm10removed_dolperyr": Value("float64"),
                "pm25removed_ozperyr": Value("float64"),
                "o2production_lbperyr": Value("float64"),
                "replacevalue_dol": Value("float64"),
                "carbonstorage_lb": Value("float64"),
                "carbonstorage_dol": Value("float64"),
                "grosscarseq_lbperyr": Value("float64"),
            }),
            supervised_keys=None,
            homepage="https://github.com/AuraMa111?tab=repositories",
            citation="Citation for the combined dataset",
        )

    def _split_generators(self, dl_manager):
        """Download both sources and hand their local paths to one TRAIN split."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # Files for source 1 (CSV + GeoJSON join).
                    "class1_data_file": downloaded_files["first_domain1"]["csv_file"],
                    "class1_geojson_file": downloaded_files["first_domain1"]["geojson_file"],
                    # File for source 2 (standalone CSV).
                    "class2_data_file": downloaded_files["first_domain2"]["csv_file2"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, class1_data_file, class1_geojson_file, class2_data_file, split):
        """Yield (key, example) pairs from both sources.

        BUG FIX: the original reused the raw DataFrame indices from both
        sub-generators, so the keys 0, 1, 2, ... were emitted twice and
        GeneratorBasedBuilder raises DuplicatedKeysError. Keys are now
        prefixed per source. Each example is also padded with None for any
        schema column its source does not provide, so both sources encode
        cleanly against the combined schema.
        """
        schema_columns = list(self._info().features)

        for idx, example in self._generate_examples_from_class1(class1_data_file, class1_geojson_file):
            yield f"class1_{idx}", {column: example.get(column) for column in schema_columns}

        for idx, example in self._generate_examples_from_class2(class2_data_file):
            yield f"class2_{idx}", {column: example.get(column) for column in schema_columns}

    def _generate_examples_from_class1(self, csv_filepath, geojson_filepath):
        """Join the GeoJSON features with the CSV on OBJECTID and yield rows.

        Yields (index, dict) pairs restricted to _CLASS1_COLUMNS.
        """
        csv_data = pd.read_csv(csv_filepath)

        with open(geojson_filepath, 'r') as file:
            geojson_dict = json.load(file)
        gdf = gpd.GeoDataFrame.from_features(geojson_dict['features'])
        merged_data = gdf.merge(csv_data, on='OBJECTID')
        final_data = merged_data[self._CLASS1_COLUMNS]
        # BUG FIX: the merged "geometry" column holds shapely geometry
        # objects, but the schema declares it Value("string") — cast so
        # encoding does not fail.
        final_data = final_data.assign(geometry=final_data["geometry"].astype(str))
        for id_, row in final_data.iterrows():
            yield id_, row.to_dict()

    def _generate_examples_from_class2(self, csv_filepath2):
        """Yield rows from the standalone CSV, restricted to _CLASS2_COLUMNS.

        Yields (index, dict) pairs.
        """
        # Load the CSV data into a pandas DataFrame and keep only the
        # columns declared in the schema.
        csv_data2 = pd.read_csv(csv_filepath2)
        final_data = csv_data2[self._CLASS2_COLUMNS]

        # Yield each row with its DataFrame index as the identifier.
        for id_, row in final_data.iterrows():
            yield id_, row.to_dict()