Update app.py
app.py (CHANGED)
@@ -75,32 +75,21 @@ else:
     df_speckle_lu = speckle_utils.get_dataframe(streamData, return_original_df=False)
     df_lu = df_speckle_lu.copy()
     df_lu = df_lu.astype(str)
-    df_lu = df_lu.set_index("uuid", drop=False)
-
+    df_lu = df_lu.set_index("uuid", drop=False) # variable, uuid as default
 
     df_dm = matrices[distanceMatrixActivityNodes]
 
-
-    #matrices_dict = matrices.to_dict('index')
-
-
-    df_dm = matrices[distanceMatrixActivityNodes]
-
-
     df_dm_dict = df_dm.to_dict('index')
 
-
 
     # Replace infinity with 10000 and NaN values with 0, then convert to integers
     df_dm = df_dm.replace([np.inf, -np.inf], 10000).fillna(0)
     df_dm = df_dm.apply(pd.to_numeric, errors='coerce')
     df_dm = df_dm.round(0).astype(int)
-
-    #df_dm_transport = matrices[distanceMatrixTransportStops]
-    #df_dm_transport_dictionary = df_dm_transport.to_dict('index')
-
+
     mask_connected = df_dm.index.tolist()
-
+
+    lu_columns = [] # provided by user? or prefix
    for name in df_lu.columns:
        if name.startswith("lu+"):
            lu_columns.append(name)
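This hunk drops the duplicated matrix lookup and the commented-out transport-stop block, leaving a single cleaning pass over the distance matrix plus the scan for "lu+"-prefixed land-use columns. A minimal, self-contained sketch of that same pandas pattern; the frames and ids below are invented stand-ins, not the Space's Speckle data:

```python
import numpy as np
import pandas as pd

# Invented stand-in for matrices[distanceMatrixActivityNodes]
df_dm = pd.DataFrame(
    {"n1": [0.0, np.inf, 12.4], "n2": [np.nan, 0.0, 7.8], "n3": [15.1, 9.9, 0.0]},
    index=["n1", "n2", "n3"],
)

# Replace infinity with 10000 and NaN with 0, then convert to integers
df_dm = df_dm.replace([np.inf, -np.inf], 10000).fillna(0)
df_dm = df_dm.apply(pd.to_numeric, errors="coerce")
df_dm = df_dm.round(0).astype(int)

# ids that are actually present in the cleaned matrix
mask_connected = df_dm.index.tolist()

# Invented land-use table; only "lu+"-prefixed columns are collected
df_lu = pd.DataFrame(
    {"uuid": ["n1", "n2", "n3"], "lu+residential": [120, 0, 40], "name": ["a", "b", "c"]}
)
df_lu = df_lu.astype(str)
df_lu = df_lu.set_index("uuid", drop=False)

lu_columns = [name for name in df_lu.columns if name.startswith("lu+")]
print(lu_columns)  # ['lu+residential']
```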
@@ -132,13 +121,13 @@ def test(input_json):
 
     from config import useGrasshopperData
 
-    if useGrasshopperData == True:
+    if useGrasshopperData == True: # grasshopper input
         matrix = inputs['input']["matrix"]
-        landuses = inputs['input']["landuse_areas"]
+        landuses = inputs['input']["landuse_areas"] # fetch grasshoper data or not
 
         dfLanduses = pd.DataFrame(landuses).T
         dfLanduses = dfLanduses.apply(pd.to_numeric, errors='coerce')
-        dfLanduses = dfLanduses.replace([np.inf, -np.inf], 0).fillna(0)
+        dfLanduses = dfLanduses.replace([np.inf, -np.inf], 0).fillna(0) # cleaning function?
         dfLanduses = dfLanduses.round(0).astype(int)
 
         dfMatrix = pd.DataFrame(matrix).T
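The Grasshopper branch builds dfLanduses from the raw landuse_areas payload with pd.DataFrame(...).T followed by the coerce/replace/round cleanup. Assuming the payload is a nested dict keyed by node id (an assumption; the real Grasshopper schema isn't shown in this diff), the orientation works like this:

```python
import numpy as np
import pandas as pd

# Hypothetical Grasshopper payload shape: {node_id: {landuse_column: value}}
landuses = {
    "n1": {"lu+residential": 120.0, "lu+office": float("inf")},
    "n2": {"lu+residential": None, "lu+office": 35.6},
}

# pd.DataFrame(dict_of_dicts) puts the outer keys on the columns,
# so .T flips them back onto the rows: one row per node id
dfLanduses = pd.DataFrame(landuses).T
dfLanduses = dfLanduses.apply(pd.to_numeric, errors="coerce")
dfLanduses = dfLanduses.replace([np.inf, -np.inf], 0).fillna(0)  # inf and NaN both become 0 here
dfLanduses = dfLanduses.round(0).astype(int)

print(dfLanduses)
#     lu+residential  lu+office
# n1             120          0
# n2               0         36
```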
@@ -146,7 +135,7 @@ def test(input_json):
         dfMatrix = dfMatrix.replace([np.inf, -np.inf], 10000).fillna(0)
         dfMatrix = dfMatrix.round(0).astype(int)
     else:
-        dfLanduses = df_lu_filtered.copy()
+        dfLanduses = df_lu_filtered.copy() # fetch speckl data or not
         dfMatrix = df_dm.copy()
 
 
@@ -154,7 +143,7 @@ def test(input_json):
     dm_dictionary = dfMatrix.to_dict('index')
 
     attributeMapperDict_gh = inputs['input']["attributeMapperDict"]
-    landuseMapperDict_gh = inputs['input']["landuseMapperDict"]
+    landuseMapperDict_gh = inputs['input']["landuseMapperDict"] # if fetch notion data or not, def
 
 
     from config import alpha as alphaDefault
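The dm_dictionary = dfMatrix.to_dict('index') line in the context above produces a nested {row_id: {column_id: value}} mapping; a toy illustration with made-up values:

```python
import pandas as pd

dfMatrix = pd.DataFrame(
    [[0, 340], [340, 0]], index=["n1", "n2"], columns=["n1", "n2"]
)
dm_dictionary = dfMatrix.to_dict("index")
# {'n1': {'n1': 0, 'n2': 340}, 'n2': {'n1': 340, 'n2': 0}}
print(dm_dictionary["n1"]["n2"])  # 340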
@@ -174,15 +163,10 @@ def test(input_json):
 
 
 
-
-
-
-    from imports_utils import splitDictByStrFragmentInColumnName
-
-
+    #from imports_utils import splitDictByStrFragmentInColumnName
 
     # create a mask based on the matrix size and ids, crop activity nodes to the mask
-    mask_connected = dfMatrix.index.tolist()
+    #mask_connected = dfMatrix.index.tolist()
 
     valid_indexes = [idx for idx in mask_connected if idx in dfLanduses.index]
     # Identify and report missing indexes
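With mask_connected = dfMatrix.index.tolist() now commented out inside test, the function relies on the mask built earlier and keeps only land-use rows whose ids also appear in the distance matrix. A sketch of that cropping-and-reporting step with invented frames (the exact reporting wording in app.py may differ):

```python
import pandas as pd

# Invented frames: one matrix id ("n3") has no matching land-use row
dfMatrix = pd.DataFrame(
    [[0, 5, 9], [5, 0, 4], [9, 4, 0]],
    index=["n1", "n2", "n3"], columns=["n1", "n2", "n3"],
)
dfLanduses = pd.DataFrame({"lu+residential": [120, 40]}, index=["n1", "n2"])

mask_connected = dfMatrix.index.tolist()

# keep only the activity nodes that exist in the matrix...
valid_indexes = [idx for idx in mask_connected if idx in dfLanduses.index]
# ...and report the ones that are missing
missing_indexes = [idx for idx in mask_connected if idx not in dfLanduses.index]
if missing_indexes:
    print(f"Missing {len(missing_indexes)} land-use rows for matrix ids: {missing_indexes}")

dfLanduses_filtered = dfLanduses.loc[valid_indexes]
print(dfLanduses_filtered)
```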
@@ -222,8 +206,8 @@ def test(input_json):
     #AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility,gmtAccessibility], axis=1)
 
 
-    if 'jobs' not in subdomainsAccessibility.columns:
-        print("Error: Column 'jobs' does not exist in the subdomainsAccessibility.")
+    #if 'jobs' not in subdomainsAccessibility.columns:
+    #    print("Error: Column 'jobs' does not exist in the subdomainsAccessibility.")
 
     livability = accessibilityToLivability(dfMatrix,subdomainsAccessibility,livabilityMapperDict,domainsUnique)
 
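This last hunk comments out the guard that warned when the 'jobs' column was missing from subdomainsAccessibility before calling accessibilityToLivability. If such a guard is ever reinstated, a non-fatal version might look like the sketch below; warn_if_missing is a hypothetical helper, not part of app.py:

```python
import pandas as pd

def warn_if_missing(df: pd.DataFrame, column: str = "jobs") -> bool:
    """Hypothetical helper: warn (without raising) when a column is absent."""
    if column not in df.columns:
        print(f"Error: Column '{column}' does not exist in the subdomainsAccessibility.")
        return False
    return True

# Invented accessibility table without a 'jobs' column
subdomainsAccessibility = pd.DataFrame(
    {"education": [3, 5], "health": [2, 4]}, index=["n1", "n2"]
)
warn_if_missing(subdomainsAccessibility)  # prints the warning and returns False
```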