| prompt | completion | api |
|---|---|---|
| string (lengths 19 – 1.03M) | string (lengths 4 – 2.12k) | string (lengths 8 – 90) |
#GiG
import numpy as np
import pandas as pd
from pathlib import Path
from deep_blocker import DeepBlocker
from tuple_embedding_models import AutoEncoderTupleEmbedding, CTTTupleEmbedding, HybridTupleEmbedding, SIFEmbedding
from vector_pairing_models import ExactTopKVectorPairing
import blocking_utils
from configurations import *
import pickle
def ctt_train_score_with_pred(folder_root, golden_set,left_table_fname, right_table_fname, cols_to_block, tuple_embedding_model, vector_pairing_model):
"""
    Inputs: table names, model type, and ground-truth labels.
    Outputs: evaluation statistics from the binary classifier, mainly for CTT.
"""
folder_root = Path(folder_root)
left_table_name_csv = left_table_fname+'.csv'
right_table_name_csv = right_table_fname+'.csv'
left_df = pd.read_csv(folder_root / left_table_name_csv)
right_df = pd.read_csv(folder_root / right_table_name_csv)
db = DeepBlocker(tuple_embedding_model, vector_pairing_model)
candidate_set_df,predictions = db.driver(left_df, right_df, cols_to_block,True)
predictions = pd.DataFrame(predictions,columns=['ltable_id','rtable_id','value'])
# print(predictions)
# predictions = predictions[predictions.prediction > 0.1]
# golden_df = pd.read_csv(Path(folder_root) / "matches.csv")
golden_df = filter_golden_set(golden_set,left_table_fname, right_table_fname)
statistics_dict_binary = blocking_utils.compute_blocking_statistics((left_table_fname,right_table_fname), predictions, golden_df, left_df, right_df,CTT_BINARY_THRESHOLD)
statistics_dict = blocking_utils.compute_blocking_statistics((left_table_fname,right_table_fname), candidate_set_df, golden_df, left_df, right_df,CTT_EMBEDDING_THRESHOLD)
return statistics_dict,statistics_dict_binary
def train_score(folder_root, golden_set, left_table_fname, right_table_fname, cols_to_block, tuple_embedding_model, vector_pairing_model,threshold):
"""
    Inputs: table names, model type, and ground-truth labels.
    Outputs: evaluation statistics for any supplied model.
"""
folder_root = Path(folder_root)
left_table_name_csv = left_table_fname+'.csv'
right_table_name_csv = right_table_fname+'.csv'
left_df = pd.read_csv(folder_root / left_table_name_csv)
right_df = pd.read_csv(folder_root / right_table_name_csv)
db = DeepBlocker(tuple_embedding_model, vector_pairing_model)
candidate_set_df = db.driver(left_df, right_df, cols_to_block,False)
# golden_df = pd.read_csv(Path(folder_root) / "matches.csv")
golden_df = filter_golden_set(golden_set,left_table_fname, right_table_fname)
statistics_dict = blocking_utils.compute_blocking_statistics((left_table_fname,right_table_fname), candidate_set_df, golden_df, left_df, right_df,threshold)
return statistics_dict
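# Illustrative usage sketch (not part of the original script; assumes `golden_set` is
# already loaded, and the embedding/pairing choices and K value are assumptions):
#   tuple_embedding = AutoEncoderTupleEmbedding()
#   topk_pairing = ExactTopKVectorPairing(K=50)
#   stats = train_score('data/', golden_set, 'tableA', 'tableB',
#                       ['title', 'description'], tuple_embedding, topk_pairing, 0.5)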
def get_golden_set(left_table_fname, right_table_fname):
"""
Queries golden set from one table (used when not looping/scoring)
"""
output_file = 'nyc_output/'+ left_table_fname + '-output.txt'
with open(output_file) as f:
lines = f.readlines()
    line_df = pd.DataFrame(lines, columns=['full'])
import os
import pandas as pd
import copy
import jieba
import numpy as np
import time
def readExcel(url):
df=pd.read_excel(url,na_values='')
return df
def writeExcel(df,url=''):
write = pd.ExcelWriter(url)
df.to_excel(write, sheet_name='Sheet1')
write.save()
def genDF(df_Base,df_IPC):
columns=df_Base['ๅๅนถๅ'].unique()
index=df_IPC['NAME'].unique()
return pd.DataFrame(None,index=index,columns=columns)
pass
def check(df_Input, df_Base, df_result=None):
    ''''''
    if df_result is None:
        df_result = pd.DataFrame(None, columns=['Company', 'Cat'])
# Standard library imports
import pandas as pd
import numpy as np
from math import pi
# Local application/library specific imports
from .water_demand import (
set_cropland_share,
get_ky_list,
get_kc_list,
get_evap_i,
get_eto,
get_eff_rainfall_i,
get_effective_rainfall,
get_season_days,
get_calendar_days,
get_kc_values,
get_water_demand,
)
from .least_cost import (
get_wind_cf,
get_pv_cf,
get_installed_capacity,
get_max_capacity,
get_lcoe,
get_least_cost,
get_tech_generation,
get_pumping_cost,
get_unit_pumping_cost,
)
class Model():
# water properties:
eto = 'ETo_'
lat = 'lat'
elevation_diff = 'elevation'
wind = 'wind'
srad = 'srad'
tmin = 'tmin'
tmax = 'tmax'
tavg = 'tavg'
eff = 'eff_'
prec = 'prec'
kc = 'kc_'
crop_share = 'crop_share'
crop_area = 'crop_area'
seasons = ['init', 'dev', 'mid', 'late']
start = '_start'
end = '_end'
acwr = 'ACWR'
pcwr = 'PCWR'
    pwd = 'PWD'
sswd = 'SSWD'
crop_calendar = None
crop_column = 'crop'
ky_dict = {}
kc_dict = {}
pipeline = 'pipeline'
well = 'point'
wwtp = 'point'
desal_plant = 'point'
    pumping_hours_per_day = 'pumping_hours_per_day'
deff = 1
aeff = 0.45
ky_values = {}
kc_values = {}
# energy properties:
gw_depth = 'gw_depth'
D = 'Pipe_diameter'
L = 'Pipeline_length'
A = 'Pipe_area'
mV = 'Flow_velocity' # monthly Velocity mV in (m/sec)
tdh = 'tdh'
des_int = 'Einten_KWh/m3'
des_ener = 'Edesal_GWh'
pp_e = 'PP_E' # Pumping PEAK Electric demand
pa_e = 'PA_E' # Pumping AVERAGE Electric demand
Re = 'Re'
f = 'f'
trans_eff = 0
pump_eff = 'pump_eff'
technologies = {}
discount_rate = 0
g = 9.81 # gravitational acceleration in(m/sec2)
Ken_visc = 1.004e-06
dens = 1000
k = 0.061e-3 # roughness for ductile iron pipe
ww_energy_int = 'ww_energy_int'
desal_energy_int = 'desal_energy_int'
start_year = 0
end_year = 30
def __init__(self, df):
self.df = df
def print_properties(self):
print('Properties names:')
        for val, name in zip([self.eto, self.lat, self.elevation_diff, self.wind,
                              self.srad, self.tmin, self.tmax, self.tavg,
                              self.crop_share, self.crop_area, self.seasons,
                              self.start, self.end, self.crop_column,
                              self.gw_depth, self.tdh, self.A, self.mV,
                              self.f, self.Re],
                             ['Reference evapotranspiration (.eto)',
                              'Latitude (.lat)', 'Elevation difference (.elevation_diff)',
                              'Wind speed (.wind)', 'Solar radiation (.srad)',
                              'Min temperature (.tmin)', 'Max temperature (.tmax)',
                              'Average temperature (.tavg)',
                              'Cropland share column (.crop_share)',
                              'Cropland area column (.crop_area)',
                              'Harvest seasons names (.seasons)',
                              'Season start suffix (.start)',
                              'Season end suffix (.end)',
                              'Cropland column (.crop_column)',
                              'Groundwater table depth (.gw_depth)',
                              'Total dynamic head (.tdh)',
                              'Pipe area (.A)',
                              'Flow velocity (.mV)',
                              'Friction losses (.f)',
                              'Reynolds Number (.Re)']):
            print(' - {}: {}'.format(name, val))
####### water related methods ###########
def set_cropland_share(self, crop_var, geo_boundary='global',
boundary_name=None, inplace=False):
if inplace:
set_cropland_share(self.df, crop_var, geo_boundary=geo_boundary,
boundary_name=boundary_name, crop_share=self.crop_share)
else:
return set_cropland_share(self.df.copy(), crop_var,
geo_boundary=geo_boundary,
boundary_name=boundary_name,
crop_share=self.crop_share)
def get_ky_list(self, inplace=False):
if inplace:
get_ky_list(self.df, crop_share=self.crop_share)
else:
return get_ky_list(self.df.copy(), crop_share=self.crop_share)
def get_kc_list(self, inplace=False):
if inplace:
get_kc_list(self.df, crop_share=self.crop_share)
else:
return get_kc_list(self.df.copy(), crop_share=self.crop_share)
def get_eto(self, inplace=False):
if inplace:
get_eto(self.df, eto=self.eto, lat=self.lat,
elevation=self.elevation_diff, wind=self.wind,
srad=self.srad, tmin=self.tmin,
tmax=self.tmax, tavg=self.tavg)
else:
return get_eto(self.df.copy(), eto=self.eto, lat=self.lat,
elevation=self.elevation_diff, wind=self.wind,
srad=self.srad, tmin=self.tmin,
tmax=self.tmax, tavg=self.tavg)
def get_effective_rainfall(self, inplace=False):
if inplace:
get_effective_rainfall(self.df, eff=self.eff, prec=self.prec,
eto=self.eto)
else:
return get_effective_rainfall(self.df.copy(), eff=self.eff,
prec=self.prec, eto=self.eto)
def get_calendar_days(self, inplace=False):
if inplace:
get_calendar_days(self.crop_calendar, seasons=self.seasons,
start=self.start, end=self.end)
else:
return get_calendar_days(self.crop_calendar.copy(), seasons=self.seasons,
start=self.start, end=self.end)
def get_kc_values(self, inplace=False):
if inplace:
get_kc_values(crop_calendar=self.crop_calendar,
seasons=self.seasons, kc_dict=self.kc_dict,
crop_column=self.crop_column, start=self.start,
end=self.end, kc=self.kc)
else:
return get_kc_values(crop_calendar=self.crop_calendar.copy(),
seasons=self.seasons, kc_dict=self.kc_dict,
crop_column=self.crop_column,
start=self.start, end=self.end,
kc=self.kc)
def get_water_demand(self, inplace=False):
if inplace:
get_water_demand(self.df, self.crop_calendar, self.ky_dict,
self.crop_column, self.aeff, self.deff,
self.seasons[0], self.seasons[3],
self.pumping_hours_per_day,
crop_area=self.crop_area, _eto=self.eto,
_kc=self.kc, _eff=self.eff, _acwr=self.acwr,
_pcwr=self.pcwr, _pwd=self.pwd, _sswd=self.sswd,
start=self.start, end=self.end,
crop_share=self.crop_share)
else:
return get_water_demand(self.df.copy(), self.crop_calendar,
self.ky_dict, self.crop_column, self.aeff,
self.deff, self.seasons[0], self.seasons[3],
self.pumping_hours_per_day,
crop_area=self.crop_area, _eto=self.eto,
_kc=self.kc, _eff=self.eff, _acwr=self.acwr,
_pcwr=self.pcwr, _pwd=self.pwd,
_sswd=self.sswd, start=self.start,
end=self.end, crop_share=self.crop_share)
####### energy related methods ###########
def create_efficiency_goal(self, dff, initial_eff, final_eff,
init_year, end_year):
if initial_eff > final_eff:
final_eff = initial_eff
years = np.arange(end_year - init_year + 1) + init_year
efficiencies = np.linspace(initial_eff, final_eff,
end_year - init_year + 1)
df_eff = pd.DataFrame({'Year': years, self.pump_eff: efficiencies})
dff = dff.copy().drop(columns=[self.pump_eff])
dff = dff.merge(df_eff, how="left")
return np.array(dff[self.pump_eff])
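    # create_efficiency_goal builds a per-row pump-efficiency array that ramps linearly
    # from initial_eff to final_eff between init_year and end_year (a lower final_eff is
    # clamped up to initial_eff), merging the ramp back onto dff by its 'Year' column;
    # rows with years outside that range end up as NaN from the left merge.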
def set_efficiency_goal(self, category, efficiency_goal, names=None):
if category == 'pipeline':
feature = self.pipeline
elif category == 'well':
feature = self.well
if isinstance(names, type(None)):
for index, group in self.df.groupby(feature):
self.df.loc[self.df[feature]==index, self.pump_eff] = \
self.create_efficiency_goal(group,
group[self.pump_eff].min(),
efficiency_goal['efficiency'],
efficiency_goal['init_year'],
efficiency_goal['end_year'])
elif isinstance(names, list):
for name in names:
dff = self.df.loc[self.df[feature]==name]
self.df.loc[self.df[feature]==name, self.pump_eff] = \
self.create_efficiency_goal(dff,
dff[self.pump_eff].min(),
efficiency_goal['efficiency'],
efficiency_goal['init_year'],
efficiency_goal['end_year'])
elif isinstance(names, str):
dff = self.df.loc[self.df[feature]==names]
self.df.loc[self.df[feature]==names, self.pump_eff] = \
self.create_efficiency_goal(dff,
dff[self.pump_eff].min(),
efficiency_goal['efficiency'],
efficiency_goal['init_year'],
efficiency_goal['end_year'])
else:
raise TypeError(f'Invalid type {type(names)} for names. Names should be a string or a list of strings')
def set_specifications(self, category, diameter, pumping_hours_per_day=None,
pump_efficiency=1, efficiency_goal=None, amount=None,
distribute=None, names=None):
'''Sets the specification for pipelines or wells
Parameters
----------
category : ['pipeline', 'well']
The category to configure
diameter : float, int
The diameter of the pipeline or well
pumping_hours_per_day : float, int
The average amount of hours pumped per day
pump_efficiency : float
Pumping energy efficiency
amount : int
Amount of pipelines or wells
distribute : [None, 'proportionally']
        If 'proportionally', distributes the amount of specified wells
proportionally based on the water extractions per well field
names: string, list
Contains the name or names of the pipelines/wells to configure
Raises
------
TypeError
Invalid type for names
'''
if category == 'pipeline':
feature = self.pipeline
elif category == 'well':
feature = self.well
if isinstance(names, type(None)):
self.df[self.D] = diameter #in m
self.df[self.pumping_hours_per_day] = pumping_hours_per_day
self.df[self.pump_eff] = pump_efficiency
if distribute=='proportionally':
dff = self.df.groupby(['Year', feature])[['sswd']].sum().reset_index()
df_amount = dff.loc[dff['Year']==2020].copy()
df_amount['pipe_amount'] = np.ceil(df_amount['sswd'] / df_amount['sswd'].sum() * amount)
self.df = self.df.merge(df_amount[[feature, 'pipe_amount']], on=feature)
else:
self.df['pipe_amount'] = amount
elif isinstance(names, list):
for name in names:
self.df.loc[self.df[feature]==name, self.D] = diameter #in m
self.df.loc[self.df[feature]==name, self.pumping_hours_per_day] = pumping_hours_per_day
self.df.loc[self.df[feature]==name, self.pump_eff] = pump_efficiency
if amount:
self.df.loc[self.df[feature]==name, 'pipe_amount'] = amount
elif isinstance(names, str):
self.df.loc[self.df[feature]==names, self.D] = diameter #in m
self.df.loc[self.df[feature]==names, self.pumping_hours_per_day] = pumping_hours_per_day
self.df.loc[self.df[feature]==names, self.pump_eff] = pump_efficiency
if amount:
self.df.loc[self.df[feature]==names, 'pipe_amount'] = amount
else:
raise TypeError(f'Invalid type {type(names)} for names. Names should be a string or a list of strings')
if efficiency_goal:
self.set_efficiency_goal(category, efficiency_goal, names)
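    # Illustrative usage sketch (the diameter, hours, efficiency and amount values are
    # assumptions, not defaults from the source):
    #   model = Model(df)  # df holds one row per pipeline/year, incl. 'Year' and 'sswd'
    #   model.set_specifications('pipeline', diameter=1.2, pumping_hours_per_day=10,
    #                            pump_efficiency=0.6, amount=20, distribute='proportionally')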
def pipe_area(self):
self.df[self.A] = (pi*self.df[self.D]**2)/4
def flow_velocity(self, axis=0):
if axis:
for i in range(1, 13):
_avg_Q = '{}{}'.format(self.sswd, i)
_mV = '{}{}'.format(self.mV, i)
self.df[_mV] = self.df[_avg_Q] / (30 * self.df[self.pumping_hours_per_day] * 60 * 60) / self.df[self.A]
else:
self.df[self.mV] = self.df[self.sswd] / self.df['pipe_amount'] / (30 * self.df[self.pumping_hours_per_day] * 60 * 60) / self.df[self.A] # convert m3/month to m3/s (30day*pump_hours_day*60min/h*60s/min)
def reynolds(self, axis=0):
if axis:
for i in range(1, 13):
_mV = '{}{}'.format(self.mV, i)
_Re = '{}{}'.format(self.Re, i)
self.df[_Re] = (self.df[_mV] * self.df[self.D]) / self.Ken_visc
else:
self.df[self.Re] = self.df[self.mV] * self.df[self.D] / self.Ken_visc
def friction_factor(self, axis=0):
if axis:
for i in range(1, 13):
_Re = '{}{}'.format(self.Re, i)
_f = '{}{}'.format(self.f, i)
self.df[_f] = 0.25 / (np.log((self.k / (3.7 * self.df[self.D])) + (5.74 / (self.df[_Re] ** 0.9))) ** 2)
else:
self.df[self.f] = 0.25 / (np.log((self.k / (3.7 * self.df[self.D])) + (5.74 / (self.df[self.Re] ** 0.9))) ** 2)
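    # friction_factor applies an explicit Swamee-Jain-style correlation for the Darcy
    # friction factor, f = 0.25 / [log(k/(3.7*D) + 5.74/Re**0.9)]**2, using the pipe
    # roughness k and the Reynolds number computed above (the canonical Swamee-Jain
    # form uses log10; this implementation calls np.log as written).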
def get_tdh(self, friction=True, axis=0):
if axis:
for i in range(1, 13):
_avg_Q = '{}{}'.format(self.sswd, i)
_f = '{}{}'.format(self.f, i)
_tdh_sw = '{}{}'.format(self.tdh, i)
if friction:
f_losses = (self.df[_f] * self.df[self.L] * 16 * ((self.df[_avg_Q] / (30 * self.df[self.pumping_hours_per_day] * 60 * 60)) ** 2)) / ((self.df[self.D] ** 5) * 2 * self.g * (pi ** 2))
else:
f_losses = 0
self.df[_tdh_sw] = self.df[self.elevation_diff] + f_losses
self.df[_tdh_sw].replace(0, np.nan, inplace=True)
else:
if friction:
f_losses = (self.df[self.f] * self.df[self.L] * (self.df[self.mV] ** 2)) / (self.df[self.D] * 2 * self.g)
else:
f_losses = 0
self.df[self.tdh] = self.df[self.elevation_diff] + f_losses
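    # The friction losses above follow the Darcy-Weisbach head-loss equation,
    # h_f = f * (L/D) * v**2 / (2*g); the monthly (axis) variant expands v in terms of
    # the monthly flow, and total dynamic head = elevation difference + friction losses.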
def get_pumping_energy(self, axis=0):
if axis:
for i in range(1, 13):
_swpp_e = '{}{}'.format(self.pp_e, i) # surface water pumping peak electric demand
_swpa_e = '{}{}'.format(self.pa_e, i) # surface water pumping average electric demand
_peak_Q = '{}{}'.format(self.pwd, i) # peak water flow in the pipeline. To be updated WEAP output.
_avg_Q = '{}{}'.format(self.sswd, i) # average water flow in the pipeline. To be updated with WEAP output
_tdh_sw = '{}{}'.format(self.tdh, i)
self.df[_swpp_e] = ((self.df[_peak_Q] * self.df[_tdh_sw] * self.g * self.dens) / (self.df[self.pump_eff] * 1000)) # to convert E from W to KW
self.df[_swpa_e] = ((self.df[_avg_Q] * self.df[_tdh_sw] * self.g * self.dens) / (
self.df[self.pump_eff] * 1000 * 3600)) # to convert E from J to KWh
else:
if self.pwd in self.df.columns:
self.df[self.pp_e] = ((self.df[self.pwd] * self.df[self.tdh] * self.g * self.dens) / (self.df[self.pump_eff] * 1000))
self.df[self.pa_e] = ((self.df[self.sswd] * self.df[self.tdh] * self.g * self.dens) / (self.df[self.pump_eff] * 1000 * 3600))
self.df.loc[self.df[self.pa_e]<0, self.pa_e] = 0
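    # The pumping terms above follow P = rho * g * Q * H / eta: dividing by 1000
    # converts W to kW for the peak electric demand (pp_e), and the extra 3600 converts
    # J to kWh for the average monthly energy (pa_e), since sswd is a monthly volume in m3.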
def set_treatment_energy(self, category, energy_int, names=None):
'''Sets the specification for pipelines or wells
Parameters
----------
category : ['wastewater', 'desalination']
The category to configure
energy_int : float
Energy intensity of treatment
names: string, list
Contains the name or names of the treatment plants to configure
Raises
------
TypeError
Invalid type for names
'''
if category == 'wastewater':
feature = self.wwtp
energy_feature = self.ww_energy_int
elif category == 'desalination':
feature = self.desal_plant
energy_feature = self.desal_energy_int
if isinstance(names, type(None)):
self.df[energy_feature] = energy_int
elif isinstance(names, list):
for name in names:
self.df.loc[self.df[feature]==name, energy_feature] = energy_int #in m
elif isinstance(names, str):
self.df.loc[self.df[feature]==names, energy_feature] = energy_int #in m
else:
raise TypeError(f'Invalid type {type(names)} for names. Names should be a string or a list of strings')
def get_treatment_energy(self, category):
if category == 'wastewater':
energy_feature = self.ww_energy_int
elif category == 'desalination':
energy_feature = self.desal_energy_int
self.df[self.pa_e] = self.df[self.sswd] * self.df[energy_feature]
####### technologies and LCOE related methods #########
def create_wind_turbine(self, wind_turbine, life, om_cost,
capital_cost, efficiency):
self.technologies[wind_turbine] = self.WindTurbine(life, om_cost,
capital_cost,
efficiency)
def create_pv_system(self, pv_system, life, om_cost,
capital_cost, efficiency):
self.technologies[pv_system] = self.PVSystem(life, om_cost,
capital_cost,
efficiency, None,
0, 1, 0, 0)
def create_standard_tech(self, tech_name, life, om_cost, capital_cost,
efficiency, cf, fuel_cost, fuel_req, emission_factor,
env_cost):
self.technologies[tech_name] = self.Technology(life, om_cost,
capital_cost, efficiency, cf,
fuel_cost, fuel_req,
emission_factor, env_cost)
def get_cf(self, technologies='all', axis=0):
technologies = self.__check_tech_input(technologies)
for technology in technologies:
if type(self.technologies[technology]) == self.WindTurbine:
self.get_wind_cf(technology, axis)
elif type(self.technologies[technology]) == self.PVSystem:
self.get_pv_cf(technology, axis)
def get_wind_cf(self, wind_turbine, axis=0):
tech = self.technologies[wind_turbine]
self.technologies[wind_turbine].cf = get_wind_cf(self.df, wind=self.wind,
mu=tech.mu, t=tech.t, p_rated=tech.p_rated,
z=tech.z, zr=tech.zr, es=tech.es, u_arr=tech.u_arr,
p_curve=tech.p_curve, axis=axis)
def get_pv_cf(self, pv_system, axis=0):
tech = self.technologies[pv_system]
self.technologies[pv_system].cf = get_pv_cf(self.df, self.srad, axis)
def get_installed_capacity(self, technologies='all', axis=0):
technologies = self.__check_tech_input(technologies)
for technology in technologies:
tech = self.technologies[technology]
self.technologies[technology].df = get_installed_capacity(self.df,
tech.cf,
self.pp_e,
axis)
def get_max_capacity(self, technologies='all', axis=0):
technologies = self.__check_tech_input(technologies)
for technology in technologies:
tech = self.technologies[technology]
if axis:
self.technologies[technology].df = tech.df.join(get_max_capacity(tech.df, axis))
else:
self.technologies[technology].max_cap = get_max_capacity(tech.df, axis)
def get_lcoe(self, technologies='all', years='all', axis=0):
technologies = self.__check_tech_input(technologies)
for technology in technologies:
tech = self.technologies[technology]
if axis:
self.technologies[technology].df['lcoe'] = get_lcoe(
max_capacity=tech.df['max_cap'],
total_demand=self.df['annual_el_demand'],
tech_life=tech.life, om_cost=tech.om_cost,
capital_cost=tech.capital_cost,
discount_rate=self.discount_rate,
project_life=self.end_year - self.start_year,
fuel_cost=tech.fuel_cost,
fuel_req=tech.fuel_req,
efficiency=tech.efficiency,
emission_factor=tech.emission_factor,
env_cost=tech.env_cost,
start_year=self.start_year,
end_year=self.end_year,
axis=axis)
else:
years = self.__get_years(years)
self.technologies[technology].lcoe = pd.DataFrame()
for year in years:
self.technologies[technology].lcoe = \
self.technologies[technology].lcoe.append(get_lcoe(
max_capacity=tech.max_cap.reset_index(),
total_demand=self.df,
tech_life=tech.life, om_cost=tech.om_cost,
capital_cost=tech.capital_cost,
discount_rate=self.discount_rate,
project_life=self.end_year - self.start_year,
fuel_cost=tech.fuel_cost,
fuel_req=tech.fuel_req,
efficiency=tech.efficiency,
emission_factor=tech.emission_factor,
env_cost=tech.env_cost,
start_year=year,
end_year=self.end_year,
axis=axis), ignore_index=True)
# self.technologies[technology].lcoe.reset_index(inplace=True)
def get_least_cost(self, technologies='all', years='all',
geo_boundary=None, axis=0):
if axis:
self.df['least_cost_tech'] = np.nan
self.df['lcoe'] = np.nan
if (geo_boundary != None) and (type(technologies) == dict):
for key, value in technologies.items():
_technologies = self.__check_tech_input(value)
lcoe_df = pd.DataFrame()
lcoe_df[geo_boundary] = self.df[geo_boundary]
for _technology in _technologies:
lcoe_df[_technology] = self.technologies[_technology].df['lcoe']
lcoe = get_least_cost(lcoe_df, geo_boundary, key)
self.df.loc[self.df[geo_boundary] == key, 'least_cost_tech'] = \
lcoe.loc[self.df[geo_boundary] == key, 'least_cost_technology']
self.df.loc[self.df[geo_boundary] == key, 'lcoe'] = \
lcoe.loc[self.df[geo_boundary] == key, 'lcoe']
else:
_technologies = self.__check_tech_input(technologies)
lcoe_df = pd.DataFrame()
for _technology in _technologies:
lcoe_df[_technology] = self.technologies[_technology].df['lcoe']
lcoe = get_least_cost(lcoe_df)
self.df['least_cost_tech'] = lcoe['least_cost_technology']
self.df['lcoe'] = lcoe['lcoe']
else:
_technologies = self.__check_tech_input(technologies)
lcoe_df = pd.DataFrame()
for _technology in _technologies:
dff = self.technologies[_technology].lcoe.set_index(['Demand point', 'year'])
lcoe_df[_technology] = dff['lcoe']
years = self.__get_years(years)
self.lcoe = self.df.loc[self.df.Year.isin(years)]
self.lcoe = self.lcoe.groupby(['Demand point', 'Year']).agg(
{
# 'Supply point': 'first',
# 'links': 'first',
'province': 'first',
'sswd': 'sum',
# 'type': 'first',
'swpp_e': 'max',
'swpa_e': 'sum'})
self.lcoe.rename(columns={'links': 'link', 'sswd': 'water demand',
'swpp_e': 'required capacity',
'swpa_e': 'energy demand'},
inplace=True)
lcoe = get_least_cost(lcoe_df)
self.lcoe['least_cost_technology'] = lcoe['least_cost_technology']
self.lcoe['lcoe'] = lcoe['lcoe']
def get_tech_generation(self):
get_tech_generation(self.df, self.technologies.keys())
def get_pumping_cost(self, inplace=False):
if inplace:
get_pumping_cost(self.df, 'annual_el_demand', 'lcoe')
else:
return get_pumping_cost(self.df.copy(), 'annual_el_demand', 'lcoe')
def get_unit_pumping_cost(self, inplace=False):
if inplace:
get_unit_pumping_cost(self.df, 'pumping_cost',
self.df.filter(like=self.sswd).sum(axis=0))
else:
return get_unit_pumping_cost(self.df.copy(), 'pumping_cost',
self.df.filter(like=self.sswd).sum(axis=0))
####### additional methods #############
def __check_tech_input(self, technologies):
if type(technologies) == str:
if technologies.lower() in ['all', 'a', 'everything']:
technologies = self.technologies.keys()
else:
technologies = [technologies]
return technologies
def __get_years(self, years):
if type(years) == str:
if years.lower() in ['all', 'a', 'everything']:
years = range(self.start_year, self.end_year + 1)
elif type(years) == int:
years = [years]
return years
class Technology():
        df = pd.DataFrame()
import os
import json
import random
import numpy as np
import pandas as pd
from copy import deepcopy
from string import ascii_uppercase, digits
from shutil import copyfile, rmtree, copytree
from datetime import datetime
#from ai.bot import Agent
from ai.bot import Agent
from tasks.games.chess.chess import Chess
from skills.chess.game_plumbing import Plumbing
from tools.toolbox import ToolBox
class chess:
"""
Main interface for the AI to play chess
"""
def legal_moves(self, chess_game):
"""
Input: chess_game - object containing the current chess game
Description: returns all legal moves
Output: numpy array containing all legal moves
"""
legal = np.zeros((8, 8, 8, 8))
for cur, moves in chess_game.possible_board_moves(capture=True).items():
if len(moves) > 0 and ((cur[0].isupper() and chess_game.p_move == 1) or (cur[0].islower() and chess_game.p_move == -1)):
cur_pos = chess_game.board_2_array(cur)
for next in moves:
legal[cur_pos[1]][cur_pos[0]][next[1]][next[0]] = 1.
return legal.flatten()
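    # The flattened 8x8x8x8 mask has 4096 entries, one per (from-square, to-square)
    # pair; entries set to 1. mark the legal moves for the side to move.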
def play_game(
self,
game_name,
epoch,
train = False,
EPD = None,
SILENT = True,
players = [
'skills/chess/data/active_param.json',
'human'
],
tie_min = 100,
game_max = float('inf')
):
"""
Input: game_name - string representing the name of the match
epoch - integer representing the current epoch
train - boolean used as training control (Default = False) [OPTIONAL]
               EPD - string representing the EPD hash to load the board into (Default = None) [OPTIONAL]
SILENT - boolean used for control of displaying stats or not (Default = True) [OPTIONAL]
players - list containing the player paramater files (Default = ['skills/chess/data/active_param.json', 'human'] [OPTIONAL]
tie_min - integer representing the minimum amount of moves for an auto tie game to be possible (Default = 100) [OPTIONAL]
game_max - integer representing the maximum amount of moves playable before triggering an auto tie (Default = inf) [OPTIONAL]
Description: play a game of chess
Output: tuple containing the game outcome and a dataframe containing the game log
"""
log = []
human_code = [
'h',
'hum',
'human'
]
        end = False
        t_code = False  # ensures the post-game check below is defined even for human-only games
a_players = []
plumbing = Plumbing()
if EPD is None:
chess_game = deepcopy(Chess()) #'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq -'
else:
chess_game = deepcopy(Chess(EPD=EPD)) #'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq -'
while True:
for i, p in enumerate(players):
while True:
if str(p).lower() in human_code and len(log) < len(players):
a_players.append('human')
elif len(log) < len(players):
a_players.append(deepcopy(Agent(param_name = p, train = train)))
if 'human' in a_players:
if chess_game.p_move == 1:
print('\nWhites Turn [UPPER CASE]\n')
else:
print('\nBlacks Turn [LOWER CASE]\n')
chess_game.display()
enc_state = plumbing.encode_state(chess_game)
if a_players[i] == 'human':
cur = input('What piece do you want to move?\n')
next = input('Where do you want to move the piece to?\n')
a_map = np.zeros((8, 8, 8, 8))
a_map[chess_game.y.index(cur[1])][chess_game.x.index(cur[0])][chess_game.y.index(next[1])][chess_game.x.index(next[0])] = 1
a_map = a_map.flatten()
b_a = np.where(a_map == 1)[0][0]
else:
t1 = datetime.now()
                        legal = self.legal_moves(chess_game) #Filter legal moves for initial state
legal[legal == 0] = float('-inf')
probs, v, r = a_players[i].choose_action(enc_state, legal_moves = legal)
max_prob = max(probs)
if SILENT == False:
print('\n--------------- STATS ---------------')
print(f' Player | {p}')
print(f' Colour | {"white" if chess_game.p_move > 0 else "black"}')
print('- - - - - - - - - - - - - - - - - - -')
print(f' Value | {v}')
print(f' Reward | {r}')
print(f' Probability | {float(max_prob)*100}%')
print('-------------------------------------\n')
a_bank = [j for j, v in enumerate(probs) if v == max_prob]
b_a = random.choice(a_bank)
a_map = np.zeros(4096)
a_map[b_a] = 1
a_map = a_map.reshape((8, 8, 8, 8))
a_index = [(cy, cx, ny, nx) for cy, cx, ny, nx in zip(*np.where(a_map == 1))][0]
cur = f'{chess_game.x[a_index[1]]}{chess_game.y[a_index[0]]}'
next = f'{chess_game.x[a_index[3]]}{chess_game.y[a_index[2]]}'
valid = False
if chess_game.move(cur, next) == False:
if SILENT == False or str(p).lower() in human_code:
print('Invalid move')
else:
valid = True
cur_pos = chess_game.board_2_array(cur)
next_pos = chess_game.board_2_array(next)
if a_players[i] == 'human':
log.append({
**{f'state{i}':float(s) for i, s in enumerate(enc_state[0])},
**{f'prob{x}':1 if x == ((cur_pos[0]+(cur_pos[1]*8))*64)+(next_pos[0]+(next_pos[1]*8)) else 0 for x in range(4096)},
**{'action':b_a}
})
else:
log.append({
**{f'state{i}':float(s) for i, s in enumerate(enc_state[0])},
**{f'prob{x}':p for x, p in enumerate(probs)},
**{'action':b_a, 'time':(datetime.now() - t1).total_seconds()}
})
if SILENT == False or a_players[i] == 'human':
if chess_game.p_move > 0:
print(f'w {cur.lower()}-->{next.lower()} | GAME:{epoch} BOARD:{game_name} MOVE:{len(log)} HASH:{chess_game.EPD_hash()}\n')
else:
print(f'b {cur.lower()}-->{next.lower()} | GAME:{epoch} BOARD:{game_name} MOVE:{len(log)} HASH:{chess_game.EPD_hash()}\n')
if a_players[i] != 'human':
state = chess_game.check_state(chess_game.EPD_hash())
t_code = True if state != False else False
if ((state == '50M' or state == '3F') and len(log) > tie_min) or len(log) >= game_max:
state = [0, 1, 0] #Auto tie
elif state == 'PP':
chess_game.pawn_promotion(n_part='Q') #Auto queen
if state != [0, 1, 0]:
state = chess_game.is_end() if len(log) > tie_min else chess_game.is_end(choice=False)
else:
state = chess_game.is_end()
if state == [0, 0, 0]:
if chess_game.check_state(chess_game.EPD_hash()) == 'PP':
chess_game.pawn_promotion()
if sum(state) > 0:
if SILENT == False or a_players[i] == 'human':
print(f'FINISHED | GAME:{epoch} BOARD:{game_name} MOVE:{len(log)} STATE:{state}\n')
game_train_data = pd.DataFrame(log)
game_train_data = game_train_data.astype(float)
end = True
break
if end == True:
break
if valid == True:
chess_game.p_move = chess_game.p_move * (-1)
if end == True:
break
del a_players
if t_code == True: print(f'Game Code Found = {state}\n')
return state, game_train_data
def traing_session(
self,
loops = 15,
games = 10,
boards = 1,
best_of = 5,
EPD = None,
SILENT = True,
player = 'skills/chess/data/models/test',
tie_min = 100,
game_max = float('inf'),
full_model = False
):
"""
        Input: loops - integer representing the amount of training loops to run (Default = 15) [OPTIONAL]
               games - integer representing the amount of self-play games per loop (Default = 10) [OPTIONAL]
               boards - integer representing the amount of boards to play at once (Default = 1) [OPTIONAL]
               best_of - integer representing the amount of games to use in a round-robin (Default = 5) [OPTIONAL]
               EPD - string representing the EPD hash to load the board into (Default = None) [OPTIONAL]
               SILENT - boolean used for control of displaying stats or not (Default = True) [OPTIONAL]
               player - string representing the folder of the model being trained (Default = 'skills/chess/data/models/test') [OPTIONAL]
tie_min - integer representing the minimum amount of moves for an auto tie game to be possible (Default = 100) [OPTIONAL]
game_max - integer representing the maximum amount of moves playable before triggering an auto tie (Default = inf) [OPTIONAL]
full_model - boolean representing if the full model is being trained every exploration game or not (Default = False) [OPTIONAL]
Description: train ai by playing multiple games of chess
Output: None
"""
        #Initialize variables
LOOPS = loops
T_GAMES = games #Games of self play
BOARDS = boards #Amount of boards to play on at a time
BEST_OF = best_of #Amount of games played when evaluating the models
if os.path.exists(f'{player}/logs/training_log.csv'):
t_log = pd.read_csv(f'{player}/logs/training_log.csv')
else:
t_log = pd.DataFrame()
n_player = player + '(temp)'
if os.path.exists(n_player) == False:
print('CREATE NEW')
os.makedirs(n_player) #Create folder
copyfile(f'{player}/parameters.json', f'{n_player}/parameters.json') #Overwrite active model with new model
if os.path.exists(f'{n_player}/parameters.json'):
if os.path.exists(f'{player}/weights') and len(os.listdir(f"{player}/weights")) > 0:
copytree(
f'{player}/weights',
f'{n_player}/weights'
)
else:
os.makedirs(f'{n_player}/weights')
train_data = pd.DataFrame()
#Begin training games
for _ in range(LOOPS):
for t, g_count in enumerate([T_GAMES, BEST_OF]):
if t == 0:
a_players = [f'{n_player}/parameters.json'] #Self-play
game_results = {'white':0, 'black':0, 'tie':0}
pass
else:
a_players = [f'{player}/parameters.json', f'{n_player}/parameters.json'] #Evaluate
game_results = {f'{player}/parameters.json':0, f'{n_player}/parameters.json':0, 'tie':0}
pass
for g in range(g_count):
#PLAY GAME
print(f'\nSTARTING GAMES\n')
state, train_data = self.play_game(
'TEST',
g,
train = True if t == 0 else False,
EPD = EPD,
SILENT = SILENT,
players = a_players,
tie_min = tie_min,
game_max = game_max
)
if t == 0:
if state == [1, 0, 0]:
print(f'WHITE WINS')
game_results['white'] += 1
train_data['value'] = np.where(train_data['state0'] == 0., 1., -1.)
elif state == [0, 0, 1]:
print(f'BLACK WINS')
game_results['black'] += 1
train_data['value'] = np.where(train_data['state0'] == 0., -1., 1.)
else:
print('TIE GAME')
game_results['tie'] += 1
train_data['value'] = [0.] * len(train_data)
b_elo = ''
else:
print(game_results)
if state == [1, 0, 0]:
print(f'{a_players[0]} WINS')
game_results[a_players[0]] += 1
train_data['value'] = np.where(train_data['state0'] == 0., 1., -1.)
b_elo = 0 if n_player == a_players[0] else 1
elif state == [0, 0, 1]:
print(f'{a_players[-1]} WINS')
game_results[a_players[-1]] += 1
train_data['value'] = np.where(train_data['state0'] == 0., -1., 1.)
b_elo = 0 if n_player == a_players[-1] else 1
else:
print('TIE GAME')
game_results['tie'] += 1
train_data['value'] = [0.] * len(train_data)
b_elo = 0
print(game_results)
print()
#UPDATE TRAINING STATE
if t == 0 and sum([v for v in game_results.values()]) >= g_count:
game_results = {f'{player}/parameters.json':0, f'{n_player}/parameters.json':0, 'tie':0}
elif t == 0:
pass
elif sum([v for v in game_results.values()]) >= g_count:
m_wins = max(game_results.values())
winners = [p for p in game_results if game_results[p] == m_wins]
print(winners)
if os.path.exists(f'{player}/weights') == False:
os.makedirs(f'{player}/weights') #Create folder
if len(os.listdir(f'{player}/weights')) == 0 or len(winners) == 1 and winners[0] == n_player:
print('OVERWRITE')
ToolBox.overwrite_model(
n_player,
player
)
game_results = {'white':0, 'black':0, 'tie':0}
else:
a_players.reverse()
#LOG TRAINING DATA
train_data['reward'] = [0.] * len(train_data)
if os.path.exists(f'{player}/logs/game_log.csv'):
g_log = pd.read_csv(f'{player}/logs/game_log.csv')
else:
g_log = pd.DataFrame()
if t == 0:
train_data['ELO'] = [''] * len(train_data)
else:
cur_ELO = g_log['ELO'].dropna().iloc[-1] if 'ELO' in g_log and len(g_log['ELO'].dropna()) > 0 else 0
ELO = ToolBox.update_ELO(
cur_ELO, #ELO_p1,
cur_ELO, #ELO_p2
tie = True if state == [0, 0, 0] else False
)
train_data['ELO'] = [ELO[b_elo]] * len(train_data)
#print(cur_ELO, ELO, b_elo)
train_data['Game-ID'] = ''.join(random.choices(ascii_uppercase + digits, k=random.randint(15, 15)))
train_data['Date'] = [datetime.now()] * len(train_data)
g_log = g_log.append(train_data, ignore_index=True)
if os.path.exists(f'{player}/logs') == False:
os.makedirs(f'{player}/logs') #Create folder
g_log.to_csv(f'{player}/logs/game_log.csv', index=False)
if t == 0:
if full_model == True:
s_headers = [h for h in g_log if 'state' in h]
#m_log = pd.DataFrame(Agent(param_name = f'{n_player}/parameters.json', train = False).train(g_log[g_log['value']!=0.0].drop_duplicates(subset=s_headers, keep='last'), folder=n_player))
m_log = pd.DataFrame(Agent(param_name = f'{n_player}/parameters.json', train = False).train(g_log.drop_duplicates(subset=s_headers, keep='last'), folder=n_player))
del s_headers
else:
if g == g_count - 1:
s_headers = [h for h in g_log if 'state' in h]
m_log = pd.DataFrame(Agent(param_name = f'{n_player}/parameters.json', train = False).train(g_log.drop_duplicates(subset=s_headers, keep='last'), folder=n_player))
del s_headers
else:
m_log = pd.DataFrame(Agent(param_name = f'{n_player}/parameters.json', train = False).train(train_data, folder=n_player, encoder=False))
if os.path.exists(f'{player}/logs/training_log.csv'):
t_log = pd.read_csv(f'{player}/logs/training_log.csv')
else:
t_log = pd.DataFrame()
m_log['model'] = player
t_log = t_log.append(m_log, ignore_index=True)
del m_log
t_log.to_csv(f'{player}/logs/training_log.csv', index=False)
del t_log
                    #GARBAGE CLEAN UP
del g_log
                    train_data = pd.DataFrame()
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import DataFrame, DatetimeIndex, Series
import pandas._testing as tm
from pandas.core.window import Expanding
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor(frame_or_series):
# GH 12669
c = frame_or_series(range(5)).expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor_invalid(frame_or_series, w):
# not valid
c = frame_or_series(range(5)).expanding
msg = "min_periods must be an integer"
with pytest.raises(ValueError, match=msg):
c(min_periods=w)
msg = "center must be a boolean"
with pytest.raises(ValueError, match=msg):
c(min_periods=1, center=w)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
# see gh-12811
e = Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
"expander",
[
1,
pytest.param(
"ls",
marks=pytest.mark.xfail(
reason="GH#16425 expanding with offset not supported"
),
),
],
)
def test_empty_df_expanding(expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=DatetimeIndex([]))
result = DataFrame(index=DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_expanding_axis(axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame(
{i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}
)
else:
# axis == 1
expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
def test_expanding_count_with_min_periods(frame_or_series):
# GH 26996
result = frame_or_series(range(5)).expanding(min_periods=3).count()
expected = frame_or_series([np.nan, np.nan, 3.0, 4.0, 5.0])
tm.assert_equal(result, expected)
def test_expanding_count_default_min_periods_with_null_values(frame_or_series):
# GH 26996
values = [1, 2, 3, np.nan, 4, 5, 6]
expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]
result = frame_or_series(values).expanding().count()
expected = frame_or_series(expected_counts)
tm.assert_equal(result, expected)
def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_series):
# GH 25857
result = frame_or_series(range(5)).expanding(min_periods=6).count()
expected = frame_or_series([np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"df,expected,min_periods",
[
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
1,
),
(DataFrame({"A": [1], "B": [4]}), [], 2),
(DataFrame(), [({}, [])], 1),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
1,
),
],
)
def test_iter_expanding_dataframe(df, expected, min_periods):
# GH 11704
expected = [DataFrame(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, df.expanding(min_periods)):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"ser,expected,min_periods",
[
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),
        (Series([1, 2]),
import io
import json
import os
import re
import pickle
import subprocess
import pandas as pd
import numpy as np
from textblob import TextBlob, Blobber
from textblob_de import TextBlobDE as TextBlobDE
from textblob_fr import PatternTagger as PatternTaggerFR, PatternAnalyzer as PatternAnalyzerFR
import nltk
nltk.download('punkt')
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
supported_languages = ['EN','DE','FR']
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
api.queue.append(msg)
def set_config(config):
api.config = config
class config:
## Meta data
config_params = dict()
tags = {'sdi_utils': '','textblob':''}
version = "0.1.0"
operator_name = "text_sentiment"
operator_description = "Text Sentiment Analysis"
operator_description_long = "Text Sentiment Analysis using Textblob. "
add_readme = dict()
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
last_msg = None
id_set = set()
operator_name = 'sentiment analysis'
logger, log_stream = slog.set_logging(operator_name, loglevel=api.config.debug_mode)
logger.info("Process started")
time_monitor = tp.progress()
def get_sentiment(text, language):
if isinstance(text,str) :
if language == 'DE':
blob = TextBlobDE(text)
return [blob.sentiment.polarity, blob.sentiment.subjectivity]
elif language == 'FR' :
tb = Blobber(pos_tagger=PatternTaggerFR(), analyzer=PatternAnalyzerFR())
blob = tb(text)
return blob.sentiment
else:
blob = TextBlob(text)
return [blob.sentiment.polarity, blob.sentiment.subjectivity]
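# Illustrative example (values are indicative, not from the source): get_sentiment
# returns [polarity, subjectivity], with polarity in [-1, 1] and subjectivity in [0, 1], e.g.
#   get_sentiment('The service was excellent', 'EN')  # -> roughly [1.0, 1.0]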
def process(msg):
global setup_data
global last_msg
global hash_text_list
df = msg.body
att_dict = msg.attributes
df['polarity'] = np.nan
df['subjectivity'] = np.nan
df[['polarity','subjectivity']] = df.apply(lambda row: pd.Series(get_sentiment(row['text'], row['language']),dtype='object'),axis=1)
df['polarity'] = df['polarity'].round(2)
df['subjectivity'] = df['subjectivity'].round(2)
logger.info('Text processed: {}'.format(df.shape[0]))
logger.debug('Process ended{}'.format(time_monitor.elapsed_time()))
api.send(outports[0]['name'], log_stream.getvalue())
api.send(outports[1]['name'], api.Message(attributes=att_dict,body=df[['text_id','polarity','subjectivity']]))
inports = [{'name': 'sentiment_list', 'type': 'message.DataFrame', "description": "Sentiment list"},
{'name': 'texts', 'type': 'message.DataFrame', "description": "DataFrame with texts"}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"},
{'name': 'data', 'type': 'message.DataFrame', "description": "data of sentiments"}]
# api.set_port_callback(inports[0]['name'], setup_sentiment_list)
# api.set_port_callback(inports[1]['name'], process)
def test_operator():
config = api.config
config.debug_mode = True
config.language_filter = 'DE'
config.use_sentiment_word_list = True
api.set_config(config)
doc_file = '/Users/Shared/data/onlinemedia/data/doc_data_cleansed.csv'
df = pd.read_csv(doc_file, sep=',', nrows=1000000000)
msg = api.Message(attributes={'file': {'path': doc_file}, 'format': 'pandas'}, body=df)
process(msg)
out_file = '/Users/Shared/data/onlinemedia/data/text_sentiment.csv'
df_list = [d.body for d in api.queue]
    pd.concat(df_list)
"""
Transfer applications.
|pic1|
.. |pic1| image:: ../images_source/transfer_tools/transfer.png
:width: 30%
"""
import os
import sys
from subprocess import Popen, PIPE
from pathlib import Path
import pandas as pd
import pexpect
import requests
import zipfile
from selenium.webdriver.chrome import webdriver
import urllib3
import pdfplumber
import io
class Access:
"""
Functions for accessing file systems and protocols.
"""
@classmethod
def proxies(cls, domain):
"""
A function to create an http/https proxy address.
:param domain: domain address.
:return: Http/https proxy address.
"""
res = {
'http': 'http://' + \
os.environ['usr'] + \
':' + os.environ['pwd'] + \
f'@proxyfarm.{domain}.com:8080'
,
'https': 'https://' + \
os.environ['usr'] + \
':' + os.environ['pwd'] + \
f'@proxyfarm.{domain}.com:8080'
}
return res
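    # Illustrative usage sketch (assumes the 'usr' and 'pwd' environment variables are
    # set and 'acme' stands in for a real domain):
    #   import requests
    #   r = requests.get('https://example.com', proxies=Access.proxies('acme'))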
class Local:
"""
Functions for accessing local files.
"""
@classmethod
def zip_dir(cls, directory_list, zipname):
"""
Compress a directory into a single ZIP file.
:param directory_list: List of files to compress into zip file.
:param zipname: Name of zip file to compress files into.
:return: Zip file containing files.
"""
outZipFile = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
for idx, dir in enumerate(directory_list):
# if idx == 0: break
if not os.path.exists(dir):
print(f"Error, directory {dir} does not exist")
continue
# The root directory within the ZIP file.
rootdir = os.path.basename(dir)
try:
os.listdir(dir)
for dirpath, dirnames, filenames in os.walk(dir):
for filename in filenames:
# Write the file named filename to the archive,
# giving it the archive name 'arcname'.
filepath = os.path.join(dirpath, filename)
parentpath = os.path.relpath(filepath, dir)
arcname = os.path.join(rootdir, parentpath)
outZipFile.write(filepath, arcname)
except:
# exception means there are no files inside the directory
# so we write the normal file
outZipFile.write(dir, dir.split("/")[-1])
outZipFile.close()
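    # Illustrative usage sketch (paths are placeholders):
    #   Local.zip_dir(['/tmp/reports', '/tmp/summary.csv'], '/tmp/archive.zip')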
@classmethod
def clear_delete_directory(cls, directory, method="delete"):
"""
Clears and/or deletes a directory and its contents.
:param directory: Filepath of directory.
:param method: Optional delete for the directory folder.
:return: Nothing.
"""
directory = Path(directory)
for item in directory.iterdir():
if item.is_dir():
Local.clear_delete_directory(item)
else:
item.unlink()
if method == "delete":
directory.rmdir()
@classmethod
def fix_user_path(cls, dir):
"""
Fixes a local filepath.
:param dir: Directory to patch.
:return: Patched directory.
"""
dir_components = dir.split("/")
search = "Users"
for idx, elem in enumerate(dir_components):
if elem == search:
break
dir_components[idx + 1] = os.environ['os_name']
r = "/".join(dir_components)
return r
@classmethod
def get_all_filetimes(cls, dir, exclude=False):
"""
Creates a Pandas DataFrame of filenames and file times in a given directory.
:param dir: Directory of files.
:param exclude: A string to search for files to exclude.
:return: Pandas DataFrame of filenames and file times to a directory.
"""
files = os.listdir(dir)
if exclude:
files = [f for f in files if exclude not in f]
times = [os.path.getmtime(dir + f) for f in files]
file_times = pd.DataFrame({"files": files, "times": times})
return file_times
@classmethod
def get_latest_file(cls, name, dir, exclude=False):
"""
Get the latest file in a directory.
:param name: String match name of file(s).
:param dir: Directory for the file search.
:param exclude: A string to search for files to exclude.
:return: Name of most recent file.
"""
# file name str to lowercase
name = name.lower()
# get list of files
files = os.listdir(dir)
if exclude:
files = [f for f in files if exclude not in f]
times = [os.path.getmtime(dir + f) for f in files]
file_times = pd.DataFrame({"files": files, "times": times})
file_times['files_lower'] = file_times['files'].str.lower()
file_times = file_times[file_times['files_lower'].str.contains(name)]
read_file = file_times[file_times['times'] == max(file_times['times'])]['files']
read_file = read_file.values[0]
return read_file
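    # Illustrative usage sketch (directory and search string are placeholders; note that
    # `dir` must end with a path separator because filenames are concatenated directly):
    #   newest = Local.get_latest_file('sales', '/data/exports/', exclude='backup')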
@classmethod
def read_files_like(cls, name, dir, ext_typ, sheet_name=False):
"""
Reads and concatenates files in a directory that match a string. Returns a Pandas DataFrame.
:param name: A string search to match.
:param dir: Directory to search for files in.
:param ext_typ: Extension type to search for (.XLSX OR .CSV)
:param sheet_name: If extension type is not ".CSV", specifies the sheet number or sheet name to read.
:return: Concatenated Pandas DataFrames that match a string.
"""
files = os.listdir(dir)
files = pd.DataFrame({"files": files})
files['files'] = files['files'].str.lower()
files = files[(
files['files'].str.contains(name)
&
files['files'].str.contains(ext_typ)
)]
files.reset_index(inplace=True)
for idx, f in files.iterrows():
if ext_typ == "csv":
dat = pd.read_csv(dir + f['files'])
else:
dat = pd.read_excel(dir + f['files'], sheet_name=sheet_name)
if idx == 0:
dat_all = dat
else:
                dat_all = pd.concat([dat_all, dat])
import sys
import pandas as pd
import numpy as np
from scipy import stats
from itertools import compress
import statsmodels.stats.multitest as smt
import scikits.bootstrap as bootstrap
from sklearn.decomposition import PCA
from .scaler import scaler
from .imputeData import imputeData
class statistics:
usage = """Generate a table of parametric or non-parametric statistics and merges them with the Peak Table (node table).
Initial_Parameters
----------
peaktable : Pandas dataframe containing peak data. Must contain 'Name' and 'Label'.
datatable : Pandas dataframe matrix containing values for statistical analysis
Methods
-------
set_params : Set parameters -
parametric: Perform parametric statistical analysis, assuming the data is normally distributed (default: True)
log_data: Perform a log ('natural', base 2 or base 10) on all data prior to statistical analysis (default: (False, 2))
scale_data: Scale the data ('standard' (centers to the mean and scales to unit variance), 'minmax' (scales between 0 and 1), 'maxabs' (scales to the absolute maximum value), 'robust' (centers to the median and scales to between 25th and 75th quantile range) (default: (True, 'standard'))
impute_data: Impute any missing values using KNN impute with a set number of nearest neighbours (default: (False, 3))
group_column_name: The group column name used in the datatable (default: None)
control_group_name: The control group name in the datatable, if available (default: None)
group_alpha_CI: The alpha value for group confidence intervals (default: 0.05)
fold_change_alpha_CI: The alpha value for mean/median fold change confidence intervals (default: 0.05)
pca_alpha_CI: The alpha value for the PCA confidence intervals (default: 0.05)
total_missing: Calculate the total missing values per feature (Default: False)
group_missing: Calculate the missing values per feature per group (if group_column_name not None) (Default: False)
pca_loadings: Calculate PC1 and PC2 loadings for each feature (Default: True)
normality_test: Determine normal distribution across whole dataset using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_normality_test: Determine normal distribution across each group (if group_column_name not None) using Shapiro-Wilk test (pvalues < 0.05 ~ non-normal distribution) (default: True)
group_mean_CI: Determine the mean with bootstrapped CI across each group (if parametric = True and group_column_name not None) (default: True)
group_median_CI: Determine the median with bootstrapped CI across each group (if parametric = False and group_column_name not None) (default: True)
mean_fold_change: Calculate the mean fold change with bootstrapped confidence intervals (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
median_fold_change: Calculate the median fold change with bootstrapped confidence intervals (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
levene_twoGroup: Test null hypothesis that control group and each of the other groups come from populations with equal variances (if group_column_name not None and control_group_name not None) (default: False)
levene_allGroup: Test null hypothesis that all groups come from populations with equal variances (if group_column_name not None) (default: False)
oneway_Anova_test: Test null hypothesis that all groups have the same population mean, with included Benjamini-Hochberg FDR (if parametric = True and group_column_name not None) (default: False)
kruskal_wallis_test: Test null hypothesis that population median of all groups are equal, with included Benjamini-Hochberg FDR (if parametric = False and group_column_name not None) (default: False)
ttest_oneGroup: Calculate the T-test for the mean across all the data (one group), with included Benjamini-Hochberg FDR (if parametric = True, group_column_name is None or there is only 1 group in the data) (default: False)
ttest_twoGroup: Calculate the T-test for the mean of two groups, with one group being the control group, with included Benjamini-Hochberg FDR (if parametric = True, group_column_name not None and control_group_name not None) (default: False)
mann_whitney_u_test: Compute the Mann-Whitney U test to determine differences in distribution between two groups, with one being the control group, with included Benjamini-Hochberg FDR (if parametric = False, group_column_name not None and control_group_name not None) (default: False)
help : Print this help text
calculate : Performs the statistical calculations and outputs the Peak Table (node table) with the results appended.
"""
def __init__(self, peaktable, datatable):
peaktable = self.__checkPeakTable(self.__checkData(peaktable))
datatable = self.__checkData(datatable)
#Slice the meta-data, and select only peaks from the peaktable for processing, and add the meta-data back
meta = datatable.T[~datatable.T.index.isin(peaktable['Name'])].T.reset_index(drop=True)
dat = datatable[peaktable['Name']].reset_index()
datatable = pd.concat([meta, dat], axis=1).set_index(['index'])
datatable.index.name = None
self.__peaktable = peaktable
self.__datatable = datatable
self.set_params()
def help(self):
print(statistics.usage)
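    # Illustrative usage sketch (the DataFrame names and 'Class'/'Control' labels are
    # assumptions about the caller's data, not values from the source):
    #   stats = statistics(peak_table, data_table)
    #   stats.set_params(parametric=True, group_column_name='Class',
    #                    control_group_name='Control', ttest_twoGroup=True)
    #   peak_table_with_stats = stats.calculate()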
def set_params(self, parametric=True, log_data=(False,2), scale_data=(False, 'standard'), impute_data=(False, 3), group_column_name=None, control_group_name=None, group_alpha_CI=0.05, fold_change_alpha_CI=0.05, pca_alpha_CI=0.05, total_missing=False, group_missing=False, pca_loadings=True, normality_test=True, group_normality_test=True, group_mean_CI=True, group_median_CI=True, mean_fold_change=False, median_fold_change=False, kruskal_wallis_test=False, levene_twoGroup=False, levene_allGroup=False, oneway_Anova_test=False, ttest_oneGroup=False, ttest_twoGroup=False, mann_whitney_u_test=False):
parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test = self.__paramCheck(parametric, log_data, scale_data, impute_data, group_column_name, control_group_name, group_alpha_CI, fold_change_alpha_CI, pca_alpha_CI, total_missing, group_missing, pca_loadings, normality_test, group_normality_test, group_mean_CI, group_median_CI, mean_fold_change, median_fold_change, oneway_Anova_test, kruskal_wallis_test, levene_twoGroup, levene_allGroup, ttest_oneGroup, ttest_twoGroup, mann_whitney_u_test)
self.__parametric = parametric;
self.__log_data = log_data;
self.__scale_data = scale_data;
self.__impute_data = impute_data;
self.__group_column_name = group_column_name;
self.__control_group_name = control_group_name;
self.__group_alpha_CI = group_alpha_CI;
self.__fold_change_alpha_CI = fold_change_alpha_CI;
self.__pca_alpha_CI = pca_alpha_CI;
self.__total_missing = total_missing;
self.__group_missing = group_missing;
self.__pca_loadings = pca_loadings;
self.__normality_test = normality_test;
self.__group_normality_test = group_normality_test;
self.__group_mean_CI = group_mean_CI;
self.__group_median_CI = group_median_CI;
self.__mean_fold_change = mean_fold_change;
self.__median_fold_change = median_fold_change;
self.__oneway_Anova_test = oneway_Anova_test;
self.__kruskal_wallis_test = kruskal_wallis_test;
self.__levene_twoGroup = levene_twoGroup;
self.__levene_allGroup = levene_allGroup;
self.__ttest_oneGroup = ttest_oneGroup;
self.__ttest_twoGroup = ttest_twoGroup;
self.__mann_whitney_u_test = mann_whitney_u_test;
def calculate(self):
peaktable = self.__peaktable
datatable = self.__datatable
parametric = self.__parametric
log_data = self.__log_data
scale_data = self.__scale_data
impute_data = self.__impute_data
group_column_name = self.__group_column_name
control_group_name = self.__control_group_name
group_alpha_CI = self.__group_alpha_CI
fold_change_alpha_CI = self.__fold_change_alpha_CI
pca_alpha_CI = self.__pca_alpha_CI
total_missing = self.__total_missing
group_missing = self.__group_missing
pca_loadings = self.__pca_loadings
normality_test = self.__normality_test
group_normality_test = self.__group_normality_test
group_mean_CI = self.__group_mean_CI
group_median_CI = self.__group_median_CI
mean_fold_change = self.__mean_fold_change
median_fold_change = self.__median_fold_change
kruskal_wallis_test = self.__kruskal_wallis_test
levene_twoGroup = self.__levene_twoGroup
levene_allGroup = self.__levene_allGroup
oneway_Anova_test = self.__oneway_Anova_test
ttest_oneGroup = self.__ttest_oneGroup
ttest_twoGroup = self.__ttest_twoGroup
mann_whitney_u_test = self.__mann_whitney_u_test
peakNames = list(peaktable['Name'].values)
meta = datatable.T[~datatable.T.index.isin(peakNames)].T.reset_index(drop=True)
peakData = datatable[peakNames].reset_index(drop=True)
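# Optional preprocessing, applied in this order when enabled: log transform, scaling, then imputation (with the k set in impute_data)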
(log_bool, log_base) = log_data;
if log_bool:
if isinstance(log_base, str) and log_base.lower() == 'natural':
peakData = peakData.applymap(np.log)
elif log_base == 2:
peakData = peakData.applymap(np.log2)
elif log_base == 10:
peakData = peakData.applymap(np.log10)
else:
print("Error: The chosen log type is invalid.")
sys.exit()
(scale_bool, scale_type) = scale_data
if scale_bool:
if isinstance(scale_type, str) and scale_type.lower() == 'standard':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'minmax':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'maxabs':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
elif isinstance(scale_type, str) and scale_type.lower() == 'robust':
peakData = scaler(peakData, type=scale_type.lower()).reset_index(drop=True)
else:
print("Error: The chosen scale type is invalid.")
sys.exit()
(impute_bool, k) = impute_data;
if impute_bool:
peakData = imputeData(peakData, k=k).reset_index(drop=True)
if not isinstance(peakData, pd.DataFrame):
peakData = pd.DataFrame(peakData, columns=list(peakNames)).reset_index(drop=True)
#Add the meta data back in with the logged, scaled, or imputed data
datatable = pd.concat([meta, peakData], axis=1).reset_index(drop=True)
statsData = pd.DataFrame()
if group_column_name is not None:
groups = np.unique(datatable[group_column_name].values)
groupData = []
# Append each group to a list
for group in groups:
groupData.append(datatable.loc[datatable[group_column_name] == group])
#Iterate over each peak/feature and calculate statistics
for peakName in peakNames:
statsDataDict = {}
groupDict = {}
df_totalGrpMissing = pd.DataFrame()
totalGrpMissingTitles = []
df_meanFold = pd.DataFrame()
df_medianFold = pd.DataFrame()
df_mannWhitney = pd.DataFrame()
df_ttest = pd.DataFrame()
df_levene_twoGroup = pd.DataFrame()
df_groupNormality = pd.DataFrame()
df_grpMeanCI = pd.DataFrame()
df_grpMedianCI = pd.DataFrame()
mannWhitneyTitles = []
ttestTitles = []
leveneTwoGroupTitles = []
mannwhitney_pvalue_name = ''
mannwhitney_statistic_name = ''
# for each group populate a group dictionary
if group_column_name is not None:
for grpIdx, group in enumerate(groupData):
# Calculate values missing within each group
if group_missing:
df_totalGrpMissing = self.__GroupMissing_Calc(group, groups, grpIdx, peakName, totalGrpMissingTitles, df_totalGrpMissing)
statsDataDict['GroupMissingValues'] = df_totalGrpMissing
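# Collect the observed (non-NaN) values for this peak within each group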
x = group[[peakName]].values
groupDict[groups[grpIdx]] = x[~np.isnan(x)]
if control_group_name is not None:
controlGroup = groupDict[control_group_name];
if group_column_name is not None:
for key, group in groupDict.items():
if group_normality_test:
df_groupNormality = self.__GroupNormality(key, group, df_groupNormality)
statsDataDict['GroupNormality'] = df_groupNormality
if parametric:
if group_mean_CI:
df_grpMeanCI = self.__GroupMeanCI(key, group, df_grpMeanCI, group_alpha_CI)
statsDataDict['GroupMeanCI'] = df_grpMeanCI
else:
if group_median_CI:
df_grpMedianCI = self.__GroupMedianCI(key, group, df_grpMedianCI, group_alpha_CI)
statsDataDict['GroupMedianCI'] = df_grpMedianCI
if key != control_group_name and control_group_name is not None:
# Merge group and control, accounting for different array lengths by replacing with nan (indices need to be the same length for bootstrapping)
groupPairDict = dict(controlGroup=controlGroup, caseGroup=group)
groupPair = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in groupPairDict.items() ]))
controlList = np.array(groupPair['controlGroup'].values)
caseList = np.array(groupPair['caseGroup'].values)
groupList = list(zip(controlList, caseList))
if parametric:
if ttest_twoGroup:
# T-test statistic calculation for two samples (one always being the control)
TTEST_twoGroup_statistic, TTEST_twoGroup_pvalue = self.__TTEST_twoGroup(groupList)
if mean_fold_change:
meanFoldChange = self.__mean_fold(groupList)
# Bootstrap for confidence intervals for the mean fold change
if ((len(group) > 2) and (len(controlGroup) > 2)):
meanFold = lambda x: self.__mean_fold(x)
CIs = bootstrap.ci(data=groupList, statfunction=meanFold, n_samples=500, alpha=fold_change_alpha_CI)
else:
CIs = [np.nan, np.nan]
else:
if mann_whitney_u_test:
# Mann-Whitney U statistic calculation for two samples (one always being the control)
MannWhitney_statistic, MannWhitney_pvalue = self.__MANN_WHITNEY_U(groupList)
if median_fold_change:
medianFoldChange = self.__median_fold(groupList)
# Bootstrap for confidence intervals for the median fold change
if ((len(group) > 2) and (len(controlGroup) > 2)):
medianFold = lambda x: self.__median_fold(x)
CIs = bootstrap.ci(data=groupList, statfunction=medianFold, n_samples=500, alpha=fold_change_alpha_CI)
else:
CIs = [np.nan, np.nan]
if levene_twoGroup:
# Levene statistic calculation for two samples (one always being the control)
LEVENE_twoGroup_statistic, LEVENE_twoGroup_pvalue = self.__LEVENE_twoGroup(groupList)
if parametric:
ttest_twoGroup_statistics_name = 'TTEST-twoGroup_statistic_' + str(key)
ttest_twoGroup_pvalue_name = 'TTEST-twoGroup_pvalue_' + str(key)
ttestTitles.append(ttest_twoGroup_statistics_name)
ttestTitles.append(ttest_twoGroup_pvalue_name)
mean_fold_change_name = 'MeanFoldChange_' + str(key)
mean_fold_change_name_CIlower = 'MeanFoldChange_CI_lower_' + str(key)
mean_fold_change_name_CIupper = 'MeanFoldChange_CI_upper_' + str(key)
mean_fold_change_name_sig = 'MeanFoldChange_sig_' + str(key)
else:
mannwhitney_statistic_name = 'MannWhitneyU_statistic_' + str(key)
mannwhitney_pvalue_name = 'MannWhitneyU_pvalue_' + str(key)
mannWhitneyTitles.append(mannwhitney_statistic_name)
mannWhitneyTitles.append(mannwhitney_pvalue_name)
median_fold_change_name = 'MedianFoldChange_' + str(key)
median_fold_change_name_CIlower = 'MedianFoldChange_CI_lower_' + str(key)
median_fold_change_name_CIupper = 'MedianFoldChange_CI_upper_' + str(key)
median_fold_change_name_sig = 'MedianFoldChange_sig_' + str(key)
levene_twoGroup_statistics_name = 'LEVENE-twoGroup_statistic_' + str(key)
levene_twoGroup_pvalue_name = 'LEVENE-twoGroup_pvalue_' + str(key)
leveneTwoGroupTitles.append(levene_twoGroup_statistics_name)
leveneTwoGroupTitles.append(levene_twoGroup_pvalue_name)
if ttest_twoGroup and parametric:
if df_ttest.empty:
df_ttest = pd.DataFrame({ttest_twoGroup_statistics_name: [TTEST_twoGroup_statistic], ttest_twoGroup_pvalue_name: [TTEST_twoGroup_pvalue]})
else:
df_ttest = pd.concat([df_ttest, pd.DataFrame({ttest_twoGroup_statistics_name: [TTEST_twoGroup_statistic], ttest_twoGroup_pvalue_name: [TTEST_twoGroup_pvalue]})], axis=1).reset_index(drop=True)
statsDataDict['TTEST-twoGroup'] = df_ttest
if mann_whitney_u_test and not parametric:
if df_mannWhitney.empty:
df_mannWhitney = pd.DataFrame({mannwhitney_statistic_name: [MannWhitney_statistic], mannwhitney_pvalue_name: [MannWhitney_pvalue]})
else:
df_mannWhitney = pd.concat([df_mannWhitney, pd.DataFrame({mannwhitney_statistic_name: [MannWhitney_statistic], mannwhitney_pvalue_name: [MannWhitney_pvalue]})], axis=1).reset_index(drop=True)
statsDataDict['MannWhitneyU'] = df_mannWhitney
if mean_fold_change and parametric:
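# Flag the mean fold change as significant when its bootstrap CI excludes zero (lower and upper bounds share the same sign)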
sigMeanFold = np.add(np.sign(np.multiply(CIs[0], CIs[1])), 1).astype(bool);
if df_meanFold.empty:
df_meanFold = pd.DataFrame({mean_fold_change_name: [meanFoldChange], mean_fold_change_name_CIlower: CIs[0], mean_fold_change_name_CIupper: CIs[1], mean_fold_change_name_sig: [sigMeanFold]})
else:
df_meanFold = pd.concat([df_meanFold, | pd.DataFrame({mean_fold_change_name: [meanFoldChange], mean_fold_change_name_CIlower: CIs[0], mean_fold_change_name_CIupper: CIs[1], mean_fold_change_name_sig: [sigMeanFold]}) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
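# The tests below cover Series element access and assignment: get/__getitem__/__setitem__, label- and position-based indexing (.ix/.loc/.iloc), boolean masks, where/mask, and timezone-aware values.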
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update( | pd.Series(['foo'], index=[0]) | pandas.Series |
from datetime import date
import unittest
import dolphindb as ddb
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
import dolphindb.settings as keys
import statsmodels.api as sm
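# Builds the shared in-memory tables (shareTrade, shareQuote) and the VALUE-partitioned DFS database 'dfs://testmergepart' used by the merge tests below.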
def createdata():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
script = '''
time1=10:01:01 join 10:01:03 join 10:01:05 join 10:01:05
symbol1=take(`X`Z,4)
price1=3 3.3 3.2 3.1
size1=100 200 50 10
Trade=table(time1 as time,symbol1 as symbol,price1 as price,size1 as size)
time2=10:01:01 join 10:01:02 join 10:01:02 join 10:01:03
symbol2=take(`X`Z,4)
ask=90 150 100 52
bid=70 200 200 68
Quote=table(time2 as time,symbol2 as symbol,ask as ask,bid as bid)
share Trade as shareTrade
share Quote as shareQuote
login("admin", "123456")
if(existsDatabase("dfs://testmergepart"))
dropDatabase("dfs://testmergepart")
db = database("dfs://testmergepart", VALUE, "X" "Z")
pt1 = db.createPartitionedTable(Trade,`pt1,`symbol).append!(Trade)
pt2 = db.createPartitionedTable(Quote,`pt2,`symbol).append!(Quote)
'''
s.run(script)
s.close()
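# TestTable exercises the DolphinDB Python API table interface: construction from dicts and DataFrames, SQL-style select/where/groupby/contextby/pivotby, and the ej/lj/fj/lsj/aj/cj/wj join variants on in-memory and DFS tables.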
class TestTable(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
createdata()
cls.pd_left = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:03', '1970-01-01T10:01:05', '1970-01-01T10:01:05']),
'symbol': ["X", "Z", "X", "Z"],
'price': [3, 3.3, 3.2, 3.1],
'size': [100, 200, 50, 10]})
cls.pdf_right = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:02', '1970-01-01T10:01:02', '1970-01-01T10:01:03']),
'symbol': ["X", "Z", "X", "Z"],
'ask': [90, 150, 100, 52],
'bid': [70, 200, 200, 68]})
@classmethod
def tearDownClass(cls):
script='''
undef((exec name from objs(true) where shared=1),SHARED)
if(existsDatabase('dfs://testmergepart')){
dropDatabase('dfs://testmergepart')
}
'''
cls.s.run(script)
def test_create_table_by_python_dictionary(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
tmp = self.s.table(data=data, tableAliasName="tmp")
re = self.s.run("tmp")
df = pd.DataFrame(data)
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_create_table_by_pandas_dataframe(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
df = pd.DataFrame(data)
tmp = self.s.table(data=df, tableAliasName="tmp")
re = self.s.run("tmp")
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_table_toDF(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
df = self.s.run("select * from loadText('{data}')".format(data=DATA_DIR + "/USPrices_FIRST.csv"))
self.assertEqual(len(tmp.toDF()), len(df))
assert_frame_equal(tmp.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_showSQL(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
sql = tmp.showSQL()
tbName = tmp.tableName()
self.assertEqual(sql, 'select PERMNO,date,SHRCD,TICKER,TRDSTAT,HEXCD,CUSIP,DLSTCD,DLPRC,'
'DLRET,BIDLO,ASKHI,PRC,VOL,RET,BID,ASK,SHROUT,CFACPR,CFACSHR,OPENPRC '
'from {tbName}'.format(tbName=tbName))
self.s.run("undef", tbName)
def test_table_sql_select_where(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01')
df = self.s.run("select PERMNO,date from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01').sort(['date desc'])
df = self.s.run(
"select PERMNO,date from loadText('{data}') where date>2010.01.01 order by date desc".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp[tmp.date > '2010.01.01']
df = self.s.run("select * from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_sql_groupby(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
origin = tmp.toDF()
re = tmp.groupby('PERMNO').agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO".format(data=data))
self.assertEqual((re['PERMNO'] == 10001).all(), True)
self.assertAlmostEqual(re['sum_bid'][0], 59684.9775)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 3)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum'], 'ask': ['sum']}).toDF()
df = self.s.run("select sum(bid),sum(ask) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 4)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
self.assertEqual((origin['ASK'] == re['sum_ask']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO']).agg2([ddb.wsum, ddb.wavg], [('bid', 'ask')]).toDF()
df = self.s.run("select wsum(bid,ask),wavg(bid,ask) from loadText('{data}') group by PERMNO".format(data=data))
assert_frame_equal(re, df)
def test_table_sql_contextby(self):
data = {'sym': ['A', 'B', 'B', 'A', 'A'], 'vol': [1, 3, 2, 5, 4], 'price': [16, 31, 28, 19, 22]}
dt = self.s.table(data=data, tableAliasName="tmp")
re = dt.contextby('sym').agg({'price': [ddb.sum]}).toDF()
df = self.s.run("select sym,sum(price) from tmp context by sym")
self.assertEqual((re['sym'] == ['A', 'A', 'A', 'B', 'B']).all(), True)
self.assertEqual((re['sum_price'] == [57, 57, 57, 59, 59]).all(), True)
assert_frame_equal(re, df)
re = dt.contextby(['sym', 'vol']).agg({'price': [ddb.sum]}).toDF()
df = self.s.run("select sym,vol,sum(price) from tmp context by sym,vol")
self.assertEqual((re['sym'] == ['A', 'A', 'A', 'B', 'B']).all(), True)
self.assertEqual((re['vol'] == [1, 4, 5, 2, 3]).all(), True)
self.assertEqual((re['sum_price'] == [16, 22, 19, 28, 31]).all(), True)
assert_frame_equal(re, df)
re = dt.contextby('sym').agg2([ddb.wsum, ddb.wavg], [('price', 'vol')]).toDF()
df = self.s.run("select sym,vol,price,wsum(price,vol),wavg(price,vol) from tmp context by sym")
assert_frame_equal(re, df)
def test_table_sql_pivotby(self):
dt = self.s.table(data={'sym': ['C', 'MS', 'MS', 'MS', 'IBM', 'IBM', 'C', 'C', 'C'],
'price': [49.6, 29.46, 29.52, 30.02, 174.97, 175.23, 50.76, 50.32, 51.29],
'qty': [2200, 1900, 2100, 3200, 6800, 5400, 1300, 2500, 8800],
'timestamp': pd.date_range('2019-06-01', '2019-06-09')}, tableAliasName="tmp")
re = dt.pivotby(index='timestamp', column='sym', value='price').toDF()
expected = self.s.run('select price from tmp pivot by timestamp,sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
re = dt.pivotby(index='timestamp.month()', column='sym', value='last(price)').toDF()
expected = self.s.run('select last(price) from tmp pivot by timestamp.month(),sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
re = dt.pivotby(index='timestamp.month()', column='sym', value='count(price)').toDF()
expected = self.s.run('select count(price) from tmp pivot by timestamp.month(),sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
tbName = dt.tableName()
self.s.run("undef", tbName)
def test_table_sql_merge(self):
dt1 = self.s.table(data={'id': [1, 2, 3, 3], 'value': [7, 4, 5, 0]}, tableAliasName="t1")
dt2 = self.s.table(data={'id': [5, 3, 1], 'qty': [300, 500, 800]}, tableAliasName="t2")
re = dt1.merge(right=dt2, on='id').toDF()
expected = self.s.run('select * from ej(t1,t2,"id")')
assert_frame_equal(re, expected)
re = dt1.merge(right=dt2, on='id', how='left').toDF()
expected = self.s.run('select * from lj(t1,t2,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
re = dt1.merge(right=dt2, on='id', how='outer').toDF()
expected = self.s.run('select * from fj(t1,t2,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
re = dt2.merge(right=dt1, on='id', how='left semi').toDF()
expected = self.s.run('select * from lsj(t2,t1,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
self.s.run("undef", dt1.tableName())
self.s.run("undef", dt2.tableName())
def test_table_sql_merge_asof(self):
dt1 = self.s.table(data={'id': ['A', 'A', 'A', 'B', 'B'],
'date': pd.to_datetime(
['2017-02-06', '2017-02-08', '2017-02-10', '2017-02-07', '2017-02-09']),
'price': [22, 23, 20, 100, 102]},
tableAliasName="t1")
dt2 = self.s.table(data={'id': ['A', 'A', 'B', 'B', 'B'],
'date': pd.to_datetime(
['2017-02-07', '2017-02-10', '2017-02-07', '2017-02-08', '2017-02-10'])},
tableAliasName="t2")
re = dt2.merge_asof(right=dt1, on=['id', 'date']).toDF()
expected = self.s.run('select * from aj(t2,t1,`id`date)')
assert_frame_equal(re, expected)
def test_table_sql_merge_cross(self):
dt1 = self.s.table(data={'year': [2010, 2011, 2012]}, tableAliasName="t1")
dt2 = self.s.table(data={'ticker': ['IBM', 'C', 'AAPL']}, tableAliasName="t2")
re = dt1.merge_cross(dt2).toDF()
expected = self.s.run('select * from cj(t1,t2)')
assert_frame_equal(re, expected)
def test_table_sql_merge_window(self):
dt1 = self.s.table(data={'sym': ["A", "A", "B"],
'time': [np.datetime64('2012-09-30 09:56:06'), np.datetime64('2012-09-30 09:56:07'),
np.datetime64('2012-09-30 09:56:06')],
'price': [10.6, 10.7, 20.6]},
tableAliasName="t1")
dt2 = self.s.table(
data={'sym': ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "B", "B", "B", "B", "B", "B", "B"],
'time': pd.date_range(start='2012-09-30 09:56:01', end='2012-09-30 09:56:10', freq='s').append(
pd.date_range(start='2012-09-30 09:56:01', end='2012-09-30 09:56:10', freq='s')),
'bid': [10.05, 10.15, 10.25, 10.35, 10.45, 10.55, 10.65, 10.75, 10.85, 10.95, 20.05, 20.15, 20.25,
20.35, 20.45, 20.55, 20.65, 20.75, 20.85, 20.95],
'offer': [10.15, 10.25, 10.35, 10.45, 10.55, 10.65, 10.75, 10.85, 10.95, 11.05, 20.15, 20.25, 20.35,
20.45, 20.55, 20.65, 20.75, 20.85, 20.95, 21.01],
'volume': [100, 300, 800, 200, 600, 100, 300, 800, 200, 600, 100, 300, 800, 200, 600, 100, 300, 800,
200, 600]},
tableAliasName="t2")
re = dt1.merge_window(right=dt2, leftBound=-5, rightBound=0, aggFunctions="avg(bid)", on=['sym', 'time']).toDF()
expected = self.s.run('select * from wj(t1,t2,-5:0,<avg(bid)>,`sym`time)')
assert_frame_equal(re, expected)
re = dt1.merge_window(right=dt2, leftBound=-5, rightBound=-1,
aggFunctions=["wavg(bid,volume)", "wavg(offer,volume)"], on=["sym", "time"]).toDF()
expected = self.s.run('select * from wj(t1,t2,-5:-1,<[wavg(bid,volume), wavg(offer,volume)]>,`sym`time)')
assert_frame_equal(re, expected)
def test_table_chinese_column_name(self):
df = pd.DataFrame({'็ผๅท':[1, 2, 3, 4, 5], 'ๅบๅท':['ๅฃน','่ดฐ','ๅ','่','ไผ']})
tmp = self.s.table(data=df, tableAliasName="chinese_t")
res=tmp.toDF()
assert_array_equal(res['็ผๅท'], [1, 2, 3, 4, 5])
assert_array_equal(res['ๅบๅท'], ['ๅฃน','่ดฐ','ๅ','่','ไผ'])
def test_table_top_with_other_clause(self):
df = pd.DataFrame({'id': [10, 8, 5, 6, 7, 9, 1, 4, 2, 3], 'date': pd.date_range('2012-01-01', '2012-01-10', freq="D"), 'value': np.arange(0, 10)})
tmp = self.s.table(data=df, tableAliasName="top_t")
re = tmp.top(3).sort("id").toDF()
assert_array_equal(re['id'], [1, 2, 3])
assert_array_equal(re['date'], np.array(['2012-01-07', '2012-01-09', '2012-01-10'], dtype="datetime64[D]"))
assert_array_equal(re['value'], [6, 8, 9])
re = tmp.top(3).where("id>5").toDF()
assert_array_equal(re['id'], [10, 8, 6])
assert_array_equal(re['date'], np.array(['2012-01-01', '2012-01-02', '2012-01-04'], dtype="datetime64[D]"))
assert_array_equal(re['value'], [0, 1, 3])
df = pd.DataFrame({'sym': ["C", "MS", "MS", "MS", "IBM", "IBM", "C", "C", "C"],
'price': [49.6, 29.46, 29.52, 30.02, 174.97, 175.23, 50.76, 50.32, 51.29],
'qty':[2200, 1900, 2100, 3200, 6800, 5400, 1300, 2500, 8800]})
tmp = self.s.table(data=df, tableAliasName="t1")
re = tmp.top(2).contextby("sym").sort("sym").toDF()
assert_array_equal(re['sym'], ["C", "C", "IBM", "IBM", "MS", "MS"])
assert_array_almost_equal(re['price'], [49.6, 50.76, 174.97, 175.23, 29.46, 29.52])
assert_array_equal(re['qty'], [2200, 1300, 6800, 5400, 1900, 2100])
def test_table_sql_update_where(self):
n = pd.DataFrame({'timestamp' : pd.to_datetime(['09:34:07','09:36:42','09:36:51','09:36:59','09:32:47','09:35:26','09:34:16','09:34:26','09:38:12']),
'sym' : ['C','MS','MS','MS','IBM','IBM','C','C','C'],
'price' : [49.6 ,29.46 ,29.52 ,30.02 ,174.97 ,175.23 ,50.76 ,50.32 ,51.29],
'qty' : [2200 ,1900 ,2100 ,3200 ,6800 ,5400 ,1300 ,2500 ,8800]})
dt1 = self.s.table(data=n, tableAliasName="t1")
re = dt1.update(["price"], ["price*10"]).where("sym=`C").execute().toDF()
assert_array_almost_equal(re["price"], [496,29.46,29.52,30.02,174.97,175.23,507.6,503.2,512.9])
def test_table_twice(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22, 3.5, 21, 26]}
dt = self.s.table(data=data, tableAliasName="t1")
dt = self.s.table(data=data, tableAliasName="t1")
re = self.s.loadTable("t1").toDF()
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_table_repeatedly(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22, 3.5, 21, 26]}
for i in range(1,100):
dt = self.s.table(data=data, tableAliasName="t1")
re = self.s.loadTable("t1").toDF()
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_table_csort(self):
script = '''
sym = `C`MS`MS`MS`IBM`IBM`C`C`C$SYMBOL
price= 49.6 29.46 29.52 30.02 174.97 175.23 50.76 50.32 51.29
qty = 2200 1900 2100 3200 6800 5400 1300 2500 8800
timestamp = [09:34:07,09:36:42,09:36:51,09:36:59,09:32:47,09:35:26,09:34:16,09:34:26,09:38:12]
t1 = table(timestamp, sym, qty, price);
'''
self.s.run(script)
tb = self.s.loadTable(tableName="t1")
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort("timestamp").top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort("timestamp").limit(-2).toDF()
expected = self.s.run("select timestamp, sym, qty, price from t1 context by sym csort timestamp limit -2")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"]).top(2).toDF()
expected = self.s.run("select timestamp, sym, qty, price from t1 context by sym csort timestamp, qty limit 2")
| assert_frame_equal(re, expected) | pandas.testing.assert_frame_equal |
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
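# Note (inferred from the assertions below, not from the library docs): the
# quality-flag bitmask is assumed to use bit 0 for USER FLAGGED (OK == 0) and
# bits 1-3 for the three version-identifier bits, with LATEST_VERSION_FLAG
# being LATEST_VERSION shifted into that version field.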
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = | pd.Series([0, 0, 0, 1, 1]) | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
Take a list of frames and zip them together, under the
assumption that they all share the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
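# Usage sketch for zip_frames (illustrative, frame names are hypothetical):
# for frames f1, f2 sharing columns ['A', 'B'], zip_frames([f1, f2]) returns the
# column-wise concatenation f1['A'], f2['A'], f1['B'], f2['B'], i.e. the
# per-column interleaving that the agg/transform tests below compare
# df.apply([func1, func2]) results against.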
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([self.frame.mean(axis=axis),
self.frame.max(axis=axis),
self.frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
result = self.frame.agg(func, axis=axis)
expected = Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name2].sum()],
index=[name1, name2])
assert_series_equal(result, expected)
# dict input with lists
func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame({
name1: Series([self.frame.loc(other_axis)[name1].mean()],
index=['mean']),
name2: Series([self.frame.loc(other_axis)[name2].sum()],
index=['sum'])})
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
# dict input with lists with multiple
func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame(OrderedDict([
(name1, Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name1].sum()],
index=['mean', 'sum'])),
(name2, Series([self.frame.loc(other_axis)[name2].sum(),
self.frame.loc(other_axis)[name2].max()],
index=['sum', 'max'])),
]))
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
def test_nuisance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', | pd.Timestamp('20130101') | pandas.Timestamp |
#!/usr/bin/env python
import pandas as pd
import argparse
import datetime
import time
import sys
import investpy
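# Delay (in seconds) between consecutive investpy requests; used with
# time.sleep() in the download loop below, presumably to rate-limit scraping.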
scrap_delay = 2
def main():
parser = argparse.ArgumentParser(description='scrap investing.com daily close')
parser.add_argument('-input_file', type=str, default='data_tickers/investing_stock_info.csv', help='input file')
parser.add_argument('-output_prefix', type=str, default='../stock_data/raw_daily_investing_stock/investing_stock_', help='prefix of the output file')
parser.add_argument('-date', type=str, help='Specify the date')
args = parser.parse_args()
if args.date is None:
scrap_date = datetime.date.today()
args.date = str(scrap_date)
filename = args.output_prefix + args.date + '.csv'
df_input = pd.read_csv(args.input_file)
info_list = []
print('number of tickers:', len(df_input.index))
for index,row in df_input.iterrows():
print('downloading...', row['symbol'], row['country'], '-', index)
try:
info = investpy.get_stock_information(row['symbol'],row['country'])
info_list.append(info)
except:
print('failed')
time.sleep(scrap_delay)
df = | pd.concat(info_list) | pandas.concat |
##############################################################
# Author: <NAME>
##############################################################
'''
Module : create_kallisto_ec_count_matrix
Description : Create equivalence class matrix from kallisto.
Copyright : (c) <NAME>, Dec 2018
License : MIT
Maintainer : <EMAIL>
Portability : POSIX
Take equivalence class output from kallisto's batch mode
(matrix.ec) and create an EC matrix that can be used for DE/DTU
'''
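# Input format sketch (inferred from the read_csv/split calls below): matrix.ec
# is expected to hold two tab-separated columns per line -- an equivalence-class
# id and a comma-separated list of transcript indices (e.g. "7<TAB>0,5,12") --
# while matrix.tsv holds (ec_name, sample_id, count) triples and matrix.cells
# lists one sample id per line.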
import os
import argparse
import re
import pandas as pd
import numpy as np
import gc
parser = argparse.ArgumentParser()
parser.add_argument(dest='ec_file', help="Kallisto equivalence class file (matrix.ec).")
parser.add_argument(dest='counts_file', help="Kallisto counts file (matrix.tsv).")
parser.add_argument(dest='samples_file', help="Kallisto samples file (matrix.cells).")
parser.add_argument(dest='tx_ids_file',
help='''File containing one transcript ID per line,
in same order as the fasta reference used for kallisto.''')
parser.add_argument(dest='out_file', help="Output file.")
args = parser.parse_args()
ec_file = args.ec_file
counts_file = args.counts_file
samples_file = args.samples_file
tx_ids_file = args.tx_ids_file
out_file = args.out_file
ec_df = pd.read_csv(ec_file, header=None, sep='\t', names=['ec_names', 'tx_ids'])
counts = pd.read_csv(counts_file, header=None, sep='\t', names=['ec_names', 'sample_id', 'count'])
samples = pd.read_csv(samples_file, header=None, sep='\t')[0].values
tx_ids = pd.read_csv(tx_ids_file, header=None)[0].values
print('restructuring EC counts...')
counts = pd.merge(counts, ec_df, on='ec_names')
counts = counts.pivot_table(index=['ec_names', 'tx_ids'], columns=['sample_id'], fill_value=0)
counts = counts.reset_index()
counts.columns = counts.columns.droplevel()
counts.columns = np.concatenate([['ec_names', 'tx_ids'], samples])
print('separating transcript IDs...')
ec_tmp = ec_df[ec_df.ec_names.isin(counts.ec_names)]
tx_stack = ec_tmp['tx_ids'].str.split(',').apply(pd.Series,1).stack()
tx_stack = pd.DataFrame(tx_stack, columns=['tx_id'])
tx_stack['ec_names'] = [i[0] for i in tx_stack.index]
counts = | pd.merge(counts, tx_stack, left_on='ec_names', right_on='ec_names') | pandas.merge |
from datetime import timedelta
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
to_datetime,
)
@pytest.fixture(params=[True, False])
def raw(request):
"""raw keyword argument for rolling.apply"""
return request.param
@pytest.fixture(
params=[
"triang",
"blackman",
"hamming",
"bartlett",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
]
)
def win_types(request):
return request.param
@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"])
def win_types_special(request):
return request.param
@pytest.fixture(
params=[
"sum",
"mean",
"median",
"max",
"min",
"var",
"std",
"kurt",
"skew",
"count",
"sem",
]
)
def arithmetic_win_operators(request):
return request.param
@pytest.fixture(
params=[
"sum",
"mean",
"median",
"max",
"min",
]
)
def arithmetic_numba_supported_operators(request):
return request.param
@pytest.fixture(params=["right", "left", "both", "neither"])
def closed(request):
return request.param
@pytest.fixture(params=[True, False])
def center(request):
return request.param
@pytest.fixture(params=[None, 1])
def min_periods(request):
return request.param
@pytest.fixture(params=[True, False])
def parallel(request):
"""parallel keyword argument for numba.jit"""
return request.param
# Can parameterize nogil & nopython over True | False, but limiting per
# https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472
@pytest.fixture(params=[False])
def nogil(request):
"""nogil keyword argument for numba.jit"""
return request.param
@pytest.fixture(params=[True])
def nopython(request):
"""nopython keyword argument for numba.jit"""
return request.param
@pytest.fixture(params=[True, False])
def adjust(request):
"""adjust keyword argument for ewm"""
return request.param
@pytest.fixture(params=[True, False])
def ignore_na(request):
"""ignore_na keyword argument for ewm"""
return request.param
@pytest.fixture(
params=[pytest.param("numba", marks=td.skip_if_no("numba", "0.46.0")), "cython"]
)
def engine(request):
"""engine keyword argument for rolling.apply"""
return request.param
@pytest.fixture(
params=[
pytest.param(("numba", True), marks=td.skip_if_no("numba", "0.46.0")),
("cython", True),
("cython", False),
]
)
def engine_and_raw(request):
"""engine and raw keyword arguments for rolling.apply"""
return request.param
@pytest.fixture
def times_frame():
"""Frame for testing times argument in EWM groupby."""
return DataFrame(
{
"A": ["a", "b", "c", "a", "b", "c", "a", "b", "c", "a"],
"B": [0, 0, 0, 1, 1, 1, 2, 2, 2, 3],
"C": to_datetime(
[
"2020-01-01",
"2020-01-01",
"2020-01-01",
"2020-01-02",
"2020-01-10",
"2020-01-22",
"2020-01-03",
"2020-01-23",
"2020-01-23",
"2020-01-04",
]
),
}
)
@pytest.fixture(params=["1 day", timedelta(days=1)])
def halflife_with_times(request):
"""Halflife argument for EWM when times is specified."""
return request.param
@pytest.fixture(
params=[
"object",
"category",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"m8[ns]",
"M8[ns]",
pytest.param(
"datetime64[ns, UTC]",
marks=pytest.mark.skip(
"direct creation of extension dtype datetime64[ns, UTC] "
"is not supported ATM"
),
),
]
)
def dtypes(request):
"""Dtypes for window tests"""
return request.param
@pytest.fixture(
params=[
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]),
| DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = (r'IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with | tm.assert_raises_regex(ValueError, msg) | pandas.util.testing.assert_raises_regex |
import submodels_module as modelbank
import numpy as np
from itertools import combinations
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import load_format_data
#Determine the most generalizable model from the top CV models
def get_loss_list(model_list):
model_loss_list,model_loss_std_list,model_name_list=[],[],[]
for model in model_list:
# model.get_best_trial()
model_name_list.append(model.model_name)
model_loss_list.append(model.model_stats['cv_avg_loss'])
model_loss_std_list.append(model.model_stats['cv_std_loss'])
results= | pd.DataFrame([model_name_list,model_loss_list,model_loss_std_list]) | pandas.DataFrame |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
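# i.e. the raw iNaT integer is stored as-is and the column falls back to object dtype;
# it is not interpreted as NaT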
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert | is_integer(result) | pandas.core.dtypes.common.is_integer |
import unittest
from context import grama as gr
from context import data
from numpy import NaN
from pandas import DataFrame, RangeIndex
from pandas.testing import assert_frame_equal
class TestPivotLonger(unittest.TestCase):
"""Test implementation of pivot_longer
"""
def test_pivot_longer(self):
""" Test basic functionality of pivot_longer
"""
wide = gr.df_make(One=[1,2,3], Two=[4,5,6])
long = gr.tran_pivot_longer(
wide,
columns=("One","Two"),
names_to="columns",
values_to="values"
)
expected = gr.df_make(
columns=["One","One","One","Two","Two","Two"],
values=[1,2,3,4,5,6]
)
assert_frame_equal(long, expected)
def test_pivot_longer_index_not_rangeindex(self):
""" Test if pivot_longer makes a RangeIndex if current index is not
a RangeIndex and preserves the original as a new column named "index"
"""
wide = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
long = gr.tran_pivot_longer(
wide,
columns=("One","Two"),
names_to="columns",
values_to="values"
)
expected = DataFrame(
{
"index": ["A", "B", "C", "A", "B", "C"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
}
)
assert_frame_equal(long, expected)
def test_pivot_longer_rename_not_rangeindex(self):
""" Test if pivot_longer makes a RangeIndex if current index is not
a RangeIndex and preserves the original index in a new column named by "index_to"
"""
wide = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
long = gr.tran_pivot_longer(
wide,
columns=("One","Two"),
index_to="idx",
names_to="columns",
values_to="values"
)
expected = DataFrame(
{
"idx": ["A", "B", "C", "A", "B", "C"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
}
)
| assert_frame_equal(long, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""Supports Kp index values. Downloads data from ftp.gfz-potsdam.de or SWPC.
Parameters
----------
platform
'sw'
name
'kp'
tag
- '' : Standard Kp data
- 'forecast' : Grab forecast data from SWPC (next 3 days)
- 'recent' : Grab last 30 days of Kp data from SWPC
Note
----
Standard Kp files are stored by the first day of each month. When downloading
use kp.download(start, stop, freq='MS') to only download days that could
possibly have data. 'MS' gives a monthly start frequency.
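For example, to fetch several months of standard files (dates are illustrative)::
kp = pysat.Instrument('sw', 'kp', tag='')
kp.download(pysat.datetime(2009, 1, 1), pysat.datetime(2009, 3, 1), freq='MS')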
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
kp = pysat.Instrument('sw', 'kp', tag='recent')
kp.download()
kp.load(date=kp.tomorrow())
Recent data is also stored by the generation date from the SWPC. Each file
contains 30 days of Kp measurements. The load date issued to pysat corresponds
to the generation date.
The recent and forecast data should not be used with the data padding option
available from pysat.Instrument objects.
Warnings
--------
The 'forecast' Kp data loads three days at a time. The data padding feature
and multi_file_day feature available from the pysat.Instrument object
are not appropriate for Kp 'forecast' data.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Custom Functions
----------------
filter_geoquiet
Filters pysat.Instrument data for given time after Kp drops below gate.
"""
import functools
import numpy as np
import os
import pandas as pds
import pysat
import logging
logger = logging.getLogger(__name__)
platform = 'sw'
name = 'kp'
tags = {'': '',
'forecast': 'SWPC Forecast data next (3 days)',
'recent': 'SWPC provided Kp for past 30 days'}
sat_ids = {'': ['', 'forecast', 'recent']}
# generate today's date to support loading forecast data
now = pysat.datetime.now()
today = pysat.datetime(now.year, now.month, now.day)
# set test dates
_test_dates = {'': {'': pysat.datetime(2009, 1, 1),
'forecast': today + pds.DateOffset(days=1)}}
def load(fnames, tag=None, sat_id=None):
"""Load Kp index files
Parameters
------------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
sat_id : str or NoneType
satellite id or None (default=None)
Returns
---------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
from pysat.utils.time import parse_date
meta = pysat.Meta()
if tag == '':
# Kp data stored monthly, need to return data daily
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to desired
# day
data = pds.DataFrame()
# set up fixed width format for these files
colspec = [(0, 2), (2, 4), (4, 6), (7, 10), (10, 13), (13, 16),
(16, 19), (19, 23), (23, 26), (26, 29), (29, 32), (32, 50)]
for filename in fnames:
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to the
# desired day
fname = filename[0:-11]
date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
temp = pds.read_fwf(fname, colspecs=colspec, skipfooter=4,
header=None, parse_dates=[[0, 1, 2]],
date_parser=parse_date, index_col='0_1_2')
idx, = np.where((temp.index >= date) &
(temp.index < date + pds.DateOffset(days=1)))
temp = temp.iloc[idx, :]
data = pds.concat([data, temp], axis=0)
# drop last column as it has data I don't care about
data = data.iloc[:, 0:-1]
# each column increments UT by three hours
# produce a single data series that has Kp value monotonically
# increasing in time with appropriate datetime indices
s = pds.Series()
for i in np.arange(8):
temp = pds.Series(data.iloc[:, i].values,
index=data.index+pds.DateOffset(hours=int(3*i)))
s = s.append(temp)
s = s.sort_index()
s.index.name = 'time'
# now, Kp comes in non-user friendly values
# 2-, 2o, and 2+ relate to 1.6, 2.0, 2.3
# will convert for user friendliness
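# e.g. '2-' becomes 2 - 1/3 and '2+' becomes 2 + 1/3 below ('2o' stays at 2.0),
# approximating the 1.6 / 2.0 / 2.3 scale noted above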
first = np.array([float(x[0]) for x in s])
flag = np.array([x[1] for x in s])
ind, = np.where(flag == '+')
first[ind] += 1.0 / 3.0
ind, = np.where(flag == '-')
first[ind] -= 1.0 / 3.0
result = pds.DataFrame(first, columns=['Kp'], index=s.index)
fill_val = np.nan
elif tag == 'forecast':
# load forecast data
result = pds.read_csv(fnames[0], index_col=0, parse_dates=True)
fill_val = -1
elif tag == 'recent':
# load recent Kp data
result = pds.read_csv(fnames[0], index_col=0, parse_dates=True)
fill_val = -1
# Initialize the metadata
for kk in result.keys():
initialize_kp_metadata(meta, kk, fill_val)
return result, meta
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
sat_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : pysat._files.Files
A class containing the verified available files
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == '':
# files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
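# e.g. a monthly file such as 'kp1901.tab' (illustrative) is listed once per day as
# 'kp1901.tab_2019-01-01', 'kp1901.tab_2019-01-02', ...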
if format_str is None:
format_str = 'kp{year:2d}{month:02d}.tab'
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str,
two_digit_year_break=94)
if not out.empty:
out.loc[out.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
elif tag == 'forecast':
format_str = 'kp_forecast_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
pds_offset = pds.DateOffset(days=1)
files.loc[files.index[-1] + pds_offset] = files.values[-1]
files.loc[files.index[-1] + pds_offset] = files.values[-1]
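# the repeated line is deliberate: each call appends one more day, so the newest
# file is also listed under the two days after its generation date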
return files
elif tag == 'recent':
format_str = 'kp_recent_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
pds_offset = pds.DateOffset(days=1)
files.loc[files.index[-1] + pds_offset] = files.values[-1]
files.loc[files.index[-1] + pds_offset] = files.values[-1]
return files
else:
raise ValueError('Unrecognized tag name for Space Weather Index ' +
'Kp')
else:
raise ValueError('A data_path must be passed to the loading routine ' +
'for Kp')
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data
Parameters
-----------
tag : string or NoneType
Denotes type of file to load. Accepted types are '' and 'forecast'.
(default=None)
sat_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard Kp data
if tag == '':
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
dnames = list()
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=(date.year - date.year//100*100),
month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
if not fname in dnames:
try:
logger.info('Downloading file for '+date.strftime('%b %Y'))
sys.stdout.flush()
ftp.retrbinary('RETR '+fname, open(saved_fname, 'wb').write)
dnames.append(fname)
except ftplib.error_perm as exception:
if str(exception.args[0]).split(" ", 1)[0] != '550':
# leaving a bare raise below so that ftp errors
# are properly reported as coming from ftp
# and gives the correct line number.
# We aren't expecting any 'normal' ftp errors
# here, other than a 550 'no file' error, thus
# accurately raising FTP issues is the way to go
raise
else:
# file isn't actually there, just let people know
# then continue on
os.remove(saved_fname)
logger.info('File not available for '+date.strftime('%x'))
ftp.close()
elif tag == 'forecast':
import requests
logger.info('This routine can only download the current forecast, ' +
'not archived forecasts')
# download webpage
furl = 'https://services.swpc.noaa.gov/text/3-day-geomag-forecast.txt'
r = requests.get(furl)
# parse text to get the date the prediction was generated
date_str = r.text.split(':Issued: ')[-1].split(' UTC')[0]
date = pysat.datetime.strptime(date_str, '%Y %b %d %H%M')
# data is the forecast value for the next three days
raw_data = r.text.split('NOAA Kp index forecast ')[-1]
# get date of the forecasts
date_str = raw_data[0:6] + ' ' + str(date.year)
forecast_date = pysat.datetime.strptime(date_str, '%d %b %Y')
# strings we will use to parse the downloaded text
lines = ['00-03UT', '03-06UT', '06-09UT', '09-12UT', '12-15UT',
'15-18UT', '18-21UT', '21-00UT']
# storage for daily forecasts
# get values for each day, then combine together
day1 = []
day2 = []
day3 = []
for line in lines:
raw = raw_data.split(line)[-1].split('\n')[0]
day1.append(int(raw[0:10]))
day2.append(int(raw[10:20]))
day3.append(int(raw[20:]))
times = | pds.date_range(forecast_date, periods=24, freq='3H') | pandas.date_range |
# Version 2.0 of the t-SNE Stock Market Example: with triggers and better defined functions.
# Try with simulated data so we know that they are clustered
#___________________________________________________________________________________________________ Import packages
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib import colors
from fbm import FBM
# Use the normalization? Shouldn't be needed
# from sklearn.preprocessing import normalize
import imageio
from timeit import default_timer as timer
import umap
#___________________________________________________________________________________________________ Control triggers
####################### Triggers ########################
# 0: no, 1: yes
plot_bar = 0 # Bar plots of returns for all companies for a given number of days (randomized)
compute_tsne = 0
plot_tsne_synth = 0 # Plots a single plot of t-SNE for the snythetic dataset
plot_tsne_gif = 0 # Plots a gif of all the possible windows of t-SNE
compute_umap = 0
plot_umap = 0
###################### Parameters ########################
window_time = 40 # Window of time for the "dynamic t-SNE"
days_bar_plot = 3 # Number of bar plots (also number of days it will be plotted for)
#___________________________________________________________________________________________________ Import data
raw_data = np.asarray( | pd.read_csv('all_stocks_5yr.csv') | pandas.read_csv |
from nose.tools import with_setup
import pandas as pd
from ..widget import utils as utils
from ..widget.encoding import Encoding
df = None
encoding = None
def _setup():
global df, encoding
records = [
{
"buildingID": 0,
"date": "6/1/13",
"temp_diff": 12,
"mystr": "alejandro",
"mystr2": "1",
},
{
"buildingID": 1,
"date": "6/1/13",
"temp_diff": 0,
"mystr": "alejandro",
"mystr2": "1",
},
{
"buildingID": 2,
"date": "6/1/14",
"temp_diff": 11,
"mystr": "alejandro",
"mystr2": "1",
},
{
"buildingID": 0,
"date": "6/1/15",
"temp_diff": 5,
"mystr": "alejandro",
"mystr2": "1.0",
},
{
"buildingID": 1,
"date": "6/1/16",
"temp_diff": 19,
"mystr": "alejandro",
"mystr2": "1",
},
{
"buildingID": 2,
"date": "6/1/17",
"temp_diff": 32,
"mystr": "alejandro",
"mystr2": "1",
},
]
df = pd.DataFrame(records)
encoding = Encoding(chart_type="table", x="date", y="temp_diff")
def _teardown():
pass
@with_setup(_setup, _teardown)
def test_on_render_viz():
df["date"] = pd.to_datetime(df["date"])
df["mystr2"] = pd.to_numeric(df["mystr2"])
assert utils.infer_vegalite_type(df["buildingID"]) == "Q"
assert utils.infer_vegalite_type(df["date"]) == "T"
assert utils.infer_vegalite_type(df["temp_diff"]) == "Q"
assert utils.infer_vegalite_type(df["mystr"]) == "N"
assert utils.infer_vegalite_type(df["mystr2"]) == "Q"
def test_select_x():
assert utils.select_x(None) is None
def _check(d, expected):
x = utils.select_x(d)
assert x == expected
data = dict(
col1=[1.0, 2.0, 3.0], # Q
col2=["A", "B", "C"], # N
col3=pd.date_range("2012", periods=3, freq="A"),
) # T
_check(data, "col3")
data = dict(col1=[1.0, 2.0, 3.0], col2=["A", "B", "C"]) # Q # N
_check(data, "col2")
data = dict(col1=[1.0, 2.0, 3.0]) # Q
_check(data, "col1")
# Custom order
data = dict(
col1=[1.0, 2.0, 3.0], # Q
col2=["A", "B", "C"], # N
col3=pd.date_range("2012", periods=3, freq="A"), # T
col4=pd.date_range("2012", periods=3, freq="A"),
) # T
selected_x = utils.select_x(data, ["N", "T", "Q", "O"])
assert selected_x == "col2"
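# with the custom priority N > T > Q > O, the first nominal column (col2) is chosen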
# Len < 1
assert utils.select_x(dict()) is None
def test_select_y():
def _check(d, expected):
x = "col1"
y = utils.select_y(d, x)
assert y == expected
data = dict(
col1=[1.0, 2.0, 3.0], # Chosen X
col2=["A", "B", "C"], # N
col3=pd.date_range("2012", periods=3, freq="A"), # T
col4=pd.date_range("2012", periods=3, freq="A"), # T
col5=[1.0, 2.0, 3.0],
) # Q
_check(data, "col5")
data = dict(
col1=[1.0, 2.0, 3.0], # Chosen X
col2=["A", "B", "C"], # N
col3= | pd.date_range("2012", periods=3, freq="A") | pandas.date_range |
import pandas as pd
df = pd.read_csv('D:/5674-833_4th/part5/stock-data.csv')
# convert the string date column to a pandas Timestamp
df['new_date']=pd.to_datetime(df['Date'])
df.set_index('new_date',inplace= True)
# print(df.loc['2018'].head())
# df_ym = df.loc['2018-07']
# print(df_ym)
#
today = | pd.to_datetime('2018-12-25') | pandas.to_datetime |
import numpy as np
import pandas as pd
from src.features import build_features
from sklearn.model_selection import train_test_split
def make_dataset():
"""
This function loads the raw data, builds some features and saves the df.
It is only meant to be called once, to produce the dataset.
"""
raw_data = pd.read_csv("C:/Users/groes/OneDrive/Documents/701CW_2/Intro_to_AI/data/raw/df_all_data_w_desc_2021-06-22.csv")
df = build_features.build_features(raw_data)
df.to_csv("C:/Users/groes/OneDrive/Documents/701CW_2/Intro_to_AI/data/processed/4.0-processed_data_w_listprice.csv")
###### SPLITTING DATASET INTO TRAINING AND VALIDATION #######
# Create a boolean mask with one entry per home; roughly percentage_trainingset of the entries are True
def create_training_validation_set(df, percentage_trainingset):
mask = np.random.rand(len(df)) < percentage_trainingset
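# sketch of the intended continuation (assumption, not part of the original):
# valDF = pd.DataFrame(df[~mask]) would give the complementary validation set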
trainDF = | pd.DataFrame(df[mask]) | pandas.DataFrame |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expedition_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expedition_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expedition_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expedition_section_using_Label(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expedition_section_using_Sample(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_missing_aw_col(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3", "10-U2H-20T-3"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
}
df = pd.DataFrame(data)
expected = | pd.DataFrame(data) | pandas.DataFrame |
"""
Script to processes basic data from all query
files to notebooks1.csv. After notebooks1.csv is created, files
can be downloaded with download.py.
"""
import time
import os
import datetime
import json
import sys
import argparse
import requests
import pandas as pd
from consts import (
URL,
COUNT_TRIGGER,
BREAK,
JSON_PATH,
PATH,
HEADERS,
TOKENS,
NUM_WORKERS,
s3
)
from funcs import (
debug_print,
write_to_log,
df_to_s3,
s3_to_df,
list_s3_dir
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--update", action="store_const",
dest="updating", const=True, default=False,
help=(
"Search notebooks that have been added "
+ "or updated since last search, along with new "
+ "notebooks"
)
)
parser.add_argument(
"--local", action="store_const",
dest="local", const=True, default=False,
help="Saves output locally instead of in S3."
)
parser.add_argument(
"--needed", metavar = "num_needed", type = int
)
args = parser.parse_args()
updating = args.updating
local = args.local
num_needed = args.needed
if num_needed is None:
num_needed = 0
for i in range(NUM_WORKERS):
try:
with open('num_needed_{0}.save'.format(i),'r') as f:
num_needed += int(f.readlines()[0])
except:
print("Parallelize_query.py was not completed.")
print("Please complete query and try again.")
sys.exit(0)
clean_metadata(num_needed, updating, local)
debug_print(
"Notebooks1, Owners1, and Repos1 were created and saved. "
)
def clean_metadata(num_needed, updating, local):
"""
Extract information from metadata JSON files and save to CSVs.
Equivalent to Adam's 1_nb_metadata_cleaning.ipynb.
"""
try:
if local:
pass
else:
notebooks_done = s3_to_df("csv/notebooks1.csv")
owners_done = s3_to_df("csv/owners1.csv")
repos_done = s3_to_df("csv/repos1.csv")
notebook_files_done = set(notebooks_done.file)
owner_ids_done = set(owners_done.owner_id)
repo_ids_done = set(repos_done.repo_id)
print('Metadata already processed for {0} notebooks, {1} owners, and {2} repos.'.format(
len(notebook_files_done),
len(owner_ids_done),
len(repo_ids_done)
))
except:
notebook_files_done = []
owner_ids_done = []
repo_ids_done = []
print("Metadata not processed for any files.")
# Get all query files.
if local:
nb_search_files = os.listdir(JSON_PATH)
else:
nb_search_files = list_s3_dir('json/')
# Sort query files by size then by page number.
nb_search_files = sorted(
nb_search_files,
key = lambda x: (
int(x.split("_")[2].split("..")[0]),
int(x.split("_")[3][1:].split(".")[0])
)
)
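# assumes names like 'nb_search_140..209_p3.json' (illustrative): field 2 holds the
# filesize range and field 3 the query page, matching the parsing inside the loop below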
debug_print("We have {0} query files.".format(len(nb_search_files)))
notebooks = {}
repos = {}
owners = {}
for j, json_file_name in enumerate(nb_search_files):
# Keep track of progress.
if (j+1) % (COUNT_TRIGGER/100) == 0 or j+1 == len(nb_search_files):
debug_print("{0} / {1} data files processed".format(
j+1, len(nb_search_files)
))
file_components = json_file_name.replace(".json","").split("_")
filesize = file_components[2]
query_page = int(file_components[3][1:])
if local:
with open(JSON_PATH+json_file_name, "r") as json_file:
# Parse file name to get size and query page.
file_dict = json.load(json_file)
else:
obj = s3.Object(
"notebook-research",
"json/{0}".format(json_file_name)
)
file_dict = json.loads(obj.get()["Body"].read().decode("UTF-8"))
# Report missing data.
if "incomplete_results" in file_dict:
if file_dict["incomplete_results"] == True:
msg = "{0} has incomplete results".format(json_file_name)
write_to_log("../logs/nb_metadata_cleaning_log.txt", msg)
days_since = file_dict["days_since"]
if "items" in file_dict:
if len(file_dict["items"]) == 0:
msg = "{0} has 0 items".format(json_file_name)
write_to_log("../logs/nb_metadata_cleaning_log.txt", msg)
else:
# Save data for each item.
for i in range(len(file_dict["items"])):
item = file_dict["items"][i]
item_repo = item["repository"]
repo_id = item_repo["id"]
owner_id = item_repo["owner"]["id"]
# Don't save forked notebooks.
if item_repo["fork"]:
continue
# Full path is unique for each file.
name = "{0}/{1}/{2}".format(
item_repo["owner"]["login"],
item_repo["name"],
item["path"]
).replace("/","..")
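# e.g. owner 'alice', repo 'demo', path 'nb/a.ipynb' (illustrative) -> 'alice..demo..nb..a.ipynb'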
if name not in notebook_files_done:
notebook = {
"file": name,
"html_url": item["html_url"],
"name" : item["name"],
"path": item["path"],
"repo_id": repo_id,
"owner_id": owner_id,
"filesize": filesize,
"query_page": query_page,
"days_since": days_since
}
notebooks[name] = notebook
if repo_id not in repos and repo_id not in repo_ids_done:
repo = {
"repo_name": item_repo["name"],
"owner_id": owner_id,
"repo_description": item_repo["description"],
"repo_fork": item_repo["fork"],
"repo_html_url": item_repo["html_url"],
"repo_private": item_repo["private"],
}
repos[repo_id] = repo
if owner_id not in owners and owner_id not in owner_ids_done:
owner = {
"owner_html_url": item_repo["owner"]["html_url"],
"owner_login": item_repo["owner"]["login"],
}
owners[owner_id] = owner
# If updating we don't always need the full page.
if updating and len(notebooks) == num_needed:
break
else:
msg = "{0} has no items object".format(json_file_name)
write_to_log("../logs/nb_metadata_cleaning_log.txt", msg)
if updating and len(notebooks) == num_needed:
break
# Display status
debug_print(("\nAfter processing all query files, "
"we have {0} new notebooks.").format(len(notebooks)))
debug_print("Written by {0} owners.".format(len(owners)))
debug_print("Held in {0} repositories.".format(len(repos)))
# Translate dictionaries to DataFrames and save to CSV.
# Ordered by days since, if duplicates keep the most recent
# (i.e. keep last, which was found more days since 1-1-19).
notebooks_df = pd.DataFrame(notebooks).transpose()\
.sort_values(by=["days_since","file"]).drop_duplicates(
subset =["file"],
keep="last"
)
owners_df = pd.DataFrame(owners).transpose().reset_index().rename(
columns = {"index":"owner_id"}, index = str
)
repos_df = pd.DataFrame(repos).transpose().reset_index().rename(
columns = {"index":"repo_id"}, index = str
)
if local:
pd.concat([notebooks_df, notebooks_done]).to_csv("{0}/notebooks1.csv".format(PATH), index = False)
pd.concat([owners_df, owners_done]).to_csv("{0}/owners1.csv".format(PATH), index = False)
pd.concat([repos_df, repos_done]).to_csv("{0}/repos1.csv".format(PATH), index = False)
else:
df_to_s3(pd.concat([notebooks_df, notebooks_done]), "csv/notebooks1.csv")
df_to_s3(pd.concat([owners_df, owners_done]), "csv/owners1.csv")
df_to_s3( | pd.concat([repos_df, repos_done]) | pandas.concat |
"""Mock data for bwaw.insights tests."""
import pandas as pd
ACTIVE_BUSES = pd.DataFrame([
['213', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '2'],
['213', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '2'],
['138', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '05'],
['138', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '05']
], columns=['Lines', 'Lon', 'VehicleNumber', 'Time', 'Lat', 'Brigade'])
ACTIVE_BUSES['Time'] = pd.to_datetime(ACTIVE_BUSES['Time'])
COORDINATES = pd.DataFrame([
['1001', '01', 52.224536, 21.0921481, 'al.Zieleniecka', '2020-10-12 00:00:00.0']
], columns=['ID', 'Number', 'Latitude', 'Longitude', 'Destination', 'Validity'])
TIMETABLE = [{'Brigade': '2', 'Destination': 'al.Zieleniecka', 'Time': '15:46:00'}]
SPEED_INCIDENT = pd.DataFrame([
[16.378041, 52.223457, 21.091625, '2021-02-09 15:45:54.500']
], columns=['Speed', 'Lat', 'Lon', 'Time'])
SPEED_INCIDENT['Time'] = | pd.to_datetime(SPEED_INCIDENT['Time']) | pandas.to_datetime |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
import pandas as pd
import requests
from datetime import datetime
def prod40(fte, prod):
df = pd.read_csv(fte, encoding='latin-1')
#drop Region = Nan: includes all invalid dates
df = df[df['Region_origen'].notna()]
df['Cod_region_origen'] = df['Cod_region_origen'].astype(int)
df['Cod_region_destino'] = df['Cod_region_destino'].astype(int)
#stardardize fechas
df['Inicio_semana'] = pd.to_datetime(df['Inicio_semana'], format='%d-%m-%Y')
df['Fin_semana'] = pd.to_datetime(df['Fin_semana'], format='%d-%m-%Y')
df['Inicio_semana'] = df['Inicio_semana'].astype(str)
df['Fin_semana'] = df['Fin_semana'].astype(str)
# drop the Año and Mes columns
df.drop(columns=['Año', 'Mes'], inplace=True)
print(df.to_string())
df.to_csv(prod + 'TransporteAereo_std.csv', index=False)
def prod40_from_API(url, api_key, prod):
print('Generating prod40 from API')
response = requests.get(url + api_key)
my_list = response.json()['aéreo nacional - movimientos y pasajeros']
#print(my_list)
df = pd.DataFrame(my_list, dtype=str)
#print(list(df))
# compare the month with the leading digits of inicioSemana and finsemana:
# if they match, the leading value is the month
# otherwise, it is the day
for i in range(len(df)):
mes = df.loc[i, 'mes']
iniSemana = df.loc[i, 'inicioSemana']
finDe = df.loc[i, 'finsemana']
anio = df.loc[i,'anio']
print('mes: ' + mes)
print('iniSemana: ' + iniSemana[:2])
print('finDe: ' + finDe[:2])
if int(mes) == int(iniSemana[:2]):
# print('month first in inicioSemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=False)
else:
# print('day first in inicioSemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=True)
if int(mes) == int(finDe[:2]):
# print('month first in finsemana')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=False)
else:
# print('day first in finsemana')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=True)
df['inicioSemana'] = pd.to_datetime(df['inicioSemana'], dayfirst=True)
df['finsemana'] = pd.to_datetime(df['finsemana'], dayfirst=True)
# drop unused columns
df.drop(columns=['anio', 'mes'], inplace=True)
df_localidades = pd.read_csv('../input/JAC/JAC_localidades.csv')
# add codigo_region and region to the origin (origen)
df_aux = | pd.merge(df, df_localidades, left_on='origen', right_on='Localidad') | pandas.merge |
"""Store the data in a nice big dataframe"""
import sys
from datetime import datetime, timedelta
import pandas as pd
import geopandas as gpd
import numpy as np
class Combine:
"""Combine defined countries together"""
THE_EU = [ 'Austria', 'Italy', 'Belgium', 'Latvia',
'Bulgaria', 'Lithuania', 'Croatia',
'Luxembourg', 'Cyprus', 'Malta',
'Czechia', 'Netherlands', 'Denmark',
'Poland', 'Estonia', 'Portugal',
'Finland', 'Romania', 'France',
'Slovakia', 'Germany', 'Slovenia',
'Greece', 'Spain', 'Hungary',
'Sweden', 'Ireland' ]
def __init__(self, options):
"""Init"""
self.options = options
self.timeseries = []
self.countries = None
self.description = None
self.merged = None
self.cc = None
self.populations = []
self.national_populations = None
self.get_populations()
self.countries_long = {'nl': 'The Netherlands', 'sco': 'Scotland', 'eng': 'England',
'wal': 'Wales', 'ni': 'Northern Ireland'}
self.jhu = JHU(self)
def judat(self):
"""Dumb helper for another library"""
self.timeseries.append(NLTimeseries(False).national(False))
self.combine_national(False)
#self.merged['Week'] = self.merged.index.strftime('%U')
#self.merged = self.merged.groupby(['Week']) \
#.agg({'Aantal': 'sum'})
print(self.merged)
def process(self):
"""Do it"""
cumulative = False
if self.options.pivot:
cumulative = True
for nation in self.cc:
usejhu = True
if self.options.nation:
print(f'Processing National data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(False).national(nation,cumulative))
usejhu = False
#if nation == 'nl':
#self.timeseries.append(NLTimeseries(False).national(cumulative))
#usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(False,
{nation: self.countries_long[nation]}).national(cumulative))
else:
print(f'Processing combined data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(True).get_data())
usejhu = False
if nation == 'nl':
self.timeseries.append(NLTimeseries(True).get_data())
usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(True).get_data())
if len(self.timeseries) == 0:
print('No country Data to process')
sys.exit()
if self.options.pivot:
self.combine_pivot()
return
if self.options.nation:
self.combine_national()
return
self.get_combined_data()
def combine_pivot(self):
"""Pivot data for pandas_alive"""
print('Pivotting data')
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
# So we can add it as an option later
column = 'Overleden'
#column = 'Aantal'
# Convert to 100K instead of millions
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
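# populations are stored in millions, so *10 expresses them in blocks of 100k people;
# the division below therefore yields a per-100k figure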
# Per-Capita
self.merged[column] = self.merged[column] / self.merged['population']
self.merged = self.merged.pivot(index='Datum',
columns='country',
values=column).fillna(0)
self.trim_data()
print(self.merged)
def combine_national(self, trim=True):
"""Combine national totals"""
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
self.merged = self.merged.set_index('Datum')
self.merged.sort_index(inplace=True)
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
self.merged.loc[(self.merged.country == country), 'cname'] \
= self.countries_long[country]
for column in ['Aantal', 'Ziekenhuisopname', 'Overleden']:
if column not in self.merged.columns:
continue
pgpd = f"{column}-gpd"
radaily = f"{column}-radaily"
raweekly = f"{column}-raweekly"
ranonpc = f"{column}-ranonpc"
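# derived columns: -gpd is the per-100k value, -radaily its 7-day rolling mean,
# -raweekly its 7-day rolling sum, and -ranonpc the 7-day rolling sum of the raw counts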
self.merged[pgpd] = self.merged[column] / self.merged['population']
self.merged[radaily] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7, 1).mean())
self.merged[raweekly] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7).sum())
self.merged[ranonpc] = self.merged.groupby('country',
sort=False)[column] \
.transform(lambda x: x.rolling(7).sum())
if(trim):
self.trim_data()
def trim_data(self):
if self.options.startdate is not None:
self.merged = self.merged.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
self.merged = self.merged.query(f'Datum <= {self.options.enddate}')
def get(self):
"""Return the data set"""
return self.merged
def get_populations(self):
"""National populations for the calculations that need it"""
self.national_populations = pd.read_csv('data/populations.csv', delimiter=',',
index_col=0, header=None, squeeze=True).to_dict()
def get_max(self, column):
"""Max value in df"""
return self.merged[column].max()
def get_combined_data(self):
"""Get a single dataframe containing all countries we deal with
I did this so I could draw combined choropleths but that has proven
to be somewhat more challenging than I originally thought
"""
print('Calculating combined data')
dataframe = pd.concat(self.timeseries)
dataframe = dataframe.set_index('Datum')
dataframe = dataframe.sort_index()
dataframe['pop_pc'] = dataframe['population'] / 1e5
# Filter out countries we do not want
for country in self.countries:
dataframe = dataframe[~dataframe['country'].isin([country])]
# Finally create smoothed columns
dataframe['radaily'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7, 1).mean())
dataframe['weekly'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7).sum())
dataframe['radaily_pc'] = dataframe['radaily'] / dataframe['pop_pc']
dataframe['weekly_pc'] = dataframe['weekly'] / dataframe['pop_pc']
if self.options.startdate is not None:
dataframe = dataframe.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
dataframe = dataframe.query(f'Datum <= {self.options.enddate}')
print('Finished calculating combined data')
self.merged = dataframe
def parse_countries(self, country_str):
"""Sort out country data"""
ret = []
if country_str is None:
country_list = self.countries_long.keys()
else:
country_list = country_str.split(',')
if 'eu' in country_list:
country_list.remove('eu')
country_list += self.THE_EU
print('Setting EU')
for country in country_list:
country = country.lower()
count = None
#if 'nether' in country:
#count = 'nl'
if 'scot' in country:
count = 'sco'
if 'eng' in country:
count = 'eng'
if 'wal' in country:
count = 'wal'
#if 'ni' in country:
# count = 'ni'
if count is not None:
ret.append(count)
else:
retcountry = self.jhu.get_country(country)
if retcountry:
ret.append(retcountry)
self.cc = ret
self.countries = list(set(self.countries_long.keys()) - set(ret))
self.description = '_'.join(ret)
def project_for_date(self, date):
"""Project infections per Gemeente and make league table"""
if date is None:
date = self.merged.index.max().strftime('%Y%m%d')
datemax = datetime.datetime.strptime(date, '%Y%m%d')
datemin = (datemax - timedelta(days=4)).strftime('%Y%m%d')
self.merged = self.merged.query(f'{datemin} <= Datum <= {date}')
self.merged = self.merged.groupby(['Gemeentecode']) \
.agg({'Aantal': 'sum', 'Gemeentenaam': 'first',
'pop_pc': 'first', 'population': 'first', 'country': 'first'})
self.merged['percapita'] = self.merged['Aantal'] / self.merged['pop_pc']
self.merged.sort_values(by=['percapita'], ascending=False, inplace=True)
class Timeseries:
"""Abstract class for timeseries"""
def __init__(self, process=True):
self.merged = None
self.cumulative = False
if process:
self.get_pop()
self.get_map()
self.get_source_data()
def get_data(self):
"""Pass back the data series"""
return self.merged
def get_source_data(self):
"""Placeholder"""
def get_pop(self):
"""Placeholder"""
def get_map(self):
"""Placeholder"""
def set_cumulative(self, value):
"""Daily or cumulative"""
self.cumulative = value
class JHU:
"""Get data from <NAME>"""
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, combined):
"""Init"""
self.dataframe = None
self.combined = combined
self.load()
def get_country(self, country):
"""Check Country is in JHU data"""
row = self.dataframe.loc[self.dataframe['Combined_Key'] == country]
if len(row) == 0:
return False
self.combined.countries_long[row['iso2'].values[0].lower()] = country
self.combined.national_populations[row['iso2'].values[0].lower()] \
= row['Population'].values[0]
return row['iso2'].values[0].lower()
def load(self):
"""Load JHU lookup table"""
dataframe = pd.read_csv(f'{self.JHD}/UID_ISO_FIPS_LookUp_Table.csv',
delimiter=',')
dataframe['Combined_Key'] = dataframe['Combined_Key'].str.lower()
dataframe['Population'] = dataframe['Population'] / 1e6
self.dataframe = dataframe
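    # Illustrative lookup (the country name must match a lower-cased Combined_Key in the
    # JHU table; `combined` is assumed to be the collator object that exposes
    # countries_long and national_populations, as used above):
    # >>> jhu = JHU(combined)
    # >>> jhu.get_country("belgium")   # returns 'be' and registers its long name and population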
class XXTimeseries(Timeseries):
"""Generic JHU Data class"""
# TODO: Duplicated code
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, process=True, country=None):
"""Init"""
Timeseries.__init__(self, process)
print(country.keys())
self.countrycode = list(country.keys())[0]
self.country = country[self.countrycode]
        self.cumulative = False
    def national(self, cumulative):
        """Get national totals"""
        self.set_cumulative(cumulative)
timeseries = 'csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
overleden = self.calculate(timeseries, 'Overleden')
timeseries = 'csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
aantal = self.calculate(timeseries, 'Aantal')
aantal['Overleden'] = overleden['Overleden']
return aantal.assign(country=self.countrycode)
def calculate(self, timeseries, column):
"""Get national totals"""
file = f'{self.JHD}/{timeseries}'
dataframe = pd.read_csv(file, delimiter=',')
dataframe['Country/Region'] = dataframe['Country/Region'].str.lower()
row = dataframe.loc[dataframe['Country/Region'] == self.country]
row = row.loc[row['Province/State'].isnull()]
row = row.reset_index(drop=True)
row.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
row.set_index('Country/Region', inplace=True)
dataframe = row.T
if not self.cumulative:
dataframe[column] = dataframe[self.country] - dataframe[self.country].shift(1)
else:
dataframe[column] = dataframe[self.country]
dataframe.drop(columns=[self.country], inplace=True)
dataframe.dropna(inplace=True)
dataframe = dataframe.reset_index()
dataframe.rename(columns={'index': 'Datum'}, inplace=True)
return dataframe
class BETimeseries(Timeseries):
"""Belgium data"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, cumulative):
"""Get national totals"""
self.set_cumulative(cumulative)
dataframe = pd.read_csv('data/belgiumt.csv',
delimiter=',')
dataframe.dropna(inplace=True)
dataframe.rename(columns={'CASES': 'Aantal', 'DATE': 'Datum'}, inplace=True)
dataframe = dataframe.groupby(['Datum']).agg({'Aantal': 'sum'})
dataframe = dataframe.reset_index()
dataframe = dataframe.assign(country='be')
return dataframe
def get_source_data(self):
"""Get BE source data for infections"""
        dataframe = pd.read_csv('data/belgium.csv', delimiter=',')
'''
Functions for the neural network
'''
import os
import numpy as np
import numba as nb
import pandas as pd
# Project imports
from utils import tickers, strategy, dates, fundamentals, io
# Other imports and type-hinting
from pandas import DataFrame as pandasDF
def main(nn_config: dict,
strat_config: dict):
'''
Main function to run the nn
Parameters
----------
nn_config : dict
Configuration parameters for the nn
strat_config : dict
Configuration parameters for the solver
Returns
-------
None
'''
# Check if the data-folder has been made
check_folder()
for case in ['training', 'testing']:
print('Obtaining the ' + case + ' data')
# Get a list of tickers, based on the available data
if case == 'training':
ticker_list = tickers.get_random_tickers(nn_config['tickers'])
else:
ticker_list = tickers.get_tickers_exc_sample(nn_config['testing tickers'],
ticker_list)
# Generate the nn input
df = get_nn_input(ticker_list,
nn_config,
strat_config,
)
# Calculate the P/L mean value so that it is saved for future use in
# testing and training the NN
if case == 'training':
nn_config['mean trade'] = df['Profit/Loss'].mean()
# Save the configuration settings as a json file
io.save_dict(nn_config,
'nn/data/' + nn_config['strat name'],
)
# Produce the labels for each trade
df['labels'] = np.where(
df['Profit/Loss'].values < nn_config['mean trade'],
0,
1,
)
# Save the input data into the data-folder
df.to_csv('nn/data/' + nn_config['strat name'] + ' ' + case + '.csv',
index = False,
)
return
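# Minimal illustrative call (every key below is an assumption inferred from how main()
# and its helpers read the configs, not a documented schema):
# >>> nn_config = {'tickers': 50, 'testing tickers': 20, 'time lags': 10,
# ...              'include fundamentals': False, 'strat name': 'demo'}
# >>> strat_config = {'return feats': [], 'time series feats': [], 'drop cols': []}
# >>> main(nn_config, strat_config)   # writes 'nn/data/demo training.csv' and '... testing.csv'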
def get_nn_input(tickers: list,
nn_config: dict,
strat_config: dict) -> pandasDF:
'''
Get the dataframe of all inputs to the nn
Parameters
----------
tickers : list
A list of tickers to generate the inputs for
nn_config : dict
Configuration parameters for the nn
strat_config : dict
Configuration parameters for the solver
Returns
-------
df : pandasDF
The processed inputs for the nn
'''
# Get the backseries of data for each trade per each ticker
df = get_time_series_data(nn_config,
strat_config,
tickers,
)
# Drop the columns we no longer want for the nn
df = df.drop(columns = (strat_config['return feats']
+ strat_config['time series feats']
+ strat_config['drop cols']
),
)
if nn_config['include fundamentals']:
df = include_fundamentals(df)
# Drop the fiscal quarter column since it is no longer required
df = df.drop(columns = 'fiscal_quarter')
# Rearrange the columns such that ticker, Profit/Loss and labels appear
# at the end, this is helpful for sorting the data in the nn code
cols = [col for col in df.columns.tolist()
if col not in ['Profit/Loss', 'ticker']]
df = df[cols + ['Profit/Loss', 'ticker']]
# Drop any null entries, to stop any bad-data getting into the nn
return df.dropna()
def include_fundamentals(df: pandasDF):
dfs = []
for ticker in df['ticker'].unique():
df_tmp = df[df['ticker'] == ticker]
try:
fund = fundamentals.get_all_fundamentals(ticker)
df_tmp = dates.backwards_date_merge(df_tmp,
fund,
'Date',
'fiscal_quarter',
'date')
dfs.append(df_tmp)
except Exception as e:
print(ticker, ' : ', e)
return pd.concat(dfs)
def single_stock_data(ticker: str,
nn_config: dict) -> pandasDF:
'''
Get a single stock's time-series history as a nn feature. I.e. one could
use the spy as a feature to the NN.
Parameters
----------
ticker : str
The ticker to get the time-series data for
nn_config : dict
The config params for the nn.
Returns
-------
df : pandasDF
The time series data for a single ticker.
'''
df = pd.read_csv(f'data/{ticker}.csv')
# Add the time-series columns, and then normalise them
for col in ['Open', 'Low', 'High', 'Close', 'Volume']:
df = time_series_cols(df, col, nn_config['time lags'])
    # Drop the unnecessary columns when returning
return df.drop(columns = ['Open', 'Low', 'High', 'Close', 'Volume',
'Adj Close']
)
def normalise_prices(df: pandasDF,
price_cols: list):
'''
Normalise all price related time-series on the same scale.
'''
# Find all columns which are price related
cols = []
for feat in price_cols:
cols += get_lagged_col_names(df, feat)
    # Normalise these columns together using row-wise standard scaling
return normalise_time_series(df, cols)
def normalise_non_price(df: pandasDF,
other_feats: list):
'''
For each non-price related time-series, normalise them
'''
for feat in other_feats:
cols = get_lagged_col_names(df, feat)
df = normalise_time_series(df, cols)
return df
def get_lagged_col_names(df: pandasDF,
feat: str) -> list:
'''
Get all lagged column names for the specific feature
'''
return [col for col in df.columns.tolist() if feat in col]
def normalise_time_series(df: pandasDF,
cols: list) -> pandasDF:
'''
For a set of columns normalise by using a standard scaling approach
'''
df[cols] = df[cols].apply(lambda x: (x-x.mean())/x.std(ddof = 0),
axis = 1,
)
return df
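# Worked example of the row-wise standard scaling above (illustrative only):
# >>> frame = pd.DataFrame({'x_lag1': [1.0, 10.0], 'x_lag2': [3.0, 30.0]})
# >>> normalise_time_series(frame, ['x_lag1', 'x_lag2'])
# Each row becomes (value - row mean) / row std, i.e. [-1.0, 1.0] for both rows here.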
def get_time_series_data(nn_config: dict,
strat_config: dict,
ticker_list: list) -> pandasDF:
'''
Produce a dataframe containing the time-series data for a random selection
of tickers. The output df contains the backseries of data for each trade
made in the buying and selling algorithm.
Parameters
----------
nn_config : dict
The config params for the nn
strat_config : dict
The config params for the trading strategy
ticker_list : list
A list of tickers to run the buy/sell for, and produce input data for
the nn
Returns
-------
dfs : list
A dataframe containing all trades
'''
# Open an empty list to store the processed dataframes per each ticker
dfs = []
# For each ticker in the ticker list, run the buy/sell and pre-process
# the data for input into the nn
for ticker in ticker_list:
        df = pd.read_csv(f'data/{ticker}.csv')
import pytest
import datetime
from pymapd._loaders import _build_input_rows
from pymapd import _pandas_loaders
from omnisci.mapd.MapD import TStringRow, TStringValue, TColumn, TColumnData
import pandas as pd
import numpy as np
from omnisci.mapd.ttypes import TColumnType
from omnisci.common.ttypes import TTypeInfo
def assert_columnar_equal(result, expected):
for i, (a, b) in enumerate(zip(result, expected)):
np.testing.assert_array_equal(a.nulls, b.nulls)
np.testing.assert_array_equal(a.data.int_col, b.data.int_col)
np.testing.assert_array_equal(a.data.real_col, b.data.real_col)
np.testing.assert_array_equal(a.data.str_col, b.data.str_col)
class TestLoaders:
def test_build_input_rows(self):
data = [(1, 'a'), (2, 'b')]
result = _build_input_rows(data)
expected = [TStringRow(cols=[TStringValue(str_val='1', is_null=None),
TStringValue(str_val='a', is_null=None)]),
TStringRow(cols=[TStringValue(str_val='2', is_null=None),
TStringValue(str_val='b', is_null=None)])]
assert result == expected
def test_build_input_rows_with_array(self):
data = [(1, 'a'), (2, 'b'), (3, ['c', 'd', 'e'])]
result = _build_input_rows(data)
expected = [TStringRow(cols=[TStringValue(str_val='1', is_null=None),
TStringValue(str_val='a', is_null=None)]),
TStringRow(cols=[TStringValue(str_val='2', is_null=None),
TStringValue(str_val='b', is_null=None)]),
TStringRow(cols=[TStringValue(str_val='3', is_null=None),
TStringValue(str_val='{c,d,e}',
is_null=None)])]
assert result == expected
def test_build_table_columnar(self):
from pymapd._pandas_loaders import build_input_columnar
data = pd.DataFrame({"a": [1, 2, 3], "b": [1.1, 2.2, 3.3]})
nulls = [False] * 3
result = build_input_columnar(data, preserve_index=False)
expected = [
TColumn(TColumnData(int_col=[1, 2, 3]), nulls=nulls),
TColumn(TColumnData(real_col=[1.1, 2.2, 3.3]), nulls=nulls)
]
assert_columnar_equal(result[0], expected)
def test_build_table_columnar_pandas(self):
data = pd.DataFrame({
"boolean_": [True, False],
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp_": [pd.Timestamp("2016"), | pd.Timestamp("2017") | pandas.Timestamp |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import itertools
from abc import ABCMeta, abstractmethod
from concurrent.futures import Future
from copy import copy
from typing import Iterable, Optional, Tuple, Union
import gs_quant
import pandas as pd
from gs_quant.base import RiskKey
from gs_quant.config import DisplayOptions
from gs_quant.datetime import point_sort_order
__column_sort_fns = {
'label1': point_sort_order,
'mkt_point': point_sort_order,
'point': point_sort_order
}
__risk_columns = ('date', 'time', 'mkt_type', 'mkt_asset', 'mkt_class', 'mkt_point')
class ResultInfo(metaclass=ABCMeta):
def __init__(
self,
risk_key: RiskKey,
unit: Optional[dict] = None,
error: Optional[Union[str, dict]] = None,
request_id: Optional[str] = None
):
self.__risk_key = risk_key
self.__unit = unit
self.__error = error
self.__request_id = request_id
@property
@abstractmethod
def raw_value(self):
...
@property
def risk_key(self) -> RiskKey:
return self.__risk_key
@property
def unit(self) -> dict:
"""The units of this result"""
return self.__unit
@property
def error(self) -> Union[str, dict]:
"""Any error associated with this result"""
return self.__error
@property
def request_id(self) -> Optional[str]:
"""The request Id associated with this result"""
return self.__request_id
@staticmethod
def composition_info(components: Iterable):
from gs_quant.markets.markets import historical_risk_key
dates = []
values = []
errors = {}
risk_key = None
unit = None
for component in components:
date = component.risk_key.date
risk_key = historical_risk_key(component.risk_key) if risk_key is None else risk_key
if risk_key.market.location != component.risk_key.market.location:
raise ValueError('Cannot compose results with different markets')
if isinstance(component, (ErrorValue, Exception)):
errors[date] = component
else:
values.append(component.raw_value)
dates.append(date)
unit = unit or component.unit
return dates, values, errors, risk_key, unit
@abstractmethod
def _get_raw_df(self, display_options: DisplayOptions = None):
...
class ErrorValue(ResultInfo):
def __init__(self, risk_key: RiskKey, error: Union[str, dict], request_id: Optional[str] = None):
super().__init__(risk_key, error=error, request_id=request_id)
def __repr__(self):
return self.error
def _get_raw_df(self, display_options: DisplayOptions = None):
return pd.DataFrame(self, index=[0], columns=['value'])
@property
def raw_value(self):
return None
class UnsupportedValue(ResultInfo):
def __init__(self, risk_key: RiskKey, request_id: Optional[str] = None):
super().__init__(risk_key, request_id=request_id)
def __repr__(self):
return 'Unsupported Value'
def _get_raw_df(self, display_options: DisplayOptions = None):
options = display_options if display_options is not None else gs_quant.config.display_options
show_na = options.show_na
if show_na:
return pd.DataFrame(None, index=[0], columns=['value'])
else:
return None
@property
def raw_value(self):
return 'Unsupported Value'
class ScalarWithInfo(ResultInfo, metaclass=ABCMeta):
def __init__(self,
risk_key: RiskKey,
value: Union[float, str],
unit: Optional[dict] = None,
error: Optional[Union[str, dict]] = None,
request_id: Optional[str] = None):
float.__init__(value)
ResultInfo.__init__(self, risk_key, unit=unit, error=error, request_id=request_id)
@property
@abstractmethod
def raw_value(self):
...
@staticmethod
def compose(components: Iterable):
dates, values, errors, risk_key, unit = ResultInfo.composition_info(components)
return SeriesWithInfo(pd.Series(index=pd.DatetimeIndex(dates).date, data=values),
risk_key=risk_key,
unit=unit,
error=errors)
def _get_raw_df(self, display_options: DisplayOptions = None):
return pd.DataFrame(self, index=[0], columns=['value'])
class FloatWithInfo(ScalarWithInfo, float):
def __new__(cls,
risk_key: RiskKey,
value: Union[float, str],
unit: Optional[str] = None,
error: Optional[str] = None,
request_id: Optional[str] = None):
return float.__new__(cls, value)
@property
def raw_value(self) -> float:
return float(self)
def __repr__(self):
return self.error if self.error else float.__repr__(self)
def __add__(self, other):
if isinstance(other, FloatWithInfo):
if self.unit == other.unit:
return FloatWithInfo(combine_risk_key(self.risk_key, other.risk_key), self.raw_value + other.raw_value,
self.unit)
else:
raise ValueError('FloatWithInfo unit mismatch')
return super(FloatWithInfo, self).__add__(other)
def __mul__(self, other):
if isinstance(other, FloatWithInfo):
return FloatWithInfo(combine_risk_key(self.risk_key, other.risk_key), self.raw_value * other.raw_value,
self.unit)
else:
return FloatWithInfo(self.risk_key, self.raw_value * other, self.unit)
def to_frame(self):
return self
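    # Illustrative behaviour of the arithmetic overloads above (building a RiskKey is
    # elided because it depends on gs_quant internals; `rk` is a placeholder):
    # >>> a = FloatWithInfo(rk, 2.0, unit='bps')
    # >>> b = FloatWithInfo(rk, 3.0, unit='bps')
    # >>> a + b                                     # FloatWithInfo(5.0), unit preserved
    # >>> a + FloatWithInfo(rk, 3.0, unit='USD')    # raises ValueError('FloatWithInfo unit mismatch')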
class StringWithInfo(ScalarWithInfo, str):
def __new__(cls,
risk_key: RiskKey,
value: Union[float, str],
unit: Optional[dict] = None,
error: Optional[str] = None,
request_id: Optional[str] = None):
return str.__new__(cls, value)
@property
def raw_value(self) -> str:
return str(self)
def __repr__(self):
return self.error if self.error else str.__repr__(self)
class SeriesWithInfo(pd.Series, ResultInfo):
_internal_names = pd.DataFrame._internal_names + \
['_ResultInfo__' + i for i in dir(ResultInfo) if isinstance(getattr(ResultInfo, i), property)]
_internal_names_set = set(_internal_names)
def __init__(
self,
*args,
risk_key: Optional[RiskKey] = None,
unit: Optional[dict] = None,
error: Optional[Union[str, dict]] = None,
request_id: Optional[str] = None,
**kwargs
):
pd.Series.__init__(self, *args, **kwargs)
ResultInfo.__init__(self, risk_key, unit=unit, error=error, request_id=request_id)
def __repr__(self):
if self.error:
return pd.Series.__repr__(self) + "\nErrors: " + str(self.error)
return pd.Series.__repr__(self)
@property
def _constructor(self):
return SeriesWithInfo
@property
def _constructor_expanddim(self):
return DataFrameWithInfo
@property
def raw_value(self) -> pd.Series:
return pd.Series(self)
def _get_raw_df(self, display_options: DisplayOptions = None):
df = pd.DataFrame(self).reset_index()
df.columns = ['dates', 'value']
return df
class DataFrameWithInfo(pd.DataFrame, ResultInfo):
_internal_names = pd.DataFrame._internal_names + \
['_ResultInfo__' + i for i in dir(ResultInfo) if isinstance(getattr(ResultInfo, i), property)]
_internal_names_set = set(_internal_names)
def __init__(
self,
*args,
risk_key: Optional[RiskKey] = None,
unit: Optional[dict] = None,
error: Optional[Union[str, dict]] = None,
request_id: Optional[str] = None,
**kwargs
):
pd.DataFrame.__init__(self, *args, **kwargs)
ResultInfo.__init__(self, risk_key, unit=unit, error=error, request_id=request_id)
def __repr__(self):
if self.error:
            return pd.DataFrame.__repr__(self) + "\nErrors: " + str(self.error)
return pd.DataFrame.__repr__(self)
@property
def _constructor(self):
return DataFrameWithInfo
@property
def _constructor_sliced(self):
return SeriesWithInfo
@property
def raw_value(self) -> pd.DataFrame:
if self.empty:
return pd.DataFrame(self)
df = self.copy()
if isinstance(self.index.values[0], dt.date):
df.index.name = 'dates'
df.reset_index(inplace=True)
        return pd.DataFrame(df)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.core.display import display
from scipy.stats import chi2_contingency
import glob
import os
import warnings
warnings.filterwarnings("ignore")
pd.options.display.float_format = '{:.4f}'.format
class DataExplorer():
def load_data_from_folder(self, folderpath = "data/"):
"""
Import all csv.file from the folder with given extension
Returns the dictionary of pandas
folderpath: example "data/"
"""
my_dict = {}
encodings = ['utf-8', 'cp1252','iso-8859-1','unicode_escape','ascii']
for filename in glob.glob(os.path.join(folderpath, '*.csv')):
filename = filename.replace("\\","/")
loaded = False
i = 0
while loaded == False:
if i == len(encodings):
print("[WARNING] The file named %s could not be loaded.\n" % filename)
break
else:
try:
df = pd.read_csv(filename, encoding = encodings[i])
my_dict[filename.split(".")[0].split("/")[1]] = df
print("Filename:",filename.split(".")[0].split("/")[1],", loaded with encoding:",
encodings[i])
print("Shape:", df.shape)
display(df.head(5))
loaded = True
break
except:
i += 1
return my_dict
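    # Example usage (illustrative; assumes a local "data/" folder containing CSV files):
    # >>> explorer = DataExplorer()
    # >>> frames = explorer.load_data_from_folder("data/")
    # >>> frames.keys()   # one DataFrame per CSV, keyed by the file name without extension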
def describe(self, df, cols = None):
"""
Data description (min, max, quantiles, missing data...)
"""
if cols == None:
cols = df.columns
print(df.columns)
print("\n Info ---------------------------")
df[cols].info()
# Description
print("\n Description ---------------------------")
description = df[cols].describe([0.1, 0.25, 0.75, 0.9], include = "all")
description.loc['unique'] = df.nunique()
display(description)
# Unique values
print("\n Unique values ---------------------------")
display(df[cols].nunique())
print("\n Missing ---------------------------")
# Missing data
total = df[cols].isnull().sum().sort_values(ascending=False)
percent = (df[cols].isnull().sum()/df[cols].isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['N_MissingData', 'MissingDataPercent'])
display(missing_data)
def freq_analysis(self, df, cols = None, show_text = True):
"""
Plot distribution for all columns
show_text: If true, for categorical variables print the quantity
"""
if cols == None:
cols = df.columns
for col in cols:
if np.issubdtype(df[col].dtype, np.number):
sns.distplot(df[col].dropna(), kde = False)
plt.title(col)
plt.show()
else:
if len(df[col].unique()) <= 100: # Categorical varialbe with few categories
fig, ax = plt.subplots(figsize = (8,8))
count = df[col].value_counts(ascending = False)
count.iloc[::-1].plot(kind = 'barh')
total = count.sum()
cumsum = count.cumsum()
                    textstr = "N° of categories to represent 40%: {0:.0f} \n N° of categories to represent 60%: {1:.0f} \n N° categories to represent 80%: {2:.0f}".format( \
len(count.loc[:np.argmax(cumsum>total*0.4)]),len(count.loc[:np.argmax(cumsum>total*0.6)]),len(count.loc[:np.argmax(cumsum>total*0.8)]))
ax.text(0.3, 0.05, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='bottom', bbox=dict(boxstyle='round', facecolor='white', edgecolor = 'k',alpha=0.5))
if show_text:
for i, v in enumerate(count.iloc[::-1].values):
#ax.text(v+3, i-0.25, str(v),color='blue')
if count.nunique()>2:
ax.text(v+2, i-0.25, "{0:.1f}%".format(cumsum[-i-1]/total*100),color='blue')
plt.title(col)
plt.show()
def correlation_analysis(self, df, cols = None, pairplot = False):
"""
Visualize correlation between columns
Correlation pair plot between highest corr pairs
"""
if cols == None:
cols = df.columns
corr = df[cols].corr()
annot = (len(corr) <= 15)
# Only lower diag
sns.set(style="white")
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot = annot)
ax.set_ylim(corr.shape[0]-1e-9, -1e-9)
plt.show()
# Another one
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corr, vmax=.8, square=True, annot = annot, linewidth = 0.2);
ax.set_ylim(corr.shape[0]-1e-9, -1e-9)
plt.show()
# Cluster map
clustergrid = sns.clustermap(corr, square = True, annot = annot,
linewidth = 0.2, figsize = (8,8), fmt = ".2f")
clustergrid.ax_heatmap.set_ylim(corr.shape[0]-1e-9, -1e-9)
plt.show()
# Pair plot for highest correlation pairs
if pairplot == True:
corr_matrix = corr.abs()
sol = (corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
.stack()
.sort_values(ascending=False))
highest_corr_pair = sol.index.values[:3]
highest_corr_cols = []
for pair in highest_corr_pair:
for x in pair:
if not x in highest_corr_cols:
highest_corr_cols.append(x)
sns.pairplot(df[highest_corr_cols])
plt.title("Highest correlation pairs")
plt.show()
def correlation_analysis_with_target(self, df, target, k = 10):
"""
Correlation plot of columns that have the largest correlation with target
target: target column
k: number of columns to consider, default 5
"""
fig, ax = plt.subplots()
corrmat = df.corr().abs()
cols = corrmat.nlargest(k, target)[target].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
ax.set_ylim(cm.shape[0]-1e-9, -1e-9)
plt.show()
def outlier_analysis(self, df, cols = None):
"""
Box plot outliers
"""
if cols == None:
cols = df.columns
for col in cols:
if np.issubdtype(df[col].dtype, np.number) and not (df[col].isnull().values.any()):
sns.boxplot(df[col])
plt.title(col)
plt.show()
def boxplot(self, xcol, ycol, data):
"""
xcol, ycol: string, column name
Box plot
"""
plt.subplots(figsize=(16, 8))
sns.boxplot(x=xcol,y=ycol, data = data)
plt.show()
def pairplot(self, data, cols):
"""
Correlation plot between all pairs of columns
"""
sns.pairplot(data[cols])
def catplot(self, x, hue, col, data):
"""
x: value to count
hue: color category
col: type category
Plot the count of categories
"""
sns.catplot(x=x, hue = hue, col = col, data = data, kind = "count")
def datetime_analysis(self, df, colname):
"""
df: dataframe
date_col is a STRING corresponding to a column name that represents a date
Count plot all frequencies
"""
date_df = df[[colname]].copy()
        date_df[colname] = pd.to_datetime(date_df[colname])
import os.path
import json
import zipfile
import numpy as np
import pandas as pd
import requests
from openpyxl import load_workbook
import ukcensusapi.Nomisweb as Api
import ukpopulation.utils as utils
class SNPPData:
"""
Functionality for downloading and collating UK Subnational Population Projection (NPP) data
Nomisweb stores the England data (only)
Wales/Scotland/NI are not the responsiblity of ONS and are made avilable online by the relevant statistical agency
"""
def __init__(self, cache_dir=utils.default_cache_dir()):
self.cache_dir = cache_dir
self.data_api = Api.Nomisweb(self.cache_dir)
self.data = {}
self.data[utils.EN] = self.__do_england()
self.data[utils.WA] = self.__do_wales()
self.data[utils.SC] = self.__do_scotland()
self.data[utils.NI] = self.__do_nireland()
# LADs * 26 years * 91 ages * 2 genders
# assert len(self.data) == (326+22+32+11) * 26 * 91 * 2
def min_year(self, code):
"""
Returns the first year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return min(self.data[code].PROJECTED_YEAR_NAME.unique())
def max_year(self, code):
"""
Returns the final year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return max(self.data[code].PROJECTED_YEAR_NAME.unique())
def all_lads(self, countries):
"""
Returns all the LAD codes in the country or countries specified
Supports EN WA SC NI EW GB UK
"""
if isinstance(countries, str):
countries = [countries]
lads = []
for country in countries:
if country in self.data:
lads.extend(self.data[country].GEOGRAPHY_CODE.unique())
else:
# warn if missing or invalid
print("WARNING: no LAD codes for country %s", country)
return lads
def filter(self, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
# convert inputs to arrays if single values supplied (for isin)
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
if np.isscalar(ages):
ages = [ages]
if np.isscalar(genders):
genders = [genders]
# Handle problem with empty list not being recognised as Null, was causing problem in utils.trim_range() below
if not years:
years = None
countries = utils.country(geog_codes)
# TODO fix incorrect assumption is that all countries have the same year range
years = utils.trim_range(years, self.min_year(countries[0]), self.max_year(countries[0]))
retval = pd.DataFrame() # {"GEOGRAPHY_CODE": [], "PROJECTED_YEAR_NAME": [], "C_AGE": [], "GENDER":[], "OBS_VALUE": []})
# loop over datasets as needed
for country in countries:
# apply filters
retval = retval.append(self.data[country][(self.data[country].GEOGRAPHY_CODE.isin(geog_codes)) &
(self.data[country].PROJECTED_YEAR_NAME.isin(years)) &
(self.data[country].C_AGE.isin(ages)) &
(self.data[country].GENDER.isin(genders))], ignore_index=True,
sort=False)
# check for any codes requested that werent present (this check is far easier to to on the result)
invalid_codes = np.setdiff1d(geog_codes, retval.GEOGRAPHY_CODE.unique())
if len(invalid_codes) > 0:
raise ValueError("Filter for LAD code(s): %s for years %s returned no data (check also age/gender filters)"
% (str(invalid_codes), str(years)))
return retval
def aggregate(self, categories, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
data = self.filter(geog_codes, years, ages, genders)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
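    # Illustrative usage (E08000021 appears elsewhere in this module as an example LAD code):
    # >>> snpp = SNPPData()
    # >>> snpp.filter("E08000021", years=[2020])              # single-year-of-age x gender rows
    # >>> snpp.aggregate(["GENDER", "C_AGE"], "E08000021",    # sum out age and gender
    # ...                years=[2020])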
# year_range can include year that dont need to be extrapolated
# Filtering age and gender is not (currently) supported
def extrapolate(self, npp, geog_codes, year_range):
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
geog_codes = utils.split_by_country(geog_codes)
all_codes_all_years = pd.DataFrame()
for country in geog_codes:
if not geog_codes[country]: continue
max_year = self.max_year(country)
last_year = self.filter(geog_codes[country], max_year)
(in_range, ex_range) = utils.split_range(year_range, max_year)
# years that dont need to be extrapolated
all_years = self.filter(geog_codes[country], in_range) if in_range else pd.DataFrame()
for year in ex_range:
data = last_year.copy()
scaling = npp.year_ratio("ppp", country, max_year, year)
data = data.merge(scaling[["GENDER", "C_AGE", "OBS_VALUE"]], on=["GENDER", "C_AGE"])
data["OBS_VALUE"] = data.OBS_VALUE_x * data.OBS_VALUE_y
data.PROJECTED_YEAR_NAME = year
all_years = all_years.append(data.drop(["OBS_VALUE_x", "OBS_VALUE_y"], axis=1), ignore_index=True,
sort=False)
all_codes_all_years = all_codes_all_years.append(all_years, ignore_index=True, sort=False)
return all_codes_all_years
def extrapolagg(self, categories, npp, geog_codes, year_range):
"""
Extrapolate and then aggregate
"""
data = self.extrapolate(npp, geog_codes, year_range)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
def create_variant(self, variant_name, npp, geog_codes, year_range):
"""
Apply NPP variant to SNPP: SNPP(v) = SNPP(0) * sum(a,g) [ NPP(v) / NPP(0) ]
Preserves age-gender structure of SNPP data
"""
result = pd.DataFrame()
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
for geog_code in geog_codes:
(pre_range, in_range) = utils.split_range(year_range, npp.min_year() - 1)
# for any years prior to NPP we just use the SNPP data as-is (i.e. "ppp")
pre_data = self.filter(geog_code, pre_range) if pre_range else pd.DataFrame()
if len(pre_data) > 0:
print("WARNING: variant {} not applied for years {} that predate the NPP data".format(variant_name,
pre_range))
# return if there's nothing in the NPP range
if not in_range:
result.append(pre_data)
continue
data = self.extrapolate(npp, geog_code, in_range).sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"]).reset_index(drop=True)
scaling = npp.variant_ratio(variant_name, utils.country(geog_code), year_range).reset_index().sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"])
# scaling.to_csv(variant_name + ".csv", index=False)
print("DF: ", len(data), ":", len(scaling))
assert (len(data) == len(scaling))
data.OBS_VALUE = data.OBS_VALUE * scaling.OBS_VALUE
# prepend any pre-NPP data
result = result.append(pre_data.append(data))
return result
def __do_england(self):
# return self.__do_england_ons() # 2014
return self.__do_england_nomisweb() # 2018
# nomisweb data is now 2018-based
def __do_england_nomisweb(self):
print("Collating SNPP data for England...")
# need to do this in 2 batches as entire table has >1000000 rows
table_internal = "NM_2006_1" # SNPP
query_params = {
"gender": "1,2",
"c_age": "101...191",
"MEASURES": "20100",
"date": "latest", # 2018-based
"projected_year": "2018...2031",
"select": "geography_code,projected_year_name,gender,c_age,obs_value",
"geography": "1946157057...1946157382"
}
snpp_e = self.data_api.get_data(table_internal, query_params)
query_params["projected_year"] = "2032...2043"
snpp_e = snpp_e.append(self.data_api.get_data(table_internal, query_params))
# make age actual year
snpp_e.C_AGE = snpp_e.C_AGE - 101
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2016.csv")
# assert(len(snpp_e) == 26*2*91*326) # 326 LADs x 91 ages x 2 genders x 26 years
return snpp_e
# Alternative method of downloading the en data from ONS website(Only works with 2014 as it stands).
def __do_england_ons(self):
print("Collating SNPP data for England...")
england_src = "https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip"
england_raw = self.cache_dir + "/snpp_e.csv"
england_zip = self.cache_dir + "/snpp_e.zip"
if os.path.isfile(england_raw):
snpp_e = pd.read_csv(england_raw)
else:
response = requests.get(england_src)
with open(england_zip, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", england_zip)
z = zipfile.ZipFile(england_zip)
# print(z.namelist())
snpp_e = pd.DataFrame()
for gender in [1, 2]:
filename = "2014 SNPP Population " + ("males" if gender == 1 else "females") + ".csv"
chunk = pd.read_csv(z.open(filename)) \
.drop(["AREA_NAME", "COMPONENT", "SEX"], axis=1) \
.query('AGE_GROUP != "All ages"')
# .AGE_GROUP.replace({"90 and over": "90"}
chunk.AGE_GROUP = chunk.AGE_GROUP.replace({"90 and over": "90"})
chunk = chunk.melt(id_vars=["AREA_CODE", "AGE_GROUP"])
# chunk = chunk[chunk.AGE_GROUP != "all ages"]
# chunk = chunk.stack().reset_index()
chunk.columns = ["GEOGRAPHY_CODE", "C_AGE", "PROJECTED_YEAR_NAME", "OBS_VALUE"]
chunk["GENDER"] = gender
snpp_e = snpp_e.append(chunk)
# assert(len(snpp_e) == 26*2*91*326) # 326 districts x 91 ages x 2 genders x 26 years
snpp_e.to_csv(england_raw, index=False)
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2014.csv")
return snpp_e
# Wales
def __do_wales(self):
print("Collating SNPP data for Wales...")
cache_dir = utils.default_cache_dir()
wales_raw = cache_dir + "/snpp_w.csv"
if os.path.isfile(wales_raw):
snpp_w = pd.read_csv(wales_raw)
else:
fields = ['Area_AltCode1', 'Year_Code', 'Data', 'Gender_Code', 'Age_Code', 'Area_Hierarchy', 'Variant_Code']
# StatsWales is an OData endpoint, so select fields of interest
url = "http://open.statswales.gov.wales/dataset/popu6010?$select={}".format(",".join(fields))
# use OData syntax to filter P (persons), AllAges (all ages), Area_Hierarchy 691 (LADs)
url += "&$filter=Gender_Code ne 'P' and Area_Hierarchy gt 690 and Area_Hierarchy lt 694 and Variant_Code eq 'Principal'"
#
data = []
while True:
print(url)
r = requests.get(url)
r_data = r.json()
data += r_data['value']
if "odata.nextLink" in r_data:
url = r_data["odata.nextLink"]
else:
break
snpp_w = pd.DataFrame(data)
# Remove unwanted and rename wanted columns
snpp_w = snpp_w.drop(["Area_Hierarchy", "Variant_Code"], axis=1)
snpp_w = snpp_w.rename(columns={"Age_Code": "C_AGE",
"Area_AltCode1": "GEOGRAPHY_CODE",
"Data": "OBS_VALUE",
"Gender_Code": "GENDER",
"Year_Code": "PROJECTED_YEAR_NAME"})
# Remove all but SYOA and make numeric
snpp_w = snpp_w[(snpp_w.C_AGE != "AllAges") & (snpp_w.C_AGE != "00To15") & (snpp_w.C_AGE != "16To64") & (
snpp_w.C_AGE != "65Plus")]
snpp_w.loc[snpp_w.C_AGE == "90Plus", "C_AGE"] = "90"
snpp_w.C_AGE = pd.to_numeric(snpp_w.C_AGE)
# convert gender to census convention 1=M, 2=F
snpp_w.GENDER = snpp_w.GENDER.map({"M": 1, "F": 2})
# assert(len(snpp_w) == 26*2*91*22) # 22 LADs x 91 ages x 2 genders x 26 years
print(wales_raw)
snpp_w.to_csv(wales_raw, index=False)
return snpp_w
def __do_scotland(self):
lookup = {
'Aberdeen City': 'S12000033',
'Aberdeenshire': 'S12000034',
'Angus': 'S12000041',
'Argyll & Bute': 'S12000035',
'City of Edinburgh': 'S12000036',
'Clackmannanshire': 'S12000005',
'Dumfries & Galloway': 'S12000006',
'Dundee City': 'S12000042',
'East Ayrshire': 'S12000008',
'East Dunbartonshire': 'S12000045',
'East Lothian': 'S12000010',
'East Renfrewshire': 'S12000011',
'Falkirk': 'S12000014',
'Fife': 'S12000015',
'Glasgow City': 'S12000046',
'Highland': 'S12000017',
'Inverclyde': 'S12000018',
'Midlothian': 'S12000019',
'Moray': 'S12000020',
'Na h-Eileanan Siar': 'S12000013',
'North Ayrshire': 'S12000021',
'North Lanarkshire': 'S12000044',
'Orkney Islands': 'S12000023',
'Perth & Kinross': 'S12000024',
'Renfrewshire': 'S12000038',
'Scottish Borders': 'S12000026',
'Shetland Islands': 'S12000027',
'South Ayrshire': 'S12000028',
'South Lanarkshire': 'S12000029',
'Stirling': 'S12000030',
'West Dunbartonshire': 'S12000039',
'West Lothian': 'S12000040'
}
print("Collating SNPP data for Scotland...")
scotland_raw = self.cache_dir + "/snpp_s.csv"
scotland_src = "https://www.nrscotland.gov.uk/files//statistics/population-projections/sub-national-pp-18/detailed-tables/pop-proj-principal-2018-council-area.zip"
scotland_zip = self.cache_dir + "/snpp_s_2018.zip"
if os.path.isfile(scotland_raw):
snpp_s = pd.read_csv(scotland_raw)
else:
response = requests.get(scotland_src)
with open(scotland_zip, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", scotland_zip)
z = zipfile.ZipFile(scotland_zip)
snpp_s = pd.DataFrame()
for filename in z.namelist():
council_area = filename[37:-4]
if council_area in ["Metadata", "Scotland"]:
continue
GEOGRAPHY_CODE = lookup[council_area]
chunk = pd.read_csv(z.open(filename), encoding="ISO-8859-1", header=102)
# Drop Nan Rows
chunk = chunk.dropna(axis=0, how="all")
# Drop Last row with containing Copyright Cell.
chunk = chunk.drop(chunk.tail(1).index[0])
chunk = chunk.rename(columns={"Unnamed: 0": "C_AGE"})
chunk["GEOGRAPHY_CODE"] = GEOGRAPHY_CODE
chunk["GENDER"] = ''
# Drop rows where C_AGE == "All Ages"
chunk = chunk.drop(chunk.index[chunk["C_AGE"] == "All ages"])
chunk.loc[(chunk.C_AGE == '90 and over'), 'C_AGE'] = 90
chunk = chunk.reset_index(drop=True)
chunk.loc[
chunk.index[(chunk["C_AGE"] == "MALES")][0] + 1:chunk.index[(chunk["C_AGE"] == "FEMALES")][0] - 4,
"GENDER"] = 1
chunk.loc[chunk.index[(chunk["C_AGE"] == "FEMALES")][0] + 1:, "GENDER"] = 2
chunk = chunk[chunk.GENDER != '']
for year in range(2018, 2044):
appendable_chunk = chunk[["GEOGRAPHY_CODE", "C_AGE", str(year), "GENDER"]].rename(
columns={str(year): "OBS_VALUE"})
appendable_chunk["PROJECTED_YEAR_NAME"] = year
snpp_s = snpp_s.append(appendable_chunk)
snpp_s.reset_index(drop=True)
snpp_s['OBS_VALUE'] = snpp_s['OBS_VALUE'].str.replace(',', '')
snpp_s['OBS_VALUE'] = pd.to_numeric(snpp_s['OBS_VALUE'])
snpp_s.to_csv(scotland_raw, index=False)
return snpp_s
def __do_nireland(self):
# Niron
# (1 worksheet per LAD equivalent)
print("Collating SNPP data for Northern Ireland...")
ni_src = "https://www.nisra.gov.uk/sites/nisra.gov.uk/files/publications/SNPP16_LGD14_SYA_1641.xlsx"
ni_raw = self.cache_dir + "/snpp_ni.csv"
if os.path.isfile(ni_raw):
snpp_ni = pd.read_csv(ni_raw)
else:
response = requests.get(ni_src)
with open(self.cache_dir + "/ni_raw.xlsx", 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
# easier to hard-code the worksheet names we need (since unlikely to change frequently)
districts = ["Antrim & Newtownabbey",
"Ards & North Down",
"Armagh Banbridge & Craigavon",
"Belfast",
"Causeway Coast & Glens",
"Derry & Strabane",
"Fermanagh & Omagh",
"Lisburn & Castlereagh",
"Mid & East Antrim",
"Mid Ulster",
"Newry Mourne & Down"]
xls_ni = load_workbook(self.cache_dir + "/ni_raw.xlsx", read_only=True)
snpp_ni = pd.DataFrame()
for d in districts:
# 1 extra row compared to 2014 data (below was A2)
area_code = xls_ni[d]["A3"].value
# 2 extra rows compared to 2014 data (below was A3:A95)
males = utils.read_cell_range(xls_ni[d], "A5", "AA97")
females = utils.read_cell_range(xls_ni[d], "A100", "AA192")
dfm = pd.DataFrame(data=males[1:, 1:], index=males[1:, 0], columns=males[0, 1:]).drop(
["Age"]).stack().reset_index()
dfm.columns = ["C_AGE", "PROJECTED_YEAR_NAME", "OBS_VALUE"]
dfm["GENDER"] = pd.Series(1, dfm.index)
dfm["GEOGRAPHY_CODE"] = pd.Series(area_code, dfm.index)
dfm.loc[dfm.C_AGE == "90+", "C_AGE"] = "90"
                dff = pd.DataFrame(data=females[1:, 1:], index=females[1:, 0], columns=females[0, 1:])
import unittest
import logging
import os
import numpy as np
import pandas as pd
import cmapPy.pandasGEXpress as GCToo
import cmapPy.pandasGEXpress.parse as parse
import broadinstitute_psp.utils.setup_logger as setup_logger
import broadinstitute_psp.tear.continuous_renormalization as renorm
# Setup logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
# Use functional tests assets from the tear directory
if os.path.basename(os.getcwd()) == "tear":
FUNCTIONAL_TESTS_DIR = os.path.join("./functional_tests")
elif os.path.basename(os.getcwd()) == "broadinstitute_psp":
FUNCTIONAL_TESTS_DIR = os.path.join("tear/functional_tests")
class TestContinuousRenormalization(unittest.TestCase):
def test_main(self):
in_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_renorm_main.gct")
out_name = os.path.join(FUNCTIONAL_TESTS_DIR, "test_renorm_out.gct")
# update the args string
args_string = ("-i {} -o {} -gct").format(in_gct_path, out_name)
args = renorm.build_parser().parse_args(args_string.split())
renorm.continuous_renormalization(args)
# Read in result
out_gct = parse.parse(out_name)
e_values = np.array(
[[-0.41, -0.13, 0.07, 0.09, 0.18, 0.24, 0.08],
[0.40, 0.11, 0.06, -0.11, -0.22, -0.26, -0.09],
[0.40, -0.40, 0.30, -0.20, 0.05, -0.10, 0.10],
[0.10, 0.06, -0.07, 0.05, -0.09, 0.08, 0.10]])
self.assertTrue(np.allclose(e_values, out_gct.data_df, atol=1e-2))
# Clean up
os.remove(out_name)
def test_to_log_renorm(self):
slopes = pd.Series([-0.3, 0.1, -0.1, 0.3])
is_log_renormed_return = renorm.is_log_renormed(slopes, 0.2)
is_log_renormed_expected = pd.Series([True, False, False, True])
self.assertTrue((is_log_renormed_return == is_log_renormed_expected).all())
def test_tot_samp_offsets(self):
df_in = pd.DataFrame([[1, -1, 0, 1],
[2, 0, -2, 2],
[0, 0, 0, 0],
[-0.5, 0.5, 0.5, 0]])
return_tot_samp_offsets = renorm.calculate_total_sample_offsets(df_in)
expect_tot_samp_offsets = pd.Series([3.5, 1.5, 2.5, 3])
self.assertTrue(np.allclose(return_tot_samp_offsets,
expect_tot_samp_offsets,
atol=1e-6))
def test_calc_out_mat(self):
df_in = pd.DataFrame([[1, 2, 3, 4],
[4, 3, 2, 1],
[0, 0, 0, 0],
[1, 1, 1, 1]])
offset_in = pd.DataFrame([[0, 0, 0, 0],
[3, 2, 1, 0],
[0, 1, 2, 3],
[0, 0, 0, 0]])
return_out_df = renorm.calculate_out_matrix(df_in, offset_in)
expect_out_df = pd.DataFrame([[1, 2, 3, 4],
[1, 1, 1, 1],
[0, -1, -2, -3],
[1, 1, 1, 1]])
self.assertTrue(np.allclose(return_out_df, expect_out_df,
atol=1e-6))
def test_calc_pep_samp_offsets(self):
data_df_in = pd.DataFrame([[0.8, 0.6, 0.5, 0.36],
[1, 1, 1, 1]])
row_metadata_df_in = pd.DataFrame([[True, True],
[False, False]],
columns = ["is_log_renormed", "whatever"])
es_in = pd.Series([0.2, 0.5, 0.6, 1.0])
pep_y_offsets_in = pd.Series([0.4, 0])
fit_params_in = pd.DataFrame([[(1, 1), (1, 1)],
[(1, 1), (1, 1)]],
columns = ["deg1", "log"])
func_return = renorm.calculate_peptide_sample_offsets(data_df_in, row_metadata_df_in,
es_in, fit_params_in,
pep_y_offsets_in)
expected_return = pd.DataFrame([[0.85, 0.78, 0.75, 0.67],
[0, 0, 0, 0]])
self.assertTrue(np.allclose(expected_return, func_return, atol=1e-2))
def test_calc_fit(self):
data_df_in = pd.DataFrame([[0.8, 0.6, 0.5, 0.36],
[0.9, 1, 1, 1]])
es_in = pd.Series([0.2, 0.5, 0.6, 1.0])
pep_y_offsets_in = pd.Series([0.1, 0.1])
func_return = renorm.calculate_fit(data_df_in, es_in, pep_y_offsets_in)
expect_return = pd.DataFrame([[[-0.54, 0.88], (1.6, 1.66)],
[[0.11, 0.91], (1.8, 0.03)]],
columns=["deg1", "log"])
for row_idx, vals in expect_return.iterrows():
for col_idx, vals in expect_return.iteritems():
self.assertTrue(np.allclose(expect_return.loc[row_idx, col_idx],
func_return.loc[row_idx, col_idx],
atol=1e-2))
def test_make_y(self):
x_in = pd.Series([0.1, 0.3, 0.5, 0.8])
deg_model = [1, 1]
log_model = (1, 1)
deg_return = renorm.make_y(x_in, deg_model)
log_return = renorm.make_y(x_in, log_model, 1)
expect_deg_return = pd.Series([1, 1, 1, 1])
        expect_log_return = pd.Series([1.47, 1.42, 1.37, 1.31])
import logging
import os
import unittest
import pandas as pd
import moneytrack as mt
logging.basicConfig(level=logging.DEBUG)
field_names = mt.Config.FieldNames
class DatasetTests(unittest.TestCase):
def test_a(self):
sim = mt.simulation.AccountSimulatorFixedRate(date=pd.to_datetime("2021-01-01"), ayr=0.05, balance=100)
for i in range(100):
sim.step(0)
print(sim.get_daily_account_history())
def test_b(self):
sim1 = mt.simulation.AccountSimulatorFixedRate(
            date=pd.to_datetime("2021-01-01")
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: <NAME>
@file: grow_path.py
@time: 2021/01/19/13:42
"""
import os
import time
import argparse
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem.rdMolDescriptors import CalcExactMolWt
import subprocess
from pandarallel import pandarallel
import configparser
from scoring.ranking import read_dock_file
pandarallel.initialize(verbose=0)
def cal_mutation_dic(workdir, max_gen):
mut_dic_all = dict()
while max_gen > 0:
mut_file = os.path.join(workdir, "generation_" + str(max_gen), "filter.csv")
print(mut_file)
with open(mut_file, "r") as f:
lines = f.readlines()
lines = [i.strip().split(",") for i in lines]
mut_dic = {i[-6].split("-dp")[0].split("-C")[0]: [i[0], i[1].split("-dp")[0].split("-C")[0], i[-5], i[-4]] for i
in lines}
mut_dic_all["gen" + str(max_gen)] = mut_dic
max_gen -= 1
return mut_dic_all
def merge_multi_generation(workdir, max_gen, file_path, dl_mode, config_path):
df_lst = [pd.read_csv(os.path.join(workdir, "generation_" + str(i),
"docked_gen_" + str(i) + ".csv")) for i in range(1, max_gen + 1)]
if dl_mode == 2:
dl_df = read_dock_file(os.path.join(workdir, "generation_{}_pre".format(max_gen),
"docking_outputs_with_score.sdf"))
dl_df["le_ln"] = dl_df.apply(
lambda x: x["docking score"] / Chem.MolFromSmiles(x["smiles"]).GetNumHeavyAtoms(),
axis=1)
dl_df.columns = [i.lower() for i in list(dl_df.columns)]
dl_df = dl_df.drop(columns=["molecule"])
dl_df = dl_df.reindex(columns=df_lst[0].columns)
config = configparser.ConfigParser()
config.read(config_path)
score_cutoff = config.getfloat("deep learning", "dl_score_cutoff")
dl_df = dl_df[dl_df["docking score"] < score_cutoff]
df_lst.append(dl_df)
final_df = pd.concat(df_lst, axis=0).drop_duplicates(subset=["smiles"])
final_df.to_csv(file_path, index=False)
return final_df
def grow_path(mut_dic_all, mut_id):
mut_id = mut_id.split("-dp")[0].split("-C")[0]
try:
gen_mol = int(mut_id.split("_")[1])
except IndexError:
print(mut_id)
return None
mut_info_lst = []
while gen_mol > 0:
mut_info = mut_dic_all["gen" + str(gen_mol)][mut_id]
if "." in mut_info[2]:
gen_mol -= 1
continue
mut_info_lst.append(mut_info)
mut_id = mut_info[1]
gen_mol -= 1
return mut_info_lst
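# Sketch of how the two helpers above compose (the workdir and molecule ID are placeholders;
# the ID format "<tag>_<generation>..." is an assumption based on the split("_")[1] parsing above):
# >>> mut_dic_all = cal_mutation_dic("runs/example_workdir", max_gen=3)
# >>> lineage = grow_path(mut_dic_all, "mol_3")
# >>> lineage   # list of [id, parent_id, ...] records tracing the molecule back through generations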
def add_prop(merged_df_path):
    merged_df = pd.read_csv(merged_df_path)
import pymysql
import pandas as pd
import numpy as np
import tushare as ts
from tqdm import tqdm
from sqlalchemy import create_engine
from getdata import read_mysql_and_insert
from datetime import date,timedelta,datetime
pro = ts.pro_api(token='๏ผ๏ผ๏ผ')
def strategy():
"""
auto strategy
"""
    # Get the pre-processed stock information
all_stock_info = deal_all_stock_info()
    # Record the final results
    result = pd.DataFrame(index=all_stock_info.ts_code)
"""
Functions and methods to extract statistics concerning the most used emojis in twitter datasets.
"""
from collections import Counter
import emoji
import seaborn as sns
import pandas as pd
import sys
import resource
from tqdm import tqdm
from IPython.core.debugger import set_trace
from tqdm import tqdm
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import bz2
import json
from IPython.core.debugger import set_trace
import warnings
import pickle as pk
from numpy.random import permutation
import pdb
import traceback
import sys
from src.constants import (
flags,
em_letters,
em_numbers,
em_hours,
TWEET_EM_COUNT_PATH,
TWEET_PATHS_PATH,
EXPORT_DIR,
TWEET_PATH,
TWEET_SAMPLES_DIR,
COLOR1,
COLOR2,
TITLE_SIZE,
LABEL_SIZE,
)
from src.utils import tononymize_list, genderonymize_list, keep_fe0f_emojis
warnings.filterwarnings("ignore")
sns.set()
def filter_access_paths(tweet_paths):
"""
filter the paths we have rights for
"""
access_paths = []
    print(f"Length before filtering: {len(tweet_paths)}")
for tweet_path in tqdm(tweet_paths):
try:
bz2.open(tweet_path, "rt")
access_paths.append(tweet_path)
except PermissionError as e:
continue
print(f"Length after filtering: {len(access_paths)}")
return access_paths
def load_or_compute_tweetpaths(tweetpaths_path):
"""
Return a list of the paths we search information in
"""
if Path(tweetpaths_path).exists():
print("Loading precomputed paths structure")
tweet_paths = pk.load(open(tweetpaths_path, "rb"))
else:
print("Computing paths structure")
main_path = TWEET_SAMPLES_DIR
if not main_path.exists():
raise ValueError(
"You need to be on the DLab machine to run the Tweeter function"
)
subpaths = np.random.permutation(list(main_path.iterdir()))
# we are only interested in the twitter_stream directories
subpaths = [
path for path in subpaths if path.stem.startswith("twitter_stream")]
tweet_paths = [
tweet_path
for subpath in subpaths
for tweet_path in list(subpath.rglob("*.json.bz2"))
]
tweet_paths = filter_access_paths(tweet_paths)
pk.dump(tweet_paths, open(tweetpaths_path, "wb"))
return tweet_paths
def compute_twitter_data(save_path, N_LIM=30000, seed=15):
"""
Gather randomized subset of the data present at tweets_pritzer_sample.
Args:
save_path (str): path to save the generated file to
N_LIM (int): number of files we take from each day directory
Returns:
[pd.df]: dataframe with columns 'id','lang', and 'text'
"""
N_SAVE = 100
assert N_LIM > N_SAVE
save_path = Path(save_path)
np.random.seed(seed)
tweet_paths = load_or_compute_tweetpaths(TWEET_PATHS_PATH)
tweet_paths = np.random.permutation(tweet_paths)[:N_LIM]
tweet_df = []
print(f"Analyzing {N_LIM} files")
first_pass = True
tweet_paths = [
Path(
"/dlabdata1/gligoric/spritzer/tweets_pritzer_sample/twitter_stream_2019_03_13/13/09/13.json.bz2"
)
] + tweet_paths.tolist()
for i, tweet_path in enumerate(tqdm(tweet_paths)):
# tweeter file reading
new_tweets = []
try:
with bz2.open(tweet_path, "rt") as bzinput:
for line in bzinput:
try:
tweet = json.loads(line)
except json.JSONDecodeError:
continue
if len(tweet.keys()) < 5:
continue
tweet = {key: tweet[key] for key in ["id", "lang", "text"]}
# we only care for english-labelled data
if tweet["lang"] != "en":
continue
new_tweets.append(tweet)
tweet_df += new_tweets
except KeyboardInterrupt:
sys.exit()
pass
except Exception as e:
print("Exception!", type(e), e)
continue
# RAM saving on the shared machine
if i % N_SAVE == 0 and i != 0:
if first_pass:
mode = "w"
header = True
first_pass = False
else:
mode = "a"
header = False
tweet_df = pd.DataFrame(tweet_df)
tweet_df.to_csv(save_path, mode=mode, header=header)
tweet_df = []
# type conversion and saving
tweet_df = pd.DataFrame(tweet_df)
tweet_df.to_csv(save_path, mode="a", header=False)
print("Tweet information generation terminated successfully.")
def update_emoji_count(dic, text):
"""
Update the counts of each emoji wrt to text
Args:
dic(dict): mapping emojis--> count
text (str): text to use to update dic
"""
for em in extract_emojis(text, True):
if em not in emoji.UNICODE_EMOJI:
print("EMOJI NOT PRESENT")
continue
dic[em] = dic.get(em, 0) + 1
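# Tiny illustration of the counter update above (relies on extract_emojis, which is assumed
# to come from this project's utilities; exact tokenisation of modifiers may differ):
# >>> counts = {}
# >>> update_emoji_count(counts, "good morning ☀️☀️🙂")
# >>> counts   # roughly {'☀️': 2, '🙂': 1}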
def compute_emdf(path, tweet_df):
"""
compute the em_df from tweet_df. em_df = mapping from emojis to their counts
in tweet_df
Args:
path (str): path to em_df
tweet_df (pd.df): dataframe as returned by read_twitter_data
Returns:
[pd.df]: em_df
"""
emojis_count = {}
    for text in tqdm(tweet_df["text"]):
update_emoji_count(emojis_count, text)
em_df = pd.Series(emojis_count).sort_values(ascending=False)
em_df = em_df.reset_index().dropna().set_index("index")
em_df.to_csv(path)
return em_df
def load_or_compute_em_df(em_path, tweet_path=None):
"""
load or compute the em_df depending if the provided path exists or not
Args:
path (str): path to em_df
tweet_df (pd.df): dataframe as returned by read_twitter_data
Returns:
[pd.df]: em_df
"""
if Path(em_path).exists():
return pd.read_csv(em_path, index_col=0, names=["counts"], header=0)["counts"]
emojis_count = {}
    reader = pd.read_csv(tweet_path, chunksize=10000)
from __future__ import absolute_import
from __future__ import print_function
import os
import pandas as pd
import numpy as np
import sys
import shutil
from sklearn.preprocessing import MinMaxScaler
def dataframe_from_csv(path, header=0, index_col=False):
return pd.read_csv(path, header=header, index_col=index_col)
var_to_consider = [
"glucose",
"Invasive BP Diastolic",
"Invasive BP Systolic",
"O2 Saturation",
"Respiratory Rate",
"Motor",
"Eyes",
"MAP (mmHg)",
"Heart Rate",
"GCS Total",
"Verbal",
"pH",
"FiO2",
"Temperature (C)",
]
# Filter on useful column for this benchmark
def filter_patients_on_columns_model(patients):
columns = [
"patientunitstayid",
"gender",
"age",
"ethnicity",
"apacheadmissiondx",
"admissionheight",
"hospitaladmitoffset",
"admissionweight",
"hospitaldischargestatus",
"unitdischargeoffset",
"unitdischargestatus",
]
return patients[columns]
# Select unique patient id
def cohort_stay_id(patients):
cohort = patients.patientunitstayid.unique()
return cohort
# Convert gender from F/M to numbers
g_map = {"Female": 1, "Male": 2, "": 0, "NaN": 0, "Unknown": 0, "Other": 0}
def transform_gender(gender_series):
global g_map
return {
"gender": gender_series.fillna("").apply(
lambda s: g_map[s] if s in g_map else g_map[""]
)
}
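# Example of the gender mapping above (illustrative):
# >>> transform_gender(pd.Series(["Female", "Male", None, "Unknown"]))["gender"].tolist()
# [1, 2, 0, 0]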
# Convert ethnicity to numbers
e_map = {
"Asian": 1,
"African American": 2,
"Caucasian": 3,
"Hispanic": 4,
"Native American": 5,
"NaN": 0,
"": 0,
}
def transform_ethnicity(ethnicity_series):
global e_map
return {
"ethnicity": ethnicity_series.fillna("").apply(
lambda s: e_map[s] if s in e_map else e_map[""]
)
}
# Convert hospital/unit discharge status into numbers
h_s_map = {"Expired": 1, "Alive": 0, "": 2, "NaN": 2}
def transform_hospital_discharge_status(status_series):
global h_s_map
return {
"hospitaldischargestatus": status_series.fillna("").apply(
lambda s: h_s_map[s] if s in h_s_map else h_s_map[""]
)
}
def transform_unit_discharge_status(status_series):
global h_s_map
return {
"unitdischargestatus": status_series.fillna("").apply(
lambda s: h_s_map[s] if s in h_s_map else h_s_map[""]
)
}
# Convert diagnosis into numbers
def transform_dx_into_id(df):
df.apacheadmissiondx.fillna("nodx", inplace=True)
dx_type = df.apacheadmissiondx.unique()
    dict_dx_key = pd.factorize(dx_type)
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
"""Class for benchmarking model templates."""
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from tqdm.autonotebook import tqdm
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTED_LOWER_COL
from greykite.common.constants import PREDICTED_UPPER_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.evaluation import add_finite_filter_to_scorer
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import get_pattern_cols
from greykite.common.viz.timeseries_plotting import plot_multivariate
from greykite.common.viz.timeseries_plotting import plot_multivariate_grouped
from greykite.framework.benchmark.benchmark_class_helper import forecast_pipeline_rolling_evaluation
from greykite.framework.constants import FORECAST_STEP_COL
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecaster import Forecaster
from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
class BenchmarkForecastConfig:
"""Class for benchmarking multiple ForecastConfig on a rolling window basis.
Attributes
----------
df : `pandas.DataFrame`
Timeseries data to forecast.
Contains columns [`time_col`, `value_col`], and optional regressor columns.
Regressor columns should include future values for prediction.
configs : `Dict` [`str`, `ForecastConfig`]
Dictionary of model configurations.
A model configuration is a ``ForecastConfig``.
See :class:`~greykite.framework.templates.autogen.forecast_config.ForecastConfig` for details on
valid ``ForecastConfig``.
Validity of the ``configs`` for benchmarking is checked via the ``validate`` method.
tscv : `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit`
Cross-validation object that determines the rolling window evaluation.
See :class:`~greykite.sklearn.cross_validation.RollingTimeSeriesSplit` for details.
The ``forecast_horizon`` and ``periods_between_train_test`` parameters of ``configs`` are
matched against that of ``tscv``. A ValueError is raised if there is a mismatch.
forecaster : `~greykite.framework.templates.forecaster.Forecaster`
Forecaster used to create the forecasts.
is_run : bool, default False
Indicator of whether the `run` method is executed.
After executing `run`, this indicator is set to True.
        Some class methods like ``get_forecast`` require ``is_run`` to be True
to be executed.
result : `dict`
Stores the benchmarking results. Has the same keys as ``configs``.
forecasts : `pandas.DataFrame`, default None
Merged DataFrame of forecasts, upper and lower confidence interval for all
input ``configs``. Also stores train end date and forecast step for each prediction.
"""
def __init__(
self,
df: pd.DataFrame,
configs: Dict[str, ForecastConfig],
tscv: RollingTimeSeriesSplit,
forecaster: Forecaster = Forecaster()):
self.df = df
self.configs = configs
self.tscv = tscv
self.forecaster = forecaster
self.is_run = False
# output
self.result = dict.fromkeys(configs.keys())
self.forecasts = None
def validate(self):
"""Validates the inputs to the class for the method ``run``.
Raises a ValueError if there is a mismatch between the following parameters
of ``configs`` and ``tscv``:
- ``forecast_horizon``
- ``periods_between_train_test``
Raises ValueError if all the ``configs`` do not have the same ``coverage`` parameter.
"""
coverage_list = []
for config_name, config in self.configs.items():
# Checks forecast_horizon
if config.forecast_horizon != self.tscv.forecast_horizon:
raise ValueError(f"{config_name}'s 'forecast_horizon' ({config.forecast_horizon}) does "
f"not match that of 'tscv' ({self.tscv.forecast_horizon}).")
# Checks periods_between_train_test
if config.evaluation_period_param.periods_between_train_test != self.tscv.periods_between_train_test:
raise ValueError(f"{config_name}'s 'periods_between_train_test' ({config.evaluation_period_param.periods_between_train_test}) "
f"does not match that of 'tscv' ({self.tscv.periods_between_train_test}).")
coverage_list.append(config.coverage)
# Computes pipeline parameters
pipeline_params = self.forecaster.apply_forecast_config(
df=self.df,
config=config)
self.result[config_name] = dict(pipeline_params=pipeline_params)
# Checks all coverages are same
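        # (a list has all-equal elements exactly when lst[1:] == lst[:-1])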
if coverage_list[1:] != coverage_list[:-1]:
raise ValueError("All forecast configs must have same coverage.")
def run(self):
"""Runs every config and stores the output of the
:func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
This function runs only if the ``configs`` and ``tscv`` are jointly valid.
Returns
-------
self : Returns self. Stores pipeline output of every config in ``self.result``.
"""
self.validate()
with tqdm(self.result.items(), ncols=800, leave=True) as progress_bar:
for (config_name, config) in progress_bar:
# Description will be displayed on the left of progress bar
progress_bar.set_description(f"Benchmarking '{config_name}' ")
rolling_evaluation = forecast_pipeline_rolling_evaluation(
pipeline_params=config["pipeline_params"],
tscv=self.tscv)
config["rolling_evaluation"] = rolling_evaluation
self.is_run = True
def extract_forecasts(self):
"""Extracts forecasts, upper and lower confidence interval for each individual
config. This is saved as a ``pandas.DataFrame`` with the name
``rolling_forecast_df`` within the corresponding config of ``self.result``.
e.g. if config key is "silverkite", then the forecasts are stored in
``self.result["silverkite"]["rolling_forecast_df"]``.
This method also constructs a merged DataFrame of forecasts,
upper and lower confidence interval for all input ``configs``.
"""
if not self.is_run:
raise ValueError("Please execute 'run' method to create forecasts.")
merged_df = pd.DataFrame()
for config_name, config in self.result.items():
rolling_evaluation = config["rolling_evaluation"]
rolling_forecast_df = pd.DataFrame()
for num, (split_key, split_value) in enumerate(rolling_evaluation.items()):
forecast = split_value["pipeline_result"].forecast
# Subsets forecast_horizon rows from the end of forecast dataframe
forecast_df = forecast.df.iloc[-forecast.forecast_horizon:]
forecast_df.insert(0, "train_end_date", forecast.train_end_date)
forecast_df.insert(1, FORECAST_STEP_COL, np.arange(forecast.forecast_horizon) + 1)
forecast_df.insert(2, "split_num", num)
rolling_forecast_df = pd.concat([rolling_forecast_df, forecast_df], axis=0)
rolling_forecast_df = rolling_forecast_df.reset_index(drop=True)
self.result[config_name]["rolling_forecast_df"] = rolling_forecast_df
# Merges the forecasts of individual config
# Augments prediction columns with config name
pred_cols = [PREDICTED_COL]
if PREDICTED_LOWER_COL in rolling_forecast_df.columns:
pred_cols.append(PREDICTED_LOWER_COL)
if PREDICTED_UPPER_COL in rolling_forecast_df.columns:
pred_cols.append(PREDICTED_UPPER_COL)
mapper = {
col: f"{config_name}_{col}" for col in pred_cols
}
if merged_df.empty:
temp_df = rolling_forecast_df.rename(columns=mapper)
else:
temp_df = rolling_forecast_df[pred_cols].rename(columns=mapper)
merged_df = pd.concat([merged_df, temp_df], axis=1)
self.forecasts = merged_df.reset_index(drop=True)
def plot_forecasts_by_step(
self,
forecast_step: int,
config_names: List = None,
xlabel: str = TIME_COL,
ylabel: str = VALUE_COL,
title: str = None,
showlegend: bool = True):
"""Returns a ``forecast_step`` ahead rolling forecast plot.
        The plot consists of one line for each valid ``config_names``.
If available, the corresponding actual values are also plotted.
For a more customizable plot, see
:func:`~greykite.common.viz.timeseries_plotting.plot_multivariate`
Parameters
----------
forecast_step : `int`
Which forecast step to plot. A forecast step is an integer between 1 and the
forecast horizon, inclusive, indicating the number of periods from train end date
to the prediction date (# steps ahead).
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
xlabel : `str` or None, default TIME_COL
x-axis label.
ylabel : `str` or None, default VALUE_COL
y-axis label.
title : `str` or None, default None
Plot title. If None, default is based on ``forecast_step``.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly graph.
Plots multiple column(s) in ``self.forecasts`` against ``TIME_COL``.
See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`
return value for how to plot the figure and add customization.
"""
if self.forecasts is None:
self.extract_forecasts()
if forecast_step > self.tscv.forecast_horizon:
raise ValueError(f"`forecast_step` ({forecast_step}) must be less than or equal to "
f"forecast horizon ({self.tscv.forecast_horizon}).")
config_names = self.get_valid_config_names(config_names)
y_cols = [TIME_COL, ACTUAL_COL] + \
[f"{config_name}_{PREDICTED_COL}" for config_name in config_names]
df = self.forecasts[self.forecasts[FORECAST_STEP_COL] == forecast_step]
df = df[y_cols]
if title is None:
title = f"{forecast_step}-step ahead rolling forecasts"
fig = plot_multivariate(
df=df,
x_col=TIME_COL,
y_col_style_dict="plotly",
xlabel=xlabel,
ylabel=ylabel,
title=title,
showlegend=showlegend)
return fig
def plot_forecasts_by_config(
self,
config_name: str,
colors: List = DEFAULT_PLOTLY_COLORS,
xlabel: str = TIME_COL,
ylabel: str = VALUE_COL,
title: str = None,
showlegend: bool = True):
"""Returns a rolling plot of the forecasts by ``config_name`` against ``TIME_COL``.
        The plot consists of one line for each available split. Some lines may overlap if the test
        periods of corresponding splits intersect. Hence every line is given a different color.
If available, the corresponding actual values are also plotted.
For a more customizable plot, see
:func:`~greykite.common.viz.timeseries_plotting.plot_multivariate_grouped`
Parameters
----------
config_name : `str`
Which config result to plot.
The name must match the name of one of the input ``configs``.
colors : [`str`, `List` [`str`]], default ``DEFAULT_PLOTLY_COLORS``
Which colors to use to build the color palette.
This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``.
To use a single color for all lines, pass a `List` with a single color.
xlabel : `str` or None, default TIME_COL
x-axis label.
ylabel : `str` or None, default VALUE_COL
y-axis label.
title : `str` or None, default None
Plot title. If None, default is based on ``config_name``.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly graph.
Plots multiple column(s) in ``self.forecasts`` against ``TIME_COL``.
"""
if self.forecasts is None:
self.extract_forecasts()
config_name = self.get_valid_config_names([config_name])[0]
if title is None:
title = f"Rolling forecast for {config_name}"
fig = plot_multivariate_grouped(
df=self.forecasts,
x_col=TIME_COL,
y_col_style_dict={
ACTUAL_COL: {
"line": {
"width": 1,
"dash": "solid"
}
}
},
grouping_x_col="split_num",
grouping_x_col_values=None,
grouping_y_col_style_dict={
f"{config_name}_{PREDICTED_COL}": {
"name": "split",
"line": {
"width": 1,
"dash": "solid"
}
}
},
colors=colors,
xlabel=xlabel,
ylabel=ylabel,
title=title,
showlegend=showlegend)
return fig
def get_evaluation_metrics(
self,
metric_dict: Dict,
config_names: List = None):
"""Returns rolling train and test evaluation metric values.
Parameters
----------
metric_dict : `dict` [`str`, `callable`]
Evaluation metrics to compute.
- key: evaluation metric name, used to create column name in output.
- value: metric function to apply to forecast df in each split to generate the column value.
Signature (y_true: `str`, y_pred: `str`) -> transformed value: `float`.
For example::
metric_dict = {
"median_residual": lambda y_true, y_pred: np.median(y_true - y_pred),
"mean_squared_error": lambda y_true, y_pred: np.mean((y_true - y_pred)**2)
}
Some predefined functions are available in
`~greykite.common.evaluation`. For example::
metric_dict = {
"correlation": lambda y_true, y_pred: correlation(y_true, y_pred),
"RMSE": lambda y_true, y_pred: root_mean_squared_error(y_true, y_pred),
"Q_95": lambda y_true, y_pred: partial(quantile_loss(y_true, y_pred, q=0.95))
}
As shorthand, it is sufficient to provide the corresponding ``EvaluationMetricEnum``
member. These are auto-expanded into the appropriate function.
So the following is equivalent::
metric_dict = {
"correlation": EvaluationMetricEnum.Correlation,
"RMSE": EvaluationMetricEnum.RootMeanSquaredError,
"Q_95": EvaluationMetricEnum.Quantile95
}
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
Returns
-------
evaluation_metrics_df : pd.DataFrame
A DataFrame containing splitwise train and test evaluation metrics for ``metric_dict``
and ``config_names``.
For example. Let's assume::
metric_dict = {
"RMSE": EvaluationMetricEnum.RootMeanSquaredError,
"Q_95": EvaluationMetricEnum.Quantile95
}
config_names = ["default_prophet", "custom_silverkite"]
These are valid ``config_names`` and there are 2 splits for each.
Then evaluation_metrics_df =
config_name split_num train_RMSE test_RMSE train_Q_95 test_Q_95
default_prophet 0 * * * *
default_prophet 1 * * * *
custom_silverkite 0 * * * *
custom_silverkite 1 * * * *
where * represents computed values.
"""
if not self.is_run:
raise ValueError("Please execute the 'run' method before computing evaluation metrics.")
metric_dict = self.autocomplete_metric_dict(
metric_dict=metric_dict,
enum_class=EvaluationMetricEnum)
config_names = self.get_valid_config_names(config_names=config_names)
evaluation_metrics_df = pd.DataFrame()
for config_name in config_names:
rolling_evaluation = self.result[config_name]["rolling_evaluation"]
for num, (split_key, split_value) in enumerate(rolling_evaluation.items()):
forecast = split_value["pipeline_result"].forecast
split_metrics = {
"config_name": config_name,
"split_num": num}
# Updates train metrics
df_train = forecast.df_train
split_metrics.update({
f"train_{metric_name}": metric_func(
df_train[forecast.actual_col],
df_train[forecast.predicted_col]
) for metric_name, metric_func in metric_dict.items()
})
# Updates test metrics
df_test = forecast.df_test
if df_test.shape[0] > 0 and forecast.test_na_count < df_test.shape[0]:
split_metrics.update({
f"test_{metric_name}": metric_func(
df_test[forecast.actual_col],
df_test[forecast.predicted_col]
) for metric_name, metric_func in metric_dict.items()
})
else:
split_metrics.update({
f"test_{metric_name}": np.nan
for metric_name, metric_func in metric_dict.items()
})
split_metrics_df = pd.DataFrame(split_metrics, index=[num])
evaluation_metrics_df = pd.concat([evaluation_metrics_df, split_metrics_df])
# Resets index and fills missing values (e.g. when correlation is not defined) with np.nan
evaluation_metrics_df = evaluation_metrics_df.reset_index(drop=True).fillna(value=np.nan)
temp_df = evaluation_metrics_df.copy()
# Rearranges columns so that train and test error of a config are side by side
evaluation_metrics_df = pd.DataFrame()
evaluation_metrics_df["config_name"] = temp_df["config_name"]
evaluation_metrics_df["split_num"] = temp_df["split_num"]
for metric_name in metric_dict.keys():
evaluation_metrics_df[f"train_{metric_name}"] = temp_df[f"train_{metric_name}"]
evaluation_metrics_df[f"test_{metric_name}"] = temp_df[f"test_{metric_name}"]
return evaluation_metrics_df
def plot_evaluation_metrics(
self,
metric_dict: Dict,
config_names: List = None,
xlabel: str = None,
ylabel: str = "Metric value",
title: str = None,
showlegend: bool = True):
"""Returns a barplot of the train and test values of ``metric_dict`` of ``config_names``.
        Values of a metric for all ``config_names`` are plotted as a grouped bar.
        Train and test values of a metric are plotted side-by-side for easy comparison.
Parameters
----------
metric_dict : `dict` [`str`, `callable`]
Evaluation metrics to compute. Same as
`~greykite.framework.framework.benchmark.benchmark_class.BenchmarkForecastConfig.get_evaluation_metrics`.
To get the best visualization, keep number of metrics <= 2.
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
xlabel : `str` or None, default None
x-axis label.
ylabel : `str` or None, default "Metric value"
y-axis label.
title : `str` or None, default None
Plot title.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly bar plot.
"""
evaluation_metrics_df = self.get_evaluation_metrics(
metric_dict=metric_dict,
config_names=config_names)
# This function groups by config name
evaluation_metrics_df = (evaluation_metrics_df
.drop(columns=["split_num"])
.groupby("config_name")
.mean()
.dropna(how="all"))
# Rearranges columns so that train and test error of a config are side by side
plot_df = pd.DataFrame()
for metric_name in metric_dict.keys():
plot_df[f"train_{metric_name}"] = evaluation_metrics_df[f"train_{metric_name}"]
plot_df[f"test_{metric_name}"] = evaluation_metrics_df[f"test_{metric_name}"]
if title is None:
title = "Average evaluation metric across rolling windows"
data = []
# Each row (index) is a config. Adds each row to the bar plot.
for index in plot_df.index:
data.append(
go.Bar(
name=index,
x=plot_df.columns,
y=plot_df.loc[index].values
)
)
layout = go.Layout(
xaxis=dict(title=xlabel),
yaxis=dict(title=ylabel),
title=title,
showlegend=showlegend,
barmode="group",
)
fig = go.Figure(data=data, layout=layout)
return fig
def get_grouping_evaluation_metrics(
self,
metric_dict: Dict,
config_names: List = None,
which: str = "train",
groupby_time_feature: str = None,
groupby_sliding_window_size: int = None,
groupby_custom_column: pd.Series = None):
"""Returns splitwise rolling evaluation metric values.
These values are grouped by the grouping method chosen by ``groupby_time_feature``,
``groupby_sliding_window_size`` and ``groupby_custom_column``.
See `~greykite.framework.output.univariate_forecast.UnivariateForecast.get_grouping_evaluation`
for details on grouping method.
Parameters
----------
metric_dict : `dict` [`str`, `callable`]
Evaluation metrics to compute. Same as
`~greykite.framework.framework.benchmark.benchmark_class.BenchmarkForecastConfig.get_evaluation_metrics`.
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
which: `str`
"train" or "test". Which dataset to evaluate.
groupby_time_feature : `str` or None, default None
If provided, groups by a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
groupby_sliding_window_size : `int` or None, default None
If provided, sequentially partitions data into groups of size
``groupby_sliding_window_size``.
groupby_custom_column : `pandas.Series` or None, default None
If provided, groups by this column value. Should be same length as the DataFrame.
Returns
-------
grouped_evaluation_df : `pandas.DataFrame`
A DataFrame containing splitwise train and test evaluation metrics for ``metric_dict``
and ``config_names``. The evaluation metrics are grouped by the grouping method.
"""
if not self.is_run:
raise ValueError("Please execute the 'run' method before computing "
"grouped evaluation metrics.")
metric_dict = self.autocomplete_metric_dict(
metric_dict=metric_dict,
enum_class=EvaluationMetricEnum)
config_names = self.get_valid_config_names(config_names=config_names)
grouped_evaluation_df = pd.DataFrame()
for config_name in config_names:
rolling_evaluation = self.result[config_name]["rolling_evaluation"]
for num, (split_key, split_value) in enumerate(rolling_evaluation.items()):
forecast = split_value["pipeline_result"].forecast
split_evaluation_df = pd.DataFrame()
for metric_name, metric_func in metric_dict.items():
grouped_df = forecast.get_grouping_evaluation(
score_func=metric_func,
score_func_name=metric_name,
which=which,
groupby_time_feature=groupby_time_feature,
groupby_sliding_window_size=groupby_sliding_window_size,
groupby_custom_column=groupby_custom_column)
# Adds grouped_df to split_evaluation_df, handling the case if split_evaluation_df is empty
# If the actual values are missing, grouped_df.shape[0] might be 0
if grouped_df.shape[0] > 0:
if split_evaluation_df.empty:
split_evaluation_df = grouped_df
else:
groupby_col = split_evaluation_df.columns[0]
split_evaluation_df = pd.merge(split_evaluation_df, grouped_df, on=groupby_col)
else:
# This column name is the same as that obtained from
# `~greykite.framework.output.univariate_forecast.UnivariateForecast.get_grouping_evaluation`
split_evaluation_df[f"{which} {metric_name}"] = np.nan
split_evaluation_df.insert(0, "config_name", config_name)
split_evaluation_df.insert(1, "split_num", num)
grouped_evaluation_df = pd.concat([grouped_evaluation_df, split_evaluation_df])
grouped_evaluation_df = grouped_evaluation_df.reset_index(drop=True)
return grouped_evaluation_df
def plot_grouping_evaluation_metrics(
self,
metric_dict: Dict,
config_names: List = None,
which: str = "train",
groupby_time_feature: str = None,
groupby_sliding_window_size: int = None,
groupby_custom_column: pd.Series = None,
xlabel=None,
ylabel="Metric value",
title=None,
showlegend=True):
"""Returns a line plot of the grouped evaluation values of ``metric_dict`` of ``config_names``.
These values are grouped by the grouping method chosen by ``groupby_time_feature``,
``groupby_sliding_window_size`` and ``groupby_custom_column``.
See `~greykite.framework.output.univariate_forecast.UnivariateForecast.get_grouping_evaluation`
for details on grouping method.
Parameters
----------
metric_dict : `dict` [`str`, `callable`]
Evaluation metrics to compute. Same as
`~greykite.framework.framework.benchmark.benchmark_class.BenchmarkForecastConfig.get_evaluation_metrics`.
To get the best visualization, keep number of metrics <= 2.
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
which: `str`
"train" or "test". Which dataset to evaluate.
groupby_time_feature : `str` or None, optional
If provided, groups by a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
groupby_sliding_window_size : `int` or None, optional
If provided, sequentially partitions data into groups of size
``groupby_sliding_window_size``.
groupby_custom_column : `pandas.Series` or None, optional
If provided, groups by this column value. Should be same length as the DataFrame.
xlabel : `str` or None, default None
x-axis label. If None, label is determined by the groupby column name.
ylabel : `str` or None, default "Metric value"
y-axis label.
title : `str` or None, default None
Plot title. If None, default is based on ``config_name``.
showlegend : `bool`, default True
Whether to show the legend.
Returns
-------
fig : `plotly.graph_objs.Figure`
Interactive plotly graph.
"""
grouped_evaluation_df = self.get_grouping_evaluation_metrics(
metric_dict=metric_dict,
config_names=config_names,
which=which,
groupby_time_feature=groupby_time_feature,
groupby_sliding_window_size=groupby_sliding_window_size,
groupby_custom_column=groupby_custom_column)
# Figures out groupby_col name by process of elimination
cols = [col for col in grouped_evaluation_df.columns if col not in ["config_name", "split_num"]]
groupby_col = get_pattern_cols(cols, pos_pattern=".*", neg_pattern=which)[0]
plot_df = (grouped_evaluation_df
.drop(columns=["split_num"]) # Drops redundant column
.groupby(["config_name", groupby_col]) # Averages values across splits
.mean()
.dropna(how="all") # Drops rows with all NA values
.unstack(level=0) # Moves config_name from multiindex rows to multiindex columns
.sort_index(axis=1) # Sorts on groupby_col to plot groups in logical order
)
# Flattens and renames multiindex columns
cols = [groupby_col] + ["_".join(v) for v in plot_df.columns]
plot_df = pd.DataFrame(plot_df.to_records())
plot_df.columns = cols
if xlabel is None:
xlabel = groupby_col
if title is None:
title = f"{which} performance by {xlabel} across rolling windows"
fig = plot_multivariate(
df=plot_df,
x_col=groupby_col,
y_col_style_dict="plotly",
xlabel=xlabel,
ylabel=ylabel,
title=title,
showlegend=showlegend)
return fig
def get_runtimes(self, config_names: List = None):
"""Returns rolling average runtime in seconds for ``config_names``.
Parameters
----------
config_names : `list` [`str`], default None
Which config results to plot. A list of config names.
If None, uses all the available config keys.
Returns
-------
runtimes_df : pd.DataFrame
A DataFrame containing splitwise runtime in seconds for ``config_names``.
For example. Let's assume::
config_names = ["default_prophet", "custom_silverkite"]
These are valid ``config_names`` and there are 2 splits for each.
Then runtimes_df =
config_name split_num runtime_sec
default_prophet 0 *
default_prophet 1 *
custom_silverkite 0 *
custom_silverkite 1 *
where * represents computed values.
"""
if not self.is_run:
raise ValueError("Please execute the 'run' method to obtain runtimes.")
config_names = self.get_valid_config_names(config_names=config_names)
runtimes_df = pd.DataFrame()
for config_name in config_names:
rolling_evaluation = self.result[config_name]["rolling_evaluation"]
for num, (split_key, split_value) in enumerate(rolling_evaluation.items()):
split_runtime_df = pd.DataFrame({
"config_name": config_name,
"split_num": num,
"runtime_sec": split_value["runtime_sec"]
}, index=[num])
runtimes_df = | pd.concat([runtimes_df, split_runtime_df]) | pandas.concat |
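# Illustrative usage of the class above (the names `df`, `configs`, `tscv` are assumed
# to be built by the caller; this sketch is not part of the original module):
#   bm = BenchmarkForecastConfig(df=df, configs=configs, tscv=tscv)
#   bm.run()
#   metrics = bm.get_evaluation_metrics(
#       metric_dict={"RMSE": EvaluationMetricEnum.RootMeanSquaredError})
#   fig = bm.plot_forecasts_by_step(forecast_step=1)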
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
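        # NaT never compares as equal, less-than, or greater-than; only != is elementwise True.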
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
| Timestamp('2000-01-31 00:23:00') | pandas.Timestamp |
import pandas as pd
import snowflake.connector
import getpass as pwd
| pd.set_option('display.max_rows', None) | pandas.set_option |
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import multiprocessing
import os
from queue import Empty
import numpy as np
import six
from IProgress import ProgressBar, Bar, ETA
from cameo.parallel import SequentialView
from pandas import DataFrame
from sqlalchemy import and_
from sqlalchemy.orm import sessionmaker
from marsi import config
from marsi.chemistry import SOLUBILITY
from marsi.chemistry import rdkit
from marsi.chemistry.molecule import Molecule
from marsi.config import default_session, engine
from marsi.io.db import Database
from marsi.io.db import Metabolite
from marsi.nearest_neighbors.model import NearestNeighbors, DistributedNearestNeighbors, DBNearestNeighbors
from marsi.utils import data_dir, INCHI_KEY_TYPE, unpickle_large, pickle_large
__all__ = ['build_nearest_neighbors_model', 'load_nearest_neighbors_model']
MODEL_FILE = os.path.join(data_dir, "fingerprints_default_%s_sol_%s.pickle")
class FeatureReader(object):
"""
Accessory class to build fingerprints in chunks.
Attributes
----------
db : str
The name of the database to connect to.
fpformat : str
The fingerprint format (see pybel.fps).
solubility : str
One of 'high', 'medium', 'low', 'all'.
connection_args : dict
Other arguments of connect
"""
def __init__(self, db, fpformat='maccs', solubility='high', **connection_args):
self.db = db
self.fpformat = fpformat
self.solubility = solubility
self.connection_args = connection_args
def __call__(self, index):
subset = Database.metabolites[index[0]:index[1]]
indices = []
fingerprints = []
fingerprint_lengths = []
for m in subset:
if SOLUBILITY[self.solubility](m.solubility):
fingerprint = m.fingerprint(fpformat=self.fpformat)
fingerprints.append(fingerprint)
indices.append(m.inchi_key)
fingerprint_lengths.append(len(fingerprint))
_indices = np.ndarray((len(indices), 1), dtype=INCHI_KEY_TYPE)
for i in range(_indices.shape[0]):
_indices[i] = indices[i]
del indices
return _indices, fingerprints, fingerprint_lengths
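# --- Added illustrative sketch (not part of the original module) ---
# Shows how a FeatureReader instance is meant to be invoked by a parallel view: each call
# receives a (start, end) slice over Database.metabolites. The chunk bounds are made up and
# a populated, configured metabolite database is assumed.
def _example_feature_reader_usage():
    reader = FeatureReader(config.db_name, fpformat='maccs', solubility='high')
    indices, fingerprints, lengths = reader((0, 100))  # first 100 metabolites
    return indices.shape[0], len(fingerprints), lengths[:5]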
def build_feature_table(database, fpformat='ecfp10', chunk_size=None, solubility='high',
database_name=config.db_name, view=SequentialView()):
reader = FeatureReader(database_name, fpformat=fpformat, solubility=solubility)
chunk_size = math.ceil(chunk_size)
n_chunks = math.ceil(len(database) / chunk_size)
chunks = [((i - 1) * chunk_size, i * chunk_size) for i in range(1, n_chunks + 1)]
res = view.map(reader, chunks)
indices = np.ndarray((0, 1), dtype=INCHI_KEY_TYPE)
fingerprints = []
fingerprint_lengths = []
for r in res:
indices = np.concatenate([indices, r[0]])
fingerprints += r[1]
fingerprint_lengths += r[2]
return indices, fingerprints, fingerprint_lengths
def _build_nearest_neighbors_model(indices, features, lengths, n_models):
chunk_size = math.ceil(len(indices) / n_models)
chunks = [((i - 1) * chunk_size, i * chunk_size) for i in range(1, n_models + 1)]
models = []
for start, end in chunks:
models.append(NearestNeighbors(indices[start:end], features[start:end], lengths[start:end]))
return DistributedNearestNeighbors(models)
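# Added illustration: _build_nearest_neighbors_model splits the feature table into roughly
# equal chunks, one NearestNeighbors sub-model per chunk. Plain arithmetic only.
def _example_chunk_bounds(n_items=10, n_models=3):
    chunk_size = math.ceil(n_items / n_models)
    return [((i - 1) * chunk_size, i * chunk_size) for i in range(1, n_models + 1)]
    # e.g. 10 items over 3 models -> [(0, 4), (4, 8), (8, 12)]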
def build_nearest_neighbors_model(database, fpformat='fp4', solubility='high', n_models=5,
chunk_size=1e6, view=SequentialView()):
"""
Builds a nearest-neighbors model from the Database.
This can take several hours depending on the size of the database.
Parameters
----------
database : marsi.io.mongodb.CollectionWrapper
A Database interface to the metabolites.
chunk_size : int
Maximum number of entries per chunk.
fpformat : str
The format of the fingerprint (see pybel.fps)
solubility : str
One of high, medium, low or all.
view : cameo.parallel.SequentialView, cameo.parallel.MultiprocessingView
A view to control parallelization.
n_models : int
The number of NearestNeighbors models.
"""
indices, features, lens = build_feature_table(database, fpformat=fpformat, chunk_size=chunk_size,
solubility=solubility, view=view)
return _build_nearest_neighbors_model(indices, features, lens, n_models)
def load_nearest_neighbors_model(chunk_size=1e6, fpformat="fp4", solubility='all', session=default_session,
view=SequentialView(), model_size=100000, source="db", custom_query=None):
"""
Loads a NN model.
If a 'default_model.pickle' exists in data it will load the model. Otherwise it will build a model from the
Database. This can take several hours depending on the size of the database.
Parameters
----------
chunk_size : int
Maximum number of entries per chunk.
fpformat : str
The format of the fingerprint (see pybel.fps)
solubility : str
One of high, medium, low or all.
view : cameo.parallel.SequentialView, cameo.parallel.MultiprocessingView
A view to control parallelization.
model_size : int
The size of each NearestNeighbor in the ensemble.
"""
if source == "file":
load_nearest_neighbors_model_from_file(chunk_size=chunk_size, fpformat=fpformat, solubility=solubility,
view=view, model_size=model_size)
else:
load_nearest_neighbors_model_from_db(fpformat=fpformat, solubility=solubility,
model_size=model_size, session=session, costum_query=costum_query)
def load_nearest_neighbors_model_from_db(fpformat="fp4", solubility='all', model_size=1000, session=default_session,
custom_query=None):
"""
Builds a database-backed nearest-neighbors ensemble over the metabolites table,
optionally restricted by a custom query.
Parameters
----------
fpformat : str
The format of the fingerprint (see pybel.fps)
solubility : str
One of high, medium, low or all.
model_size : int
The size of each NearestNeighbor in the ensemble.
session : Session
SQLAlchemy session.
custom_query : ClauseElement
A query to filter elements from the database.
"""
if custom_query is not None:
indices = np.array(session.query(Metabolite.inchi_key).filter(custom_query).all(), dtype=INCHI_KEY_TYPE)
else:
indices = np.array(session.query(Metabolite.inchi_key).all(), dtype=INCHI_KEY_TYPE)
n_models = math.ceil(len(indices) / model_size)
chunk_size = math.ceil(len(indices) / n_models)
chunks = [((i - 1) * chunk_size, i * chunk_size) for i in range(1, n_models + 1)]
models = []
for start, end in chunks:
models.append(DBNearestNeighbors(indices[start:end], session, fpformat))
return DistributedNearestNeighbors(models)
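# Added usage sketch (an assumption, not original code): restrict the DB-backed ensemble to a
# molecule-size window with a custom SQLAlchemy clause and run a radius query against it.
# The atom bounds, fingerprint format and radius are illustrative values only.
def _example_db_model_query(fingerprint):
    size_filter = and_(Metabolite.num_atoms >= 10, Metabolite.num_atoms <= 40)
    model = load_nearest_neighbors_model_from_db(fpformat='maccs', custom_query=size_filter)
    return model.radius_nearest_neighbors(fingerprint, radius=0.5)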
def load_nearest_neighbors_model_from_file(chunk_size=1e6, fpformat="fp4", solubility='all',
view=SequentialView(), model_size=100000):
"""
Loads a NN model from file.
If a 'default_model.pickle' exists in data it will load the model. Otherwise it will build a model from the
Database. This can take several hours depending on the size of the database.
Parameters
----------
chunk_size : int
Maximum number of entries per chunk.
fpformat : str
The format of the fingerprint (see pybel.fps)
solubility : str
One of high, medium, low or all.
view : cameo.parallel.SequentialView, cameo.parallel.MultiprocessingView
A view to control parallelization.
model_size : int
The size of each NearestNeighbor in the ensemble.
"""
if solubility not in SOLUBILITY:
raise ValueError('%s not one of %s' % (solubility, ", ".join(SOLUBILITY.keys())))
model_file = MODEL_FILE % (fpformat, solubility)
if os.path.exists(model_file):
_indices, _features, _lengths = unpickle_large(model_file, progress=True)
else:
print("Building search model (fp: %s, solubility: %s)" % (fpformat, solubility))
_indices, _features, _lengths = build_feature_table(Database.metabolites,
chunk_size=chunk_size,
fpformat=fpformat,
solubility=solubility,
view=view)
pickle_large((_indices, _features, _lengths), model_file, progress=True)
n_models = math.ceil(len(_indices) / model_size)
nn_model = _build_nearest_neighbors_model(_indices, _features, _lengths, n_models)
return nn_model
class DataBuilder(multiprocessing.Process):
def __init__(self, inchi, task_queue, results_queue, atoms_weight, bonds_weight, timeout,
*args, **kwargs):
super(DataBuilder, self).__init__(*args, **kwargs)
self._inchi = inchi
self._session = None
self._tasks = task_queue
self._results = results_queue
self._atoms_weight = atoms_weight
self._bonds_weight = bonds_weight
self._timeout = timeout
@property
def session(self):
if self._session is None:
session_maker = sessionmaker(engine)
self._session = session_maker()
return self._session
@property
def molecule(self):
return rdkit.inchi_to_molecule(self._inchi)
def run(self):
while not self._tasks.empty():
try:
inchi_key, distance = self._tasks.get(block=False, timeout=10)
except Empty:
continue
else:
result = self.apply_similarity(inchi_key, distance)
self._results.put(result)
if self._session is not None:
self.session.close()
self.session.bind.dispose()
def apply_similarity(self, inchi_key, distance):
met = Metabolite.get(inchi_key=inchi_key, session=self.session)
try:
molecule = met.molecule('rdkit', get3d=False)
structural_similarity = rdkit.structural_similarity(self.molecule, molecule,
atoms_weight=self._atoms_weight,
bonds_weight=self._bonds_weight,
timeout=self._timeout)
return [inchi_key, met.formula, met.num_atoms, met.num_bonds, 1 - distance, structural_similarity]
except Exception as e:
print(e)
return None
def search_closest_compounds(molecule, nn_model=None, fp_cut=0.5, fpformat="maccs", atoms_diff=3,
bonds_diff=3, rings_diff=2, session=default_session,
atoms_weight=0.5, bonds_weight=0.5, timeout=120):
"""
Finds the closest compounds given a Molecule.
Parameters
----------
molecule : marsi.chemistry.molecule.Molecule
A molecule representation.
nn_model : marsi.nearest_neighbors.model.DistributedNearestNeighbors
A nearest neighbors model.
fp_cut : float
A cutoff value for fingerprint similarity.
fpformat : str
A valid fingerprint format.
atoms_diff : int
The max number of atoms that can be different (in number, not type).
bonds_diff : int
The max number of bonds that can be different (in number, not type).
rings_diff : int
The max number of rings that can be different (in number, not type).
session : Session
SQLAlchemy session.
atoms_weight : float
The weight of having matching atoms in the structural similarity
bonds_weight : float
The weight of having matching bonds in the structural similarity
Returns
-------
pandas.DataFrame
A data frame with the closest InChI Keys as index and the properties calculated for each hit.
"""
assert isinstance(molecule, Molecule)
if nn_model is None:
query = and_(Metabolite.num_atoms >= molecule.num_atoms - atoms_diff,
Metabolite.num_atoms <= molecule.num_atoms + atoms_diff,
Metabolite.num_bonds >= molecule.num_bonds - bonds_diff,
Metabolite.num_bonds <= molecule.num_bonds + bonds_diff,
Metabolite.num_rings >= molecule.num_rings - rings_diff,
Metabolite.num_rings <= molecule.num_rings + rings_diff)
nn_model = load_nearest_neighbors_model_from_db(fpformat=fpformat, custom_query=query, session=session)
assert isinstance(nn_model, DistributedNearestNeighbors)
neighbors = nn_model.radius_nearest_neighbors(molecule.fingerprint(fpformat), radius=1 - fp_cut)
if molecule.inchi_key in neighbors:
del neighbors[molecule.inchi_key]
dataframe = | DataFrame(columns=["formula", "atoms", "bonds", "tanimoto_similarity", "structural_score"]) | pandas.DataFrame |
import pandas as pd
import os
def get_oneday_data(machine_path, machine_id, day):
data = | pd.read_csv(machine_path, header=None) | pandas.read_csv |
from zipline.api import symbol
from zipline import run_algorithm
import pandas as pd
def validate_single_stock(ticker):
def init(context):
symbol(ticker)
def handle_data(context, data):
pass
start = pd.to_datetime("2017-01-09").tz_localize('US/Eastern')
end = | pd.to_datetime("2017-01-11") | pandas.to_datetime |
import openpyxl
import pandas as pd
from datetime import datetime, timedelta
import xlsxwriter
now = datetime.now()
date_time = now.strftime("%m_%d_%Y %I_%M_%p")
federal_tax_rate_path = "./federaltaxrates.csv"
state_tax_rate_path = "./statetaxrates.csv"
city_tax_rate_path = "./NYCtaxrates.csv"
# calculate social security tax
class EffectiveFederalTax:
def __init__(self, salary, marital_status):
self.salary = salary
self.marital_status = marital_status
def calculateSocialSecurityTaxDue(self):
if self.salary >= 147000:
return 9114
else:
return round(self.salary * 0.062, 2)
# calculate federal income tax + remainder of fica (medicare) for single filers
class EffectiveFederalTaxSingle(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "single")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = pd.read_csv(federal_tax_rate_path)
federal_tax_bracket_tier = 0
single_income_column = federal_tax_rate_table.columns.get_loc("Single Income")
single_income_percentage_tax_column = federal_tax_rate_table.columns.get_loc("Single Tax Rate")
max_index = len(list(federal_tax_rate_table.index)) - 1
while federal_tax_bracket_tier <= max_index and \
int(federal_tax_rate_table.iloc[federal_tax_bracket_tier, single_income_column]) < \
(self.salary - self.deductions):
federal_tax_bracket_tier += 1
federal_tax_bracket_tier -= 1
federal_tax_due = 0
counter = 0
while counter <= federal_tax_bracket_tier - 1:
federal_tax_due += (federal_tax_rate_table.iloc[counter + 1, single_income_column]
- federal_tax_rate_table.iloc[counter, single_income_column])\
* (float((federal_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_column]) \
* (float((federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
federal_tax_due += marginal_tax_due
return round(federal_tax_due, 2)
def calculateMedicareTaxDue(self):
if self.salary <= 200000:
return round(self.salary * 0.0145, 2)
else:
return round(self.salary * 0.0145 + (self.salary - 200000) * 0.009, 2)
def calculateTotalFederalTaxesDue(self):
return self.calculateSocialSecurityTaxDue() + self.calculateFederalIncomeTaxDue() \
+ self.calculateMedicareTaxDue()
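# --- Added illustrative example (not part of the original script) ---
# Computes the individual federal components for a hypothetical single filer. The salary and
# deduction figures are made up, and federaltaxrates.csv must be present for this to run.
def _example_single_filer_breakdown():
    single = EffectiveFederalTaxSingle(salary=85000, deductions=12950)
    return {
        'social_security': single.calculateSocialSecurityTaxDue(),
        'medicare': single.calculateMedicareTaxDue(),
        'federal_income': single.calculateFederalIncomeTaxDue(),
        'total_federal': single.calculateTotalFederalTaxesDue(),
    }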
# calculate federal income tax + remainder of fica (medicare) for married filers
class EffectiveFederalTaxMarried(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "Married")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = pd.read_csv(federal_tax_rate_path)
federal_tax_bracket_tier = 0
married_income_column = federal_tax_rate_table.columns.get_loc("Married Income")
married_income_percentage_tax_column = federal_tax_rate_table.columns.get_loc("Married Tax Rate")
max_index = len(list(federal_tax_rate_table.index)) - 1
while federal_tax_bracket_tier <= max_index and \
int(federal_tax_rate_table.iloc[federal_tax_bracket_tier, married_income_column]) \
< (self.salary - self.deductions):
federal_tax_bracket_tier += 1
federal_tax_bracket_tier -= 1
federal_tax_due = 0
counter = 0
while counter <= federal_tax_bracket_tier - 1:
federal_tax_due += (federal_tax_rate_table.iloc[counter + 1, married_income_column]
- federal_tax_rate_table.iloc[counter, married_income_column])\
* (float((federal_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - federal_tax_rate_table.iloc[federal_tax_bracket_tier,
married_income_column])\
* (float((federal_tax_rate_table.iloc[federal_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
federal_tax_due += marginal_tax_due
return round(federal_tax_due, 2)
def calculateMedicareTaxDue(self):
if self.salary <= 250000:
return round(self.salary * 0.0145, 2)
else:
return round(self.salary * 0.0145 + (self.salary - 250000) * 0.009, 2)
def calculateTotalFederalTaxesDue(self):
return self.calculateSocialSecurityTaxDue() + self.calculateFederalIncomeTaxDue() \
+ self.calculateMedicareTaxDue()
class EffectiveStateTax:
def __init__(self, salary, state, marital_status):
self.salary = salary
self.state = state
self.marital_status = marital_status
# calculate state income tax for single filers
class EffectiveStateTaxSingle(EffectiveStateTax):
def __init__(self, salary, state, deductions):
super().__init__(salary, state, "single")
self.deductions = deductions
def calculateStateIncomeTaxDue(self):
state_tax_rate_table = pd.read_csv(state_tax_rate_path)
my_state_tax_rate_table = state_tax_rate_table.loc[state_tax_rate_table["State"] == str(self.state)]
single_income_column = my_state_tax_rate_table.columns.get_loc("Single Filer Brackets")
single_income_percentage_tax_column = my_state_tax_rate_table.columns.get_loc("Single Filer Rates")
max_index = my_state_tax_rate_table["Single Filer Rates"].notnull().sum() - 1
if my_state_tax_rate_table.iloc[max_index, single_income_percentage_tax_column] == "none":
return 0
state_tax_bracket_tier = 0
while state_tax_bracket_tier <= max_index and \
int(my_state_tax_rate_table.iloc[state_tax_bracket_tier, single_income_column]) \
< (self.salary - self.deductions):
state_tax_bracket_tier += 1
state_tax_bracket_tier -= 1
state_tax_due = 0
counter = 0
while counter <= state_tax_bracket_tier - 1:
state_tax_due += (my_state_tax_rate_table.iloc[counter + 1, single_income_column]
- my_state_tax_rate_table.iloc[counter, single_income_column])\
* (float((my_state_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - my_state_tax_rate_table.iloc[state_tax_bracket_tier,
single_income_column])\
* (float((my_state_tax_rate_table.iloc[state_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
state_tax_due += marginal_tax_due
return (round(state_tax_due, 2))
# calculate state income tax for married filers
class EffectiveStateTaxMarried(EffectiveStateTax):
def __init__(self, salary, state, deductions):
super().__init__(salary, state, "married")
self.deductions = deductions
def calculateStateIncomeTaxDue(self):
state_tax_rate_table = pd.read_csv(state_tax_rate_path)
my_state_tax_rate_table = state_tax_rate_table.loc[state_tax_rate_table["State"] == str(self.state)]
married_income_column = my_state_tax_rate_table.columns.get_loc("Married Filing Jointly Brackets")
married_income_percentage_tax_column = my_state_tax_rate_table.columns.get_loc("Married Filing Jointly Rates")
max_index = my_state_tax_rate_table["Married Filing Jointly Rates"].notnull().sum() - 1
if my_state_tax_rate_table.iloc[max_index, married_income_percentage_tax_column] == "none":
return 0
state_tax_bracket_tier = 0
while state_tax_bracket_tier <= max_index and \
int(my_state_tax_rate_table.iloc[state_tax_bracket_tier, married_income_column]) \
< (self.salary - self.deductions):
state_tax_bracket_tier += 1
state_tax_bracket_tier -= 1
state_tax_due = 0
counter = 0
while counter <= state_tax_bracket_tier - 1:
state_tax_due += (my_state_tax_rate_table.iloc[counter + 1, married_income_column]
- my_state_tax_rate_table.iloc[counter, married_income_column])\
* (float((my_state_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - my_state_tax_rate_table.iloc[state_tax_bracket_tier,
married_income_column])\
* (float((my_state_tax_rate_table.iloc[state_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
state_tax_due += marginal_tax_due
return (round(state_tax_due, 2))
class EffectiveCityTax:
def __init__(self, salary, city, marital_status):
self.salary = salary
self.city = city
self.marital_status = marital_status
# calculate city income tax for single filers
class EffectiveCityTaxSingle(EffectiveCityTax):
def __init__(self, salary, city, deductions):
super().__init__(salary, city, "single")
self.deductions = deductions
def calculateCityIncomeTaxDue(self):
city_tax_rate_table = pd.read_csv(city_tax_rate_path)
city_tax_bracket_tier = 0
single_income_column = city_tax_rate_table.columns.get_loc("Single Income")
single_income_percentage_tax_column = city_tax_rate_table.columns.get_loc("Single Tax Rate")
max_index = len(list(city_tax_rate_table.index)) - 1
while city_tax_bracket_tier <= max_index and \
int(city_tax_rate_table.iloc[city_tax_bracket_tier, single_income_column]) < (self.salary - self.deductions):
city_tax_bracket_tier += 1
city_tax_bracket_tier -= 1
city_tax_due = 0
counter = 0
while counter <= city_tax_bracket_tier - 1:
city_tax_due += (city_tax_rate_table.iloc[counter + 1, single_income_column]
- city_tax_rate_table.iloc[counter, single_income_column]) \
* (float((city_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - city_tax_rate_table.iloc[city_tax_bracket_tier,
single_income_column]) \
* (float((city_tax_rate_table.iloc[city_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
city_tax_due += marginal_tax_due
return round(city_tax_due, 2)
# calculate city income tax for married filers
class EffectiveCityTaxMarried(EffectiveCityTax):
def __init__(self, salary, city, deductions):
super().__init__(salary, city, "married")
self.deductions = deductions
def calculateCityIncomeTaxDue(self):
city_tax_rate_table = pd.read_csv(city_tax_rate_path)
city_tax_bracket_tier = 0
married_income_column = city_tax_rate_table.columns.get_loc("Married Income")
married_income_percentage_tax_column = city_tax_rate_table.columns.get_loc("Married Tax Rate")
max_index = len(list(city_tax_rate_table.index)) - 1
while city_tax_bracket_tier <= max_index and \
int(city_tax_rate_table.iloc[city_tax_bracket_tier, married_income_column]) < (self.salary - self.deductions):
city_tax_bracket_tier += 1
city_tax_bracket_tier -= 1
city_tax_due = 0
counter = 0
while counter <= city_tax_bracket_tier - 1:
city_tax_due += (city_tax_rate_table.iloc[counter + 1, married_income_column]
- city_tax_rate_table.iloc[counter, married_income_column]) \
* (float((city_tax_rate_table.iloc[counter, married_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - city_tax_rate_table.iloc[city_tax_bracket_tier,
married_income_column]) \
* (float((city_tax_rate_table.iloc[city_tax_bracket_tier,
married_income_percentage_tax_column]).strip("%")) / 100)
city_tax_due += marginal_tax_due
return round(city_tax_due, 2)
# calculate effective tax rate from the classes/inheritance structure we have created
class TaxProfile:
def __init__(self, my_salary, spouse_salary, marital_status, state, city, federal_deductions, state_deductions,
city_deductions):
self.my_salary = my_salary
self.spouse_salary = spouse_salary
self.salary = my_salary + spouse_salary
self.marital_status = marital_status
self.state = state
self.city = city
self.federal_deductions = federal_deductions
self.state_deductions = state_deductions
self.city_deductions = city_deductions
def createTaxAnalysisWorkBook(self):
workbook = xlsxwriter.Workbook("./" + "Tax_Analysis_" + str(date_time) + ".xlsx")
workbook.close()
assumptions_table = [["salary", "{:,.2f}".format(self.salary)], ["marital_status", str(self.marital_status)],
["state", str(self.state)],
["city", str(self.city)],
["federal_deductions", "{:,.2f}".format(self.federal_deductions)],
["state_deductions", "{:,.2f}".format(self.state_deductions)],
["city_deductions", "{:,.2f}".format(self.city_deductions)]]
assumption_columns = ["Field", "Assumption"]
assumptions = pd.DataFrame(assumptions_table, columns=assumption_columns)
path = "./" + "Tax_Analysis_" + str(date_time) + ".xlsx"
with pd.ExcelWriter(str(path), engine="openpyxl", mode="a", if_sheet_exists="replace") as writer:
assumptions.to_excel(writer, sheet_name="Sheet1")
# calculate tax rate/breakdown for federal, state, and city and (if flag is not set to "please just return
# effective tax rate") export to workbook
def taxAnalytics(self, flag):
if self.marital_status == "married":
my_social_security_tax_object = EffectiveFederalTax(self.my_salary, "married")
spouse_social_security_tax_object = EffectiveFederalTax(self.spouse_salary, "married")
social_security_tax_objects = [my_social_security_tax_object, spouse_social_security_tax_object]
federal_tax_object = EffectiveFederalTaxMarried(self.salary, self.federal_deductions)
state_tax_object = EffectiveStateTaxMarried(self.salary, self.state, self.state_deductions)
city_tax_object = EffectiveCityTaxMarried(self.salary, self.city, self.city_deductions)
else:
my_social_security_tax_object = EffectiveFederalTax(self.my_salary, "single")
social_security_tax_objects = [my_social_security_tax_object]
federal_tax_object = EffectiveFederalTaxSingle(self.salary, self.federal_deductions)
state_tax_object = EffectiveStateTaxSingle(self.salary, self.state, self.state_deductions)
city_tax_object = EffectiveCityTaxSingle(self.salary, self.city, self.city_deductions)
social_security_tax_amount = 0
for social_security_tax_object in social_security_tax_objects:
social_security_tax_amount += social_security_tax_object.calculateSocialSecurityTaxDue()
medicare_tax_amount = federal_tax_object.calculateMedicareTaxDue()
fica_tax_amount = social_security_tax_amount + medicare_tax_amount
federal_income_tax_amount = federal_tax_object.calculateFederalIncomeTaxDue()
total_federal_tax_amount = round(social_security_tax_amount + medicare_tax_amount + federal_income_tax_amount,
2)
state_income_tax_amount = state_tax_object.calculateStateIncomeTaxDue()
if self.city == "New York City":
city_income_tax_amount = city_tax_object.calculateCityIncomeTaxDue()
else:
city_income_tax_amount = 0
total_tax_amount = total_federal_tax_amount + state_income_tax_amount + city_income_tax_amount
if flag == "please just return effective tax rate":
return total_tax_amount / self.salary
analytics_table = [["total tax paid", "{:,.2f}".format(total_tax_amount)],
["effective tax rate", str("{:.2%}".format(total_tax_amount / self.salary))],
["total federal tax paid", "{:,.2f}".format(total_federal_tax_amount)],
["effective federal tax rate", str("{:.2%}".format(total_federal_tax_amount /
self.salary))],
["total fica tax paid", "{:,.2f}".format(fica_tax_amount)],
["fica tax rate", str("{:.2%}".format(fica_tax_amount / self.salary))],
["federal income tax paid", "{:,.2f}".format(federal_income_tax_amount)],
["effective federal income tax rate", str("{:.2%}".format(federal_income_tax_amount
/ self.salary))],
["total state income tax paid", "{:,.2f}".format(state_income_tax_amount)],
["effective state income tax rate", str("{:.2%}".format(state_income_tax_amount
/ self.salary))],
["total city income tax paid", "{:,.2f}".format(city_income_tax_amount)],
["effective city income tax rate", str("{:.2%}".format(city_income_tax_amount
/ self.salary))]]
self.createTaxAnalysisWorkBook()
dataframe = | pd.DataFrame(analytics_table) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from datetime import datetime
from sys import stdout
from sklearn.preprocessing import scale
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
from sklearn.utils.validation import indexable
from sklearn.model_selection import check_cv
from sklearn.metrics.scorer import check_scoring
from sklearn.model_selection._validation import _fit_and_score
from sklearn.externals.joblib import Parallel, delayed
def compute_features():
# Load json data
with open('json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
C = pd.to_datetime('2016-01-15 00:00:00') - end_dates
days, seconds = C.days, C.seconds
C = days * 24 + seconds // 3600 # in hours (discrete)
delta = (T <= C).astype(int)
Y = T
Y[delta == 0] = C[delta == 0]
labels = pd.DataFrame({'Y': Y, 'delta': delta}, index=encounter_nums)
X = pd.concat([X, labels], axis=1)
print(" done")
# Basic features
print("Adding basic features...", end="")
# Add also patient_num & encounter_num for future random choice
patient_num, encounter_num = [], []
sex, baseline_HB, genotype_SS, age, transfu_count = [], [], [], [], []
LS_ALONE, LS_INACTIVE, MH_ACS, MH_AVN, MH_DIALISIS = [], [], [], [], []
MH_HEART_FAILURE, MH_ISCHEMIC_STROKE, MH_LEG_ULCER = [], [], []
MH_NEPHROPATHY, MH_PHTN, MH_PRIAPISM, MH_RETINOPATHY = [], [], [], []
OPIOID_TO_DISCHARGE, ORAL_OPIOID, USED_MORPHINE = [], [], []
USED_OXYCODONE, duration, previous_visit, rea = [], [], [], []
for patient in patients.values():
for _ in range(1, len(patient['visits']) + 1):
patient_num.append(patient['patient_num'])
sex.append(1 if int(patient['sex']) == 1 else 0)
baseline_HB.append(patient['baseline_HB'])
genotype_SS.append(patient['genotype_SS'])
for visit in visits:
encounter_num.append(visit.get('encounter_num'))
age.append(visit.get('age'))
rea.append(visit.get('rea'))
LS_ALONE.append(visit.get('LS_ALONE'))
LS_INACTIVE.append(visit.get('LS_INACTIVE'))
MH_ACS.append(visit.get('MH_ACS'))
MH_AVN.append(visit.get('MH_AVN'))
MH_DIALISIS.append(visit.get('MH_DIALISIS'))
MH_HEART_FAILURE.append(visit.get('MH_HEART_FAILURE'))
MH_ISCHEMIC_STROKE.append(visit.get('MH_ISCHEMIC_STROKE'))
MH_LEG_ULCER.append(visit.get('MH_LEG_ULCER'))
MH_NEPHROPATHY.append(visit.get('MH_NEPHROPATHY'))
MH_PHTN.append(visit.get('MH_PHTN'))
MH_PRIAPISM.append(visit.get('MH_PRIAPISM'))
MH_RETINOPATHY.append(visit.get('MH_RETINOPATHY'))
ORAL_OPIOID.append(visit.get('ORAL_OPIOID'))
USED_MORPHINE.append(visit.get('USED_MORPHINE'))
USED_OXYCODONE.append(visit.get('USED_OXYCODONE'))
duration.append(visit.get('duration'))
previous_visit.append(visit.get('previous_visit'))
transfu_count.append(visit.get('transfu_count'))
threshold = 24 * 30 * 18 # 18 months
previous_visit = [0 if (t == 'none' or t > threshold) else 1 for t in
previous_visit]
MH_ACS = [1 if int(x) == 2 else x for x in MH_ACS]
MH_AVN = [1 if int(x) == 2 else x for x in MH_AVN]
MH_DIALISIS = [1 if int(x) == 2 else x for x in MH_DIALISIS]
MH_HEART_FAILURE = [1 if int(x) == 2 else x for x in MH_HEART_FAILURE]
MH_ISCHEMIC_STROKE = [1 if int(x) == 2 else x for x in MH_ISCHEMIC_STROKE]
MH_LEG_ULCER = [1 if int(x) == 2 else x for x in MH_LEG_ULCER]
MH_NEPHROPATHY = [1 if int(x) == 2 else x for x in MH_NEPHROPATHY]
MH_PHTN = [1 if int(x) == 2 else x for x in MH_PHTN]
MH_PRIAPISM = [1 if int(x) == 2 else x for x in MH_PRIAPISM]
MH_RETINOPATHY = [1 if int(x) == 2 else x for x in MH_RETINOPATHY]
X_basic = pd.DataFrame(
{'patient_num': patient_num, 'encounter_num': encounter_num, 'sex': sex,
'genotype_SS': genotype_SS, 'age': age, 'rea': rea,
'LS_INACTIVE': LS_INACTIVE, 'MH_ACS': MH_ACS, 'MH_AVN': MH_AVN,
'MH_DIALISIS': MH_DIALISIS, 'MH_HEART_FAILURE': MH_HEART_FAILURE,
'MH_ISCHEMIC_STROKE': MH_ISCHEMIC_STROKE,
'MH_LEG_ULCER': MH_LEG_ULCER, 'LS_ALONE': LS_ALONE,
'MH_NEPHROPATHY': MH_NEPHROPATHY, 'MH_PHTN': MH_PHTN,
'MH_PRIAPISM': MH_PRIAPISM, 'MH_RETINOPATHY': MH_RETINOPATHY,
'ORAL_OPIOID': ORAL_OPIOID, 'baseline_HB': baseline_HB,
'USED_MORPHINE': USED_MORPHINE, 'USED_OXYCODONE': USED_OXYCODONE,
'duration': duration, 'previous_visit': previous_visit,
'transfu_count': transfu_count},
index=encounter_nums)
X = pd.concat([X, X_basic], axis=1)
print(" done")
# Bio data
print("Adding bio features...", end="")
bio_data, bio_names = pd.DataFrame(), []
for visit in visits:
encounter_num = int(visit.get('encounter_num'))
tmp = pd.DataFrame(index=[encounter_num])
end_date = pd.to_datetime(visit.get('end_date'))
for bio_name, bio_values in visit.get('bio').items():
# keep last value
bio_names.append(bio_name)
values = [val['nval_num'] for val in bio_values.values()]
tmp[bio_name] = values[-1]
# only keep last 48h values
offset = end_date - pd.DateOffset(hours=48)
values, index = [], []
for dic in bio_values.values():
val_time = pd.to_datetime(dic['date_bio'])
if val_time > offset:
values.append(float(dic['nval_num']))
index.append(float(
(val_time - offset) / pd.Timedelta(
'1 hour')))
# if at least 2 pts, add slope
if len(values) > 1:
x, y = index, values
# least-squares
A = np.vstack([np.array(x), np.ones(len(x))]).T
slope, _ = np.linalg.lstsq(A, y)[0]
else:
slope = np.nan
bio_names.append(bio_name + ' slope')
tmp[bio_name + ' slope'] = slope
bio_data = bio_data.append(tmp)
bio_names_count = pd.Series(
bio_names).value_counts() * 100 / n_visits
bio_percentage = 35
bio_param_kept = bio_names_count[bio_names_count > bio_percentage]
bio_data = bio_data[bio_param_kept.index]
print(" done")
X = | pd.concat([X, bio_data], axis=1) | pandas.concat |
import multiprocessing
import os
from queue import Queue
from typing import List
from injector import inject
import pandas as pd
from pandas import DataFrame
from domain.operation.execution.services.OperationCacheService import OperationCacheService
from infrastructor.connection.adapters.ConnectionAdapter import ConnectionAdapter
from infrastructor.connection.file.FileProvider import FileProvider
from infrastructor.exceptions.NotSupportedFeatureException import NotSupportedFeatureException
from infrastructor.logging.SqlLogger import SqlLogger
from models.dto.PagingModifier import PagingModifier
class FileAdapter(ConnectionAdapter):
@inject
def __init__(self,
sql_logger: SqlLogger,
file_provider: FileProvider,
operation_cache_service: OperationCacheService,
):
self.operation_cache_service = operation_cache_service
self.sql_logger = sql_logger
self.file_provider = file_provider
def clear_data(self, data_integration_id) -> int:
target_connection = self.operation_cache_service.get_target_connection(
data_integration_id=data_integration_id)
target_context = self.file_provider.get_context(
connection=target_connection.Connection)
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
file_path = os.path.join(target_connection.File.Folder, target_connection.File.FileName)
if target_connection.File.Csv.HasHeader:
if target_connection.File.Csv.Header is not None and target_connection.File.Csv.Header != '':
headers = target_connection.File.Csv.Header.split(target_connection.File.Csv.Separator)
else:
headers = [(data_integration_column.TargetColumnName) for data_integration_column in
data_integration_columns]
truncate_affected_rowcount = target_context.recreate_file(
file=file_path, headers=headers,
separator=target_connection.File.Csv.Separator)
else:
truncate_affected_rowcount = target_context.delete_file(
file=file_path)
return truncate_affected_rowcount
def get_source_data_count(self, data_integration_id) -> int:
return -1
def start_source_data_operation(self,
data_integration_id: int,
data_operation_job_execution_integration_id: int,
limit: int,
process_count: int,
data_queue: Queue,
data_result_queue: Queue):
source_connection = self.operation_cache_service.get_source_connection(
data_integration_id=data_integration_id)
source_context = self.file_provider.get_context(connection=source_connection.Connection)
has_header = None
if source_connection.File.Csv.HasHeader:
has_header = 0
headers = None
separator = source_connection.File.Csv.Separator
if source_connection.File.Csv.Header is not None and source_connection.File.Csv.Header != '':
headers = source_connection.File.Csv.Header.split(separator)
if source_connection.File.FileName is not None and source_connection.File.FileName != '':
file_path = source_context.get_file_path(folder_name=source_connection.File.Folder,
file_name=source_connection.File.FileName)
source_context.get_unpredicted_data(file=file_path,
names=headers,
header=has_header,
separator=separator,
limit=limit,
process_count=process_count,
data_queue=data_queue,
result_queue=data_result_queue)
else:
csv_files = source_context.get_all_files(folder_name=source_connection.File.Folder, file_regex='(.*csv$)')
for csv_file in csv_files:
self.sql_logger.info(f"file read started. FilePath:{csv_file} ")
source_context.get_unpredicted_data(file=csv_file,
names=headers,
header=has_header,
separator=separator,
limit=limit,
process_count=process_count,
data_queue=data_queue,
result_queue=data_result_queue)
def get_source_data(self, data_integration_id: int, paging_modifier: PagingModifier) -> DataFrame:
source_connection = self.operation_cache_service.get_source_connection(
data_integration_id=data_integration_id)
source_context = self.file_provider.get_context(connection=source_connection.Connection)
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
has_header = None
if source_connection.File.Csv.HasHeader:
has_header = 0
headers = None
if source_connection.File.Csv.Header is not None and source_connection.File.Csv.Header != '':
headers = source_connection.File.Csv.Header.split(source_connection.File.Csv.Separator)
file_path = os.path.join(source_connection.File.Folder, source_connection.File.FileName)
readed_data = source_context.get_data(file=file_path,
names=headers,
header=has_header,
start=paging_modifier.Start,
limit=paging_modifier.Limit,
separator=source_connection.File.Csv.Separator)
column_names = [(data_integration_column.SourceColumnName) for data_integration_column in
data_integration_columns]
data = readed_data[column_names]
replaced_data = data.where(pd.notnull(data), None)
return replaced_data.values.tolist()
def prepare_insert_row(self, data, columns):
insert_rows = []
for extracted_data in data:
row = []
for column in columns:
column_data = extracted_data[column]
row.append(column_data)
insert_rows.append(tuple(row))
return insert_rows
def prepare_data(self, data_integration_id: int, source_data: DataFrame) -> List[any]:
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
source_columns = [(data_integration_column.SourceColumnName) for data_integration_column in
data_integration_columns]
if isinstance(source_data, pd.DataFrame):
data = source_data[source_columns]
prepared_data = data.values.tolist()
else:
prepared_data = self.prepare_insert_row(data=source_data, columns=source_columns)
return prepared_data
def write_target_data(self, data_integration_id: int, prepared_data: List[any], ) -> int:
target_connection = self.operation_cache_service.get_target_connection(
data_integration_id=data_integration_id)
target_context = self.file_provider.get_context(connection=target_connection.Connection)
df = pd.DataFrame(prepared_data)
data = df.where( | pd.notnull(df) | pandas.notnull |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
# count distinct
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
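# Added self-contained illustration of the counting pattern used above: distinct active years
# per site, total operations, and the per-active-year average (toy data, not study data).
def _example_distinct_year_average():
    toy = pd.DataFrame({'siteid': ['A', 'A', 'A', 'B'],
                        'surgyear': [2010, 2010, 2011, 2012]})
    counts = toy.groupby('siteid')['surgyear'].value_counts().unstack(fill_value=0)
    distinct_years = counts.gt(0).sum(axis=1)
    return counts.sum(axis=1) / distinct_years  # A -> 1.5, B -> 1.0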
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 = | pd.merge(df1, df_2012, on='hospid') | pandas.merge |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
*.py: Description of what * does.
Last Modified:
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.0.1"
# import gevent
from .dbManager import SQLiteWrapper, MongoDBWrapper
import pandas as pd
from . import GeoPoint, encode, blacklist, loc_default, isempty
# from . import loc_default, blacklist
# from . import isempty
from pylru import lrudecorator
import logging
log = logging.getLogger("rssgeocoder")
class BaseGazetteer(object):
"""
Base Gazetteer class
"""
def __init__(self):
pass
def query(self, name, absoluteMatch=False):
"""
search for name
"""
pass
class GeoNames(BaseGazetteer):
def __init__(self, dbpath, priority=None):
self.db = SQLiteWrapper(dbpath)
if priority is None:
# TODO: Define default priority
self.priority = None
else:
self.priority = priority
@lrudecorator(10000)
def query(self, name, min_popln=0):
"""
Search the locations DB for the given name
params:
name - string
min_popln - integer
return:
list of possible locations the input string refers to
"""
if name in loc_default:
name = loc_default[name]
if name in blacklist:
return []
country = self._querycountry(name)
#admin = self._querystate(name)
#city = self._querycity(name, min_popln=min_popln)
#alternateNames = self._query_alternatenames(name, min_popln)
if country == []:
admin = self._querystate(name)
city = self._querycity(name, min_popln=min_popln)
alternateNames = self._query_alternatenames(name, min_popln)
#g1 = gevent.spawn(self._querystate, name)
#g2 = gevent.spawn(self._querycity, name, min_popln=min_popln)
#g3 = gevent.spawn(self._query_alternatenames, name, min_popln)
#gevent.joinall([g1, g2, g3])
#admin, city, alternateNames = g1.value, g2.value, g3.value
else:
admin, city, alternateNames = [], [], []
ldist = (city + country + admin + alternateNames)
if ldist == [] and "'" in name:
log.info('splitting location name on quote mark-{}'.format(encode(name)))
ldist = self._query_alternatenames(name.split("'", 1)[0])
if ldist != []:
df = | pd.DataFrame([i.__dict__ for i in ldist]) | pandas.DataFrame |
from netCDF4 import Dataset
import pandas as pd
import numpy as np
ncep_path = '/SubX/forecast/tas2m/daily/full/NCEP-CFSv2/' # the path where the raw data from NCEP-CFSv2 is saved
gmao_path = '/SubX/forecast/tas2m/daily/full/GMAO-GEOS_V2p1/'
for model in ['NCEP', 'GMAO']:
if model == 'NCEP':
path = ncep_path
NCEP_date = []
for date in | pd.date_range('2017-07-01', '2019-12-31') | pandas.date_range |
import argparse
import math
import json
from tqdm import tqdm
from nltk.tag import pos_tag
import pandas as pd
import networkx as nx
import torch
import config
def get_relevant_tokens(word_count_path, threshold):
d = pd.read_csv(word_count_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['token', 'count']
d = d.loc[d['count'] > threshold]
return d.token.tolist()
def prune_dt(input_dt_edges_path, relevant_tokens, output_dt_edges_path):
d = pd.read_csv(input_dt_edges_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['word1', 'word2', 'weight']
d = d.loc[d['word1'].isin(relevant_tokens) & d['word2'].isin(relevant_tokens)]
d.to_csv(output_dt_edges_path, sep='\t', index=False, header=None, quotechar=None, quoting=3)
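# Added usage sketch: the typical pruning pipeline built from the two helpers above. The file
# paths and the frequency threshold of 100 are placeholders, not values from the project.
def _example_prune_pipeline():
    tokens = get_relevant_tokens('word_counts.tsv', threshold=100)
    prune_dt('dt_edges.tsv', tokens, 'dt_edges_pruned.tsv')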
def update_POS_tags(input_DT_path, output_DT_path):
d = pd.read_csv(input_DT_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['word1', 'word2', 'weight']
def replace_POS(e):
# https://cs.nyu.edu/grishman/jet/guide/PennPOS.html
d = {'NP': 'NNP', 'NPS': 'NNPS', 'PP': 'PRP', 'PP$': 'PRP$'}
word, pos = e.rsplit(config.DT_token_pos_delimiter, 1)
if(pos in d.keys()):
return f'{word}{config.DT_token_pos_delimiter}{d[pos]}'
else:
return f'{word}{config.DT_token_pos_delimiter}{pos}'
d.word1 = d.word1.apply(lambda x: replace_POS(x))
d.word2 = d.word2.apply(lambda x: replace_POS(x))
d.to_csv(output_DT_path, sep='\t', index=False, header=None, quotechar=None, quoting=3)
def load_DT(DT_edges_path=config.prune_DT_edges_path):
df = pd.read_csv(DT_edges_path, header=None, sep='\t', quotechar=None, quoting=3)
df.columns = ['word1', 'word2', 'weight']
G = nx.from_pandas_edgelist(df, 'word1', 'word2', 'weight')
print('Loaded the DT networkx graph')
return G
def edge_weight_u_v(DT_G, node1, node2):
try:
# ensure that shortest path over self-loops are not computed
shortest_path_length = nx.algorithms.shortest_paths.generic.shortest_path_length(G=DT_G, source=node1, target=node2, weight=None)
score = math.exp((-1) * (config.path_lambda) * (shortest_path_length - 1))
path_exists = True
except nx.exception.NodeNotFound:
score = -1
path_exists = False
except nx.exception.NetworkXNoPath:
score = -1
path_exists = False
return path_exists, score
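# Added note (illustration only): the edge weight decays exponentially with the shortest-path
# length, weight = exp(-path_lambda * (length - 1)), so directly connected DT nodes get 1.0.
# The two-node toy graph and the '#'-delimited token format below are assumptions.
def _example_edge_weight():
    toy_dt = nx.Graph()
    toy_dt.add_edge('cat#NN', 'dog#NN', weight=1)
    return edge_weight_u_v(toy_dt, 'cat#NN', 'dog#NN')  # (True, 1.0)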
def setup_graph_edges(DT_G, sentence):
space_tokenized_sentence = sentence.split()
if(config.is_en):
pos_tagged_space_tokenized_sentence = [token + config.DT_token_pos_delimiter + tag for (token, tag) in pos_tag(space_tokenized_sentence)]
else:
# no POS tagger used in the non english DT
pos_tagged_space_tokenized_sentence = space_tokenized_sentence
assert(len(pos_tagged_space_tokenized_sentence) == len(space_tokenized_sentence))
# to ensure that every graph has edges - setup the mandatory self-loops
_edge_index = [[i, i] for i in range(len(space_tokenized_sentence))]
_edge_attr = [[1] for _ in _edge_index]
for i in range(len(space_tokenized_sentence)):
for j in range(i+1, len(space_tokenized_sentence)):
assert(i != j)
path_exists, edge_weight = edge_weight_u_v(DT_G, pos_tagged_space_tokenized_sentence[i], pos_tagged_space_tokenized_sentence[j])
if(path_exists):
_edge_index.append([i, j])
_edge_attr.append([edge_weight])
_edge_index.append([j, i])
_edge_attr.append([edge_weight])
edge_index = torch.LongTensor(_edge_index).to(config.device)
edge_index = torch.transpose(edge_index, 0, 1)
# shape(edge_index) = [2, num_edges]
edge_attr = torch.FloatTensor(_edge_attr).to(config.device)
# shape(edge_attr) = [num_edges, 1]
return edge_index, edge_attr
def get_sentences_encoded_dict(tokenizer, sentences, max_length):
assert(len(sentences) == 1 or len(sentences) == 2)
if(len(sentences) == 1):
encoded_dict = tokenizer.encode_plus(sentences[0], add_special_tokens=True, max_length=max_length, truncation=True, padding='max_length', return_attention_mask=True, return_tensors='pt')
elif(len(sentences) == 2):
encoded_dict = tokenizer.encode_plus(sentences[0], sentences[1], add_special_tokens=True, max_length=max_length, truncation=True, padding='max_length', return_attention_mask=True, return_tensors='pt')
input_ids = encoded_dict['input_ids'][0].to(config.device)
if(config.lm_model_name.startswith('roberta') or config.lm_model_name.startswith('xlm-roberta')):
token_type_ids = torch.zeros_like(input_ids)
else:
token_type_ids = encoded_dict['token_type_ids'][0].to(config.device)
attention_mask = encoded_dict['attention_mask'][0].to(config.device)
return input_ids, token_type_ids, attention_mask
def get_label_embedding(label, label_dict):
assert(label in label_dict)
vec = torch.zeros(len(label_dict), dtype=torch.float, device=config.device)
vec[label_dict[label]] = 1
vec = torch.unsqueeze(vec, 0)
# shape(vec) = [1, len(label_dict)]
return vec
def get_score_embedding(score):
vec = torch.tensor([score], dtype=torch.float).unsqueeze(0).to(config.device)
# shape(vec) = [1, 1]
return vec
def get_WiC_data_frame(WiC_data_path, WiC_gold_path):
df_data = pd.read_csv(WiC_data_path, header=None, sep='\t')
df_gold = pd.read_csv(WiC_gold_path, header=None, sep='\t')
df = pd.concat([df_data, df_gold], axis=1, sort=False)
df.columns = ['target', 'pos', 'indices', 'context_1', 'context_2', 'label']
print('Loaded the WiC dataset')
return df
def get_RTE_data_frame(RTE_data):
df = pd.read_csv(RTE_data, sep='\t')
print(f'RTE dataframe loaded from {RTE_data}')
return df
def get_STS_B_data_frame(STS_B_data, columns=config.STS_B_columns):
# Loader adapted from https://colab.research.google.com/github/hybridnlp/tutorial/blob/master/07a_semantic_claim_search.ipynb
rows = []
print(f'Loading STS_B dataset from {STS_B_data}')
with open(STS_B_data, mode='r', encoding='utf') as f:
lines = f.readlines()
print(f'Reading {len(lines)} lines from {STS_B_data}')
for lnr, line in enumerate(lines):
cols = line.split('\t')
assert len(cols) >= 7, 'line %s has %s columns instead of %s:\n\t%s' % (
lnr, len(cols), 7, "\n\t".join(cols)
)
cols = cols[:7]
assert len(cols) == 7
rows.append(cols)
df = pd.DataFrame(rows, columns=columns)
df.sent_a = df.sent_a.str.strip()
df.sent_b = df.sent_b.str.strip()
# score is read as a string, so add a copy with correct type
df['score_f'] = df['score'].astype('float64')
return df
def get_MRPC_data_frame(MRPC_data, columns=config.MRPC_columns):
# Loader adapted from https://colab.research.google.com/github/hybridnlp/tutorial/blob/master/07a_semantic_claim_search.ipynb
rows = []
print(f'Loading MRPC dataset from {MRPC_data}')
with open(MRPC_data, mode='r', encoding='utf') as f:
lines = f.readlines()[1:]
print(f'Reading {len(lines)} lines from {MRPC_data}')
for lnr, line in enumerate(lines):
cols = line.split('\t')
assert len(cols) == 5
rows.append(cols)
df = pd.DataFrame(rows, columns=columns)
return df
def get_SST_2_data_frame(SST_2_data):
d = pd.read_csv(SST_2_data, sep='\t')
return d
def get_CoLA_data_frame(CoLA_data):
d = pd.read_csv(CoLA_data, sep='\t', header=None)
d.columns = ['category', 'label', 'domain', 'sentence']
return d
def get_WNLI_translated_data_frame(WNLI_data):
d = pd.read_csv(WNLI_data)
return d
def get_IITP_product_reviews_data_frame(ITP_product_reviews_data):
d = pd.read_csv(ITP_product_reviews_data, header=None)
d.columns = ['label', 'sentence']
d = d.convert_dtypes()
return d
def get_MIDAS_discourse_json(json_path):
json_file = open(json_path)
orig_d = json.load(json_file)
prune_d = list()
for d in orig_d:
if(d['Discourse Mode'] in config.MIDAS_discourse_labels.keys()):
prune_d.append(d)
return prune_d
def get_DPIL_data_frame(DPIL_data):
d = pd.read_csv(DPIL_data, header=None)
d.columns = ['sentence_1', 'sentence_2', 'label']
return d
def get_KhondokerIslam_bengali_data_frame(KhondokerIslam_bengali_path):
d = pd.read_csv(KhondokerIslam_bengali_path)
return d
def get_rezacsedu_sentiment_data_frame(rezacsedu_sentiment_path):
d = pd.read_csv(rezacsedu_sentiment_path, header=None)
d.columns = ['text', 'label']
return d
def get_BEmoC_data_frame(BEmoC_path):
d = pd.read_excel(BEmoC_path)
return d
def get_Seid_amharic_sentiment_data_frame(Seid_amharic_sentiment_path):
d = | pd.read_csv(Seid_amharic_sentiment_path, header=None) | pandas.read_csv |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 04:52:01 2019
@author: jamiesom
"""
import pandas as pd
from electricitylci.globals import data_dir, output_dir
import numpy as np
from electricitylci.eia860_facilities import eia860_generator_info
import re
def generate_power_plant_construction(year):
"""This function uses an NETL study that generated the life cycle inventory
for power plant construction using an economic input output model. Two types
of plants are considered: sub-critical pulverized coal and a natural gas
combined cycle plant. The inventory provided by the study is for an entire plant.
This inventory is divided by the net generation capacity of those plants to
place the inventory on the basis of a MW and then divided by an assumed plant
life of 30 years, which is a conservative assumption considering the lifetime
of these plants is typically much longer. These per year/per MW impacts
are mapped to the fossil power generators in the U.S. where they are scaled
by the net generating capacity of the plants (as provided by EIA data). These
impacts are eventually divided by the generation for the year in MWh to provide
the construction impacts on the basis of the functional unit.
Parameters
----------
year : int
Year of EIA data to use to provide net generating capacity
Returns
-------
dataframe
This dataframe provides construction inventory for each power plant
reporting to EIA.
"""
gen_df = eia860_generator_info(year)
gen_columns=[
"plant_id",
"generator_id",
"technology",
"prime_mover",
"nameplate_capacity_mw",
"energy_source_1"
]
energy_sources=[
"NG",'BIT', 'DFO', 'LIG', 'SUB', 'RC', 'KER', 'RFO', 'PC',
'WC'
]
compartment_mapping={
'resource/in ground':"resource",
'resource':"resource",
'resource/in water':"resource",
'resource/in air':"resource",
'air/unspecified':"emission/air",
'resource/land':"resource",
'water/unspecified':"emission/water",
'air/low population density':"emission/air",
'soil/groundwater':"emission/water",
'air/unspecified/2,4':"emission/air",
'soil/unspecified':"emission/soil",
'soil/industrial':"emission/soil",
'soil/unspecified/2,4':"emission/soil",
'water/unspecified/2,4':"emission/water",
'/':"",
'resource/groundwater':"resource",
'resource/surface water':"resource",
'water/surface water':"resource"
}
gas_prime = ["GT","IC","OT","CT","CS","CE","CA","ST"]
coal_type = ["BIT","SUB","LIG","WC","RC"]
gen_df = gen_df.loc[gen_df["energy_source_1"].isin(energy_sources), gen_columns]
gen_df["plant_id"]=gen_df["plant_id"].astype(int)
groupby_cols=["plant_id","technology","energy_source_1","prime_mover"]
gen_df_group = gen_df.groupby(by=groupby_cols,as_index=False)["nameplate_capacity_mw"].sum()
prime_energy_combo=gen_df_group.groupby(by=["prime_mover","energy_source_1"]).size().reset_index().rename(columns={0:'count'})
prime_energy_combo["const_type"]="coal"
gas_const_criteria=(prime_energy_combo["prime_mover"].isin(gas_prime))&(~prime_energy_combo["energy_source_1"].isin(coal_type))
prime_energy_combo.loc[gas_const_criteria,"const_type"]="ngcc"
gen_df_group=gen_df_group.merge(prime_energy_combo[['prime_mover', 'energy_source_1', 'const_type']],
on=["prime_mover","energy_source_1"],
how="left")
inventory = | pd.read_csv(f"{data_dir}/plant_construction_inventory.csv") | pandas.read_csv |
from plotly.offline import plot, iplot, init_notebook_mode
from pandas.plotting import register_matplotlib_converters
import seaborn as sns
import matplotlib.pyplot as plt
from urllib.request import urlopen
from datetime import timedelta
import json
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots
from plotly.offline import plot
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
import cv2
import keras
import warnings
from .models import Images
from PIL import Image
def is_grey_scale(img_path):
img = Image.open(img_path).convert('RGB')
w, h = img.size
for i in range(w):
for j in range(h):
r, g, b = img.getpixel((i, j))
if r != g != b:
return False
return True
# MKIT STARTS
def load_models():
model1 = keras.models.load_model("models/first_level_hierarchy.h5")
model2 = keras.models.load_model("models/second_level_hierarchy.h5")
return model1, model2
def test_pneumonia(image, model1, model2):
logs = ["Covid 19", "Bacterial Pneumonia", "Viral Pneumonia", "Negative"]
result = dict()
image = (np.array([cv2.resize(image, (150, 150))]).reshape(
1, 150, 150, 3)).astype('float32')/255
base = model1.predict(image)
indx = np.argmax(base)
if indx == 1:
derived = model2.predict(image)
indx_der = np.argmax(derived)
result['Pneumonia'] = [logs[indx_der], derived[0][indx_der]*100]
elif indx == 0:
result['Pneumonia'] = [logs[3], base[0][indx]*100]
return(result)
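# Illustrative flow: if model1 predicts index 1, model2 refines the result to one
# of {Covid 19, Bacterial Pneumonia, Viral Pneumonia} with its own confidence;
# if model1 predicts index 0, the result is "Negative" with model1's confidence.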
# MKIT ENDS
# statistics start
plots = []
data = pd.read_csv(
"datasets/covid19GlobalForecastingweek1/train.csv", parse_dates=['Date'])
cleaned_data = pd.read_csv(
"datasets/covid19cleancompletedataset/covid_19_clean_complete.csv", parse_dates=['Date'])
cases = ['Confirmed', 'Deaths', 'Recovered', 'Active']
cleaned_data['Active'] = cleaned_data['Confirmed'] - \
cleaned_data['Deaths'] - cleaned_data['Recovered']
cleaned_data['Country/Region'] = cleaned_data['Country/Region'].replace(
'Mainland China', 'China')
cleaned_data[['Province/State']
] = cleaned_data[['Province/State']].fillna('')
cleaned_data[cases] = cleaned_data[cases].fillna(0)
cleaned_data.rename(columns={'Date': 'date'}, inplace=True)
data = cleaned_data
grouped = data.groupby(
    'date')['date', 'Confirmed', 'Deaths', 'Active'].sum().reset_index()
fig1 = px.line(grouped, x="date", y="Deaths",
title="Worldwide Death Cases Over Time")
grouped_india = data[data['Country/Region'] == "India"].reset_index()
grouped_india_date = grouped_india.groupby(
'date')['date', 'Confirmed', 'Deaths'].sum().reset_index()
plot_titles = ['India']
fig2 = px.line(grouped_india_date, x="date", y="Confirmed",
title=f"Confirmed Cases in {plot_titles[0].upper()} Over Time", color_discrete_sequence=['#F61067'], height=500)
data['Province/State'] = data['Province/State'].fillna('')
temp = data[[col for col in data.columns if col != 'Province/State']]
latest = temp[temp['date'] == max(temp['date'])].reset_index()
latest_grouped = latest.groupby(
'Country/Region')['Confirmed', 'Deaths'].sum().reset_index()
fig3 = px.bar(latest_grouped.sort_values('Confirmed', ascending=False)[
:40][::-1], x='Confirmed', y='Country/Region', title='Confirmed Cases Worldwide', text='Confirmed', height=1000, orientation='h')
fig4 = px.bar(latest_grouped.sort_values('Deaths', ascending=False)[:30][::-1], x='Deaths', y='Country/Region', color_discrete_sequence=[
'#84DCC6'], title='Deaths Cases Worldwide', text='Deaths', height=1000, orientation='h')
temp = cleaned_data.groupby(
'date')['Recovered', 'Deaths', 'Active'].sum().reset_index()
temp = temp.melt(id_vars="date", value_vars=[
'Recovered', 'Deaths', 'Active'], var_name='case', value_name='count')
temp['case'].value_counts()
pio.templates.default = "plotly_dark"
fig5 = px.line(temp, x="date", y="count", color='case',
title='Cases over time: Line Plot', color_discrete_sequence=['cyan', 'red', 'orange'])
fig6 = px.area(temp, x="date", y="count", color='case',
title='Cases over time: Area Plot', color_discrete_sequence=['cyan', 'red', 'orange'])
formated_gdf = data.groupby(
    ['date', 'Country/Region'])['Confirmed', 'Deaths'].max()
formated_gdf = formated_gdf.reset_index()
formated_gdf['date'] = pd.to_datetime(formated_gdf['date'])
formated_gdf['date'] = formated_gdf['date'].dt.strftime('%m/%d/%Y')
formated_gdf['size'] = formated_gdf['Confirmed'].pow(0.3)
fig8 = px.scatter_geo(formated_gdf, locations="Country/Region", locationmode='country names',
color="Confirmed", size='size', hover_name="Country/Region",
range_color=[0, 1500],
projection="natural earth", animation_frame="date",
title='COVID-19: Spread Over Time', color_continuous_scale="portland")
formated_gdf = formated_gdf.reset_index()
formated_gdf['date'] = pd.to_datetime(formated_gdf['date'])
formated_gdf['date'] = formated_gdf['date'].dt.strftime('%m/%d/%Y')
formated_gdf['size'] = formated_gdf['Confirmed'].pow(0.3)
pio.templates.default = "plotly_dark"
fig7 = px.scatter_geo(formated_gdf, locations="Country/Region", locationmode='country names', color="Deaths", size='size', hover_name="Country/Region",
range_color=[0, 100], projection="natural earth", animation_frame="date", title='COVID-19: Deaths Over Time', color_continuous_scale="peach")
# statistics end
# symptoms start
symptoms = {'symptom': ['Fever',
'Dry cough',
'Fatigue',
'Sputum production',
'Shortness of breath',
'Muscle pain',
'Sore throat',
'Headache',
'Chills',
'Nausea or vomiting',
'Nasal congestion',
'Diarrhoea',
'Haemoptysis',
'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
symptom_graph = px.bar(symptoms[['symptom', 'percentage']].sort_values('percentage', ascending=False),
y="percentage", x="symptom", color='symptom',
log_y=True, template='ggplot2', title='Symptom of Coronavirus')
symptom_div = plot(symptom_graph, output_type='div', include_plotlyjs=False, show_link=False,
link_text="", image_width=500, config={"displaylogo": False})
plots.append(symptom_div)
# symptoms end
# india starts
cnf = '#393e46' # confirmed - grey
dth = '#ff2e63' # death - red
rec = '#21bf73' # recovered - cyan
act = '#fe9801' # active case - yellow
register_matplotlib_converters()
pio.templates.default = "plotly"
# importing datasets
df = | pd.read_csv('datasets/complete.csv', parse_dates=['Date']) | pandas.read_csv |
import pandas as pd
import datetime
import numpy as np
import icd
def get_age(row):
"""Calculate the age of patient by row
Arg:
row: the row of pandas dataframe.
return the patient age
"""
raw_age = row['DOD'].year - row['DOB'].year
if (row['DOD'].month < row['DOB'].month) or ((row['DOD'].month == row['DOB'].month) and (row['DOD'].day < row['DOB'].day)):
return raw_age - 1
else:
return raw_age
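# Example (hypothetical dates): DOB 1950-06-15 and DOD 2020-06-14 gives 69,
# because the death falls before the birthday in that year; DOD 2020-06-15
# or later in the year gives 70.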
mimic_patients = 'mimic_csv/PATIENTS.csv'
mimic_note_events = 'mimic_csv/NOTEEVENTS.csv'
mimic_admissions = 'mimic_csv/ADMISSIONS.csv'
mimic_diagnoses = 'mimic_csv/DIAGNOSES_ICD.csv'
patient = pd.read_csv(mimic_patients)
patient['DOD'] = pd.to_datetime(patient['DOD']).dt.date
patient['DOB'] = pd.to_datetime(patient['DOB']).dt.date
patient['DOD_HOSP'] = pd.to_datetime(patient['DOD_HOSP'])
patient['age'] = patient.apply(get_age, axis=1)
#for col in patient.columns:
# print(col) #desc col headers
patient = patient.drop(['DOD_SSN', 'DOD_HOSP'], axis=1)
admission = | pd.read_csv(mimic_admissions) | pandas.read_csv |
import pandas as pd
# bookings_to_arr
#
# Accepts a pandas dataframe containing bookings data and returns a pandas
# dataframe containing changes in ARR with the following columns:
# - date - the date of the change
# - type - the type of the change (new, upsell, downsell, and churn)
# - customer_id - the id of the customer
# - prior_arr - the ARR for the customer prior to the change
# - next_arr - the ARR for the customer following the change
# - delta_arr - the change in ARR
#
# The bookings (input) dataframe should contain the following columns:
# - date - the date of the booking (as pandas Timestamps)
# - customer_id - the unique id of the customer
# - arr - the amount of ARR booked
# - start_date - the start date of the contract (as pandas Timestamps)
# - end_date - the end date of the contract (as pandas Timestamps)
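# Illustrative expectation (matching the test_bookings records defined below):
# the 125 ARR booking starting 10/1/2018 followed by the 200 ARR booking starting
# 10/1/2019 for customer 1234 should produce a "new" change of +125 and then an
# "upsell" change with prior_arr=125, next_arr=200, delta_arr=+75.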
def bookings_to_arr(bookings):
intervals = bookings_to_intervals(bookings)
print(intervals)
def bookings_to_intervals(bookings):
interval_list = []
for index, row in bookings.sort_values(by=["customer_id", "start_date"]).iterrows():
interval_list.append(row)
if index == 2:
interval_list.append(row)
# print(f"Index: {index}")
# print(bookings.loc[index])
# print("\n")
return pd.DataFrame(data=interval_list)
test_bookings = pd.DataFrame.from_records(
[
{
"date": pd.Timestamp(ts_input="9/25/2019", tz="UTC"),
"customer_id": 1234,
"arr": 200,
"start_date": pd.Timestamp(ts_input="10/1/2019", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2020", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2018", tz="UTC"),
"customer_id": 1234,
"arr": 125,
"start_date": pd.Timestamp(ts_input="10/1/2018", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2019", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2020", tz="UTC"),
"customer_id": 1234,
"arr": 150,
"start_date": pd.Timestamp(ts_input="10/1/2020", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2021", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2021", tz="UTC"),
"customer_id": 1234,
"arr": 150,
"start_date": | pd.Timestamp(ts_input="10/1/2021", tz="UTC") | pandas.Timestamp |
import pandas as pd
def load_dataset(csv_path):
df_inflacao = | pd.read_csv(csv_path, sep=';', decimal=',') | pandas.read_csv |
""" Compute the accuracies required to compute the overlapping scores.
Namely, for each model m trained with data augmentation on one candidate corruption (via corruption_trainings.py), get the accuracy of m on every candidate corruption, measured on the ImageNet validation set corrupted with the considered corruption.
Computed accuracies are saved in a pickle file at ../Results/corruption_cat.
Required: one model must have been trained for each considered candidate corruption using corruption_trainings.py.
"""
import torch
import numpy as np
import os
import sys
import torchvision
import pandas
from torch.utils.data import DataLoader
p = os.path.abspath('..')
sys.path.insert(1, p)
import data
import tools
import candidate_corruptions
dataset_path = sys.argv[1]
list_models = sys.argv[2:]
if list_models == ["all_candidates"]:
list_models = ["clean"] + list(candidate_corruptions.dict_corruptions.keys())
list_corruptions = list(list_models)
batch_size = 100
num_workers = 4
device = torch.device("cuda:0")
load_path = "../Results/trained_models"
res_array = np.zeros([len(list_models),len(list_corruptions)])
test_acc = 0
for i in range(len(list_models)):
print("Get test accuracies of {} model".format(list_models[i]))
classifier = torchvision.models.resnet18(num_classes=100, pretrained=False)
try:
classifier.load_state_dict(torch.load(os.path.join(load_path,list_models[i],"checkpoint"), map_location=device))
classifier = torch.nn.DataParallel(classifier, device_ids=[0])
except:
classifier = torch.nn.DataParallel(classifier, device_ids=[0])
classifier.load_state_dict(torch.load(os.path.join(load_path,list_models[i],"checkpoint"), map_location=device))
classifier.to(device)
classifier.eval()
for k in range(len(list_corruptions)):
test_set = data.get_Inet100(dataset_path, "test", corruptions=[list_corruptions[k]], album_mode=True)
test_loader = DataLoader(test_set, batch_size=batch_size,shuffle=True, num_workers=num_workers,drop_last=True,pin_memory=True)
test_set_size = len(test_set)
epoch_test_size = test_set_size//batch_size
with torch.no_grad() :
for _, couple in enumerate(test_loader):
x, l = couple
x, l = x.to(device), l.to(device)
y = classifier(x)
test_acc = tools.update_topk_acc(test_acc,y,l,epoch_test_size,1)
res_array[i,k] = test_acc.item()
test_acc = 0
res_array = | pandas.DataFrame(res_array, index=list_models, columns=list_corruptions) | pandas.DataFrame |
"""Run 20newsgroups data experiment."""
import os
import numpy as np
import random
import pickle
import pandas as pd
import methods_20news
from methods_20news import Methods
from prep_20news import *
from utils_20news import *
from statistics import mean
from statistics import median
from statistics import stdev
random.seed(1)
np.random.seed(1)
# ------------ PARAMETERS ------------
rank = 13 # (int) input rank for NMF and (S)SNMF models
iterations = 11 # (odd int) number of iterations to run for analysis
run_analysis = 1 # (boolean) run all methods to obtain class. acc./keywords/class. reports/heatmaps (reproduce paper results)
nmf_search = 0 # (boolean) run for various tolerance values (reproduce paper results with iterations = 10)
ssnmf_search = 0 # (boolean) run for various tolerances and regularizers (reproduce paper results with iterations = 10)
clust_analysis = 0 # (boolean) run code to compute clustering scores (reproduce paper results)
# -------------------------------------
cls_names =["Computers","Sciences","Politics","Religion","Recreation"]
sub_names = ["graphics", "mac", "windows", "crypt", "electronics", "space", "guns", "mideast", \
"atheism", "christian", "autos", "baseball", "hockey"]
# Load and subsample 20news data, assign a label to each category, and split data into train, val, and test sets
newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat = load_data()
# Construct a full train set that consists of both train and validation set
X_train_full = newsgroups_train+newsgroups_val
train_labels_full = np.concatenate((train_labels, val_labels), axis=0)
train_subcat_full = np.concatenate((train_subcat, val_subcat), axis=0)
# Compute the TFIDF representation of the train set
vectorizer_train, feature_names_train, X_train = tfidf_train(newsgroups_train, n_features = 5000)
X_train, train_labels, train_subcat = shuffle_data(X_train, train_labels, train_subcat)
# Apply TFIDF transformation to validation set
X_val = tfidf_transform(vectorizer_train, newsgroups_val)
X_val, val_labels, val_subcat = shuffle_data(X_val, val_labels, val_subcat)
# Compute the TFIDF representation of the full train set
vectorizer_train_full, feature_names_train_full, X_train_full = tfidf_train(X_train_full, n_features = 5000)
X_train_full, train_labels_full, train_subcat_full = shuffle_data(X_train_full, train_labels_full, train_subcat_full)
# Apply TFIDF transformation to test data set
X_test = tfidf_transform(vectorizer_train_full, newsgroups_test)
X_test, test_labels, test_subcat = shuffle_data(X_test, test_labels, test_subcat)
if run_analysis == 1:
# Construct an evaluation module
evalualtion_module = Methods(X_train = X_train, X_val = X_val, X_test = X_test,\
train_labels = train_labels, val_labels = val_labels,\
test_labels = test_labels, X_train_full = X_train_full,\
train_labels_full = train_labels_full, cls_names = cls_names,\
feature_names_train=feature_names_train_full)
# "Optimal" parameters for SSNMF Models 3,4,5,6 respectively
ssnmf_tol = [1e-4,1e-4,1e-3,1e-3]
lamb = [1e+2,1e+1,1e+2,1e+3]
# "Optimal" NMF parameters
nmf_tol = 1e-4
# Run SSNMF Analysis
acc_dict, A_dict, B_dict, S_dict, S_test_dict, Yhat_dict, median_dict, iter_dict = evalualtion_module.run_analysis(ssnmf_tol= ssnmf_tol, \
nmf_tol = nmf_tol, lamb=lamb, ka=rank, itas=50, iterations=iterations)
evalualtion_module.median_results(acc_dict, A_dict, B_dict, Yhat_dict, median_dict, iter_dict)
if nmf_search == 1:
""" Run NMF for various tolerance values."""
mean_acc = []
std_acc = []
tol_list = [1e-5,1e-4,1e-3,1e-2]
for tol_idx in range(len(tol_list)):
nmf_acc = []
nmf_tol = tol_list[tol_idx]
print("Testing tolerance equal to {}.".format(nmf_tol))
# Construct an evaluation module
evalualtion_module = Methods(X_train = X_train, X_val = X_val, X_test = X_val,\
train_labels = train_labels, val_labels = val_labels,\
test_labels = val_labels, X_train_full = X_train,\
train_labels_full = train_labels, cls_names = cls_names,\
feature_names_train=feature_names_train)
for j in range(iterations):
print("Iteration {}.".format(j))
nmf_svm_acc, W, nn_svm, nmf_svm_predicted, nmf_iter, H, H_test = evalualtion_module.NMF(rank=rank, nmf_tol=nmf_tol)
nmf_acc.append(nmf_svm_acc)
mean_acc.append(mean(nmf_acc))
std_acc.append(stdev(nmf_acc))
print("\n\nResults for {} iterations.\n".format(iterations))
for tol_idx in range(len(tol_list)):
print("NMF average accuracy (with tol = {}): {:.4f} ยฑ {:.4f}.".format(tol_list[tol_idx],mean_acc[tol_idx],std_acc[tol_idx]))
if ssnmf_search == 1:
""" Run SSNMF for various tolerance and regularizer values."""
tol_list = [1e-4,1e-3,1e-2]
lam_list = [1e+1,1e+2,1e+3]
mean_dict = {"Model3": [], "Model4": [], "Model5": [], "Model6": []}
std_dict = {"Model3": [], "Model4": [], "Model5": [], "Model6": []}
for lam_idx in range (len(lam_list)):
ssnmf_lam = lam_list[lam_idx]
print("Testing lambda equal to {}.".format(ssnmf_lam))
for tol_idx in range (len(tol_list)):
ssnmf_tol = tol_list[tol_idx]
print("Testing tolerance equal to {}.".format(ssnmf_tol))
acc_dict = {"Model3": [], "Model4": [], "Model5": [], "Model6": []}
# Construct an evaluation module
evalualtion_module = Methods(X_train = X_train, X_val = X_val, X_test = X_val,\
train_labels = train_labels, val_labels = val_labels,\
test_labels = val_labels, X_train_full = X_train,\
train_labels_full = train_labels, cls_names = cls_names,\
feature_names_train=feature_names_train)
for j in range(iterations):
print("Iteration {}.".format(j))
for i in range(3,7):
# Run SSNMF
test_evals, A, B, ssnmf_predicted, ssnmf_iter, S, S_test = evalualtion_module.SSNMF(modelNum = i,
ssnmf_tol = ssnmf_tol,lamb = ssnmf_lam, ka = rank, itas= 50)
acc_dict["Model" + str(i)].append(test_evals[-1])
for i in range(3,7):
acc = acc_dict["Model" + str(i)]
mean_dict["Model" + str(i)].append(mean(acc))
std_dict["Model" + str(i)].append(stdev(acc))
print("Model {} average accuracy (with tol = {} and lam = {}): {:.4f} ยฑ {:.4f}.".format(i,ssnmf_tol,ssnmf_lam,mean(acc),stdev(acc)))
for i in range(3,7):
idx_final = 0
for lam_idx in range(len(lam_list)):
ssnmf_lam = lam_list[lam_idx]
for tol_idx in range (len(tol_list)):
ssnmf_tol = tol_list[tol_idx]
m_final = mean_dict["Model" + str(i)][idx_final]
s_final = std_dict["Model" + str(i)][idx_final]
print("Model {} average accuracy (with tol = {} and lam = {}): {:.4f} ยฑ {:.4f}.".format(i,ssnmf_tol,ssnmf_lam,m_final,s_final))
idx_final += 1
print()
if clust_analysis == 1:
""" Compute hard/soft clustering scores."""
clust_list = ["hard", "soft"]
# Ground-truth matrix:
subcat_all = np.concatenate((train_subcat_full, test_subcat), axis = None)
subcat_onehot = | pd.get_dummies(subcat_all) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 14 09:27:05 2021
@author: vargh
"""
import numpy as np
import pandas as pd
from sympy import symbols, pi, Eq, integrate, diff, init_printing, solve
from scipy.optimize import curve_fit
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d, interp2d
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from shapely.geometry import Polygon
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#init_printing()
## Functions
def calc_maneuver_sum(spec_range, spec_dtr, fuel_used_interper, maneuver_time_interper, printer):
calc_fuel_used = fuel_used_interper(spec_range, spec_dtr)
calc_maneuver_time = maneuver_time_interper(spec_range, spec_dtr)
if printer:
print('Total distance range: %.2f m'%(spec_range))
print('Total fuel mass burned range: %.2f kg'%(calc_fuel_used))
print('Maneuver time: %.2f s'%(calc_maneuver_time))
return calc_fuel_used, calc_maneuver_time
def calc_geom(threeD_balloon, theta):
norm_vec = np.array([np.cos(theta), 0, np.sin(theta)])
proj_of_u_on_n = (np.dot(threeD_balloon, norm_vec))*norm_vec.reshape(len(norm_vec), 1)
proj_of_u_on_n = threeD_balloon - proj_of_u_on_n.transpose()
points = np.zeros((threeD_balloon.shape[0], 2))
points[:, 0] = proj_of_u_on_n[:, 1]
points[:, 1] = proj_of_u_on_n[:, 2]
hull = ConvexHull(points)
bound = points[hull.vertices]
perp_A_x = Polygon(bound).area
cent_y = Polygon(bound).centroid.coords[0][0]
norm_vec2 = np.array([np.sin(theta), 0, np.cos(theta)])
proj_of_u_on_n2 = (np.dot(threeD_balloon, norm_vec2))*norm_vec2.reshape(len(norm_vec2), 1)
proj_of_u_on_n2 = threeD_balloon - proj_of_u_on_n2.transpose()
points2 = np.zeros((threeD_balloon.shape[0], 2))
points2[:, 0] = proj_of_u_on_n2[:, 0]
points2[:, 1] = proj_of_u_on_n2[:, 1]
hull2 = ConvexHull(points2)
bound2 = points2[hull2.vertices]
perp_A_y = Polygon(bound2).area
cent_x = Polygon(bound2).centroid.coords[0][0]
return perp_A_x, perp_A_y, cent_x, cent_y
def init_calc(threeD_balloon, payload_height, payload_width, payload_depth, connector_height, balloon_height, balloon_mass, COG_payload_h, COG_payload_w, rho_atmo, dim_scale, dyn_visc, F_b, thrust_f, thrust_r, m_dot_f, m_dot_r, acc_g, consider_bouyancy_drift, time_step, target_range, d_tol, dragthrustratio, min_burn_index, moment_arm_thruster):
## Initializations
t = np.array([0]) # time
r_m = np.array([total_rover_mass]) # full rover mass
# kinematics in x, displacement, velocity and acceleration
d_x = np.array([0]) # (m)
v_x = np.array([0]) # (m/s)
a_x = np.array([0]) # (m/s^2)
# kinematics in y, displacement, velocity and acceleration
d_y = np.array([0]) # (m)
v_y = np.array([0]) # (m/s)
a_y = np.array([0]) # (m/s^2)
# moment about z
m_z = np.array([0]) # (Nm)
F = np.array([thrust_f]) # Thrust (N)
D_x = np.array([0]) # Drag in x (N)
D_y = np.array([0]) # Drag in y (N)
    # rotational kinematics in z, displacement, velocity, acceleration
alpha = np.array([0]) # (rad/s^2)
omega = np.array([0]) # (rad/s)
theta = np.array([0]) # (rad)
rem_fuel = np.array([fuel_mass])
ballast_mass = np.array([0])
i = 0
fail = 0
burn_index = 0
while abs(d_x[i] - target_range) > d_tol and not(fail == 1):
## initial conditions
prev_t = t[i]
prev_r_m = r_m[i]
prev_d_x = d_x[i]
prev_v_x = v_x[i]
prev_a_x = a_x[i]
prev_d_y = d_y[i]
prev_v_y = v_y[i]
prev_a_y = a_y[i]
prev_m_z = m_z[i]
prev_F = F[i]
prev_D_x = D_x[i]
prev_D_y = D_y[i]
prev_alpha = alpha[i]
prev_omega = omega[i]
prev_theta = theta[i]
prev_fuel = rem_fuel[i]
prev_ballast_mass = ballast_mass[i]
## time
t = np.append(t, prev_t + time_step)
cur_t = prev_t + time_step
## Modified perpendicular area
perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(threeD_balloon, prev_theta) # calculates perpendicular area in x and y and the centroid for a given theta
## Center of Gravity, Center of Drag, Moment of Inertia (not rotated)
COG_balloon_h = (payload_height + connector_height + balloon_height/2)
COG_balloon_w = cent_x
COG_cur_h = ((r_m[i] - balloon_mass)*COG_payload_h + balloon_mass*COG_balloon_h)/(r_m[i]) # calculates changing height COG
COG_cur_w = ((r_m[i] - balloon_mass)*COG_payload_w + balloon_mass*COG_balloon_w)/(r_m[i]) # calculates changing COG
J_payload_u = r_m[i]*(payload_height**2 + payload_width**2) # untransformed moment of inertia of payload
trans_payload_J_d = np.sqrt(COG_cur_h**2 + COG_cur_w**2) - COG_payload # distance axis of rotation must be moved
J_payload_t = J_payload_u + r_m[i]*trans_payload_J_d**2 # moving axis of rotation with parallel axis theorem
trans_balloon_J_d = np.sqrt((COG_balloon_h - COG_cur_h)**2 + (COG_balloon_w - COG_cur_w)**2) # distance axis of rotation must be moved
J_balloon_t = J_balloon_u + balloon_mass*trans_balloon_J_d**2 # moving axis of rotation with parallel axis theorem
J_tot = J_payload_t + J_balloon_t
COD_balloon_h = COG_balloon_h # needs to be updated based on CFD
COD_balloon_w = COG_balloon_w # needs to be updated based on CFD
# Skin Friction coefficient
if prev_v_x != 0:
re_num = rho_atmo*prev_v_x*dim_scale/dyn_visc # Reynold's Number
C_f = .027/np.power(re_num, 1/7) ## Prandtl's 1/7 Power Law
else:
C_f = 0 # If velocity = 0, C_f = 0
D_mag = np.sqrt(prev_D_x**2 + prev_D_y**2) # magnitude of drag
res_freq = int(np.ceil(2*pi*np.sqrt(J_tot/(F_b*balloon_height)))) # calculated resonant frequency
thrust = thrust_f # thrust
m_dot = m_dot_f # mass flow rate
if abs(D_mag/thrust) < dragthrustratio: # if thrust to drag ratio is less than max ratio, burn
burn_condition = 1
else:
if burn_index > min_burn_index: # if engine has burned for minimal time, and drag condition exceeded, stop burning
burn_condition = 0
burn_index = 0
if burn_condition:
burn_index = burn_index + 1
## Force
cur_F = thrust
cur_fuel = prev_fuel - m_dot*time_step
# Ballast
cur_ballast_mass = prev_ballast_mass + m_dot*time_step
cur_r_m = prev_r_m
else:
cur_F = 0
cur_r_m = prev_r_m
cur_fuel = prev_fuel
mass_deficit = 0
cur_ballast_mass = prev_ballast_mass
perp_A_pay_x = payload_width/np.cos(prev_theta)*payload_depth # calculates perpendicular surface area of payload
pay_drag_x = -.5*(C_D_payload+C_f)*perp_A_pay_x*rho_atmo*prev_v_x**2 # calculates drag from payload
ball_drag_x = -.5*(C_D_balloon+C_f)*perp_A_x*rho_atmo*prev_v_x**2 # calculates drag from balloon in x
ball_drag_y = -.5*(C_D_balloon+C_f)*perp_A_y*rho_atmo*prev_v_y**2 # calculates drag from balloon in y
cur_D_x = pay_drag_x + ball_drag_x # calculates total drag in x
cur_D_y = ball_drag_y # calculates total drag in y
cur_D_mag = np.sqrt(cur_D_x**2 + cur_D_y**2) # Magnitude of drag
## Linear Kinematics
tot_force_x = cur_F*np.cos(prev_theta) + cur_D_x # effective thrust in x
tot_force_y = cur_F*np.sin(prev_theta) + cur_D_y # effective force in y
cur_a_x = tot_force_x/cur_r_m
cur_a_y = tot_force_y/cur_r_m
cur_v_x = prev_v_x+cur_a_x*time_step
cur_v_y = prev_v_y+cur_a_y*time_step
cur_d_x = prev_d_x+cur_v_x*time_step
cur_d_y = prev_d_y+cur_v_y*time_step
## Rotational Kinematics
# Payload Gravity Torque
g_m_a_y_pay = COG_cur_h - COG_payload_h # moment arm for gravity on the payload y
g_m_a_x_pay = COG_cur_w - COG_payload_w # moment arm for gravity on the payload x
g_m_a_pay = np.sqrt(g_m_a_y_pay**2 + g_m_a_x_pay**2)
g_m_pay = abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_pay)
# Balloon Gravity Torque
        g_m_a_y_ball = COG_cur_h - COG_balloon_h # moment arm for gravity on the balloon y
        g_m_a_x_ball = COG_cur_w - COG_balloon_w # moment arm for gravity on the balloon x
        g_m_a_ball = np.sqrt(g_m_a_y_ball**2 + g_m_a_x_ball**2)
g_m_ball = -abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_ball)
g_m = g_m_pay + g_m_ball
# Balloon Drag Torque
d_m_a_y = COD_balloon_h - COG_cur_h # moment arm for drag on the balloon y
d_m_a_x = COD_balloon_w - COG_cur_w # moment arm for drag on the balloon x
d_m_a = np.sqrt(d_m_a_y**2 + d_m_a_x**2) # euclidean distance
ball_D_mag = np.sqrt(ball_drag_x**2 + ball_drag_y**2) # magnitude of drag on balloon
d_m = d_m_a*ball_D_mag*np.cos(prev_theta) - pay_drag_x*g_m_a_pay # sum all drag moments
# Bouyancy force torque, balloon
b_m_a_y = COG_balloon_h - COG_cur_h # moment arm for bouyancy force y
b_m_a_x = COG_balloon_w - COG_cur_w # moment arm for bouyancy force x
b_m_a = np.sqrt(b_m_a_y**2 + b_m_a_x**2) # euclidean
b_m = b_m_a * F_b * np.sin(prev_theta) # total buoyancy moment
t_m_a = moment_arm_thruster # thruster moment arm
t_m = cur_F * (moment_arm_thruster) # thruster moment
m_z_tot = d_m - b_m + t_m - g_m # total moment
cur_alpha = m_z_tot / J_tot
cur_omega = prev_omega + cur_alpha*time_step
cur_theta = prev_theta + cur_omega*time_step
## all updates
F = np.append(F, cur_F)
r_m = np.append(r_m, cur_r_m)
D_x = np.append(D_x, cur_D_x)
D_y = np.append(D_y, cur_D_y)
a_x = np.append(a_x, cur_a_x)
a_y = np.append(a_y, cur_a_y)
v_x = np.append(v_x, cur_v_x)
v_y = np.append(v_y, cur_v_y)
d_x = np.append(d_x, cur_d_x)
d_y = np.append(d_y, cur_d_y)
m_z = np.append(m_z, m_z_tot)
alpha = np.append(alpha, cur_alpha)
omega = np.append(omega, cur_omega)
theta = np.append(theta, cur_theta)
rem_fuel = np.append(rem_fuel, cur_fuel)
ballast_mass = np.append(ballast_mass, cur_ballast_mass)
i = i + 1
if cur_fuel < 0:
fail = 1
print('Not Enough Fuel Mass')
if i % 100 == 0:
print('.', end= '')
if i % 5000 == 0:
print('\n')
all_data = np.zeros((len(t), 17))
all_data[:, 0] = t
all_data[:, 1] = F
all_data[:, 2] = r_m
all_data[:, 3] = D_x
all_data[:, 4] = D_y
all_data[:, 5] = a_x
all_data[:, 6] = a_y
all_data[:, 7] = v_x
all_data[:, 8] = v_y
all_data[:, 9] = d_x
all_data[:, 10] = d_y
all_data[:, 11] = m_z
all_data[:, 12] = alpha
all_data[:, 13] = omega
all_data[:, 14] = theta
all_data[:, 15] = rem_fuel
all_data[:, 16] = ballast_mass
headers = ['time', 'force', 'mass', 'drag_x', 'drag_y', 'acceleration_x', 'acceleration_y', 'velocity_x', 'velocity_y', 'displacement_x', 'displacement_y', 'moment_z', 'alpha', 'omega', 'theta', 'fuel_mass', 'ballast_mass']
return pd.DataFrame(all_data, columns=headers)
def drag_stop_calc(test, ind_ignore, maneuver_time, max_vel, forward_burn_frac, ind_at_end, threeD_balloon, payload_height, payload_width, payload_depth, connector_height, balloon_height, balloon_mass, COG_payload_h, COG_payload_w, rho_atmo, dim_scale, dyn_visc, F_b, thrust_f, thrust_r, m_dot_f, m_dot_r, acc_g, consider_bouyancy_drift, time_step, target_range, d_tol, dragthrustratio, min_burn_index, moment_arm_thruster):
## Drag Stop
reverse_burn_frac = 1 - forward_burn_frac # deprecated if no reverse burn
cutoff_time = maneuver_time * forward_burn_frac
## Initializations
t = np.array([0]) # time
r_m = np.array([total_rover_mass]) # full rover mass
# kinematics in x, displacement, velocity and acceleration
d_x = np.array([0]) # (m)
v_x = np.array([0]) # (m/s)
a_x = np.array([0]) # (m/s^2)
# kinematics in y, displacement, velocity and acceleration
d_y = np.array([0]) # (m)
v_y = np.array([0]) # (m/s)
a_y = np.array([0]) # (m/s^2)
# moment about z
m_z = np.array([0]) # (Nm)
F = np.array([thrust_f]) # Thrust (N)
D_x = np.array([0]) # Drag in x (N)
D_y = np.array([0]) # Drag in y (N)
    # rotational kinematics in z, displacement, velocity, acceleration
alpha = np.array([0]) # (rad/s^2)
omega = np.array([0]) # (rad/s)
theta = np.array([0]) # (rad)
rem_fuel = np.array([fuel_mass])
ballast_mass = np.array([0])
i = 0
fail = 0
burn_index = 0
    vel_checker = 10 # lets the loop accelerate the craft initially
while vel_checker >= vel_elbow and not(fail == 1):
## initial conditions
prev_t = t[i]
prev_r_m = r_m[i]
prev_d_x = d_x[i]
prev_v_x = v_x[i]
prev_a_x = a_x[i]
prev_d_y = d_y[i]
prev_v_y = v_y[i]
prev_a_y = a_y[i]
prev_m_z = m_z[i]
prev_F = F[i]
prev_D_x = D_x[i]
prev_D_y = D_y[i]
prev_alpha = alpha[i]
prev_omega = omega[i]
prev_theta = theta[i]
prev_fuel = rem_fuel[i]
prev_ballast_mass = ballast_mass[i]
## time
t = np.append(t, prev_t + time_step)
cur_t = prev_t + time_step
## Modified perpendicular area
perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(threeD_balloon, prev_theta)
## COG, COD, J (not rotated)
COG_balloon_h = (payload_height + connector_height + balloon_height/2)
COG_balloon_w = cent_x
COG_cur_h = ((r_m[i] - balloon_mass)*COG_payload_h + balloon_mass*COG_balloon_h)/(r_m[i]) # calculates changing height COG
COG_cur_w = ((r_m[i] - balloon_mass)*COG_payload_w + balloon_mass*COG_balloon_w)/(r_m[i]) # calculates changing COG
J_payload_u = r_m[i]*(payload_height**2 + payload_width**2) # untransformed moment of inertia of payload
trans_payload_J_d = np.sqrt(COG_cur_h**2 + COG_cur_w**2) - COG_payload # distance axis of rotation must be moved
J_payload_t = J_payload_u + r_m[i]*trans_payload_J_d**2 # moving axis of rotation with parallel axis theorem
trans_balloon_J_d = np.sqrt((COG_balloon_h - COG_cur_h)**2 + (COG_balloon_w - COG_cur_w)**2) # distance axis of rotation must be moved
J_balloon_t = J_balloon_u + balloon_mass*trans_balloon_J_d**2 # moving axis of rotation with parallel axis theorem
J_tot = J_payload_t + J_balloon_t
COD_balloon_h = COG_balloon_h # needs to be updated based on CFD
COD_balloon_w = COG_balloon_w # needs to be updated based on CFD
if prev_v_x != 0:
re_num = rho_atmo*prev_v_x*dim_scale/dyn_visc
C_f = .027/np.power(re_num, 1/7) ## Prandtl's 1/7 Power Law
else:
C_f = 0
D_mag = np.sqrt(prev_D_x**2 + prev_D_y**2)
res_freq = int(np.ceil(2*pi*np.sqrt(J_tot/(F_b*balloon_height))))
max_alpha = max_theta/4*res_freq**2
if cur_t < cutoff_time:
reverse = 0
else:
reverse = 1
if reverse:
thrust = 0
m_dot = 0
curdtr = 0
else:
thrust = thrust_f
m_dot = m_dot_f
curdtr = abs(D_mag/thrust)
if curdtr < dragthrustratio:
if reverse:
burn_condition = 0
else:
burn_condition = 1
else:
if burn_index > min_burn_index:
burn_condition = 0
burn_index = 0
if burn_condition:
burn_index = burn_index + 1
## Force
cur_F = thrust
cur_fuel = prev_fuel - m_dot*time_step
# Ballast
cur_ballast_mass = prev_ballast_mass + m_dot*time_step
cur_r_m = prev_r_m
else:
cur_F = 0
cur_r_m = prev_r_m
cur_fuel = prev_fuel
mass_deficit = 0
cur_ballast_mass = prev_ballast_mass
perp_A_pay_x = payload_width/np.cos(prev_theta)*payload_depth
pay_drag_x = -.5*(C_D_payload+C_f)*perp_A_pay_x*rho_atmo*prev_v_x**2
ball_drag_x = -.5*(C_D_balloon+C_f)*perp_A_x*rho_atmo*prev_v_x**2
ball_drag_y = -.5*(C_D_balloon+C_f)*perp_A_y*rho_atmo*prev_v_y**2
cur_D_x = pay_drag_x + ball_drag_x
cur_D_y = ball_drag_y
cur_D_mag = np.sqrt(cur_D_x**2 + cur_D_y**2)
## Linear Kinematics
tot_force_x = cur_F*np.cos(prev_theta) + cur_D_x
tot_force_y = cur_F*np.sin(prev_theta) + cur_D_y
cur_a_x = tot_force_x/cur_r_m
cur_a_y = tot_force_y/cur_r_m
cur_v_x = prev_v_x+cur_a_x*time_step
cur_v_y = prev_v_y+cur_a_y*time_step
cur_d_x = prev_d_x+cur_v_x*time_step
cur_d_y = prev_d_y+cur_v_y*time_step
## Rotational Kinematics
# Payload Gravity Torque
g_m_a_y_pay = COG_cur_h - COG_payload_h # moment arm for gravity on the payload y
g_m_a_x_pay = COG_cur_w - COG_payload_w # moment arm for gravity on the payload x
g_m_a_pay = np.sqrt(g_m_a_y_pay**2 + g_m_a_x_pay**2)
g_m_pay = abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_pay)
# Balloon Gravity Torque
        g_m_a_y_ball = COG_cur_h - COG_balloon_h # moment arm for gravity on the balloon y
        g_m_a_x_ball = COG_cur_w - COG_balloon_w # moment arm for gravity on the balloon x
        g_m_a_ball = np.sqrt(g_m_a_y_ball**2 + g_m_a_x_ball**2)
g_m_ball = -abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_ball)
g_m = g_m_pay + g_m_ball
# Balloon Drag Torque
d_m_a_y = COD_balloon_h - COG_cur_h # moment arm for drag on the balloon y
d_m_a_x = COD_balloon_w - COG_cur_w # moment arm for drag on the balloon x
d_m_a = np.sqrt(d_m_a_y**2 + d_m_a_x**2) # euclidean distance
ball_D_mag = np.sqrt(ball_drag_x**2 + ball_drag_y**2) # magnitude of drag on balloon
d_m = d_m_a*ball_D_mag*np.cos(prev_theta) - pay_drag_x*g_m_a_pay # sum all drag moments
# Bouyancy force torque, balloon
b_m_a_y = COG_balloon_h - COG_cur_h # moment arm for bouyancy force y
b_m_a_x = COG_balloon_w - COG_cur_w # moment arm for bouyancy force x
b_m_a = np.sqrt(b_m_a_y**2 + b_m_a_x**2) # euclidean
b_m = b_m_a * F_b * np.sin(prev_theta) # total buoyancy moment
t_m_a = moment_arm_thruster # thruster moment arm
t_m = cur_F * (moment_arm_thruster) # thruster moment
m_z_tot = d_m - b_m + t_m - g_m # total moment
cur_alpha = m_z_tot / J_tot
cur_omega = prev_omega + cur_alpha*time_step
cur_theta = prev_theta + cur_omega*time_step
## all updates
F = np.append(F, cur_F)
r_m = np.append(r_m, cur_r_m)
D_x = np.append(D_x, cur_D_x)
D_y = np.append(D_y, cur_D_y)
a_x = np.append(a_x, cur_a_x)
a_y = np.append(a_y, cur_a_y)
v_x = np.append(v_x, cur_v_x)
v_y = np.append(v_y, cur_v_y)
d_x = np.append(d_x, cur_d_x)
d_y = np.append(d_y, cur_d_y)
m_z = np.append(m_z, m_z_tot)
alpha = np.append(alpha, cur_alpha)
omega = np.append(omega, cur_omega)
theta = np.append(theta, cur_theta)
rem_fuel = np.append(rem_fuel, cur_fuel)
ballast_mass = np.append(ballast_mass, cur_ballast_mass)
i = i + 1
if cur_fuel < 0:
fail = 1
print('Not Enough Fuel Mass')
if i % 100 == 0:
print('.', end= '')
if i % 5000 == 0:
print('\n')
if i > ind_ignore:
vel_checker = prev_v_x
else:
            vel_checker = 10 # lets the loop accelerate the rover
acheived_disp = d_x[-1]
all_data = np.zeros((len(t), 17))
all_data[:, 0] = t
all_data[:, 1] = F
all_data[:, 2] = r_m
all_data[:, 3] = D_x
all_data[:, 4] = D_y
all_data[:, 5] = a_x
all_data[:, 6] = a_y
all_data[:, 7] = v_x
all_data[:, 8] = v_y
all_data[:, 9] = d_x
all_data[:, 10] = d_y
all_data[:, 11] = m_z
all_data[:, 12] = alpha
all_data[:, 13] = omega
all_data[:, 14] = theta
all_data[:, 15] = rem_fuel
all_data[:, 16] = ballast_mass
headers = ['time', 'force', 'mass', 'drag_x', 'drag_y', 'acceleration_x', 'acceleration_y', 'velocity_x', 'velocity_y', 'displacement_x', 'displacement_y', 'moment_z', 'alpha', 'omega', 'theta', 'fuel_mass', 'ballast_mass']
if test:
return acheived_disp
else:
return acheived_disp, pd.DataFrame(all_data, columns=headers)
## Constants ##
g_0 = 9.8 # Earth gravity (m/s^2)
G = 6.67E-11 # Gravitational Constant
## Rover Constants ##
altitude = 300 # hovering altitude m
payload_mass = 200 # rover payload mass kg
structure_mass = 1103.53 # balloon mass kg
###### Body Info #####
body_atmo_pressure = 146921.2 # Atmospheric Pressure of visited body (Pa)
body_mass = 1.3452E23 # mass of body (kg)
body_radius = 2574.7*1000 # radius of body (m)
acc_g = G*body_mass/body_radius**2 # acceleration due to gravity (m/s^2)
rho_atmo = 1.225 * 4.4 # density of atmosphere of body kg/m^3
dyn_visc = 4.35
## forward engine
thrust_per_engine_f = 3.6 # thrust per engine (N)
num_engines_f = 8 # number of engines (should be greater than 2 for torquing)
thrust_f = thrust_per_engine_f * num_engines_f # total thrust (N)
## backward engine
thrust_per_engine_r = 3 # thrust per engine (N)
num_engines_r = 1 # number of engines (should be greater than 2 for torquing)
thrust_r = thrust_per_engine_r * num_engines_r # total thrust (N)
Isp = 57 # specific impulse of rocket propellant (s)
vexit = g_0 * Isp # exit velocity of propellant
m_dot_f = thrust_f/vexit # mass flow rate (kg/s)
m_dot_r = thrust_r/vexit
fuel_mass = 95 # available fuel mass (kg)
## Balloon Constants
rho_He = .18 # density of helium in the balloon (kg/m^3)
volume_req = 356.76 # volume required to lift the mass of itself and the payload (m^3)
balloon_mass = structure_mass + rho_He*volume_req # mass of balloon (kg)
total_rover_mass = payload_mass + balloon_mass # total initial mass of rover (kg)
F_b = total_rover_mass * acc_g
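# Sanity check (approximate): acc_g = 6.67e-11 * 1.3452e23 / (2574.7e3)**2 ~= 1.35 m/s^2,
# consistent with Titan, and F_b is sized so buoyancy exactly balances the rover weight.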
# Balloon Shape - https://www.geogebra.org/m/ydGnFQ2c
x = symbols('x') # sympy symbol
filename = 'C:/Users/vargh/Desktop/Scripts/SHREC/goodyear_airfoil.csv' # generated dataset from link above
gy_geom = | pd.read_csv(filename) | pandas.read_csv |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not | notnull(-np.inf) | pandas.types.missing.notnull |
import os
import gzip
import pandas as pd
import scipy.io as sio
import pathlib
from enum import Enum
import torch
from sklearn.preprocessing import MinMaxScaler, normalize, StandardScaler
class Btype(Enum):
Undefined = 0
Normal = 1
ESSV_aka_PAC = 2
Aberrated = 3
ESV_aka_PVC = 4
class Rtype(Enum):
Undefined = 0
End = 1
Noise = 2
NSR = 3
AFib = 4
AFlutter = 5
cur_dir = '/content/gdrive/My Drive/ssh_files/submitted - Copy'
emb_file = cur_dir+'/test_emb.csv.gz'
labels_file = cur_dir+'/test_labels.csv.gz'
train_patients = 1000
test_patients = 100
train_subset = []
test_subset = []
train_labels_subset = []
test_labels_subset = []
#change this param to choose amount of data to work with
partition = 10
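# e.g. with partition = 10, only patients whose id is divisible by 10 are kept,
# i.e. roughly one tenth of the labelled records on each side of the split
# (train: patient id < 1000, test: the remaining patients).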
with gzip.open(labels_file, 'rb') as f:
header = f.readline().decode('ascii').replace("\n","").replace("\r", "")
for i, line in enumerate(f):
decoded = line.decode('ascii').replace("\n", "").replace("\r", "").split(",")
patient, segment, frame = decoded[1], decoded[2], decoded[3]
btype, rtype = Btype(int(decoded[4])), Rtype(int(decoded[5]))
if btype in [Btype.Undefined] or rtype in [Rtype.Undefined, Rtype.End, Rtype.Noise]:
continue
# also can add condition on rtype or btype or any other to manipulate the learning data
if int(patient) < train_patients and int(patient) % partition == 0:
train_labels_subset.append([patient, segment, frame, btype.value, rtype.value])
elif int(patient) % partition == 0:
test_labels_subset.append([patient, segment, frame, btype.value, rtype.value])
train_labels = pd.DataFrame(train_labels_subset, columns=['patient', 'segment', 'frame', 'bt_label', 'rt_label'])
test_labels = | pd.DataFrame(test_labels_subset, columns=['patient', 'segment', 'frame', 'bt_label', 'rt_label']) | pandas.DataFrame |
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
import pandas as pd
class Ledger(BoxLayout):
"""
Ledger data structure:
x: first number in equation (float)
y: second number in equation (float)
op: operator ['+', "-", '*', '/'] (str)
z: result (float)
"""
def __init__(self, **kwargs):
super(Ledger, self).__init__(**kwargs)
self.col = 'x'
self.row = 1
self.df = pd.DataFrame(columns=['x', 'y', 'op', 'z'])
self.df.loc[1] = {'x': 0, 'y': 0, 'op': '', 'z': 0}
self.clear_button_src = 'assets/graphics/clear.png'
def select(self, col: str):
"""select active column"""
if col not in ['x', 'y', 'op', 'z']:
print(f"[ WARNING ] Invalid column: {col}")
pass
else:
self.col = col
def update(self, n: int, op: str = '='):
"""update active cell"""
if op == '+':
self.df.at[self.row, self.col] += n
if op == '-':
self.df.at[self.row, self.col] -= n
if op == '*':
self.df.at[self.row, self.col] *= n
if op == '/':
self.df.at[self.row, self.col] /= n
if op == '=':
self.df.at[self.row, self.col] = n
self.refresh_ledger()
def open_help(self):
TEXT = '''
Input digits in the sandbox to tabulate numbers on the ledger.
You can perform operations on numbers using the operations buttons immediately to the right.
'''
p = Popup(
title='Ledger Help',
size_hint_y=None,
height=200,
content=Label(text=TEXT)
)
p.open()
def operation(self, op: str):
"""update operator column of current row and advance selection"""
if op not in ['+', '-', '*', '/']:
print(f"[ WARNING ] Invalid operator: {op}")
print(self.df.at[self.row, 'op'])
self.df.at[self.row, 'op'] = op
self.parent.parent.children[1].set_value(self.df.at[self.row, 'x'])
self.select('y')
self.refresh_ledger()
def get_row(self) -> dict:
"""return all values from active row"""
return self.df.loc[self.row].to_dict()
def submit_row(self):
"""calculate result, update ledger, new row"""
row = self.get_row()
x = self.df.at[self.row, 'x']
y = self.df.at[self.row, 'y']
print(row['op'])
if row['op'] == '+':
result = x + y
elif row['op'] == '-':
result = x - y
elif row['op'] == '*':
result = x * y
elif row['op'] == '/':
result = x / y
else:
result = x
self.df.at[self.row, 'z'] = result
self.parent.parent.children[1].set_value(result)
self.new_row()
def refresh_ledger(self):
"""refresh ledger view with current data"""
# TODO: Reincorporate cuneiform translator commented out below (untested)
rows = self.df.values.astype('str')
self.rv.data = [
{'value': row} for row in rows
]
def new_row(self):
"""add and select new row at bottom of ledger"""
index = len(self.df.index) + 1
self.df.loc[index] = {'x': 0, 'y': 0, 'op': '', 'z': 0}
self.row = index
self.col = 'x'
self.refresh_ledger()
self.get_row()
def clear(self):
"""clear ledger and backing dataframe"""
self.df = | pd.DataFrame(columns=['x', 'y', 'op', 'z']) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Author : <NAME>
Date : 2022-02-03
Purpose: Parse tracy JSON files and produce summary .xlsx sheet.
"""
import argparse
from typing import NamedTuple
import json, pathlib, time
import pandas as pd
class Args(NamedTuple):
""" Command-line arguments """
json_file_path: pathlib.Path
output_dir: bool
input_type: bool
csv: bool
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Parse tracy JSONs and produce a summary excel sheet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('json_file_path',
metavar='json_file_path',
type=pathlib.Path,
help='Path of directory containing tracy JSONs (or directories of gene JSONs)')
parser.add_argument('-o',
'--output_dir',
help="flag whether directory 'output' will be created",
action='store_false')
parser.add_argument('-i',
'--input_type',
help="flag whether Michael put genes into individual folders",
action='store_false')
parser.add_argument('--csv',
help="flag whether .csv files will be produced for each gene (mostly for debugging)",
action='store_true')
args = parser.parse_args()
return Args(args.json_file_path, args.output_dir, args.input_type, args.csv)
# --------------------------------------------------
def main() -> None:
start_main = time.time()
# define args
args = get_args()
json_file_path_arg = args.json_file_path.resolve()
output_dir_arg = args.output_dir
input_type_arg = args.input_type
csv_arg = args.csv
# check output_dir flag, if true: make an output dir
if output_dir_arg:
output_flag = 'tracy_json_parse'
output_path = json_file_path_arg.joinpath(f"{output_flag}_output-{time.strftime('%Y_%m_%d-%H%M%S', time.localtime(time.time()))}")
output_path.mkdir(parents=True, exist_ok=True)
else:
output_path = json_file_path_arg
sample_list = {
'#reference': {
'aliases': ['reference', 'ref'],
'species': '#ref'
},
'AliBarber': {
'aliases': ['alibarber'],
'species': 'Bos taurus'
},
'Cochise': {
'aliases': ['cochise'],
'species': 'Bos taurus'
},
'Sansao': {
'aliases': ['sansao'],
'species': 'Bos taurus'
},
'Slugger': {
'aliases': ['slugger', 'slogger'],
'species': 'Bos taurus'
},
'LLNKune': {
'aliases': ['llnkune', 'llnkure'],
'species': 'Bos indicus'
},
'Nagaki': {
'aliases': ['nagaki'],
'species': 'Bos indicus'
},
'Raider': {
'aliases': ['raider'],
'species': 'Bos indicus'
},
'Renditium': {
'aliases': ['renditium', 'rendition'],
'species': 'Bos indicus'
}
}
list_of_genotype_DataFrames = {}
# check input_type flag, if true:
if input_type_arg:
for gene_dir in json_file_path_arg.iterdir():
if gene_dir.is_dir():
for sample_json in gene_dir.glob('*.json'):
query_path = gene_dir
list_of_genotype_DataFrames.update({gene_dir.stem: generate_genotype_DataFrame(sample_list, gene_dir.stem, query_path, output_path, csv_arg)})
else:
list_of_genes = list(set([gene.stem.split('_')[0] for gene in json_file_path_arg.glob('*.json')]))
for gene in list_of_genes:
query_path = json_file_path_arg
list_of_genotype_DataFrames.update({gene: generate_genotype_DataFrame(sample_list, gene, query_path, output_path, csv_arg)})
write_genotype_DataFrames_to_excel(list_of_genotype_DataFrames, output_path)
print_runtime(f'Produced summary tracy genotyping excel file.')
def generate_genotype_DataFrame(sample_list, gene_ID, query_path, output_path, csv_arg):
"""
Function will generate a genotype DataFrame with genotyping template
"""
SNP_data = generate_template(sample_list)
for sample_json in query_path.glob(f'{gene_ID}*.json'):
gene = sample_json.stem.split('_')[0]
sample = validate_sample_name(sample_json.stem.split('_')[1], sample_list)
results = parse_json(sample_json, gene, sample)
SNP_data[sample]['seq'] = True
SNP_data['#reference']['seq'] = True
for i in results[0]:
SNP_data[sample].update(i)
for i in results[1]:
SNP_data['#reference'].update(i)
SNP_DataFrame = | pd.DataFrame.from_dict(SNP_data, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from scipy.ndimage import gaussian_filter
from scipy import signal
import cv2
## plot the data classes as a circle to view the unbalance between the classes
def plot_num_of_classes(labels):
plt.figure(figsize=(20, 10))
my_circle = plt.Circle((0,0), 0.7, color="white")
plt.pie(labels, labels= ['n', 'q', 'v', 's', 'f'], colors=
['red', 'green', 'blue', 'skyblue', 'orange'], autopct='%1.1f%%')
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
## split the data into train, validation, and test
## validation and test each class has 100 samples
## input_size of the sample in the dataset
def split(dataset, input_size=256):
input_size = input_size + 1 # plus the label
test_label = []
validation_label = []
validation = []
test = []
train_index = []
num_train_index = 0
## seed the split algorithm
np.random.seed(0)
## for validation and test dataset which has 100 samples for each class
num_classes = 5
for i in range(num_classes):
ind = np.where(dataset[:, -1] == i)[0] ## return index
print('number of class :', len(ind))
idx = np.random.permutation(ind)
rest = idx[:200]
train_idx = idx[200:]
print('number of train data per class', len(train_idx))
train_index.append(train_idx)
# train.append(ecgsignal[train_idx, :])
validation.append(dataset[rest[:100], :-1])
test.append(dataset[rest[100:], :-1])
validation_label.append(dataset[rest[:100], -1])
test_label.append(dataset[rest[100:], -1])
# train_label.append(target[train_idx]) ### training data needs to be noted the number of dataset
num_train_index += len(train_index[i])
print(len(train_index[i]))
    train = np.zeros((num_train_index, input_size))
    train_label = np.zeros((num_train_index, 1))
    culm = 0
    for idx in train_index:
        train[culm:culm + len(idx), :] = dataset[idx, :]
        train_label[culm:culm + len(idx), 0] = dataset[idx, -1]
        culm += len(idx)
validation = np.concatenate(validation, 0)
test = np.concatenate(test, 0)
test_label = np.concatenate(test_label, 0)
validation_label = np.concatenate(validation_label, 0)
return (train, train_label), (validation, validation_label), (test, test_label)
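## Example usage (hypothetical file name; assumes `dataset` is an (N, 257) array
## whose last column holds the integer class label 0-4):
# dataset = np.loadtxt('mitbih_preprocessed.csv', delimiter=',')
# (x_train, y_train), (x_val, y_val), (x_test, y_test) = split(dataset, input_size=256)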
## upsample function
## due to the imbalance of the classes
### according to https://elitedatascience.com/imbalanced-classes
def upsample(train, upsample_size=10000):
df_1 = train[train[:, -1] == 1]
df_2 = train[train[:, -1] == 2]
df_3 = train[train[:, -1] == 3]
df_4 = train[train[:, -1] == 4]
idxs = np.random.choice(train[train[:, -1] == 0].shape[0], upsample_size, replace=False)
df_0 = train[idxs]
#df_0 = (train[train_label == 0]).sample(n=20000, random_state=42)
df_1 = pd.DataFrame(df_1)
df_2 = pd.DataFrame(df_2)
df_3 = pd.DataFrame(df_3)
df_4 = | pd.DataFrame(df_4) | pandas.DataFrame |
import importlib
import pandas as pd
from pandas import compat
from .parser import Parser
import logging
class UberModel(object):
"""
Collection of static methods used across all the ubertool models.
"""
def __init__(self):
"""Main utility class for building Ubertool model classes for model execution."""
super(UberModel, self).__init__()
self.name = self.__class__.__name__
self.pd_obj = None
self.pd_obj_exp = None
self.pd_obj_out = None
def validate_input_names(self, model_inputs, user_inputs):
"""
Compare the user supplied inputs with the ModelInputs() class attributes, ensuring they match by name
        :param model_inputs: ModelInputs() class instance
        :param user_inputs: pandas DataFrame of user-supplied model inputs
        :return: Boolean
"""
# Create temporary DataFrame where each column name is the same as ModelInputs attributes
df = pd.DataFrame()
for input_param in model_inputs.__dict__:
df[input_param] = getattr(self, input_param)
keys_a = set(df.keys())
keys_b = set(self.pd_obj.keys())
extras = keys_b - keys_a
n_extras = len(extras)
print(f'There are {n_extras} extra keys.')
if(n_extras > 0): print(extras)
missing = keys_a - keys_b
n_missing = len(missing)
print(f'There are {n_missing} missing keys.')
if(n_missing > 0): print(missing)
# Compare column names of temporary DataFrame (created above) to user-supply DataFrame from JSON
#if df.columns.sort_values().equals(user_inputs.columns.sort_values()):
        if n_missing == 0:
print('Input parameters match what is expected.')
print(set(df.keys()))
return True
else:
            print('Input parameters do not include all required inputs.')
            msg_err1 = "Input parameters do not include all required inputs. Please see API documentation.\n"
msg_err2 = "Expected: \n{}\n".format(df.columns.sort_values())
msg_err3 = "Received: \n{}\n".format(self.pd_obj.columns.sort_values())
missing = [item for item in keys_a if item not in keys_b]
msg_missing = "missing the following field(s): \n{}\n".format(missing)
extras = [item for item in keys_b if item not in keys_a]
msg_extras = "the following extra field(s) were found: \n{}\n".format(extras)
print(msg_err1 + msg_err2 + msg_err3 + msg_missing + msg_extras)
raise ValueError(msg_err1 + msg_err2 + msg_err3 + msg_missing + msg_extras)
def coerce_input_dtype(self, incoming_dtype, coerce_dtype, input_series):
#logging.info(incoming_dtype)
if coerce_dtype == 'object':
return input_series.astype('object')
elif coerce_dtype == 'float64':
if incoming_dtype == 'object':
#coerces strings to np.nans
return pd.to_numeric(input_series, errors='coerce')
elif incoming_dtype == 'float64':
return input_series
else:
return input_series.astype('float64')
        elif coerce_dtype in ('int64', 'int32'):
            if incoming_dtype == 'object':
                # coerces non-numeric strings to np.nan
                return pd.to_numeric(input_series, errors='coerce', downcast='integer')
else:
return input_series.astype('int64')
else:
print("dtype of {} is {}\n"
"This format is not handled by UberModel.coerce_input_dtype()".format(input_series.name, coerce_dtype))
return input_series
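    # Illustrative behaviour (example values, not from the original source): coercing
    # an object Series such as pd.Series(['1.5', 'oops']) to 'float64' yields
    # pd.Series([1.5, np.nan]) -- non-numeric strings become NaN instead of raising.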
@staticmethod
def convert_index(df_in):
""" Attempt to covert indices of input DataFrame to duck typed dtype """
parser = Parser(df_in)
df = parser.convert_axes()
return df
def populate_inputs(self, df_in):
"""
Validate and assign user-provided model inputs to their respective class attributes
:param df_in: Pandas DataFrame object of model input parameters
"""
df_user = self.convert_index(df_in)
# mod_name = self.name.lower() + '.' + self.name.lower() + '_exe'
mod_name = "pram_flask.ubertool.ubertool." + self.name.lower() + "." + self.name.lower() + '_exe'
#print(mod_name)
try:
# Import the model's input class (e.g. TedInputs) to compare user supplied inputs to
module = importlib.import_module(mod_name)
model_inputs_class = getattr(module, self.name + "Inputs")
model_inputs = model_inputs_class()
        except (ImportError, AttributeError) as err:
logging.info(mod_name)
logging.info(err.args)
try:
if self.validate_input_names(model_inputs, df_user):
# If the user-supplied DataFrame has the same column names as required by ModelInputs...
# set each Series in the DataFrame to the corresponding ModelInputs attribute (member variable)
# user_inputs_df = self._sanitize(df)
for column in df_user.columns:
coerce_dtype = str(getattr(model_inputs, column).dtype)
df_series = df_user[column]
initial_dtype = str(df_series.dtype)
#if initial_dtype != coerce_dtype:
#logging.info('var:' + column + ' coerce to: ' + coerce_dtype + ' from: ' + initial_dtype)
setattr(self, column, self.coerce_input_dtype(initial_dtype, coerce_dtype, df_series))
except ValueError as err:
logging.info('input validation problem')
logging.info(err.args)
def populate_outputs(self):
# Create temporary DataFrame where each column name is the same as *ModelName*Outputs attributes
"""
Create and return Model Output DataFrame where each column name is a model output parameter
:param model: string, name of the model as referred to in class names (e.g. terrplant, sip, stir, etc..)
:param model_obj: class instance, instance of the model class for which the
:return:
"""
# Import the model's output class (e.g. TerrplantOutputs) to create a DF to store the model outputs in
mod_name = self.name.lower() + '.' + self.name.lower() + '_exe'
#mod_name = "ubertool_ecorest.ubertool.ubertool." + self.name.lower() + "." + self.name.lower() + '_exe'
module = importlib.import_module(mod_name)
model_outputs = getattr(module, self.name + "Outputs")
model_outputs_obj = model_outputs()
df = pd.DataFrame()
for input_param in model_outputs_obj.__dict__:
df[input_param] = getattr(self, input_param)
setattr(self, input_param, df[input_param])
return df
def fill_output_dataframe(self):
""" Combine all output properties into Pandas Dataframe """
for column in self.pd_obj_out.columns:
try:
output = getattr(self, column)
#print(output)
if isinstance(output, pd.Series):
# Ensure model output is a Pandas Series. Only Series can be
# reliably put into a Pandas DataFrame.
self.pd_obj_out[column] = output
else:
print('"{}" is not a Pandas Series. Returned outputs must be a Pandas Series'.format(column))
except:
print("output dataframe error on " + column)
#print('output dataframe')
#print(self.pd_obj_out)
return
def get_dict_rep(self):
"""
Convert DataFrames to dictionary, returning a tuple (inputs, outputs, exp_out)
:param model_obj: model instance
:return: (dict(input DataFrame), dict(outputs DataFrame), dict(expected outputs DataFrame))
"""
name = self.name
if self.name.lower() == "varroapop":
try:
return self.to_dict(self.pd_obj), \
self.pd_obj_out.to_dict('list'), \
self.pd_obj_exp.to_dict('list')
except AttributeError:
return self.to_dict(self.pd_obj), \
self.pd_obj_out.to_dict('list'), \
{}
elif self.name.lower() == "sam":
try:
return self.to_dict(self.pd_obj), \
self.pd_obj_out, \
self.pd_obj_exp
except AttributeError as ex:
return self.to_dict(self.pd_obj), \
{"error": str(ex)}, \
{}
try:
return self.to_dict(self.pd_obj), \
self.to_dict(self.pd_obj_out), \
self.to_dict(self.pd_obj_exp)
except AttributeError:
return self.to_dict(self.pd_obj), \
self.to_dict(self.pd_obj_out), \
{}
@staticmethod
def to_dict(df):
"""
        This is an override of the pd.DataFrame.to_dict() method where the keys in
return dictionary are cast to strings. This fixes an error where duck typing would
sometimes allow non-String keys, which fails when Flask serializes the dictionary to
JSON string to return the HTTP response.
Original method returns: dict((str(k), v.to_dict()) for k, v in compat.iteritems(df))
:param df:
:return:
"""
        out = {}
        for k, v in df.items():
            col = str(k)
            out[col] = {}
            for row, value in v.items():
                out[col][str(row)] = value
        return out
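    # Illustrative example (hypothetical frame, not from the original source):
    # UberModel.to_dict(pd.DataFrame({'conc': [1.5, 2.0]})) returns
    # {'conc': {'0': 1.5, '1': 2.0}} -- column and row keys are stringified so that
    # Flask can serialize the response to JSON.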
class ModelSharedInputs(object):
def __init__(self):
"""
Container for the shared model inputs amongst most models (e.g. version, chemical name, & PC Code)
"""
super(ModelSharedInputs, self).__init__()
self.csrfmiddlewaretoken = pd.Series([], dtype="object")
self.version = pd.Series([], dtype="object")
self.chemical_name = pd.Series([], dtype="object")
self.pc_code = | pd.Series([], dtype="object") | pandas.Series |
# %%
import pandas as pd
import numpy as np
import requests  # HTTP request module
from bs4 import BeautifulSoup  # web crawling module
from urllib.request import urlopen  # web crawling module
from urllib.parse import quote_plus, urlencode
from pandas import DataFrame, Series  # Series and DataFrame classes
from pandas import ExcelFile, ExcelWriter  # Excel read/write helpers
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from pandas import DataFrame
from django.conf import settings
# %%
| pd.set_option("display.max_columns", 15) | pandas.set_option |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
# Python 2 Compatibility
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import dict
from builtins import zip
from builtins import str
from builtins import int
from future import standard_library
standard_library.install_aliases()
from collections import OrderedDict
import itertools
import math
import warnings
from numbers import Number
import pandas as pd
import numpy as np
import six
import ibmdbpy
from ibmdbpy.utils import chunklist
"""
Statistics module for IdaDataFrames
"""
def _numeric_stats(idadf, stat, columns):
"""
Compute various stats from one or several numerical columns of an IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
Data source.
stat : str
Name of the statistic to be computed.
columns : str or list of str
Name of the columns that belong to the IdaDataFrame.
Returns
-------
Tuple or float64
        One value for each column. For a single-column input a float64 scalar is returned (except for median).
Notes
-----
Currently, the following functions are supported: count, mean, median, std,
var, min, max, sum. Should return a tuple. Only available for numerical
columns.
"""
# Calculate count, mean, median, std, var, min, max
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(stat, six.string_types):
if stat == "count":
select_string = 'COUNT(\"' + '\"), COUNT(\"'.join(columns) + '\")'
elif stat == "mean":
select_string = ('AVG(CAST(\"' +
'\" AS FLOAT)), AVG(CAST(\"'.join(columns) +
'\" AS FLOAT))')
elif stat == "median":
return _get_percentiles(idadf, 0.5, columns).values[0]
elif stat == "std":
tuple_count = _numeric_stats(idadf, 'count', columns)
# in case of only one column, ensure tuple_count is iterable
if len(columns) == 1:
tuple_count = [tuple_count]
count_dict = dict((x, y) for x, y in zip(columns, tuple_count))
agg_list = []
for column in columns:
agg_list.append("STDDEV(\"%s\")*(SQRT(%s)/SQRT(%s))"
%(column, count_dict[column], count_dict[column]-1))
select_string = ', '.join(agg_list)
elif stat == "var":
tuple_count = _numeric_stats(idadf, 'count', columns)
if len(columns) == 1:
tuple_count = [tuple_count]
count_dict = dict((x, int(y)) for x, y in zip(columns, tuple_count))
agg_list = []
for column in columns:
agg_list.append("VAR(\"%s\")*(%s.0/%s.0)"
%(column, count_dict[column], count_dict[column]-1))
select_string = ', '.join(agg_list)
elif stat == "min":
select_string = 'MIN(\"' + '\"), MIN(\"'.join(columns) + '\")'
elif stat == "max":
select_string = 'MAX(\"' + '\"), MAX(\"'.join(columns) + '\")'
elif stat == "sum":
select_string = 'SUM(\"' + '\"), SUM(\"'.join(columns) + '\")'
name = idadf.internal_state.current_state
return idadf.ida_query("SELECT %s FROM %s" %(select_string, name)).values[0]
def _get_percentiles(idadf, percentiles, columns):
"""
Return percentiles over all entries of a column or list of columns in the
IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
percentiles: Float or list of floats.
All values in percentiles must be > 0 and < 1
columns: String or list of string
Name of columns belonging to the IdaDataFrame.
Returns
-------
DataFrame
"""
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(percentiles, Number):
percentiles = [percentiles]
name = idadf.internal_state.current_state
# Get na values for each columns
tuple_na = _get_number_of_nas(idadf, columns)
nrow = idadf.shape[0]
data = pd.DataFrame()
for index_col, column in enumerate(columns):
nb_not_missing = nrow - tuple_na[index_col]
indexes = [float(x)*float(nb_not_missing-1) + 1 for x in percentiles]
low = [math.floor(x) for x in indexes]
high = [math.ceil(x) for x in indexes]
tuplelist = []
i = 0
for flag in [((x+1) == y) for x, y in zip(low, high)]:
if flag:
tuplelist.append((i, i+1))
i += 2
else:
tuplelist.append((i, i))
i += 1
unique = low + high
unique = set(unique)
unique = sorted(unique)
unique = [str(x) for x in unique]
indexes_string = ",".join(unique)
df = idadf.ida_query("(SELECT \""+column+"\" AS \""+column+"\" FROM (SELECT "+
"ROW_NUMBER() OVER(ORDER BY \""+column+"\") as rn, \""+
column + "\" FROM (SELECT * FROM " + name +
")) WHERE rn in("+ indexes_string +"))")
#indexvalues = list(df[df.columns[0]])
indexvalues = list(df)
#import pdb ; pdb.set_trace()
#print(tuplelist)
#print(indexvalues)
indexfinal = [(float(str(indexvalues[x[0]]))+float(str(indexvalues[x[1]])))/2 for x in tuplelist]
new_data = pd.DataFrame(indexfinal)
data[column] = (new_data.T).values[0]
percentile_names = [x for x in percentiles]
data.index = percentile_names
return data
def _categorical_stats(idadf, stat, columns):
# TODO:
"""
Computes various stats from one or several categorical columns of the IdaDataFrame.
This is not implemented.
Parameters
----------
idadf : IdaDataFrame
stat : str
Name of the statistic function to be computed.
columns : str or list of str
Name of columns belonging to the IdaDataFrame.
Returns
-------
Tuple.
"""
# Calculates count, unique, top, freq
raise NotImplementedError("TODO")
def _get_number_of_nas(idadf, columns):
"""
Return the count of missing values for a list of columns in the IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
columns : str or list
One column as a string or a list of columns in the idaDataFrame.
Returns
-------
Tuple
"""
if isinstance(columns, six.string_types):
columns = [columns]
name = idadf.internal_state.current_state
query_list = list()
for column in columns:
string = ("(SELECT COUNT(*) AS \"" + column + "\" FROM " +
name + " WHERE \"" + column + "\" IS NULL)")
query_list.append(string)
query_string = ', '.join(query_list)
# TODO: Improvement idea : Get nrow (shape) and substract by count("COLUMN")
return idadf.ida_query("SELECT * FROM " + query_string, first_row_only = True)
def _count_level(idadf, columnlist=None):
"""
Count distinct levels across a list of columns of an IdaDataFrame grouped
by themselves.
Parameters
----------
columnlist : list
List of column names that exist in the IdaDataFrame. By default, these
are all columns in IdaDataFrame.
Returns
-------
Tuple
Notes
-----
    The function assumes the following:
    * The columns given as parameters exist in the IdaDataFrame.
* The parameter columnlist is an optional list.
* Columns are referenced by their own name (character string).
"""
if columnlist is None:
columnlist = idadf.columns
name = idadf.internal_state.current_state
query_list = []
for column in columnlist:
# Here cast ?
query_list.append("(SELECT COUNT(*) AS \"" + column +"\" FROM (" +
"SELECT \"" + column + "\" FROM " + name +
" GROUP BY \"" + column + "\" ))")
#query_list.append("(SELECT CAST(COUNT(*) AS BIGINT) AS \"" + column +"\" FROM (" +
# "SELECT \"" + column + "\" FROM " + name + " ))")
query_string = ', '.join(query_list)
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
return idadf.ida_query("SELECT " + column_string + " FROM " + query_string, first_row_only = True)
def _count_level_groupby(idadf, columnlist=None):
"""
Count distinct levels across a list of columns in the IdaDataFrame grouped
by themselves. This is used to get the dimension of the resulting cross table.
Parameters
----------
columnlist : list
List of column names existing in the IdaDataFrame. By default, these
are columns of self
Returns
-------
Tuple
Notes
-----
The function assumes the follwing:
* The columns given as parameter exists in the IdaDataframe.
* The parameter columnlist is a optional and is a list.
* Columns are referenced by their own name (character string).
"""
if columnlist is None:
columnlist = idadf.columns
name = idadf.internal_state.current_state
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
query = (("SELECT COUNT(*) FROM (SELECT %s, COUNT(*) as COUNT "+
"FROM %s GROUP BY %s ORDER BY %s, COUNT ASC)")
%(column_string, name, column_string, column_string))
return idadf.ida_query(query, first_row_only = True)
# TODO: REFACTORING: factors function should maybe return a tuple ?
def _factors_count(idadf, columnlist, valuelist=None):
"""
Count non-missing values for all columns in a list (valuelist) over the
IdaDataFrame grouped by a list of columns(columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameters exist in the IdaDataFrame
    * The parameter columnlist is an optional list
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
if valuelist is None:
query = (("SELECT %s, COUNT(*) as COUNT FROM %s GROUP BY %s ORDER BY %s, COUNT ASC")
%(column_string, name, column_string, column_string))
else:
agg_list = []
for value in valuelist:
query = "COUNT(\"%s\") as \"%s\""%(value,value)
agg_list.append(query)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
def _factors_sum(idadf, columnlist, valuelist):
"""
Compute the arithmetic sum over for all columns in a list (valuelist)
over the IdaDataFrame grouped by a list of columns (columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameters exist in the IdaDataFrame
    * The parameter columnlist is an optional list
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
agg_list = []
for value in valuelist:
query = "SUM(\"%s\") as \"%s\""%(value, value)
agg_list.append(query)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
def _factors_avg(idadf, columnlist, valuelist):
"""
Compute the arithmetic average for all columns in a list (valuelist) over
the IdaDataFrame grouped by a list of columns (columnlist).
Parameters
----------
columnlist : list
List of column names that exist in self.
valuelist : list
List of column names that exist in self.
Assumptions
-----------
    * The columns given as parameters exist in the IdaDataFrame
    * The parameters columnlist and valuelist are array-like
* Columns are referenced by their own name (character string)
Returns
-------
DataFrame
"""
column_string = '\"' + '\", \"'.join(columnlist) + '\"'
name = idadf.internal_state.current_state
agg_list = []
for value in valuelist:
agg = (("CAST(AVG(CAST(\"%s\" AS DECIMAL(10,6))) AS DECIMAL(10,6)) \"%s\"")
%(value, value))
agg_list.append(agg)
agg_string = ', '.join(agg_list)
value_string = '\"' + '", "'.join(valuelist) + '\"'
query = (("SELECT %s,%s FROM %s GROUP BY %s ORDER BY %s,%s ASC")
%(column_string, agg_string, name, column_string, column_string, value_string))
return idadf.ida_query(query)
###############################################################################
### Pivot Table
###############################################################################
def pivot_table(idadf, values=None, columns=None, max_entries=1000, sort=None,
factor_threshold=None, interactive=False, aggfunc='count'):
"""
See IdaDataFrame.pivot_table
"""
# TODO : Support index
if aggfunc.lower() not in ['count', 'sum', 'avg', 'average', 'mean']:
print("For now only 'count' and 'sum' and 'mean' as aggregation function is supported")
return
if (columns is None) & (factor_threshold is None):
print("Please provide parameter factor_threshold for automatic selection of columns")
return
if isinstance(columns, six.string_types):
columns = [columns]
if isinstance(values, six.string_types):
values = [values]
if (values is None) and (aggfunc.lower() != "count"):
raise ValueError("Cannot aggregate using another function than count if" +
"no value(s) was/were given")
####### Identify automatically categorical fields #########
# Load distinct count for each and evaluate categorical or not
data = idadf._table_def(factor_threshold) #
if columns is None:
factors = data.loc[data['VALTYPE'] == "CATEGORICAL", ['TYPENAME', 'FACTORS']]
if len(factors) == 0:
print("No categorical columns to tabulate")
return
else:
factors = data.loc[columns, ['TYPENAME', 'FACTORS']]
if sort == "alpha":
        factors.sort_index(inplace=True, ascending=True)
    elif sort == "factor":
        factors.sort_values(by='FACTORS', inplace=True, ascending=True)
if columns is None:
print("Automatic selection of columns :", factors.index.values)
columns = factors.index.values
nb_row = _count_level_groupby(idadf, factors.index.values)[0] * len(columns)
nb_col = len(factors.index.values)
nb_entries = nb_row * nb_col
if nb_entries > max_entries: # Overflow risk
print("Number of entries :", nb_entries)
print("Value counts for factors:")
factor_values = factors[['FACTORS']]
factor_values.columns = ['']
print(factor_values.T)
print("WARNING :Attempt to make a table with more than " +
str(max_entries)+ " elements. Either increase max_entries " +
"parameter or remove columns with too many levels.")
return
print("Output dataframe has dimensions", nb_row, "x", (nb_col+1))
if interactive is True:
display_yes = ibmdbpy.utils.query_yes_no("Do you want to download it in memory ?")
if not display_yes:
return
categorical_columns = list(factors.index)
if aggfunc.lower() == 'count':
dataframe = _factors_count(idadf, categorical_columns, values) # Download dataframe
if aggfunc.lower() == 'sum':
dataframe = _factors_sum(idadf, categorical_columns, values) # Download dataframe
if aggfunc.lower() in ['avg', 'average', 'mean']:
dataframe = _factors_avg(idadf, categorical_columns, values) # Download dataframe
if values is not None:
agg_values = values
else: agg_values = aggfunc.upper()
if isinstance(agg_values, six.string_types):
agg_values = [agg_values]
dataframe.columns = categorical_columns + agg_values # Name the aggregate column
# Formatting result
if len(agg_values) == 1:
dataframe[None] = agg_values[0]
else:
catdataframe = dataframe[categorical_columns]
dataframe = catdataframe.join(dataframe[agg_values].stack().reset_index(1))
dataframe['level_1'] = | pd.Categorical(dataframe['level_1'], agg_values) | pandas.Categorical |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = | Series([4, 5, 6]) | pandas.Series |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for Population and Observation methods in the Data Commons Python
Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
from pandas.util.testing import assert_series_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
import zlib
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json and allowed constraining properties
req = kwargs['json']
headers = kwargs['headers']
constrained_props = [
{
'property': 'placeOfBirth',
'value': 'BornInOtherStateInTheUnitedStates'
},
{
'property': 'age',
'value': 'Years5To17'
}
]
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_populations.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_populations']\
and req['population_type'] == 'Person'\
and req['pvs'] == constrained_props:
if req['dcids'] == ['geoId/06085', 'geoId/4805000']:
# Response returned when querying for multiple valid dcids.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'population': 'dc/p/crgfn8blpvl35'
},
{
'dcid': 'geoId/4805000',
'population': 'dc/p/f3q9whmjwbf36'
}
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response returned when querying for a dcid that does not exist.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'population': 'dc/p/crgfn8blpvl35'
},
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid'] or req['dcids'] == []:
# Response returned when both given dcids do not exist or no dcids are
# provided to the method.
res_json = json.dumps([])
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_observations
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_observations']\
and req['measured_property'] == 'count'\
and req['stats_type'] == 'measuredValue'\
and req['observation_date'] == '2018-12'\
and req['observation_period'] == 'P1M'\
and req['measurement_method'] == 'BLSSeasonallyAdjusted':
if req['dcids'] == ['dc/p/x6t44d8jd95rd', 'dc/p/lr52m1yr46r44', 'dc/p/fs929fynprzs']:
# Response returned when querying for multiple valid dcids.
res_json = json.dumps([
{
'dcid': 'dc/p/x6t44d8jd95rd',
'observation': '18704962.000000'
},
{
'dcid': 'dc/p/lr52m1yr46r44',
'observation': '3075662.000000'
},
{
'dcid': 'dc/p/fs929fynprzs',
'observation': '1973955.000000'
}
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/p/x6t44d8jd95rd', 'dc/MadDcid']:
# Response returned when querying for a dcid that does not exist.
res_json = json.dumps([
{
'dcid': 'dc/p/x6t44d8jd95rd',
'observation': '18704962.000000'
},
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid'] or req['dcids'] == []:
# Response returned when both given dcids do not exist or no dcids are
# provided to the method.
res_json = json.dumps([])
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_place_obs
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_place_obs']\
and req['place_type'] == 'City'\
and req['observation_date'] == '2017'\
and req['population_type'] == 'Person'\
and req['pvs'] == constrained_props:
res_json = json.dumps({
'places': [
{
'name': '<NAME>',
'place': 'geoId/4247344',
'populations': {
'dc/p/pq6frs32sfvk': {
'observations': [
{
'marginOfError': 39,
'measuredProp': 'count',
'measuredValue': 67,
}
],
}
}
}
]
})
return MockResponse({
'payload': base64.b64encode(zlib.compress(res_json.encode('utf-8')))
}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
def get_request_mock(*args, **kwargs):
""" A mock GET requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for get requests to get_pop_obs.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_pop_obs'] + '?dcid=geoId/06085':
# Response returned when querying for a city in the graph.
res_json = json.dumps({
'name': 'Mountain View',
'placeType': 'City',
'populations': {
'dc/p/013ldrstf6lnf': {
'numConstraints': 6,
'observations': [
{
'marginOfError': 119,
'measuredProp': 'count',
'measuredValue': 225,
'measurementMethod': 'CensusACS5yrSurvey',
'observationDate': '2014'
}, {
'marginOfError': 108,
'measuredProp': 'count',
'measuredValue': 180,
'measurementMethod': 'CensusACS5yrSurvey',
'observationDate': '2012'
}
],
'popType': 'Person',
'propertyValues': {
'age': 'Years16Onwards',
'gender': 'Male',
'income': 'USDollar30000To34999',
'incomeStatus': 'WithIncome',
'race': 'USC_HispanicOrLatinoRace',
'workExperience': 'USC_NotWorkedFullTime'
}
}
}
})
return MockResponse({
'payload': base64.b64encode(zlib.compress(res_json.encode('utf-8')))
}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPopulations(unittest.TestCase):
""" Unit tests for get_populations. """
_constraints = {
'placeOfBirth': 'BornInOtherStateInTheUnitedStates',
'age': 'Years5To17'
}
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_populations with proper dcids returns valid results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Call get_populations
populations = dc.get_populations(['geoId/06085', 'geoId/4805000'], 'Person',
constraining_properties=self._constraints)
self.assertDictEqual(populations, {
'geoId/06085': 'dc/p/crgfn8blpvl35',
'geoId/4805000': 'dc/p/f3q9whmjwbf36'
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_populations with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Call get_populations
pops_1 = dc.get_populations(['geoId/06085', 'dc/MadDcid'], 'Person',
constraining_properties=self._constraints)
pops_2 = dc.get_populations(['dc/MadDcid', 'dc/MadderDcid'], 'Person',
constraining_properties=self._constraints)
# Verify the results
self.assertDictEqual(pops_1, {'geoId/06085': 'dc/p/crgfn8blpvl35'})
self.assertDictEqual(pops_2, {})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_populations with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
pops = dc.get_populations(
[], 'Person', constraining_properties=self._constraints)
self.assertDictEqual(pops, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_multiple_dcids(self, post_mock):
""" Calling get_populations with a Pandas Series and proper dcids returns
a Pandas Series with valid results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get the input and expected output
dcids = pd.Series(['geoId/06085', 'geoId/4805000'])
expected = pd.Series(['dc/p/crgfn8blpvl35', 'dc/p/f3q9whmjwbf36'])
# Call get_populations
actual = dc.get_populations(
dcids, 'Person', constraining_properties=self._constraints)
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_dcids(self, post_mock):
""" Calling get_populations with a Pandas Series and dcids that do not exist
returns empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get input and expected output
dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series(['dc/p/crgfn8blpvl35', ''])
expected_2 = | pd.Series(['', '']) | pandas.Series |
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def clean_portfolio(portfolio):
""" Clean the portfolio dataset.
- It makes columns for the channels
- Changes the name of the id column to offer_id
Input:
- portfolio: original dataset
Returns:
- portfolio_clean
"""
portfolio_clean = portfolio.copy()
# Create dummy columns for the channels column
d_chann = pd.get_dummies(portfolio_clean.channels.apply(pd.Series).stack(),
prefix="channel").sum(level=0)
portfolio_clean = pd.concat([portfolio_clean, d_chann], axis=1, sort=False)
portfolio_clean.drop(columns='channels', inplace=True)
# Change column name
portfolio_clean.rename(columns={'id':'offer_id'}, inplace=True)
return portfolio_clean
def clean_profile(profile):
""" Clean the profile dataset.
- Fix the date format
- Change the column name id to customer_id
- Create column to identify customers with demographic data
- Add dummy columns for gender
Input:
- profile: original dataset
Returns:
- profile_clean
"""
profile_clean = profile.copy()
# Transform date from int to datetime
date = lambda x: pd.to_datetime(str(x), format='%Y%m%d')
profile_clean.became_member_on = profile_clean.became_member_on.apply(date)
    # Create a column that flags customers with valid demographic data
profile_clean['valid'] = (profile_clean.age != 118).astype(int)
# Change the name of id column to customer_id
profile_clean.rename(columns={'id':'customer_id'}, inplace=True)
# Create dummy columns for the gender column
dummy_gender = pd.get_dummies(profile_clean.gender, prefix="gender")
profile_clean = pd.concat([profile_clean, dummy_gender], axis=1, sort=False)
return profile_clean
def clean_transcript(transcript):
""" Clean the transcript dataset.
- Split value in several columns for offers and transactions
    - Split the event column into several dummy columns
- Change column name person to customer_id
Input:
- transcript: original dataset
Returns:
- transcript_clean
"""
transcript_clean = transcript.copy()
# Split event into several dummy columns
transcript_clean.event = transcript_clean.event.str.replace(' ', '_')
dummy_event = pd.get_dummies(transcript_clean.event, prefix="event")
transcript_clean = pd.concat([transcript_clean, dummy_event], axis=1,
sort=False)
transcript_clean.drop(columns='event', inplace=True)
# Get the offer_id data from the value column
transcript_clean['offer_id'] = [[*v.values()][0]
if [*v.keys()][0] in ['offer id',
'offer_id'] else None
for v in transcript_clean.value]
# Get the transaction amount data from the value column
transcript_clean['amount'] = [np.round([*v.values()][0], decimals=2)
if [*v.keys()][0] == 'amount' else None
for v in transcript_clean.value]
transcript_clean.drop(columns='value', inplace=True)
# Change the name of person column to customer_id
transcript_clean.rename(columns={'person':'customer_id'}, inplace=True)
return transcript_clean
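# Example pipeline (hypothetical file names for the Starbucks offers dataset):
# portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
# profile = pd.read_json('data/profile.json', orient='records', lines=True)
# transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
# df = merge_datasets(clean_portfolio(portfolio), clean_profile(profile),
#                     clean_transcript(transcript))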
def merge_datasets(portfolio_clean, profile_clean, transcript_clean):
""" Merge the three data sets into one
Input:
- portfolio_clean
- profile_clean
- transcript_clean
Output:
- df: merged dataframe
"""
trans_prof = pd.merge(transcript_clean, profile_clean, on='customer_id',
how="left")
df = | pd.merge(trans_prof, portfolio_clean, on='offer_id', how='left') | pandas.merge |
#!/usr/bin/python3
# import the module
import os
import glob
import pandas as pd
import csv
from sqlalchemy import create_engine
import psycopg2
import config #you need to create this config.py file and update the variables with your database, username and password
import subprocess
import sys
#Note: you need to indicate which directory (e.g. path/to/pur1997) in argv[1]
# Get a database connection
conn_string = "host="+config.HOST+" dbname="+config.DB+" user="+config.username+" password="+config.password
# Get a database connection
conn = psycopg2.connect(conn_string)
# Create a cursor object. Allows us to execute the SQL query
cursor = conn.cursor()
def load_data(schema, table):
sql_command = "SELECT * FROM {}.{};".format(str(schema), str(table))
# Load the data
data = pd.read_sql(sql_command, conn)
return (data)
# Download data that is already uploaded (find where you left off)
chem_df = load_data(config.dpr_schema, config.use_data)
# make lists for filtering the data
already_in = list(chem_df['use_no'])
chem_list = list( | pd.read_csv("/home/bmain/pesticide/chem_com.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# encoding: utf-8
import argparse
import os
import sys
import torch
from torch.backends import cudnn
import numpy as np
import random
sys.path.append('.')
from data import make_data_loader
from model import build_model
from engine.evaluator import do_inference
from config import cfg
from utils.logger import setup_logger
from torch.utils.tensorboard import SummaryWriter
from utils.tensorboard_logger import record_dict_into_tensorboard
def seed_torch(seed=2018):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
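# Usage note (a sketch): call seed_torch once at start-up, before any data loaders or
# model weights are created, e.g. seed_torch(2018); 2018 is the default seed above.
# Setting cudnn.deterministic = True trades some GPU speed for run-to-run reproducibility.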
def eval(cfg, target_set_name="test"):
# prepare dataset
train_loader, val_loader, test_loader = make_data_loader(cfg, is_train=False)
# build model and load parameter
model = build_model(cfg)
if os.path.exists(cfg.TEST.WEIGHT) != True:
if os.path.exists("./pretrained/") != True:
os.makedirs("./pretrained")
os.system("wget -O './pretrained/msa_transformer_model.pth' https://tmp-titan.vx-cdn.com:616/file/613ca738783a8/msa_transformer_model.pth")
model.load_param("Overall", cfg.TEST.WEIGHT) #"Overall", "None"
# pass alphabet to construct batch converter for dataset
train_loader.dataset.get_batch_converter(model.backbone_alphabet)
val_loader.dataset.get_batch_converter(model.backbone_alphabet)
test_loader.dataset.get_batch_converter(model.backbone_alphabet)
# input data_loader
if target_set_name == "train":
input_data_loader = train_loader
elif target_set_name == "valid":
input_data_loader = val_loader
elif target_set_name == "test":
input_data_loader = test_loader
else:
raise Exception("Wrong Dataset Name!")
# build and launch engine for evaluation
Eval_Record = do_inference(cfg,
model,
input_data_loader,
None,
target_set_name=target_set_name,
plot_flag=True)
# logging with tensorboard summaryWriter
model_epoch = cfg.TEST.WEIGHT.split('/')[-1].split('.')[0].split('_')[-1]
model_iteration = len(train_loader) * int(model_epoch) if model_epoch.isdigit() == True else 0
writer_test = SummaryWriter(cfg.SOLVER.OUTPUT_DIR + "/summary/eval_" + target_set_name)
record_dict_into_tensorboard(writer_test, Eval_Record, model_iteration)
writer_test.close()
    # record metrics in an Excel workbook
csv_name = "metrics"
import pandas as pd
if Eval_Record.get("Contact Prediction") is not None:
sheet_name = "Contact-Prediction"
col_names = ["dataset"]
value = [cfg.DATA.DATASETS.NAMES]
for k in Eval_Record["Contact Prediction"]["Precision"].keys():
col_names.append(k)
value.append(Eval_Record["Contact Prediction"]["Precision"][k])
df = pd.DataFrame([value], columns=col_names)
xls_filename = os.path.join(cfg.SOLVER.OUTPUT_DIR, "{}.xlsx".format(csv_name))
if os.path.exists(xls_filename) != True:
with pd.ExcelWriter(xls_filename, engine="openpyxl", mode='w') as writer:
df.to_excel(writer, sheet_name=sheet_name)
else:
with | pd.ExcelWriter(xls_filename, engine="openpyxl", mode='a') | pandas.ExcelWriter |
from .nwb_interface import NWBDataset
from .chop import ChopInterface, chop_data, merge_chops
from itertools import product
import numpy as np
import pandas as pd
import h5py
import sys
import os
import logging
logger = logging.getLogger(__name__)
PARAMS = {
'mc_maze': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 100,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 70,
},
},
'mc_rtt': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'finger_vel',
'lag': 140,
'make_params': {
'align_field': 'start_time',
'align_range': (0, 600),
'allow_overlap': True,
},
'eval_make_params': {
'align_field': 'start_time',
'align_range': (0, 600),
'allow_overlap': True,
},
'fp_len': 200,
},
'area2_bump': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'decode_masks': lambda x: np.stack([x.ctr_hold_bump == 0, x.ctr_hold_bump == 1]).T,
'lag': -20,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
'allow_overlap': True,
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['cond_dir', 'ctr_hold_bump'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
},
'kern_sd': 40,
},
},
'dmfc_rsg': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'behavior_source': 'trial_info',
'behavior_mask': lambda x: x.is_outlier == 0,
'behavior_field': ['is_eye', 'theta', 'is_short', 'ts', 'tp'],
'jitter': lambda x: np.stack([
np.zeros(len(x)),
np.where(x.split == 'test', np.zeros(len(x)),
np.clip(1500.0 - x.get('tp', pd.Series(np.nan)).to_numpy(), 0.0, 300.0))
]).T,
'make_params': {
'align_field': 'go_time',
'align_range': (-1500, 0),
'allow_overlap': True,
},
'eval_make_params': {
'start_field': 'set_time',
'end_field': 'go_time',
'align_field': 'go_time',
},
'eval_tensor_params': {
'seg_len': 1500,
'pad': 'front'
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['is_eye', 'theta', 'is_short', 'ts'],
'make_params': {
'start_field': 'set_time',
'end_field': 'go_time',
'align_field': 'go_time',
},
'kern_sd': 70,
'pad': 'front',
'seg_len': 1500,
'skip_mask': lambda x: x.is_outlier == 1,
},
},
'mc_maze_large': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
'mc_maze_medium': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
'mc_maze_small': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
}
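# Illustrative sketch (not used elsewhere in this module): each PARAMS entry maps a
# dataset name to the field names and alignment windows consumed below, e.g.
# >>> PARAMS['mc_maze']['spk_field']
# 'spikes'
# >>> PARAMS['mc_maze']['make_params']['align_range']
# (-250, 450)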
def make_train_input_tensors(dataset, dataset_name,
trial_split='train',
update_params=None,
save_file=True,
return_dict=True,
save_path="train_input.h5",
include_behavior=False,
include_forward_pred=False,
seed=0):
"""Makes model training input tensors.
Creates 3d arrays containing heldin and heldout spikes
for train trials (and other data if indicated)
and saves them as .h5 files and/or returns them
in a dict
Parameters
----------
dataset : NWBDataset
An instance of NWBDataset to make tensors from
    dataset_name : {'mc_maze', 'mc_rtt', 'area2_bump', 'dmfc_rsg',
'mc_maze_large', 'mc_maze_medium', 'mc_maze_small'}
Name of dataset. Used to select default
parameters from PARAMS
trial_split : {'train', 'val'}, array-like, or list, optional
The selection of trials to make the tensors with.
It can be the predefined trial splits 'train'
or 'val', an array-like boolean mask (see the
include_trials argument of `NWBDataset.make_trial_data`),
or a list containing the previous two types, which
will include trials that are in any of the splits
in the list. By default 'train'
update_params : dict, optional
New parameters with which to update default
dict from PARAMS
save_file : bool, optional
Whether to save the reshaped data to an
h5 file, by default True
return_dict : bool, optional
Whether to return the reshaped data in a
data dict with the same keys as the h5 files,
by default True
save_path : str, optional
Path to where the h5 output file should be saved
include_behavior : bool, optional
Whether to include behavioral data in the
returned tensors, by default False
include_forward_pred : bool, optional
Whether to include forward-prediction spiking
data in the returned tensors, by default False
seed : int, optional
Seed for random generator used for jitter
Returns
-------
dict of np.array
A dict containing 3d numpy arrays of
spiking data for indicated trials, and possibly
additional data based on provided arguments
"""
assert isinstance(dataset, NWBDataset), "`dataset` must be an instance of NWBDataset"
assert dataset_name in PARAMS.keys(), f"`dataset_name` must be one of {list(PARAMS.keys())}"
assert isinstance(trial_split, (pd.Series, np.ndarray, list)) or trial_split in ['train', 'val'], \
"Invalid `trial_split` argument. Please refer to the documentation for valid choices"
# Fetch and update params
params = PARAMS[dataset_name].copy()
if update_params is not None:
params.update(update_params)
# Add filename extension if necessary
if not save_path.endswith('.h5'):
save_path = save_path + '.h5'
# unpack params
spk_field = params['spk_field']
hospk_field = params['hospk_field']
make_params = params['make_params'].copy()
jitter = params.get('jitter', None)
# Prep mask
trial_mask = _prep_mask(dataset, trial_split)
# Prep jitter if necessary
if jitter is not None:
np.random.seed(seed)
jitter_vals = _prep_jitter(dataset, trial_mask, jitter)
align_field = make_params.get('align_field', make_params.get('start_field', 'start_time'))
align_vals = dataset.trial_info[trial_mask][align_field]
align_jit = align_vals + pd.to_timedelta(jitter_vals, unit='ms')
align_jit.name = align_field.replace('_time', '_jitter_time')
dataset.trial_info = | pd.concat([dataset.trial_info, align_jit], axis=1) | pandas.concat |
import sys
import itertools
from pathlib import Path
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib import cm
FIGURES_DIR = (
Path(__file__).resolve().parents[2]
/ "figures"
/ "ukbiobank"
/ Path(sys.argv[0]).stem
)
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
TAB_COLORS = [cm.tab10(i) for i in range(10)]
TAB_COLORS = [cm.Pastel2(i) for i in range(10)]
TAB_COLORS = [cm.Pastel1(i) for i in range(10)]
TAB_COLORS = [cm.Set2(i) for i in range(10)]
data_dir = Path(__file__).parent / "data"
score_files = data_dir.glob("scores_*.csv")
scores = []
for sf in score_files:
scores.append(pd.read_csv(str(sf)))
df = | pd.concat(scores, ignore_index=False) | pandas.concat |
"""Module to support machine learning of activity states from acc data"""
from accelerometer import utils
from accelerometer.models import MODELS
from io import BytesIO
import numpy as np
import os
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import sklearn.ensemble._forest as forest
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix
import joblib
import tarfile
import warnings
import urllib
import pathlib
import shutil
def activityClassification(epochFile, activityModel="walmsley"):
"""Perform classification of activity states from epoch feature data
Based on a balanced random forest with a Hidden Markov Model containing
transitions between predicted activity states and emissions trained using a
free-living groundtruth to identify pre-defined classes of behaviour from
accelerometer data.
    :param pandas.DataFrame epochFile: DataFrame of processed epoch feature data
:param str activityModel: Input tar model file which contains random forest
pickle model, HMM priors/transitions/emissions npy files, and npy file
of METs for each activity state
:return: Pandas dataframe of activity epoch data with one-hot encoded labels
:rtype: pandas.DataFrame
:return: Activity state labels
:rtype: list(str)
"""
activityModel = resolveModelPath(activityModel)
X = epochFile
featureColsFile = getFileFromTar(activityModel, 'featureCols.txt').getvalue()
featureColsList = featureColsFile.decode().split('\n')
featureCols = list(filter(None, featureColsList))
with pd.option_context('mode.use_inf_as_null', True):
null_rows = X[featureCols].isnull().any(axis=1)
print(null_rows.sum(), "rows with missing (NaN, None, or NaT) or Inf values, out of", len(X))
X['label'] = 'none'
X.loc[null_rows, 'label'] = 'inf_or_null'
# Setup RF
# Ignore warnings on deployed model using different version of pandas
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
rf = joblib.load(getFileFromTar(activityModel, 'rfModel.pkl'))
labels = rf.classes_.tolist()
rfPredictions = rf.predict(X.loc[~null_rows, featureCols].to_numpy())
# Free memory
del rf
# Setup HMM
priors = np.load(getFileFromTar(activityModel, 'hmmPriors.npy'))
transitions = np.load(getFileFromTar(activityModel, 'hmmTransitions.npy'))
emissions = np.load(getFileFromTar(activityModel, 'hmmEmissions.npy'))
hmmPredictions = viterbi(rfPredictions.tolist(), labels, priors,
transitions, emissions)
# Save predictions to pandas dataframe
X.loc[~null_rows, 'label'] = hmmPredictions
# Perform MET prediction...
# Pandas .replace method has a small bug
# See https://github.com/pandas-dev/pandas/issues/23305
# We need to force type
met_vals = np.load(getFileFromTar(activityModel, 'METs.npy'))
met_dict = dict(zip(labels, met_vals))
X.loc[~null_rows, 'MET'] = X.loc[~null_rows, 'label'].replace(met_dict).astype('float')
# Apply one-hot encoding
for l in labels:
X[l] = 0
X.loc[X['label'] == l, l] = 1
# Null values aren't one-hot encoded, so set such instances to NaN
for l in labels:
X.loc[X[labels].sum(axis=1) == 0, l] = np.nan
return X, labels
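# Example usage (a sketch; the epoch DataFrame and its file name are assumptions, the
# 'walmsley' model is the default above):
# >>> epochs = pd.read_csv('epoch_features.csv')  # must contain the model's feature columns
# >>> labelled, labels = activityClassification(epochs, activityModel='walmsley')
# >>> labelled[labels].sum()  # number of epochs assigned to each one-hot activity state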
MIN_TRAIN_CLASS_COUNT = 100
def trainClassificationModel(
trainingFile,
labelCol="label", participantCol="participant",
atomicLabelCol="annotation", metCol="MET",
featuresTxt="activityModels/features.txt",
trainParticipants=None, testParticipants=None,
rfThreads=1, rfTrees=1000, rfFeats=None, rfDepth=None,
outputPredict="activityModels/test-predictions.csv",
outputModel=None
):
"""Train model to classify activity states from epoch feature data
Based on a balanced random forest with a Hidden Markov Model containing
transitions between predicted activity states and emissions trained using
the input training file to identify pre-defined classes of behaviour from
accelerometer data.
:param str trainingFile: Input csv file of training data, pre-sorted by time
:param str labelCol: Input label column
:param str participantCol: Input participant column
:param str atomicLabelCol: Input 'atomic' annotation e.g. 'walking with dog'
vs. 'walking'
:param str metCol: Input MET column
:param str featuresTxt: Input txt file listing feature column names
:param str trainParticipants: Input comma separated list of participant IDs
to train on.
:param str testParticipants: Input comma separated list of participant IDs
to test on.
:param int rfThreads: Input num threads to use when training random forest
:param int rfTrees: Input num decision trees to include in random forest
:param str outputPredict: Output CSV of person, label, predicted
:param str outputModel: Output tarfile object which contains random forest
pickle model, HMM priors/transitions/emissions npy files, and npy file
of METs for each activity state. Will only output trained model if this
is not null e.g. "activityModels/sample-model.tar"
:return: New model written to <outputModel> OR csv of test predictions
written to <outputPredict>
:rtype: void
"""
# Load list of features to use in analysis
featureCols = getListFromTxtFile(featuresTxt)
# Load in participant information, and remove null/messy labels/features
train = pd.read_csv(trainingFile)
train = train[~pd.isnull(train[labelCol])]
allCols = [participantCol, labelCol, atomicLabelCol, metCol] + featureCols
with pd.option_context('mode.use_inf_as_null', True):
train = train[allCols].dropna(axis=0, how='any')
# Reduce size of train/test sets if we are training/testing on some people
if testParticipants is not None:
testPIDs = testParticipants.split(',')
test = train[train[participantCol].isin(testPIDs)]
train = train[~train[participantCol].isin(testPIDs)]
if trainParticipants is not None:
trainPIDs = trainParticipants.split(',')
train = train[train[participantCol].isin(trainPIDs)]
# Train Random Forest model
# First "monkeypatch" RF function to perform per-class balancing
global MIN_TRAIN_CLASS_COUNT
MIN_TRAIN_CLASS_COUNT = train[labelCol].value_counts().min()
forest._parallel_build_trees = _parallel_build_trees
# Then train RF model (which include per-class balancing)
rfClassifier = RandomForestClassifier(n_estimators=rfTrees,
n_jobs=rfThreads,
max_features=rfFeats,
max_depth=rfDepth,
oob_score=True)
rfModel = rfClassifier.fit(train[featureCols], train[labelCol].tolist())
# Train Hidden Markov Model
states, priors, emissions, transitions = train_HMM(rfModel, train[labelCol], labelCol)
    rfModel.oob_decision_function_ = None  # out-of-bag (OOB) predictions are no longer needed
# Estimate usual METs-per-class
METs = []
for s in states:
MET = train[train[labelCol] == s].groupby(atomicLabelCol)[metCol].mean().mean()
METs += [MET]
# Now write out model
if outputModel is not None:
saveModelsToTar(outputModel, featureCols, rfModel, priors, transitions, emissions, METs)
# Assess model performance on test participants
if testParticipants is not None:
print('test on participant(s):, ', testParticipants)
labels = rfModel.classes_.tolist()
rfPredictions = rfModel.predict(test[featureCols])
hmmPredictions = viterbi(rfPredictions.tolist(), labels, priors,
transitions, emissions)
test['predicted'] = hmmPredictions
# And write out to file
outCols = [participantCol, labelCol, 'predicted']
test[outCols].to_csv(outputPredict, index=False)
print('Output predictions written to: ', outputPredict)
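# Example usage (hypothetical file paths; a sketch, not a prescribed workflow):
# >>> trainClassificationModel('labelled-epochs.csv',
# ...                          featuresTxt='activityModels/features.txt',
# ...                          testParticipants='P01,P02',
# ...                          outputModel='activityModels/custom-model.tar')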
def train_HMM(rfModel, y_trainF, labelCol):
"""Train Hidden Markov Model
Use data not considered in construction of random forest to estimate
probabilities of: i) starting in a given state; ii) transitioning from
    one state to another; and iii) probability of the random forest being
correct when predicting a given class (emission probability)
:param sklearn.RandomForestClassifier rfModel: Input random forest object
    :param dataframe.Column y_trainF: Input groundtruth for each instance
:param str labelCol: Input label column
    :return: states - List of unique activity state labels
    :rtype: numpy.array
    :return: priors - Prior probabilities for each activity state
    :rtype: numpy.array
    :return: emissions - Probability matrix of RF prediction being true
    :rtype: numpy.array
    :return: transitions - Probability matrix of transitioning from one activity
        state to another
    :rtype: numpy.array
"""
states = rfModel.classes_
# Get out of bag (OOB) predictions from Random Forest
predOOB = pd.DataFrame(rfModel.oob_decision_function_)
predOOB.columns = states
predOOB['labelOOB'] = predOOB.idxmax(axis=1)
predOOB['groundTruth'] = y_trainF.values
# Initial state probabilities
prior = []
for s in states:
sProb = len(y_trainF[y_trainF == s]) / (len(y_trainF) * 1.0)
prior += [sProb]
# Emission probabilities
emissions = np.zeros((len(states), len(states)))
j = 0
for predictedState in states:
k = 0
for actualState in states:
emissions[j, k] = predOOB[actualState][predOOB['groundTruth'] == predictedState].sum()
emissions[j, k] /= len(predOOB[predOOB['groundTruth'] == predictedState])
k += 1
j += 1
# Transition probabilities
train = y_trainF.to_frame()
train['nextLabel'] = train[labelCol].shift(-1)
transitions = np.zeros((len(states), len(states)))
j = 0
for s1 in states:
k = 0
for s2 in states:
transitions[j, k] = len(train[(train[labelCol] == s1) & (train['nextLabel'] == s2)]
) / (len(train[train[labelCol] == s1]) * 1.0)
k += 1
j += 1
# Return HMM matrices
return states, prior, emissions, transitions
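# Shape sketch (illustrative): with k activity states, train_HMM returns
#   states      -> the k state labels (rfModel.classes_)
#   prior       -> length-k list of starting-state probabilities
#   emissions   -> k x k matrix; row = true state, column = RF-predicted state
#   transitions -> k x k matrix; row = current state, column = next state
# These arrays are consumed directly by viterbi() below.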
def viterbi(observations, states, priors, transitions, emissions,
probabilistic=False):
"""Perform HMM smoothing over observations via Viteri algorithm
:param list(str) observations: List/sequence of activity states
:param numpy.array states: List of unique activity state labels
:param numpy.array priors: Prior probabilities for each activity state
:param numpy.array transitions: Probability matrix of transitioning from one
activity state to another
:param numpy.array emissions: Probability matrix of RF prediction being true
:param bool probabilistic: Write probabilistic output for each state, rather
than writing most likely state for any given prediction.
:return: Smoothed list/sequence of activity states
:rtype: list(str)
"""
def norm(x):
return x / x.sum()
tinyNum = 0.000001
nObservations = len(observations)
nStates = len(states)
v = np.zeros((nObservations, nStates)) # initialise viterbi table
# Set prior state values for first observation...
for state in range(0, len(states)):
v[0, state] = np.log(priors[state] * emissions[state, states.index(observations[0])] + tinyNum)
    # Fill in remaining matrix observations
    # Use log space (as we multiply successively smaller p values)
for k in range(1, nObservations):
for state in range(0, len(states)):
v[k, state] = np.log(emissions[state, states.index(observations[k])] + tinyNum) + \
np.max(v[k - 1, :] + np.log(transitions[:, state] + tinyNum), axis=0)
# Now construct viterbiPath (propagating backwards)
viterbiPath = observations
# Pick most probable state for final observation
viterbiPath[nObservations - 1] = states[np.argmax(v[nObservations - 1, :], axis=0)]
# Probabilistic method will give probability of each label
if probabilistic:
viterbiProba = np.zeros((nObservations, nStates)) # initialize table
viterbiProba[nObservations - 1, :] = norm(v[nObservations - 1, :])
# And then work backwards to pick most probable state for all other observations
for k in list(reversed(range(0, nObservations - 1))):
viterbiPath[k] = states[np.argmax(
v[k, :] + np.log(transitions[:, states.index(viterbiPath[k + 1])] + tinyNum), axis=0)]
if probabilistic:
viterbiProba[k, :] = norm(v[k, :] + np.log(transitions[:, states.index(viterbiPath[k + 1])] + tinyNum))
# Output as list...
return viterbiProba if probabilistic else viterbiPath
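# Minimal sketch (toy two-state numbers, all assumed): an isolated 'walk' observation
# between two 'sleep' epochs is smoothed away by the HMM.
def _example_viterbi_smoothing():
    states = ['sleep', 'walk']
    priors = [0.7, 0.3]
    transitions = np.array([[0.9, 0.1], [0.2, 0.8]])
    emissions = np.array([[0.95, 0.05], [0.1, 0.9]])
    return viterbi(['sleep', 'walk', 'sleep'], states, priors,
                   transitions, emissions)  # -> ['sleep', 'sleep', 'sleep']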
GLOBAL_INDICES = []
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None, n_samples_bootstrap=None):
"""Monkeypatch scikit learn to use per-class balancing
Private function used to fit a single tree in parallel.
"""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
indices = np.empty(shape=0, dtype='int64')
for y_class in np.unique(y):
sample_indices, selected = np.where(y == y_class)
# SELECT min_count FROM CLASS WITH REPLACEMENT
sample_indices = np.random.choice(sample_indices,
size=MIN_TRAIN_CLASS_COUNT, replace=True)
indices = np.concatenate((indices, sample_indices))
# IGNORE sample_weight AND SIMPLY PASS SELECTED DATA
tree.fit(X[indices, :], y[indices], check_input=True)
GLOBAL_INDICES.append(indices)
return tree
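# Balancing sketch (illustrative counts, not from real data): if the training labels
# contain 5000 'sedentary', 800 'walking' and 100 'bicycling' epochs, then
# MIN_TRAIN_CLASS_COUNT == 100 and every tree is grown on 100 samples drawn with
# replacement from each class (300 rows in total) instead of a plain bootstrap over
# the imbalanced data.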
def perParticipantSummaryHTML(dfParam, yTrueCol, yPredCol, pidCol, outHTML):
"""Provide HTML summary of how well activity classification model works
at the per-participant level
:param dataframe dfParam: Input pandas dataframe
:param str yTrueCol: Input for y_true column label
    :param str yPredCol: Input for y_pred column label
:param str pidCol: Input for participant ID column label
:param str outHTML: Output file to print HTML summary to
:return: HTML file reporting kappa, accuracy, and confusion matrix
:rtype: void
"""
# get kappa & accuracy on a per-participant basis
pIDs = dfParam[pidCol].unique()
pIDKappa = []
pIDAccuracy = []
for pID in pIDs:
d_tmp = dfParam[dfParam[pidCol] == pID]
pIDKappa += [metrics.cohen_kappa_score(d_tmp[yTrueCol], d_tmp[yPredCol])]
pIDAccuracy += [metrics.accuracy_score(d_tmp[yTrueCol], d_tmp[yPredCol])]
d_summary = pd.DataFrame()
d_summary['pid'] = pIDs
d_summary['kappa'] = pIDKappa
d_summary['accuracy'] = pIDAccuracy
# print out values to html string
kappaSDHTML = "Mean Kappa (SD) = "
kappaSDHTML += utils.meanSDstr(d_summary['kappa'].mean(),
d_summary['kappa'].std(), 2)
accuracySDHTML = "Mean accuracy (SD) = "
accuracySDHTML += utils.meanSDstr(d_summary['accuracy'].mean() * 100,
d_summary['accuracy'].std() * 100, 1) + ' %'
kappaCIHTML = "Mean Kappa (95% CI) = "
kappaCIHTML += utils.meanCIstr(d_summary['kappa'].mean(),
d_summary['kappa'].std(), len(d_summary), 2)
accuracyCIHTML = "Mean accuracy (95% CI) = "
accuracyCIHTML += utils.meanCIstr(d_summary['accuracy'].mean() * 100,
d_summary['accuracy'].std() * 100, len(d_summary), 1) + ' %'
# get confusion matrix to pandas dataframe
y_true = dfParam[yTrueCol]
y_pred = dfParam[yPredCol]
labels = sorted(list(set(y_true) | set(y_pred)))
cnf_matrix = confusion_matrix(y_true, y_pred, labels)
df_confusion = | pd.DataFrame(data=cnf_matrix, columns=labels, index=labels) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import streamlit as st
import pandas as pd
import numpy as np
import geopandas as gpd
from pathlib import Path
from PIL import Image
import altair as alt
import pydeck as pdk
import numpy as np
from api_key import mapbox_key
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import pydeckmapping
from importlib import reload
reload(pydeckmapping)
from shapely.geometry import Point
from pydeckmapping import build_map
import datetime
import sys
sys.path.append('/Users/david/Dropbox/PhD/Scripts/Spatial analyses')
import pyspace
reload(pyspace)
from pyproj import Transformer
transformer = Transformer.from_crs("epsg:2056", "epsg:4326")
st.image('https://reseau-delta.ch/assets/ci_content/images/logo.png',width = 180)
st.markdown(st.__version__)
st.title("Plateforme d'analyse des donnรฉes du rรฉseau de soins Delta")
text_intro = """ Les donnรฉes analysรฉes sur cette plateforme correspondent aux donnรฉes Delta de l'annรฉe {} et portent sur plus de {} patients dont {} ร Genรจve. Il y a {} prescriteurs dont {} MPR Delta, {} distributeurs et {} cercles. """
################################
###########LOAD DATA############
################################
@st.cache(allow_output_mutation=True)
def load_data(path,DATE_COLUMN = None):
"""Load data into DataFrame"""
data = pd.read_csv(path)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
if DATE_COLUMN is not None:
data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
return data
@st.cache(allow_output_mutation=True)
def load_gdf(path):
"""Load data into GeoDataFrame"""
data = gpd.read_file(path)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
return data
date = '20200318'
#####################################GENERAL DATASETS###########################################
data_folder = Path("../Data").resolve()
buildings_ge = pd.read_pickle('../Data/buildings_ge.pkl')
drug_path = data_folder/'Clean_data'/'{}_drug.csv'.format(date)
geom_path = data_folder/'Clean_data'/'{}_geometries.geojson'.format(date)
patient_path = data_folder/'Clean_data'/'{}_patient.geojson'.format(date)
cercle_path = data_folder/'Clean_data'/'{}_cercle.csv'.format(date)
event_path = data_folder / 'Clean_data'/'{}_event.geojson'.format(date)
mpr_path = data_folder / 'Clean_data'/'{}_mpr.geojson'.format(date)
distributor_path = data_folder/'Clean_data/{}_distributor.geojson'.format(date)
prescriber_path = data_folder / 'Clean_data/{}_prescriber.geojson'.format(date)
provider_path = data_folder / 'Clean_data/{}_provider.geojson'.format(date)
animator_path = data_folder / 'Clean_data/{}_animator.geojson'.format(date)
prestation_path = data_folder/'Clean_data'/'{}_prestation.csv'.format(date)
data_load_state = st.text('Loading data...') # Create a text element and let the reader know the data is loading.
df_geometries = load_gdf(path = geom_path) #Import geometries
gdf_distributor = load_gdf(distributor_path)
gdf_prescriber = load_gdf(prescriber_path)
gdf_provider = load_gdf(provider_path)
gdf_animator = load_gdf(animator_path)
gdf_event = load_gdf(event_path)
gdf_mpr = load_gdf(mpr_path)
df_cercle = load_data(cercle_path)
df_drug = load_data(path = drug_path,DATE_COLUMN = 'delivereddate') #Load drug data
gdf_patient = load_gdf(path = patient_path) # Load patient data
data_load_state.text('Loading data...done!') # Notify the reader that the data was successfully loaded.
atc_data = load_data(path = '../Data/atc_nomenclature.csv') #Load the ATC nomenclature from WHO
df_atc_levels = pd.read_csv('../Data/atc_levels.csv') #Import ATC levels
cantons = gpd.read_file('/Users/david/Dropbox/PhD/Data/Databases/SITG/SHAPEFILE_LV95_LN02/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp')
communes = gpd.read_file('/Users/david/Dropbox/PhD/Data/Databases/SITG/SHAPEFILE_LV95_LN02/swissBOUNDARIES3D_1_3_TLM_HOHEITSGEBIET.shp')
communes_ge = communes[communes.KANTONSNUM == 25]
MIP = ['indometacin','acemetacin','ketoprofen','phenylbutazon','piroxicam','meloxicam','etoricoxib','pethidin','chinidin','flecainid','sotalol','nitrofurantoin','zolpidem','estradiol','trimipamine','acémétacine','amiodarone']
#################################################################################################
################################## DATA PREPARATION #############################################
atc_data = atc_data.fillna('')
gdf_mpr['mpr_yy_bth'] = gdf_mpr['mprbirthday'].str.split('.').str[2]
gdf_mpr['mpr_yy_entry'] = gdf_mpr['mprentrydate'].str.split('.').str[2]
gdf_mpr['mpr_yy_exit'] = gdf_mpr['mprexitdate'].str.split('.').str[2]
gdf_mpr = gdf_mpr.drop(['mprentrydate','mprbirthday','mprexitdate'],axis = 1).drop_duplicates()
gdf_mpr[['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit']] = gdf_mpr[['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit']].astype('float')
no_dupli = gdf_mpr.groupby(['id']).mean().reset_index()
no_dupli = no_dupli.drop(['e','n'],axis = 1)
gdf_mpr = gdf_mpr.drop(['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit'],axis = 1).merge(no_dupli, on = 'id').drop_duplicates().reset_index()
gdf_mpr = gdf_mpr[['id','name','mprsex','mpr_yy_bth','mpr_yy_entry','mpr_yy_exit','e','n','geometry']].drop_duplicates(subset = ['id'])
gdf_mpr['age'] = 2018-gdf_mpr.mpr_yy_bth
gdf_mpr.loc[gdf_mpr.age > 200, 'age'] = 65 ###To be changed (better to change in Data Preparation and replace yy_bth before age calculation)
gdf_mpr.loc[gdf_mpr.age < 0,'age'] = np.nan
bins = [30, 45, 60, 75]
gdf_mpr['age_cat'] = pd.cut(gdf_mpr['age'], bins)
dict_atc_levels= dict(zip(df_atc_levels.atc, df_atc_levels.level))
gdf_event_cercle = pd.merge(gdf_event,df_cercle, left_on = 'id',right_on = 'eventid', how = 'left')
uniq_cercle_geom = gdf_event_cercle.drop_duplicates(subset = 'circlename',keep='first').reset_index(drop = True)
# uniq_cercle_geom['longitude'],uniq_cercle_geom['latitude'] = uniq_cercle_geom.to_crs(epsg = 4326).geometry.x,uniq_cercle_geom.to_crs(epsg = 4326).geometry.y
uniq_cercle_geom[['latitude','longitude']]= uniq_cercle_geom.apply(lambda x: transformer.transform(x.e,x.n),axis = 1,result_type = 'expand')
geojson_file_CQ = '../Data/CQ_polygons.geojson'
bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115]
gdf_patient['age_cat'] = pd.cut(gdf_patient['age'], bins)
#################################################################################################
################################ PERSONAL ID ##############################################
st.sidebar.markdown('## Insert your personal ID')
st.sidebar.markdown('Example : 40e8ac4dbc023d86a815f8476a1884e4')
personal_id = st.sidebar.text_input('Personal ID')
if personal_id not in df_drug.prescriberid.values:
st.sidebar.markdown('### *Invalid ID*')
else:
st.sidebar.markdown('### *Valid ID*')
colors_id = {}
for i in df_drug.prescriberid.values:
colors_id[i] = 'blue'
if i == personal_id:
colors_id[i] = 'red'
#################################################################################################
################################ INTRODUCTION TEXT ##############################################
text_intro = text_intro.format(2018,gdf_patient.id.nunique(),gdf_patient[gdf_patient.networkname == 'Delta Genève'].id.nunique(),df_drug.prescriberid.nunique(),df_drug[df_drug.mpr_delta == 1].prescriberid.nunique(),df_drug.distributorid.nunique(),52)
st.markdown(text_intro)
st.sidebar.markdown('# *Analyses*')
#################################################################################################
################################ SHOW RAW DATA ##################################################
if st.sidebar.checkbox('Show raw prescription data',key = 'Drug prescription data'):
st.subheader('Raw data')
st.write(df_drug.head())
#################################################################################################
#################################AGE FILTERING ##################################################
st.markdown('### Age filtering')
age_filter = st.slider("Patient Age", 0, 110, (25, 75), step = 5)
patients = gdf_patient[(gdf_patient.age >= age_filter[0])&(gdf_patient.age <= age_filter[1])].id.values
filtered_drug = df_drug[df_drug.patientid.isin(patients)]
filtered_drug = | pd.merge(filtered_drug,df_geometries[['id','lat','lon']],how = 'left',left_on = 'patientid',right_on = 'id') | pandas.merge |
#!/usr/bin/env python3
import pdb
import pandas as pd
from pylru import lrudecorator
import seaborn as sns
BII_URL = 'http://ipbes.s3.amazonaws.com/weighted/' \
'historical-BIIAb-npp-country-1880-2014.csv'
@lrudecorator(10)
def get_raw_bii_data():
return pd.read_csv(BII_URL)
def findt(ss):
rval = [None] * len(ss)
rval[0] = True
for i in range(1, len(ss)):
rval[i] = not pd.isnull(ss.iloc[i]) and ss.iloc[i] != ss.iloc[i - 1]
return pd.Series(rval)
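# Illustrative example (not from the original source): findt marks the first
# element and every later position whose value is non-null and differs from the
# previous one, e.g. findt(pd.Series([1, 1, 2, None, 2])) gives
# [True, False, True, False, True].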
def get_bii_data(dropna=True):
bii = get_raw_bii_data()
cols = list(filter(lambda nn: nn[0:6] == 'BIIAb_' or nn[0:4] == 'GDP_',
bii.columns))
bii2 = bii.loc[:, ['fips', 'ar5', 'name', 'iso3', 'npp_mean'] + cols]
if dropna:
bii2.dropna(inplace=True)
cols = tuple(filter(lambda col: col[0:6] == 'BIIAb_', bii2.columns))
for col in bii2.loc[:, cols].columns:
bii2.insert(5, col.replace('Ab_', 'Ab2_'), bii2[col].div(bii2.npp_mean))
t7 = pd.wide_to_long(bii2, ['BIIAb', 'BIIAb2', 'GDP'], i=['name'],
j='Year', sep='_')
t7.reset_index(inplace=True)
t7 = t7.assign(year=t7.Year.astype(int))
del t7['Year']
return t7
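# Sketch of the reshape above (column names inferred from the source file,
# e.g. 'BIIAb_2014', 'GDP_2014'): pd.wide_to_long melts the per-year columns
# into long rows keyed by country name and year, producing value columns
# 'BIIAb', 'BIIAb2' (BIIAb divided by npp_mean) and 'GDP'.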
def get_wid_data():
url_temp = 'http://ipbes.s3.amazonaws.com/by-country/%s.csv'
metrics = ('sfiinc992j', 'afiinc992t', 'afiinc992j', 'afiinc992i')
data = dict()
for metric in metrics:
data[metric] = pd.read_csv(url_temp % metric, encoding='utf-8')
return data
def get_eci_data(dropna=False):
bii = get_raw_bii_data()
cols = list(filter(lambda nn: nn[0:4] == 'ECI_', bii.columns))
bii2 = bii.loc[:, ['fips', 'ar5', 'name', 'iso3',] + cols]
if dropna:
bii2.dropna(inplace=True)
t7 = pd.wide_to_long(bii2, 'ECI', i=['name'], j='Year', sep='_')
t7.reset_index(inplace=True)
t7 = t7.assign(year=t7.Year.astype(int))
del t7['Year']
return t7
def get_rol_data(dropna=False):
bii = get_raw_bii_data()
cols = {'WJP Rule of Law Index: Overall Score': 'ROLI',
'Factor 1: Constraints on Government Powers': 'ROLI_1',
'Factor 2: Absence of Corruption': 'ROLI_2',
'Factor 3: Open Government ': 'ROLI_3',
'Factor 4: Fundamental Rights': 'ROLI_4',
'Factor 5: Order and Security': 'ROLI_5',
'Factor 6: Regulatory Enforcement': 'ROLI_6',
'Factor 7: Civil Justice': 'ROLI_7',
'Factor 8: Criminal Justice': 'ROLI_8'
}
bii2 = bii.loc[:, ['fips', 'ar5', 'name', 'iso3'] + list(cols.keys())]
if dropna:
bii2.dropna(inplace=True)
bii2.rename(columns=cols, inplace=True)
return bii2
def get_language_data():
url = 'http://ipbes.s3.amazonaws.com/by-country/language-distance.csv'
return pd.read_csv(url, encoding='utf-8')
def get_area_data():
url = 'http://ipbes.s3.amazonaws.com/by-country/wb-area.csv'
return | pd.read_csv(url, encoding='utf-8') | pandas.read_csv |
import os
import pandas as pd
from Lib.get_texts import get_generated_lyrics, get_lyrics_dataset
from Lib.get_structure import get_lyrics_structure
from Lib.get_sentiment import calculate_sentiment_scores
from Lib.get_bagofwords import get_repetition_scores, combine_bag_of_words, lemmatize_lyrics
# This method performs the calculations of the evaluation metrics for a corpus of texts
# Information are stored in a pandas dataframe
# parameters: Text corpus as list, name of the *csv file
def evaluate_text(texts, name):
# get song structure
pd.set_option("display.max_columns", 20)
pd.set_option("large_repr", "truncate")
df = get_lyrics_structure(texts)
print(df)
# get bag of words
counters = lemmatize_lyrics(texts)
df_words = combine_bag_of_words(counters)
rep_scores = get_repetition_scores(counters)
df["bagofwords"] = counters
df["repscores"] = rep_scores
print(df)
# get sentiment
df2 = calculate_sentiment_scores(texts)
df = | pd.concat([df, df2], axis=1) | pandas.concat |
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
from sqlite3 import connect
from scipy.io import loadmat
import io
from rich.progress import track
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from research.utils import img_array_to_pandas
UCI_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/"
KEEL_URL = "http://sci2s.ugr.es/keel/keel-dataset/datasets/imbalanced/"
GIC_URL = "http://www.ehu.eus/ccwintco/uploads/"
OPENML_URL = "https://www.openml.org/data/get_csv/"
FETCH_URLS = {
"breast_tissue": urljoin(UCI_URL, "00192/BreastTissue.xls"),
"ecoli": urljoin(UCI_URL, "ecoli/ecoli.data"),
"eucalyptus": urljoin(OPENML_URL, "3625/dataset_194_eucalyptus.arff"),
"glass": urljoin(UCI_URL, "glass/glass.data"),
"haberman": urljoin(UCI_URL, "haberman/haberman.data"),
"heart": urljoin(UCI_URL, "statlog/heart/heart.dat"),
"iris": urljoin(UCI_URL, "iris/bezdekIris.data"),
"libras": urljoin(UCI_URL, "libras/movement_libras.data"),
"liver": urljoin(UCI_URL, "liver-disorders/bupa.data"),
"pima": "https://gist.githubusercontent.com/ktisha/c21e73a1bd1700294ef790c56c8aec1f"
"/raw/819b69b5736821ccee93d05b51de0510bea00294/pima-indians-diabetes.csv",
"vehicle": urljoin(UCI_URL, "statlog/vehicle/"),
"wine": urljoin(UCI_URL, "wine/wine.data"),
"new_thyroid_1": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid1.zip"
),
"new_thyroid_2": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid2.zip"
),
"cleveland": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "cleveland-0_vs_4.zip"
),
"led": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "led7digit-0-2-4-5-6-7-8-9_vs_1.zip"
),
"page_blocks_1_3": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "page-blocks-1-3_vs_4.zip"
),
"vowel": urljoin(urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "vowel0.zip"),
"yeast_1": urljoin(urljoin(KEEL_URL, "imb_IRlowerThan9/"), "yeast1.zip"),
"banknote_authentication": urljoin(
UCI_URL, "00267/data_banknote_authentication.txt"
),
"arcene": urljoin(UCI_URL, "arcene/"),
"audit": urljoin(UCI_URL, "00475/audit_data.zip"),
"spambase": urljoin(UCI_URL, "spambase/spambase.data"),
"parkinsons": urljoin(UCI_URL, "parkinsons/parkinsons.data"),
"ionosphere": urljoin(UCI_URL, "ionosphere/ionosphere.data"),
"breast_cancer": urljoin(UCI_URL, "breast-cancer-wisconsin/wdbc.data"),
"adult": urljoin(UCI_URL, "adult/adult.data"),
"abalone": urljoin(UCI_URL, "abalone/abalone.data"),
"acute": urljoin(UCI_URL, "acute/diagnosis.data"),
"annealing": urljoin(UCI_URL, "annealing/anneal.data"),
"census": urljoin(UCI_URL, "census-income-mld/census-income.data.gz"),
"contraceptive": urljoin(UCI_URL, "cmc/cmc.data"),
"covertype": urljoin(UCI_URL, "covtype/covtype.data.gz"),
"credit_approval": urljoin(UCI_URL, "credit-screening/crx.data"),
"dermatology": urljoin(UCI_URL, "dermatology/dermatology.data"),
"echocardiogram": urljoin(UCI_URL, "echocardiogram/echocardiogram.data"),
"flags": urljoin(UCI_URL, "flags/flag.data"),
"heart_disease": [
urljoin(UCI_URL, "heart-disease/processed.cleveland.data"),
urljoin(UCI_URL, "heart-disease/processed.hungarian.data"),
urljoin(UCI_URL, "heart-disease/processed.switzerland.data"),
urljoin(UCI_URL, "heart-disease/processed.va.data"),
],
"hepatitis": urljoin(UCI_URL, "hepatitis/hepatitis.data"),
"german_credit": urljoin(UCI_URL, "statlog/german/german.data"),
"thyroid": urljoin(UCI_URL, "thyroid-disease/thyroid0387.data"),
"first_order_theorem": urljoin(OPENML_URL, "1587932/phpPbCMyg"),
"gas_drift": urljoin(OPENML_URL, "1588715/phpbL6t4U"),
"autouniv_au7": urljoin(OPENML_URL, "1593748/phpmRPvKy"),
"autouniv_au4": urljoin(OPENML_URL, "1593744/phpiubDlf"),
"mice_protein": urljoin(OPENML_URL, "17928620/phpchCuL5"),
"steel_plates": urljoin(OPENML_URL, "18151921/php5s7Ep8"),
"cardiotocography": urljoin(OPENML_URL, "1593756/phpW0AXSQ"),
"waveform": urljoin(OPENML_URL, "60/dataset_60_waveform-5000.arff"),
"volkert": urljoin(OPENML_URL, "19335689/file1c556e3db171.arff"),
"asp_potassco": urljoin(OPENML_URL, "21377447/file18547f421393.arff"),
"wine_quality": urljoin(OPENML_URL, "4965268/wine-quality-red.arff"),
"mfeat_zernike": urljoin(OPENML_URL, "22/dataset_22_mfeat-zernike.arff"),
"gesture_segmentation": urljoin(OPENML_URL, "1798765/phpYLeydd"),
"texture": urljoin(OPENML_URL, "4535764/phpBDgUyY"),
"usps": urljoin(OPENML_URL, "19329737/usps.arff"),
"japanese_vowels": urljoin(OPENML_URL, "52415/JapaneseVowels.arff"),
"pendigits": urljoin(OPENML_URL, "32/dataset_32_pendigits.arff"),
"image_segmentation": urljoin(OPENML_URL, "18151937/phpyM5ND4"),
"baseball": urljoin(OPENML_URL, "3622/dataset_189_baseball.arff"),
"indian_pines": [
urljoin(GIC_URL, "2/22/Indian_pines.mat"),
urljoin(GIC_URL, "c/c4/Indian_pines_gt.mat"),
],
"salinas": [
urljoin(GIC_URL, "f/f1/Salinas.mat"),
urljoin(GIC_URL, "f/fa/Salinas_gt.mat"),
],
"salinas_a": [
urljoin(GIC_URL, "d/df/SalinasA.mat"),
urljoin(GIC_URL, "a/aa/SalinasA_gt.mat"),
],
"pavia_centre": [
urljoin(GIC_URL, "e/e3/Pavia.mat"),
urljoin(GIC_URL, "5/53/Pavia_gt.mat"),
],
"pavia_university": [
urljoin(GIC_URL, "e/ee/PaviaU.mat"),
urljoin(GIC_URL, "5/50/PaviaU_gt.mat"),
],
"kennedy_space_center": [
urljoin(GIC_URL, "2/26/KSC.mat"),
urljoin(GIC_URL, "a/a6/KSC_gt.mat"),
],
"botswana": [
urljoin(GIC_URL, "7/72/Botswana.mat"),
urljoin(GIC_URL, "5/58/Botswana_gt.mat"),
],
}
RANDOM_STATE = 0
class Datasets:
"""Base class to download and save datasets."""
def __init__(self, names="all"):
self.names = names
@staticmethod
def _modify_columns(data):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1)
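    # Example (illustrative): a frame with columns ['f1', 'f2', 'target'] comes
    # back with the feature columns renamed to 0 and 1 and 'target' kept last,
    # so every stored dataset shares the same layout.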
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data = self._modify_columns(fetch_data())
self.content_.append((name, data))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
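# Illustrative usage of the subclasses below (assumes network access):
#   ImbalancedBinaryDatasets(names=["iris", "wine"]).download().save(".", "imbalanced")
# downloads the requested datasets and writes one SQLite table per dataset
# (plus the undersampled variants added by ImbalancedBinaryDatasets.download).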
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
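    # Example (illustrative): for y with class counts {0: 100, 1: 30},
    # _calculate_ratio(2, y) keeps the majority count and halves the minority,
    # returning Counter({0: 100, 1: 15}).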
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
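    # Example (illustrative): with counts {0: 500, 1: 90} and
    # multiplication_factor=3, the minority class is undersampled to 30 rows
    # while the majority class is left untouched.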
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data = pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
data = data.drop(columns=0).rename(columns={8: "target"})
data["target"] = data["target"].isin(["pp"]).astype(int)
return data
def fetch_eucalyptus(self):
"""Download and transform the Eucalyptus Data Set.
The minority class is identified as the `best` label
and the majority class as the rest of the labels.
https://www.openml.org/d/188
"""
data = pd.read_csv(FETCH_URLS["eucalyptus"])
data = data.iloc[:, -9:].rename(columns={"Utility": "target"})
data = data[data != "?"].dropna()
data["target"] = data["target"].isin(["best"]).astype(int)
return data
def fetch_glass(self):
"""Download and transform the Glass Identification Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/glass+identification
"""
data = pd.read_csv(FETCH_URLS["glass"], header=None)
data = data.drop(columns=0).rename(columns={10: "target"})
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_haberman(self):
"""Download and transform the Haberman's Survival Data Set.
The minority class is identified as the `1` label
and the majority class as the `0` label.
https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
"""
data = pd.read_csv(FETCH_URLS["haberman"], header=None)
data.rename(columns={3: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_heart(self):
"""Download and transform the Heart Data Set.
The minority class is identified as the `2` label
and the majority class as the `1` label.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_iris(self):
"""Download and transform the Iris Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/iris
"""
data = pd.read_csv(FETCH_URLS["iris"], header=None)
data.rename(columns={4: "target"}, inplace=True)
data["target"] = data["target"].isin(["Iris-setosa"]).astype(int)
return data
def fetch_libras(self):
"""Download and transform the Libras Movement Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Libras+Movement
"""
data = pd.read_csv(FETCH_URLS["libras"], header=None)
data.rename(columns={90: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_liver(self):
"""Download and transform the Liver Disorders Data Set.
The minority class is identified as the `1` label
and the majority class as the '2' label.
https://archive.ics.uci.edu/ml/datasets/liver+disorders
"""
data = pd.read_csv(FETCH_URLS["liver"], header=None)
data.rename(columns={6: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_pima(self):
"""Download and transform the Pima Indians Diabetes Data Set.
The minority class is identified as the `1` label
and the majority class as the '0' label.
https://www.kaggle.com/uciml/pima-indians-diabetes-database
"""
data = pd.read_csv(FETCH_URLS["pima"], header=None, skiprows=9)
data.rename(columns={8: "target"}, inplace=True)
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data = pd.DataFrame()
for letter in ascii_lowercase[0:9]:
partial_data = pd.read_csv(
urljoin(FETCH_URLS["vehicle"], "xa%s.dat" % letter),
header=None,
delim_whitespace=True,
)
partial_data = partial_data.rename(columns={18: "target"})
partial_data["target"] = partial_data["target"].isin(["van"]).astype(int)
data = data.append(partial_data)
return data
def fetch_wine(self):
"""Download and transform the Wine Data Set.
The minority class is identified as the `2` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/wine
"""
data = pd.read_csv(FETCH_URLS["wine"], header=None)
data.rename(columns={0: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_new_thyroid_1(self):
"""Download and transform the Thyroid 1 Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=145
"""
zipped_data = requests.get(FETCH_URLS["new_thyroid_1"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("new-thyroid1.dat").decode("utf-8")
)
data = pd.read_csv(
StringIO(sub(r"@.+\n+", "", unzipped_data)),
header=None,
sep=", ",
engine="python",
)
data.rename(columns={5: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_new_thyroid_2(self):
"""Download and transform the Thyroid 2 Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=146
"""
zipped_data = requests.get(FETCH_URLS["new_thyroid_2"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("newthyroid2.dat").decode("utf-8")
)
data = pd.read_csv(
StringIO(sub(r"@.+\n+", "", unzipped_data)),
header=None,
sep=", ",
engine="python",
)
data.rename(columns={5: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_cleveland(self):
"""Download and transform the Heart Disease Cleveland Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=980
"""
zipped_data = requests.get(FETCH_URLS["cleveland"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("cleveland-0_vs_4.dat").decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=1330
"""
data = pd.read_csv(FETCH_URLS["dermatology"], header=None)
data.rename(columns={34: "target"}, inplace=True)
data.drop(columns=33, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_led(self):
"""Download and transform the LED Display Domain Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=998
"""
zipped_data = requests.get(FETCH_URLS["led"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("led7digit-0-2-4-5-6-7-8-9_vs_1.dat")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={7: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_page_blocks_1_3(self):
"""Download and transform the Page Blocks 1-3 Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=124
"""
zipped_data = requests.get(FETCH_URLS["page_blocks_1_3"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("page-blocks-1-3_vs_4.dat")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={10: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_vowel(self):
"""Download and transform the Vowel Recognition Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=127
"""
zipped_data = requests.get(FETCH_URLS["vowel"]).content
unzipped_data = ZipFile(BytesIO(zipped_data)).read("vowel0.dat").decode("utf-8")
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin([" positive"]).astype(int)
return data
def fetch_yeast_1(self):
"""Download and transform the Yeast 1 Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=153
"""
zipped_data = requests.get(FETCH_URLS["yeast_1"]).content
unzipped_data = ZipFile(BytesIO(zipped_data)).read("yeast1.dat").decode("utf-8")
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={8: "target"}, inplace=True)
data["target"] = data["target"].isin([" positive"]).astype(int)
return data
class BinaryDatasets(Datasets):
"""Class to download, transform and save binary class datasets."""
def fetch_banknote_authentication(self):
"""Download and transform the Banknote Authentication Data Set.
https://archive.ics.uci.edu/ml/datasets/banknote+authentication
"""
data = pd.read_csv(FETCH_URLS["banknote_authentication"], header=None)
data.rename(columns={4: "target"}, inplace=True)
return data
def fetch_arcene(self):
"""Download and transform the Arcene Data Set.
https://archive.ics.uci.edu/ml/datasets/Arcene
"""
url = FETCH_URLS["arcene"]
data, labels = [], []
for data_type in ("train", "valid"):
data.append(
pd.read_csv(
urljoin(url, f"ARCENE/arcene_{data_type}.data"),
header=None,
sep=" ",
).drop(columns=list(range(1998, 10001)))
)
labels.append(
pd.read_csv(
urljoin(
url,
("ARCENE/" if data_type == "train" else "")
+ f"arcene_{data_type}.labels",
),
header=None,
).rename(columns={0: "target"})
)
data = pd.concat(data, ignore_index=True)
labels = pd.concat(labels, ignore_index=True)
data = pd.concat([data, labels], axis=1)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_audit(self):
"""Download and transform the Audit Data Set.
https://archive.ics.uci.edu/ml/datasets/Audit+Data
"""
zipped_data = requests.get(FETCH_URLS["audit"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("audit_data/audit_risk.csv")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), engine="python")
data = (
data.drop(columns=["LOCATION_ID"])
.rename(columns={"Risk": "target"})
.dropna()
)
return data
def fetch_spambase(self):
"""Download and transform the Spambase Data Set.
https://archive.ics.uci.edu/ml/datasets/Spambase
"""
data = pd.read_csv(FETCH_URLS["spambase"], header=None)
data.rename(columns={57: "target"}, inplace=True)
return data
def fetch_parkinsons(self):
"""Download and transform the Parkinsons Data Set.
https://archive.ics.uci.edu/ml/datasets/parkinsons
"""
data = pd.read_csv(FETCH_URLS["parkinsons"])
data = pd.concat(
[
data.drop(columns=["name", "status"]),
data[["status"]].rename(columns={"status": "target"}),
],
axis=1,
)
data["target"] = data["target"].isin([0]).astype(int)
return data
def fetch_ionosphere(self):
"""Download and transform the Ionosphere Data Set.
https://archive.ics.uci.edu/ml/datasets/ionosphere
"""
data = pd.read_csv(FETCH_URLS["ionosphere"], header=None)
data = data.drop(columns=[0, 1]).rename(columns={34: "target"})
data["target"] = data["target"].isin(["b"]).astype(int)
return data
def fetch_breast_cancer(self):
"""Download and transform the Breast Cancer Wisconsin Data Set.
https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
"""
data = pd.read_csv(FETCH_URLS["breast_cancer"], header=None)
data = pd.concat(
[data.drop(columns=[0, 1]), data[[1]].rename(columns={1: "target"})], axis=1
)
data["target"] = data["target"].isin(["M"]).astype(int)
return data
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
            for name, data, _ in self.content_:  # content_ holds (name, data, categorical_features)
data.to_sql(name, connection, index=False, if_exists="replace")
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
# some rows are dropped; they have rare missing values
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [4, 5, 6, 8]
return data, categorical_features
def fetch_covertype(self):
"""Download and transform the Covertype Data Set.
https://archive.ics.uci.edu/ml/datasets/Covertype
"""
data = pd.read_csv(FETCH_URLS["covertype"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
wilderness_area = pd.Series(
np.argmax(data.iloc[:, 10:14].values, axis=1), name=10
)
soil_type = pd.Series(np.argmax(data.iloc[:, 14:54].values, axis=1), name=11)
data = (
data.drop(columns=list(range(10, 54)))
.join(wilderness_area)
.join(soil_type)[list(range(0, 12)) + ["target"]]
)
categorical_features = [10, 11]
return data, categorical_features
def fetch_credit_approval(self):
"""Download and transform the Credit Approval Data Set.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
"""
data = pd.read_csv(
FETCH_URLS["credit_approval"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 3, 4, 5, 6, 8, 9, 11, 12]
return data, categorical_features
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(
FETCH_URLS["dermatology"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = list(range(data.shape[1] - 1))
categorical_features.remove(33)
return data, categorical_features
def fetch_echocardiogram(self):
"""Download and transform the Echocardiogram Data Set.
https://archive.ics.uci.edu/ml/datasets/Echocardiogram
"""
data = pd.read_csv(
FETCH_URLS["echocardiogram"],
header=None,
error_bad_lines=False,
warn_bad_lines=False,
na_values="?",
)
data.drop(columns=[10, 11], inplace=True)
data.dropna(inplace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3]
return data, categorical_features
def fetch_flags(self):
"""Download and transform the Flags Data Set.
https://archive.ics.uci.edu/ml/datasets/Flags
"""
data = pd.read_csv(FETCH_URLS["flags"], header=None)
target = data[6].rename("target")
data = data.drop(columns=[0, 6]).T.reset_index(drop=True).T.join(target)
        categorical_features = [
            0, 1, 4, 8, 9, 10, 11, 12, 13, 14, 15, 21, 22, 23, 24, 25, 26, 27
        ]
return data, categorical_features
def fetch_heart_disease(self):
"""Download and transform the Heart Disease Data Set.
https://archive.ics.uci.edu/ml/datasets/Heart+Disease
"""
data = (
pd.concat(
[
pd.read_csv(url, header=None, na_values="?")
for url in FETCH_URLS["heart_disease"]
],
ignore_index=True,
)
.drop(columns=[10, 11, 12])
.dropna()
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8]
return data, categorical_features
def fetch_hepatitis(self):
"""Download and transform the Hepatitis Data Set.
https://archive.ics.uci.edu/ml/datasets/Hepatitis
"""
data = (
pd.read_csv(FETCH_URLS["hepatitis"], header=None, na_values="?")
.drop(columns=[15, 18])
.dropna()
)
target = data[0].rename("target")
data = data.drop(columns=[0]).T.reset_index(drop=True).T.join(target)
categorical_features = list(range(1, 13)) + [16]
return data, categorical_features
def fetch_german_credit(self):
"""Download and transform the German Credit Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
data = pd.read_csv(FETCH_URLS["german_credit"], header=None, sep=" ")
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = (
np.argwhere(data.iloc[0, :-1].apply(lambda x: str(x)[0] == "A").values)
.squeeze()
.tolist()
)
return data, categorical_features
def fetch_heart(self):
"""Download and transform the Heart Data Set.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8, 10, 12]
return data, categorical_features
def fetch_thyroid(self):
"""Download and transform the Thyroid Disease Data Set.
Label 0 corresponds to no disease found.
Label 1 corresponds to one or multiple diseases found.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = (
pd.read_csv(FETCH_URLS["thyroid"], header=None, na_values="?")
.drop(columns=27)
.dropna()
.T.reset_index(drop=True)
.T
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
data["target"] = (
data["target"].apply(lambda x: x.split("[")[0]) != "-"
).astype(int)
        categorical_features = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
            14, 15, 16, 18, 20, 22, 24, 26, 27,
        ]
return data, categorical_features
class MulticlassDatasets(Datasets):
"""Class to download, transform and save multiclass datasets."""
def fetch_first_order_theorem(self):
"""Download and transform the First Order Theorem Data Set.
https://www.openml.org/d/1475
"""
data = pd.read_csv(FETCH_URLS["first_order_theorem"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_gas_drift(self):
"""Download and transform the Gas Drift Data Set.
https://www.openml.org/d/1476
"""
data = pd.read_csv(FETCH_URLS["gas_drift"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_autouniv_au7(self):
"""Download and transform the AutoUniv au7 Data Set
https://www.openml.org/d/1552
"""
data = pd.read_csv(FETCH_URLS["autouniv_au7"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_autouniv_au4(self):
"""Download and transform the AutoUniv au4 Data Set
https://www.openml.org/d/1548
"""
data = pd.read_csv(FETCH_URLS["autouniv_au4"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_mice_protein(self):
"""Download and transform the Mice Protein Data Set
https://www.openml.org/d/40966
"""
data = pd.read_csv(FETCH_URLS["mice_protein"])
data.rename(columns={"class": "target"}, inplace=True)
data.drop(columns=["MouseID"], inplace=True)
data.replace("?", np.nan, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
mask2 = data.isna().sum() < 10
data = data.loc[:, mask & mask2].dropna().copy()
data.iloc[:, :-1] = data.iloc[:, :-1].astype(float)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_steel_plates(self):
"""Download and transform the Steel Plates Fault Data Set.
https://www.openml.org/d/40982
"""
data = pd.read_csv(FETCH_URLS["steel_plates"])
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_cardiotocography(self):
"""Download and transform the Cardiotocography Data Set.
https://www.openml.org/d/1560
"""
data = pd.read_csv(FETCH_URLS["cardiotocography"])
data.rename(columns={"Class": "target"}, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_waveform(self):
"""Download and transform the Waveform Database Generator (version 2) Data Set.
https://www.openml.org/d/60
"""
data = pd.read_csv(FETCH_URLS["waveform"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_volkert(self):
"""Download and transform the Volkert Data Set.
https://www.openml.org/d/41166
"""
data = | pd.read_csv(FETCH_URLS["volkert"]) | pandas.read_csv |
'''
Created on Sep 10, 2017
@author: twong
'''
import json
import logging
import random
import pandas as pd
import requests
_logger = logging.getLogger(__name__)
def _deserialize_roster_json(roster_json):
roster_cooked = json.loads(roster_json)
try:
players_json = roster_cooked['d'][0]
except KeyError:
raise RuntimeError("Failed to find data key 'd': Invalid roster JSON?")
except IndexError:
raise RuntimeError('Failed to find player JSON string: Invalid roster JSON or new format?')
players_table = json.loads(players_json)
if 'rows' not in players_table.keys():
raise RuntimeError("Failed to find 'rows' in the player table: Invalid roster JSON or new format?")
if 'cols' not in players_table.keys():
raise RuntimeError("Failed to find 'cols' in the player table: Invalid roster JSON or new format?")
players_table_rows = [[field['v'] for field in pos['c']] for pos in players_table['rows']]
players_table_cols = [c['id'] for c in players_table['cols']]
return pd.DataFrame(data=players_table_rows, columns=players_table_cols)
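# Expected payload shape, inferred from the parsing above (not an official API
# description): {"d": ["<table json>"]} where the inner JSON is a gviz-style
# table with "cols" (each holding an "id") and "rows" (each holding cells under
# "c" whose values sit under "v").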
class Team(object):
PLAYER_NAME = 'FullName'
PLAYER_POSITION = 'PosShortName'
TEPFFL_TEAM_IDS = range(1611, 1623)
TEPFFL_ROSTER_SIZE_MAX = 19
_ROSTER_URL = None
_ROSTER_QUERY_TEMPLATE = '{}?rnd={}&seasonId={}&weekNumber={}&teamId={}'
_ROSTER_SEASON = None
_ROSTER_COLUMNS = [PLAYER_NAME, 'NflAbbreviation', PLAYER_POSITION]
@classmethod
def configure(cls, config):
cls._ROSTER_URL = config.get('tepffl', 'url')
cls._ROSTER_SEASON = config.get('tepffl', 'season')
def __init__(self, team_id, week, filename=None):
if self._ROSTER_URL is None:
raise RuntimeError('Failed to set TEP FFL roster URL')
if self._ROSTER_SEASON is None:
raise RuntimeError('Failed to set TEP FFL season ID')
self.team_id = team_id
roster_json = None
if filename is None:
roster_url = self._ROSTER_QUERY_TEMPLATE.format(
self._ROSTER_URL, int(random.uniform(0, 65536)), self._ROSTER_SEASON, week, self.team_id
)
_logger.debug('Getting roster from {}'.format(roster_url))
response = requests.get(roster_url)
if response.status_code == 200:
roster_json = response.text
else:
raise RuntimeError(
f'Failed to get roster from server: Got response code {response.status_code})'
)
else:
with open(filename) as f:
roster_json = f.read()
self._roster_df = _deserialize_roster_json(roster_json)
for c in self._ROSTER_COLUMNS:
if c not in self.df:
raise RuntimeError(f"Failed to find '{c}' in the roster data")
@property
def df(self):
return self._roster_df
@property
def roster(self):
return self.df[['FullName', 'NflAbbreviation', 'PosShortName']]
def add_tepffl_args(parser):
team_parser = parser.add_argument_group('Team options')
team_parser.add_argument(
'--team-id',
nargs='*',
type=int,
choices=Team.TEPFFL_TEAM_IDS,
help='Zero or more team IDs for which to retrieve rosters (default is to retrieve all rosters)',
)
def get_rosters(week, team_ids=None, position=None):
if team_ids is None:
team_ids = Team.TEPFFL_TEAM_IDS
teams = []
for team_id in team_ids:
_logger.debug('Gathering roster for team {}'.format(team_id))
team = Team(team_id, week)
if len(team.roster) < Team.TEPFFL_ROSTER_SIZE_MAX:
_logger.warning(
f'TEP FFL team with ID {team_id} has a short roster: Expected {Team.TEPFFL_ROSTER_SIZE_MAX}, got {len(team.roster)}'
)
if len(team.roster) > Team.TEPFFL_ROSTER_SIZE_MAX:
_logger.error(
f'TEP FFL team with ID {team_id} has an oversize roster: Expected {Team.TEPFFL_ROSTER_SIZE_MAX}, got {len(team.roster)}'
)
teams.append(team)
df = | pd.concat([t.roster for t in teams], ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
from scipy import stats
from ..base import AbstractDensity
class Multinomial:
def __init__(self, probs):
'''
Define a multinomial random variable object
:param probs: The probability of each class, with classes indexed as 0 to len(probs)-1
'''
assert isinstance(probs, np.ndarray)
self.idx = list(range(len(probs)))
self.probs = probs/probs.sum()
def rvs(self, n):
return np.random.choice(
a=self.idx,
size=n,
p=self.probs,
replace=True
)
def density(self, points):
if isinstance(points, pd.DataFrame):
assert points.shape[1] == 1
points = points.values[:,0]
return [self.probs[k] for k in points]
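# Illustrative usage: Multinomial(np.array([2.0, 1.0, 1.0])) normalises the
# weights to [0.5, 0.25, 0.25]; rvs(4) then draws 4 class indices and
# density([0, 2]) returns [0.5, 0.25].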
class BartSimpson(AbstractDensity):
'''
The "claw" in https://projecteuclid.org/download/pdf_1/euclid.aos/1176348653;
renamed as in http://www.stat.cmu.edu/~larry/=sml/densityestimation.pdf
'''
def __init__(self):
super().__init__()
# Guassians to mix over
self.gaussians = [
stats.norm(),
stats.norm(loc=-1, scale=0.1),
stats.norm(loc=-0.5, scale=0.1),
stats.norm(loc=0, scale=0.1),
stats.norm(loc=0.5, scale=0.1),
stats.norm(loc=1, scale=0.1)
]
# Mixing weights
self.multinomial = Multinomial(probs = np.array([0.5] + [0.1]*5))
def rvs(self, n):
''' Simulate n draws
'''
idxs = self.multinomial.rvs(n)
values, counts = np.unique(idxs, return_counts=True)
samples = [
self.gaussians[values[k]].rvs(counts[k]) for k in range(len(values))
]
samples = [v for sublist in samples for v in sublist]
np.random.shuffle(samples)
return | pd.DataFrame({'bart_simpson': samples}) | pandas.DataFrame |
"""
This script reads all the bootstrap performance result files, plots histograms, and calculates averages.
t-tests are done to compute p-values and confidence intervals are computed
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
from scipy import stats
matplotlib.rcParams.update({'font.size': 8})
# well_list = ["043", "125", "129", "153", "155", "170", "175"]
well_list = ["125"]
for well in well_list: # loop through all wells
# specify folder locations
out_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/rnn_lstm_comparison_results/mmps" + well
rnn_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_full_bootstrap_rnn/"
lstm_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_full_bootstrap_lstm/"
rnn_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_storm_bootstrap_rnn/"
lstm_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_storm_bootstrap_lstm/"
folder_list = [rnn_full_results_folder, lstm_full_results_folder, rnn_storms_results_folder,
lstm_storms_results_folder]
rmse_df_list = []
nse_df_list = []
mae_df_list = []
rmse_storms_df_list = []
nse_storms_df_list = []
mae_storms_df_list = []
for folder in folder_list:
folder_name1 = folder.split("/")[6].split("_")[2]
folder_name2 = folder.split("/")[6].split("_")[4]
folder_name = folder_name1 + "_" + folder_name2
print(folder_name)
rmse_t1_list, rmse_t9_list, rmse_t18_list = [], [], []
nse_t1_list, nse_t9_list, nse_t18_list = [], [], []
mae_t1_list, mae_t9_list, mae_t18_list = [], [], []
rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list = [], [], []
nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list = [], [], []
mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list = [], [], []
count = 0
for file in os.listdir(folder): # extract forecast data
if count % 100 == 0:
print(folder, "count is", count)
data = folder + file
if file.endswith("_RMSE.csv"):
# print(file)
rmse_df = pd.read_csv(data)
rmse_t1, rmse_t9, rmse_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[8], rmse_df[["0"]].iloc[17]
rmse_t1_list.append(rmse_t1[0])
rmse_t9_list.append(rmse_t9[0])
rmse_t18_list.append(rmse_t18[0])
if file.endswith("_NSE.csv"):
nse_df = pd.read_csv(data)
nse_t1, nse_t9, nse_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[8], nse_df[["0"]].iloc[17]
nse_t1_list.append(nse_t1[0])
nse_t9_list.append(nse_t9[0])
nse_t18_list.append(nse_t18[0])
if file.endswith("_MAE.csv"):
mae_df = pd.read_csv(data)
mae_t1, mae_t9, mae_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[8], mae_df[["0"]].iloc[17]
mae_t1_list.append(mae_t1[0])
mae_t9_list.append(mae_t9[0])
mae_t18_list.append(mae_t18[0])
if file.endswith("_RMSE_storms.csv"):
# print(file)
rmse_df = | pd.read_csv(data) | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from querying.tracking_query import get_play
def ft_in(x):
if '-' in x:
        meas = x.split('-')
        # this will be a list ['ft', 'in']
        inches = int(meas[0]) * 12 + int(meas[1])
return inches
else:
return int(x)
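# Illustrative example: ft_in("6-2") converts a feet-inches string to
# 6 * 12 + 2 = 74 inches, while a value already in inches such as "74" is
# returned unchanged as an int.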
def clock(x,df):
gameClock = df.loc[x]['gameClock']
quarter = df.loc[x]['quarter']
gameClock_split = gameClock.split(':')
minutes = gameClock_split[0]
seconds = gameClock_split[1]
total_minutes = 15-int(minutes) + 15 * (quarter - 1)
return (total_minutes * 60) + int(seconds)
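# Illustrative example: for a row whose gameClock is "14:05" in quarter 2 the
# function returns (15 - 14 + 15) * 60 + 5 = 965 seconds.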
def get_game_season(game_id, games):
return games[games['gameId']==game_id]['season'].values[0]
def preprocess_players(players_df):
# preprocessing steps
players_df['height'] = players_df['height'].apply(ft_in)
return players_df
def preprocess_tracking(track18, track19, track20, play_df, play_type):
'''
This function creates the tracking dataframes by play-type by year.
Parameters:
-----------
track18, track19, track20 - trackYY.csv dataframes
play_df - play.csv dataframe
play_type - string, play type, e.g., 'Extra Point'
...
Returns:
-----------
track_p18 - Tracking Play Type 2018 dataframe
track_p19 - Tracking Play Type 2019 dataframe
track_p20 - Tracking Play Type 2020 dataframe
'''
track18 = track18.copy()
track19 = track19.copy()
track20 = track20.copy()
#re-orient direction of play by offensive team direction :
#We must reorient this to reflect movement in the offense direction instead of the on-field coordinates
#(reorient the origin from the bottom left to top right for a change in direction).
#2018 tracking data
track18.loc[track18['playDirection'] == 'left', 'x'] = 120 -track18.loc[track18['playDirection']=='left','x']
track18.loc[track18['playDirection'] == 'left', 'y'] = 160/3 -track18.loc[track18['playDirection']=='left','y']
#note that we have 160/3 for the y direction since the football field is 160ft, but our units are yards
#2019 tracking data
track19.loc[track19['playDirection'] == 'left', 'x'] = 120 -track19.loc[track19['playDirection']=='left','x']
track19.loc[track19['playDirection'] == 'left', 'y'] = 160/3 -track19.loc[track19['playDirection']=='left','y']
#2020 tracking data
track20.loc[track20['playDirection'] == 'left', 'x'] = 120 -track20.loc[track20['playDirection']=='left','x']
track20.loc[track20['playDirection'] == 'left', 'y'] = 160/3 -track20.loc[track20['playDirection']=='left','y']
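    # Example of the flip above (illustrative): on a play moving left, a point
    # at x=30, y=20 becomes x = 120 - 30 = 90 and y = 160/3 - 20 (about 33.3),
    # so x always grows in the offense's direction of travel.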
#divide play dataset by type of play
play_p = play_df.loc[play_df['specialTeamsPlayType']== play_type][['gameId', 'playId']]
#merge play_type with tracking for each year
track_p18 = pd.merge(play_p, track18, left_on = ['gameId', 'playId'], right_on = ['gameId', 'playId'])
track_p19 = | pd.merge(play_p, track19, left_on = ['gameId', 'playId'], right_on = ['gameId', 'playId']) | pandas.merge |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
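        # get_indexer returns, for each target label, its integer position in the index
        # (-1 when absent); method='pad'/'ffill' reuses the previous match and
        # 'backfill'/'bfill' the next one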
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
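        # slice_locs turns label-based slice bounds into positional (start, stop) bounds;
        # bounds falling between labels resolve so the slice covers exactly the labels
        # inside the requested range, for both increasing and decreasing indexes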
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
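        # object-dtype Index matches NA values by the identity of the NA object
        # (np.nan, float('nan') and pd.NaT are distinct here), while Float64Index
        # overrides isin and treats any NA-like value as matching its NaNs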
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
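        # numeric indexes do support element-wise arithmetic: identity ops return the
        # index unchanged, multiplying by a float Series/array upcasts to Float64Index,
        # and operands of mismatched length raise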
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
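        # an outer join should contain the sorted union of both indexes; the returned
        # lidx/ridx hold, for each joined label, its position in the left/right index,
        # with -1 where the label comes only from the other side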
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
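        # the label arrays should use the smallest integer dtype that can address the
        # level (int8 -> int16 -> int32 as it grows), and the labels built by
        # from_product are all non-negative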
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
        self.assertIsNotNone(mi1._tuples)
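        # _tuples caches the materialized tuple values of the MultiIndex; the
        # checks below verify that only *inplace* set_levels/set_labels calls
        # invalidate this cache (and therefore .values).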
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
        # levels should be (at least shallow-)copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
        # it doesn't matter which way labels are copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
        # it doesn't matter which way names are copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
self.assertEqual(result, expected)
# self.assertRaises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert(rs == xp)
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
self.assertTrue(new_index.equals(exp_index))
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
self.assertEqual(loc, expected)
self.assertIsNone(new_index)
self.assertRaises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)],
labels=[np.array([0, 0, 0, 0]),
np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
self.assertTrue(new_index.equals(index.droplevel(0)))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
(1, 3))
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
df.index[5] + timedelta(seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
assertRaisesRegexp(KeyError, "[Kk]ey length.*greater than MultiIndex"
" lexsort depth", index.slice_locs, (1, 0, 1),
(2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
self.assertEqual(result, (1, 5))
result = sorted_idx.slice_locs(None, ('qux', 'one'))
self.assertEqual(result, (0, 5))
result = sorted_idx.slice_locs(('foo', 'two'), None)
self.assertEqual(result, (1, len(sorted_idx)))
result = sorted_idx.slice_locs('bar', 'baz')
self.assertEqual(result, (2, 4))
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]],
sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(1, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((2, 2), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(2, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((1, 0), (6, 3))
self.assertEqual(result, (3, 8))
result = index.slice_locs(-1, 10)
self.assertEqual(result, (0, len(index)))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(index.is_unique)
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
self.assertNotIn('foo', result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(after=1)
self.assertNotIn(2, result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(before=1, after=2)
self.assertEqual(len(result.levels[0]), 2)
# after < before
self.assertRaises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2._tuple_index)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
self.assertTrue((r1 == [-1, -1, -1]).all())
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
assertRaisesRegexp(InvalidIndexError, "Reindexing only valid with"
" uniquely valued Index objects",
idx1.get_indexer, idx2)
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0]])
result = index.format()
self.assertEqual(result[3], '1 0 0 0')
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
self.assertEqual(result[1], 'foo two')
self.reset_display_options()
warnings.filters = warn_filters
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
(2, 'one'), (2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), (1, 'b'),
(2, 'a'), (2, 'a'), (2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
def test_bounds(self):
self.index._bounds
def test_equals(self):
self.assertTrue(self.index.equals(self.index))
self.assertTrue(self.index.equal_levels(self.index))
self.assertFalse(self.index.equals(self.index[:-1]))
self.assertTrue(self.index.equals(self.index._tuple_index))
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1],
labels=index.labels[:-1])
self.assertFalse(index.equals(index2))
self.assertFalse(index.equal_levels(index2))
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
self.assertFalse(self.index.equal_levels(index))
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
self.assertTrue(mi.identical(mi2))
mi = mi.set_names(['new1', 'new2'])
self.assertTrue(mi.equals(mi2))
self.assertFalse(mi.identical(mi2))
mi2 = mi2.set_names(['new1', 'new2'])
self.assertTrue(mi.identical(mi2))
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
self.assertTrue(mi.identical(mi3))
self.assertFalse(mi.identical(mi4))
self.assertTrue(mi.equals(mi4))
def test_is_(self):
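        # Index.is_ compares shared-data identity (the internal _id), which is
        # kept across views and metadata-only changes such as renaming, but not
        # across level/label mutation.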
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
self.assertTrue(mi.is_(mi))
self.assertTrue(mi.is_(mi.view()))
self.assertTrue(mi.is_(mi.view().view().view().view()))
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
self.assertTrue(mi2.is_(mi))
self.assertTrue(mi.is_(mi2))
self.assertTrue(mi.is_(mi.set_names(["C", "D"])))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
self.assertTrue(mi.is_(mi2))
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
self.assertFalse(mi3.is_(mi2))
# shouldn't change
self.assertTrue(mi2.is_(mi))
mi4 = mi3.view()
mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)
self.assertFalse(mi4.is_(mi3))
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
self.assertFalse(mi5.is_(mi))
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index._tuple_index)
expected = | MultiIndex.from_tuples(tups) | pandas.core.index.MultiIndex.from_tuples |
from __future__ import division
import pandas as pd
import numpy as np
import scipy.stats
import argparse
import datetime
def fileToList(group_list):
with open(group_list, 'r') as fh:
return [line.strip() for line in fh.readlines()]
def cleanDF(df, sample_names):
'''
Convert string nans to np.nan and string numbers to floats.
'''
df = df.replace(['na', 'NaN', 'Na', 'nan', 'NA', 'NAN', 'Nan'], np.nan)
df[sample_names] = df[sample_names].astype(float)
return df
def convertToOutliers(df, gene_column_name, sample_names, NUM_IQRs, up_or_down):
'''
Calculates the median, and inter-quartile range for each row/isoform.
Inter-quartile range is defined as the value difference between the 25th and 75th percentile.
Here, NaNs are ignored for each row, therefore a different number of values may be used for each row.
'''
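    # Worked example with hypothetical numbers: for NUM_IQRs = 1.5, a row whose
    # median is 10.0 and IQR is 4.0 flags values above 10.0 + 1.5*4.0 = 16.0 as
    # "up" outliers, or values below 10.0 - 1.5*4.0 = 4.0 as "down" outliers.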
df['row_iqr'] = scipy.stats.iqr(df[sample_names], axis=1, nan_policy='omit')
df['row_median'] = np.nanquantile(df[sample_names], q=0.5, axis=1)
outlier_df = pd.DataFrame()
outlier_df[gene_column_name] = df[gene_column_name]
if up_or_down == 'up':
df['row_medPlus'] = (df['row_median'] + (NUM_IQRs*df['row_iqr']))
for col in sample_names:
outlier_df[col] = (df[col] > df['row_medPlus']).astype(int)
elif up_or_down == 'down':
df['row_medMinus'] = (df['row_median'] - (NUM_IQRs*df['row_iqr']))
for col in sample_names:
outlier_df[col] = (df[col] < df['row_medMinus']).astype(int)
outlier_df[df[sample_names].isnull()] = np.nan
return outlier_df
def countNonNans(df, gene_column_name, sample_names, aggregate):
'''
Optional. If aggregate is set to True, adds up outliers in multiple rows,
if there are repeated values in gene_column_name.
If aggregation is set to False, just sets up the correct columns to feed
into the comparison generator.
'''
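    # With aggregate=True, rows sharing a value in gene_column_name (e.g. several
    # sites on one gene) are collapsed per gene: '<sample>_outliers' counts rows
    # flagged 1 and '<sample>_notOutliers' counts rows flagged 0; NaNs add to
    # neither count.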
not_outlier_cols = [x + '_notOutliers' for x in sample_names]
outlier_cols = [x + '_outliers' for x in sample_names]
agged_outliers = pd.DataFrame()
if aggregate:
agged_outliers[not_outlier_cols] = df.groupby(by=gene_column_name)[sample_names].agg(lambda x: pd.Series(x==0).sum())
agged_outliers[outlier_cols] = df.groupby(by=gene_column_name)[sample_names].agg(lambda x: pd.Series(x==1).sum())
agged_outliers = agged_outliers.reset_index()
elif aggregate == False:
agged_outliers[[gene_column_name] + outlier_cols] = df[[gene_column_name]+sample_names]
agged_outliers[not_outlier_cols] = 1 - df[sample_names]
return agged_outliers
def makeFracTable(df, sample_list, gene_column_name):
cols_outliers = [x + '_outliers' for x in sample_list]
cols_notOutliers = [x + '_notOutliers' for x in sample_list]
    num_total_psites = pd.DataFrame()
for i, col in enumerate(cols_outliers):
num_total_psites[col] = df[cols_outliers[i]] + df[cols_notOutliers[i]]
frac_outliers = pd.DataFrame()
frac_outliers = df[cols_outliers] / num_total_psites[cols_outliers]
    frac_outliers[gene_column_name] = df[gene_column_name]
frac_outliers.columns = [x.split('_')[0] for x in frac_outliers.columns]
return frac_outliers
def runOutliers(sample_data, sample_names, gene_column_name, up_or_down, aggregate):
sample_data = cleanDF(sample_data, sample_names)
outliers = convertToOutliers(sample_data, gene_column_name, sample_names, NUM_IQRs, up_or_down)
outliers = countNonNans(outliers, gene_column_name, sample_names, aggregate)
return outliers
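# Example usage sketch (hypothetical file names; assumes the CSV holds a
# 'geneSymbol' column plus one numeric column per sample in the roster):
#   NUM_IQRs = 1.5
#   samples = fileToList('sample_roster.txt')
#   values = pd.read_csv('phospho_values.csv')
#   outlier_counts = runOutliers(values, samples, 'geneSymbol', 'up', aggregate=True)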
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Parse some arguments")
parser.add_argument('--input_df', type=str)
parser.add_argument('--iqrs_over_median', type=float, default=1.5)
parser.add_argument('--gene_column_name', type=str, default='geneSymbol')
parser.add_argument('--output_prefix', type=str, default='outliers')
parser.add_argument('--sample_names_file', type=str, default='sample_roster.txt')
parser.add_argument('--aggregate', type=str, choices=['True', 'False'], default='True')
parser.add_argument('--up_or_down', type=str, choices=['up', 'down'], default='up')
parser.add_argument('--write_frac_table', type=str, choices=['True', 'False'], default='False')
args = parser.parse_args()
data_input = args.input_df
gene_column_name = args.gene_column_name
write_results_to = args.output_prefix
NUM_IQRs = args.iqrs_over_median
sample_names = args.sample_names_file
up_or_down = args.up_or_down
aggregate = args.aggregate == 'True'
write_frac_table = args.write_frac_table == 'True'
sample_data = | pd.read_csv(data_input, sep=',') | pandas.read_csv |
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from ber_public.deap import dim
from ber_public.deap import fab
from ber_public.deap import vent
def test_calculate_fabric_heat_loss():
"""Output is equivalent to DEAP 4.2.0 example A"""
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
floor_uvalue = | pd.Series([0.14]) | pandas.Series |
"""This module contains pyspark wrangler utility tests.
isort:skip_file
"""
import pytest
import pandas as pd
from pywrangler.pyspark.util import ColumnCacher
from pyspark.sql import functions as F
pytestmark = pytest.mark.pyspark # noqa: E402
pyspark = pytest.importorskip("pyspark") # noqa: E402
from pywrangler.pyspark import util
def test_ensure_column(spark):
assert str(F.col("a")) == str(util.ensure_column("a"))
assert str(F.col("a")) == str(util.ensure_column(F.col("a")))
def test_spark_wrangler_validate_columns_raises(spark):
data = {"col1": [1, 2], "col2": [3, 4]}
df = spark.createDataFrame(pd.DataFrame(data))
with pytest.raises(ValueError):
util.validate_columns(df, ("col3", "col1"))
def test_spark_wrangler_validate_columns_not_raises(spark):
data = {"col1": [1, 2], "col2": [3, 4]}
df = spark.createDataFrame(pd.DataFrame(data))
util.validate_columns(df, ("col1", "col2"))
util.validate_columns(df, None)
def test_prepare_orderby(spark):
columns = ["a", "b"]
# test empty input
assert util.prepare_orderby(None) == []
# test broadcast
result = [F.col("a").asc(), F.col("b").asc()]
assert str(result) == str(util.prepare_orderby(columns, True))
# test exact
result = [F.col("a").asc(), F.col("b").desc()]
assert str(result) == str(util.prepare_orderby(columns, [True, False]))
# test reverse
result = [F.col("a").asc(), F.col("b").desc()]
assert str(result) == str(util.prepare_orderby(columns, [False, True],
reverse=True))
# raise unequal lengths
with pytest.raises(ValueError):
util.prepare_orderby(columns, [True, False, True])
def test_column_cacher(spark):
data = {"col1": [1, 2], "col2": [3, 4]}
df = spark.createDataFrame( | pd.DataFrame(data) | pandas.DataFrame |
import os,sys
import pandas as pd
import numpy as np
import skbio.io
import gffpandas.gffpandas as gffpd
from statistics import stdev
def find_locs(kmer, blast_df):
"""
Finds the start and stop locations of this k-mer in each genome
"""
locs = []
# filter blast results to just our kmer of interest
kmer_df = blast_df[blast_df['qseqid'] == kmer]
if kmer_df.shape[0]==0:
raise Exception("The k-mer {} was not found in the blast search".format(kmer))
# for every kmer hit in the genome, append the location
for i in range(kmer_df.shape[0]):
send = kmer_df['send'].values[i]
sstart = kmer_df['sstart'].values[i]
genome_id = kmer_df['sseqid'].values[i].split('_')[0]
contig_name = kmer_df['sseqid'].values[i]
locs.append([send,sstart,genome_id,contig_name])
# locs is 2d list, each row is (start, stop, genome_id, contig_name)
return locs
def find_gene(start, stop, genome_id, contig_name, prokka_loc):
"""
Finds the nearest gene upstream and downstream of the k-mer,
reports their distance using the prokka annotations.
"""
gene_up = ''
dist_up = -1
gene_down = ''
dist_down = -1
# prokka renames contigs but the numbers are consistent, so we need to pull the number
if("NODE" in contig_name):
orig_contig_name = contig_name.split('|')
assert(len(orig_contig_name)==2)
orig_contig_name = orig_contig_name[1]
contig_num = orig_contig_name.split('_')[1]
elif(contig_name.split('_')[0] == genome_id and len(contig_name.split('_'))==2):
contig_num = contig_name.split('_')[1]
# SRR5573065_SRR5573065.fasta|33_length=41292_depth=1.01x
elif(contig_name.split('_')[0] == genome_id and len(contig_name.split('_')) in [4,5]):
contig_num = contig_name.split('|')[1].split('_')[0]
else:
raise Exception("Unexpected contig name found: {}".format(contig_name))
if(prokka_loc[-5:-1]=='ncbi'):
gff_loc = "annotation/annotated_genomes/"
else:
gff_loc = "annotation/annotated_grdi_genomes/"
# scan through contigs until the correct number is found, then keep the contig name
with open("{0}{1}/{1}.gff".format(gff_loc,genome_id)) as file:
for line in file:
if("_{} ".format(contig_num) in line):
prokka_contig = line.split(" ")[1]
break
if('prokka_contig' not in locals()):
print("Contig number {2} and contig name {3} not located in {0}{1}/{1}.gff".format(gff_loc,genome_id, contig_num, contig_name))
return [gene_up, dist_up, gene_down, dist_down]
# columns are: ['seq_id','source','type','start','end','score','strand','phase','attributes']
#with open(prokka_loc+genome_id+'.pkl', 'rb') as fh:
# gff_df = skbio.io.read(fh, format="blast+6",into=pd.DataFrame,default_columns=True)
gff_df = pd.read_pickle(prokka_loc+genome_id+'.pkl')
# keep only the contig the kmer was found in and only show coding sequences (Prodigal)
contig_df = gff_df[gff_df['seq_id']==prokka_contig]
contig_df = contig_df[contig_df['type']=='CDS']
start = int(start)
stop = int(stop)
df_length = contig_df.values.shape[0]
# find the nearest gene/genes
# for every gene found by prokka, does it contain the kmer or is it near?
for gene_num, gene_anno in enumerate(contig_df.values):
gene_start = int(gene_anno[3])
gene_stop = int(gene_anno[4])
try:
if(start > gene_stop):
if(gene_num==df_length-1):
# we are after the last gene
gene_dict = dict(i.split('=') for i in gene_anno[8].split(';'))
dist_up = start - gene_stop
if(gene_dict['product']=='hypothetical protein'):
gene_up = "HypoProt:hypothetical protein"
else:
gene_up = gene_dict['gene']+':'+gene_dict['product']
break
# we are not at the correct gene yet
continue
elif(stop < gene_start):
# the kmer is before the current gene
gene_dict = dict(i.split('=') for i in gene_anno[8].split(';'))
dist_down = gene_start-stop
if(gene_dict['product']=='hypothetical protein'):
gene_down = "HypoProt:hypothetical protein"
else:
try:
gene_down = gene_dict['gene']+':'+gene_dict['product']
except KeyError:
gene_down = 'none:'+ dict(i.split('=') for i in gene_anno[8].split(';'))['product']
prev_gene_anno = contig_df.values[gene_num-1]
gene_dict = dict(i.split('=') for i in prev_gene_anno[8].split(';'))
dist_up = start - prev_gene_anno[4]
if(gene_dict['product']=='hypothetical protein'):
gene_up = "HypoProt:hypothetical protein"
else:
gene_up = gene_dict['gene']+':'+gene_dict['product']
break
elif(start >= gene_start and stop <= gene_stop):
# the kmer is inside of a gene
gene_dict = dict(i.split('=') for i in gene_anno[8].split(';'))
dist_up = 0
if(gene_dict['product']=='hypothetical protein'):
gene_up = "HypoProt:hypothetical protein"
else:
gene_up = gene_dict['gene']+':'+gene_dict['product']
break
elif(start <= gene_stop <= stop):
# kmer hanging over right end of a gene
gene_dict = dict(i.split('=') for i in gene_anno[8].split(';'))
dist_up = stop-gene_stop
if(gene_dict['product']=='hypothetical protein'):
gene_up = "HypoProt:hypothetical protein"
else:
gene_up = gene_dict['gene']+':'+gene_dict['product']
break
elif(start <= gene_start <= stop):
# kmer hanging over the left end of a gene
gene_dict = dict(i.split('=') for i in gene_anno[8].split(';'))
dist_up = gene_start-start
if(gene_dict['product']=='hypothetical protein'):
gene_up = "HypoProt:hypothetical protein"
else:
gene_up = gene_dict['gene']+':'+gene_dict['product']
break
else:
raise Exception("Unexpected kmer start: {} stop: {} in relation to gene start: {} stop: {}".format(start, stop, gene_start, gene_stop))
except KeyError:
gene_up = 'none:'+ dict(i.split('=') for i in gene_anno[8].split(';'))['product']
break
return [gene_up, dist_up, gene_down, dist_down]
def find_hits(kmers,blast_search):
"""
Input:
    list of k-mers to search for (the search is performed one k-mer at a time)
A PATH TO a blast search of those k-mers through your genomes
Output:
A pandas df where each row shows where a kmer was found in a genome,
Which gene is upstream and downstream and how far away they are.
Will have (# of kmers * number of genomes) rows
"""
with open(blast_search) as fh:
blast_df = skbio.io.read(fh, format="blast+6",into=pd.DataFrame,default_columns=True)
# each row in gene_hits will be [kmer, gene_up, dist_up, gene_down, dist_down, start, stop, genome_id, contig_name]
gene_hits = []
    for kmer in kmers:
# locs is 2d list, each row is (start, stop, genome_id, contig_name)
        locs = find_locs(kmer, blast_df)
for loc in locs:
# gene_info is 1D list: gene_up, dist_up, gene_down, dist_down
            gene_info = find_gene(*loc, prokka_loc)  # TODO: prokka_loc must be supplied by the caller, e.g. "annotation/gffpandas_ncbi/"
gene_hits.append([kmer]+gene_info+loc)
hits_df = pd.DataFrame(data = gene_hits,columns = ['kmer', 'gene_up', 'dist_up', 'gene_down', 'dist_down', 'start', 'stop', 'genome_id', 'contig_name'])
return hits_df
def merge_df(df_path, drug, dataset):
"""
Takes path to pandas df with the columns:
[kmer, gene_up, dist_up, gene_down, dist_down, start, stop, genome_id, contig_name]
and returns a df with the columns:
[drug, dataset, kmer, gene_up, gene_up_count, avg_dist_up, gene_down, gene_down_count, avg_dist_down]
"""
df = pd.read_pickle(df_path)
hit_summary = []
for kmer in set(df['kmer']):
# filter for only a single kmer
kmer_df = df[df['kmer']==kmer]
for gene_direc, gene_dist in [['gene_up','dist_up'],['gene_down','dist_down']]:
for gene in set(kmer_df[gene_direc]):
if(len(gene)==0):
continue
# filter for only a single gene
gene_df = kmer_df[kmer_df[gene_direc]==gene]
total_dist = 0
for dist in gene_df[gene_dist]:
total_dist += abs(float(dist))
count = gene_df.shape[0]
average_dist = total_dist/count
if(len(gene_df[gene_dist])<2):
std_dev = 0
else:
std_dev = stdev([abs(int(float(i))) for i in gene_df[gene_dist]])
try:
gene, name = gene.split(':')
except:
print("Gene: {}".format(gene))
print("{} {}".format(drug,dataset))
gene, carb, name = gene.split(':')
hit_summary.append([dataset, drug, kmer, gene, name, count, average_dist, std_dev])
return hit_summary
def hit_summary(dataset,out_path):
drugs = ['AMP','AMC','AZM','CHL','CIP','CRO','FIS','FOX','GEN','NAL','SXT','TET','TIO','STR','KAN']
all_hits = []
for drug in drugs:
if(dataset == 'grdi' and drug in ['FIS']):
continue
df_path = "annotations/{}_{}/hits_df.pkl".format(dataset, drug)
drug_list = merge_df(df_path, drug, dataset)
for hit in drug_list:
all_hits.append(hit)
data = np.asarray(all_hits)
what_amg = np.zeros((data.shape[0]), dtype = object)
amgs = | pd.read_csv("data/gene_labels.tsv",sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 18:10:18 2019
@author: <NAME>
Code will plot the keypoint coordinates vs time in order to assign the maximum
value from this plot to the real-world distance measurement. This will be
the label.
Coding Improvement Note: Make use of functions for things like this (a possible
helper is sketched just after the imports below).
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from scipy.signal import medfilt
from scipy.signal import peak_prominences
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
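# The module docstring's "Coding Improvement Note" asks for the repeated steps to
# be factored into functions. A possible helper is sketched here; it is not used
# below, and the default kernel size simply mirrors the hard-coded calls.
def load_and_filter(path, kernel_size=13):
    """Read a two-column keypoint CSV and median-filter both coordinate series."""
    df = pd.read_csv(path, header=None)
    x = medfilt(df[0], kernel_size=kernel_size)
    y = medfilt(df[1], kernel_size=kernel_size)
    return x, y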
#Edit data within file.
#Open file and set to a certain variable
ankle_df = pd.read_csv('jointTracker (16).csv', header=None) #This file has me pretty clearly tracked
rknee1_df = pd.read_csv('20191002_rknee_pos_1st.csv', header=None)
rknee2_df = pd.read_csv('20191002_rknee_pos_2nd.csv', header=None)
rknee3_df = pd.read_csv('20191002_rknee_pos_3rd.csv', header=None)
rknee4_df = pd.read_csv('20191002_rknee_pos_4th.csv', header=None)
rknee5_df = pd.read_csv('20191002_rknee_pos_5th.csv', header=None)
rknee6_df = pd.read_csv('20191002_rknee_pos_6th.csv', header=None)
rknee7_df = pd.read_csv('20191002_rknee_pos_7th.csv', header=None)
rknee8_df = pd.read_csv('20191002_rknee_pos_8th.csv', header=None)
real_measures = np.array([32,33,32,35,35,
32,32,32,33,35,
34,36,35,35,34,34,35,35,34,35,
31,33,37,34,33,33,33,35,35,35,
30,31,33,23,25,28,28,29,31,42,
32,31.5,24,29,37,36,31,34,28,33.5,
38,38,42,42,42,41,43,38,39,40,
32,34,41,36,36,35,37,36,38,40]) #Document real measures
real_measures_df = pd.DataFrame(data=real_measures[0:]) #Convert to a DataFrame
#Tabulate height and weight columns
heights_df = pd.DataFrame({"Height": [69]*50 + [69.5]*10 + [67]*10}) #Heights in inches
weights_df = pd.DataFrame({"Weight": [165]*50 + [215]*10 + [160]*10}) #Weights in pounds
#Assign x and y position values to variables
x_rknee1 = rknee1_df[0]
y_rknee1 = rknee1_df[1]
x_rknee2 = rknee2_df[0]
y_rknee2 = rknee2_df[1]
x_rknee3 = rknee3_df[0]
y_rknee3 = rknee3_df[1]
x_rknee4 = rknee4_df[0]
y_rknee4 = rknee4_df[1]
x_rknee5 = rknee5_df[0]
y_rknee5 = rknee5_df[1]
x_rknee6 = rknee6_df[0]
y_rknee6 = rknee6_df[1]
x_rknee7 = rknee7_df[0]
y_rknee7 = rknee7_df[1]
x_rknee8 = rknee8_df[0]
y_rknee8 = rknee8_df[1]
################################Obtain X peak prominences######################
#Plot the values to view visually
#plt.figure(1)
#plt.plot(x_rknee1)
#
#plt.figure(2)
#plt.plot(x_rknee2)
#
#plt.figure(3)
#plt.plot(x_rknee3)
#
#plt.figure(4)
#plt.plot(x_rknee4)
#Apply smoothing with median filter
filterx_rk_1 = medfilt(x_rknee1, kernel_size = 13)
filterx_rk_2 = medfilt(x_rknee2, kernel_size = 13)
filterx_rk_3 = medfilt(x_rknee3, kernel_size = 13)
filterx_rk_4 = medfilt(x_rknee4, kernel_size = 13)
filterx_rk_5 = medfilt(x_rknee5, kernel_size = 13)
filterx_rk_6 = medfilt(x_rknee6, kernel_size = 13)
filterx_rk_7 = medfilt(x_rknee7, kernel_size = 13)
filterx_rk_8 = medfilt(x_rknee8, kernel_size = 13)
#Plot values to view smoothed plot visually
plt.figure(10)
plt.plot(filterx_rk_1)
plt.figure(11)
plt.plot(filterx_rk_2)
plt.figure(12)
plt.plot(filterx_rk_3)
plt.figure(13)
plt.plot(filterx_rk_4)
plt.figure(14)
plt.plot(filterx_rk_5)
plt.figure(15)
plt.plot(filterx_rk_6)
plt.figure(16)
plt.plot(filterx_rk_7)
plt.figure(17)
plt.plot(filterx_rk_8)
#Obtain peaks and prominences
peaksx_rk1, _ = find_peaks(filterx_rk_1, height=180)
promsx_1 = peak_prominences(filterx_rk_1, peaksx_rk1)
promsx_1_df = pd.DataFrame(data=promsx_1[0][0:])  # convert the peak prominences into a DataFrame
peaksx_rk2, _ = find_peaks(filterx_rk_2, height=200)
promsx_2 = peak_prominences(filterx_rk_2, peaksx_rk2)
promsx_2_df = pd.DataFrame(data=promsx_2[0][0:])
peaksx_rk3, _ = find_peaks(filterx_rk_3, height=220)
promsx_3 = peak_prominences(filterx_rk_3, peaksx_rk3)
promsx_3_df = pd.DataFrame(data=promsx_3[0][0:])
peaksx_rk4, _ = find_peaks(filterx_rk_4, height=180)
promsx_4 = peak_prominences(filterx_rk_4, peaksx_rk4)
promsx_4_df = pd.DataFrame(data=promsx_4[0][0:])
peaksx_rk5, _ = find_peaks(filterx_rk_5, height=230)
promsx_5 = peak_prominences(filterx_rk_5, peaksx_rk5)
promsx_5_df = pd.DataFrame(data=promsx_5[0][0:])
peaksx_rk6, _ = find_peaks(filterx_rk_6, height=200)
promsx_6 = peak_prominences(filterx_rk_6, peaksx_rk6)
promsx_6_df = | pd.DataFrame(data=promsx_6[0][0:]) | pandas.DataFrame |
from collections import Counter
import sys
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.cluster import SpectralClustering, DBSCAN
from sklearn.decomposition import PCA, KernelPCA, SparsePCA, TruncatedSVD, IncrementalPCA
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np
from nltk.corpus import stopwords
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import csv
import pandas as pd
from pandas.plotting import scatter_matrix
def readAligedCorpus(words, path):
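    # Builds one Counter per query word: for every aligned sentence pair in the
    # TMX file whose English side contains the word, the lower-cased,
    # stopword-filtered German tokens of that sentence are added to its Counter.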
rval = [Counter() for i in range(len(words))]
stop_words = set(stopwords.words('german'))
tree = ET.parse(path)
root = tree.getroot()
body = root.find('body')
for tu in body.findall('tu'):
de = ''
en = ''
for tuv in tu.findall('tuv'):
atr = tuv.attrib
lang = atr.get('{http://www.w3.org/XML/1998/namespace}lang')
if lang == 'de':
for seg in tuv.findall('seg'):
de = seg.text.split()
if lang == 'en':
for seg in tuv.findall('seg'):
en = seg.text.lower()
en_words = en.split()
for i, word in enumerate(words):
if word in en_words:
counter = rval[i]
de = [token.lower() for token in de if token.isalpha() and not token in stop_words]
#whole aligned sentence as BOW
for de_w in de:
counter[de_w] += 1
return rval
def readFile(words, path):
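    # Builds one Counter per query word from a monolingual corpus: each line
    # containing the word contributes a 5-token window (the word plus two
    # stopword-filtered, lower-cased tokens on either side).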
with open(path, 'r', encoding='utf8') as f:
rval = []
stop_words = set(stopwords.words('english'))
rval = [Counter() for i in range(len(words))]
lines = f.readlines()
for line in lines:
tokens = line.split()
for i, word in enumerate(words):
if(word in tokens):
tokens = [token.lower() for token in tokens if token.isalpha() and not token in stop_words]
counter = rval[i]
idx = tokens.index(word)
#bow of 5 (2 on the left | 2 on the right)
bow = tokens[idx-2:idx+3]
#print(bow)
for w in bow:
counter[w] += 1
return rval
corpus = readFile(['apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'], 'resources/corpora/OpenSubtitles/small/combined2')
corpus_biling = readAligedCorpus(['apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'], 'resources/corpora/OpenSubtitles/very_small_parallel/vsmallaa')
#'apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
#'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'
#'shoes', 'shirt', 'pants', 'jacket', 'sweatshirt', 'socks'
#'car', 'plane', 'bicycle', 'motorcycle', 'scooter', 'bus', 'train'
#'new york', 'los angeles', 'chicago', 'houston', 'philadelphia', 'san antonio', 'san diego', 'dallas', 'san jose', 'austin', 'seattle'
#'wind', 'sun', 'water', 'fire'
#'chair', 'table', 'bed', 'closet', 'commode'
#'sister', 'brother', 'father', 'mother'
#'nose', 'eyes', 'mouth', 'face', 'hair'
vectorizer = DictVectorizer()
X = vectorizer.fit_transform(corpus).toarray()
vectorizer_biling = DictVectorizer()
X_biling = vectorizer_biling.fit_transform(corpus_biling).toarray()
X_combined = np.hstack((X, X_biling))
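# Monolingual context features and aligned-German features are concatenated
# column-wise, so each query word becomes one row in a single combined space
# before scaling, kernel PCA and k-means.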
#sc = StandardScaler()
#X_std = sc.fit_transform(X)
#sc_biling = StandardScaler()
#X_biling_std = sc_biling = sc_biling.fit_transform(X_biling)
sc_combined = StandardScaler()
X_combined_std = sc_combined.fit_transform(X_combined)
#pca = PCA(n_components=2)
pca = KernelPCA(kernel='rbf')
#pca = SparsePCA()
#pca = TruncatedSVD()
#pca = IncrementalPCA()
#X_pca = pca.fit_transform(X_std)
#X_biling_pca = pca.fit_transform(X_biling_std)
X_combined_pca = pca.fit_transform(X_combined_std)
#kmeans = KMeans(n_clusters=2, init='random').fit(X_pca)
#f = kmeans.predict(X_pca)
#kmeans = KMeans(n_clusters=2, init='random').fit(X_biling_pca)
#f = kmeans.predict(X_biling_pca)
kmeans = KMeans(n_clusters=2, init='random').fit(X_combined_pca)
f = kmeans.predict(X_combined_pca)
print(f)
print('contains number one x times: ', list(f).count(1))
print('contains number zero x times: ', list(f).count(0))
#plot function from my warmup assignment
def plot(f):
arr = np.array(f)
if arr.shape[1] == 2:
x1 = arr[:, 0]
x2 = arr[:, 1]
plt.scatter(x1, x2)
plt.show()
elif arr.shape[1] == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = []
y = []
z = []
for line in f:
x.append(float(line[0]))
y.append(float(line[1]))
z.append(float(line[2]))
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
else:
m = np.array(f, dtype=float)
# first make some fake data with same layout as yours
data = | pd.DataFrame(m) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
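# Illustrative sketch (ours, not one of the original tests): the nan_as_null
# flag controls whether NaN values become nulls, mirroring the assertions
# in test_dataframe_setitem_from_masked_object above.
def _demo_nan_as_null():
    data = np.array([1.0, np.nan, 3.0])
    # NaN is treated as a null when nan_as_null=True
    assert cudf.Series(data, nan_as_null=True).null_count == 1
    # ... and kept as a floating-point NaN otherwise
    assert cudf.Series(data, nan_as_null=False).null_count == 0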
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
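# Illustrative sketch (ours, not an original test): partition_by_hash splits
# the rows into `nparts` frames keyed on the hash of the given columns, and
# every input row lands in exactly one partition.
def _demo_partition_by_hash():
    gdf = cudf.DataFrame({"key": [1, 2, 3, 4, 5], "val": [10, 20, 30, 40, 50]})
    parts = gdf.partition_by_hash(["key"], nparts=2)
    # the result is a list with one DataFrame per partition
    assert len(parts) == 2
    # no rows are lost or duplicated across partitions
    assert sum(len(p) for p in parts) == len(gdf)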
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
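# Illustrative sketch (ours): a 2-D cupy array can be passed directly as the
# data argument of cudf.DataFrame, as the test above exercises.
def _demo_dataframe_from_cupy():
    d_ary = cupy.arange(6, dtype=np.int32).reshape(2, 3)
    gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
    # two rows and three named columns, taken straight from device memory
    assert gdf.shape == (2, 3)
    assert list(gdf.columns) == ["a", "b", "c"]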
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
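# Illustrative sketch (ours): transposing on the GPU matches the pandas
# transpose of the same data, which is the property the tests above verify.
def _demo_transpose():
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_eq(pdf.T, gdf.T)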
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
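# Illustrative sketch (ours): a scalar quantile on a cudf Series uses linear
# interpolation by default, matching pandas.
def _demo_quantile():
    gsr = cudf.Series([1.0, 2.0, 3.0, 4.0])
    # the median of four evenly spaced values is interpolated
    assert gsr.quantile(0.5) == 2.5
    assert gsr.quantile(0.5) == gsr.to_pandas().quantile(0.5)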
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
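# Illustrative sketch (ours): a cudf DataFrame round-trips through a pyarrow
# Table; with preserve_index=False both sides carry a default RangeIndex.
def _demo_arrow_round_trip():
    gdf = cudf.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    table = gdf.to_arrow(preserve_index=False)
    assert_eq(gdf, cudf.DataFrame.from_arrow(table))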
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's built-in hash of a string name varies between runs, which can
    # make enc_with_name_arr and enc_arr come out identical, and there is no
    # reliable way to pin it to a fixed value. Use an integer name so the
    # hash contribution is deterministic.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
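# Illustrative sketch (ours): cudf.Series accepts a pyarrow ChunkedArray and
# concatenates its chunks, as the parameterized test above checks.
def _demo_from_chunked_array():
    chunks = pa.chunked_array([[1, 2], [3, 4]])
    got = cudf.Series(chunks)
    expect = pd.Series(chunks.to_pandas())
    assert_eq(expect, got)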
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
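# Illustrative sketch (ours): Series.digitize mirrors numpy.digitize,
# returning the index of the bin each value falls into.
def _demo_series_digitize():
    s = cudf.Series(np.array([1, 15, 30, 60, 99], dtype="int32"))
    bins = np.array([10, 50, 90], dtype="int32")
    np.testing.assert_array_equal(
        s.digitize(bins).to_array(), np.digitize(s.to_array(), bins)
    )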
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
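    # check_freq was only added to pandas' assert_*_equal in 1.1; it is
    # disabled here, presumably because the cudf result's DatetimeIndex does
    # not carry a freq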
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only gained `ignore_index` for sort_index in 1.0; compute the
    # expected result without it and mimic it via reset_index below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
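    # shift fills the vacated positions with nulls; fill them with 0 so both
    # results can be compared in the original dtype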
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
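    # the frame's __sizeof__ should equal the index size plus the sum of the
    # column sizes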
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
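    # nan_as_null=False keeps NaN entries as NaN values instead of nulls,
    # matching the pandas input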
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # xref https://github.com/pandas-dev/pandas/issues/34256
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
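    # var/std take a ddof argument; use ddof=0 on both sides so the results
    # are directly comparable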
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
# Pandas uses NaN and typecasts to float64 if there's missing values on
# alignment, so need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
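    # Compare cudf where()/mask() against pandas for Series and DataFrame inputs,
    # including device-array conditions, categorical data, and the parametrized
    # error cases.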
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
| pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import click
import os
@click.command()
@click.argument('input_folder')
@click.argument('output_folder')
def main(input_folder, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
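    # Walk input_folder: for each subfolder whose name contains '_22', pair every
    # file's full path with a cleaned folder name ('DATA_' prefix and '_22' suffix
    # stripped), which is used below as the cancer identifier.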
files = [[(x[0] + '/' + y, x[0].split('/')[-1].replace('DATA_', '').replace('_22', '')) for y in x[2]] for x in
os.walk(input_folder)
if '_22' in x[0].split('/')[-1]]
flat_files = [file for sublist in files for file in sublist]
csv_files = [file for file in flat_files if file[0].endswith('csv') and (
'pancancer' not in file[1])]
    df_files_summary = pd.DataFrame()
    for summary_file, folder_name in [
        file for file in csv_files if file[0].endswith('files_summary_count_per_patient.csv')
    ]:
        df_temp = pd.read_csv(summary_file)
        df_temp = df_temp.groupby('indiv_name').count()
        row = pd.DataFrame([[folder_name, df_temp.shape[0]]], columns=['folder', 'number_of_patients'])
        df_files_summary = pd.concat([df_files_summary, row], sort=False)
    if output_folder.endswith('/'):
        df_files_summary.to_excel(output_folder + 'number_of_patients.xlsx', index=False)
    else:
        df_files_summary.to_excel(output_folder + '/number_of_patients.xlsx', index=False)
df_count_summary = pd.DataFrame()
for count_file, folder_name in [
file for file in csv_files if file[0].endswith('patient_mutation_count.csv')
]:
df_temp = pd.read_csv(count_file)
df_temp['cancer_id'] = folder_name
df_count_summary = pd.concat([df_count_summary, df_temp], sort=False)
if output_folder.endswith('/'):
df_count_summary.to_excel(output_folder + 'patient_mutation_count.xlsx', index=False)
else:
df_count_summary.to_excel(output_folder + '/patient_mutation_count.xlsx', index=False)
all_mutation_count = pd.DataFrame()
for count_file, folder_name in [
file for file in csv_files if file[0].endswith('patient_mutation_count.csv')
]:
df_temp = pd.read_csv(count_file)
df_temp['cancer'] = folder_name
all_mutation_count = pd.concat([all_mutation_count, df_temp], sort=False)
if output_folder.endswith('/'):
all_mutation_count.to_excel(output_folder + 'number_of_mutations_all_patients.xlsx', index=False)
else:
all_mutation_count.to_excel(output_folder + '/number_of_mutations_all_patients.xlsx', index=False)
df_all_mutation = pd.DataFrame()
for all_mutations_filtered_mut_type_gene, folder_name in [
file for file in csv_files if file[0].endswith('all_mutations.csv')
]:
print(folder_name)
df_temp = | pd.read_csv(all_mutations_filtered_mut_type_gene) | pandas.read_csv |
import pytest
import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker
T_START = pd.to_datetime('2019/07/10-09:30:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
T_END = pd.to_datetime('2019/07/10-10:00:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
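# Synthetic one-minute bars for two tickers over a 30-minute session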
TIMEINDEX = pd.date_range(start=T_START,end=T_END,freq='1min')
DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX)) ,
'tick1':np.arange(len(TIMEINDEX)-1,-1,-1)},
index=TIMEINDEX)
# DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX))},
# index=TIMEINDEX)
class TestPaperBroker:
def test_init(self):
testseries = pd.Series(np.arange(10))
with pytest.raises(TypeError):
broker = PaperBroker(testseries)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,slippage_time=1.0)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,transaction_cost=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,transaction_cost=-0.5)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,spread_pct=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=-0.5)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=200)
def test_next_extant_time(self):
broker = PaperBroker(DATA_DF)
t_get = pd.to_datetime('2019/07/10-09:35:05:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t_out = broker.next_extant_time(t_get)
t_expect = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
assert t_out==t_expect
t_get = pd.to_datetime('2019/07/10-11:35:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
t_out = broker.next_extant_time(t_get)
def test_get_timeindex_subset(self):
broker = PaperBroker(DATA_DF)
t0 = | pd.to_datetime('2019/07/10-09:29:00:000000', format='%Y/%m/%d-%H:%M:%S:%f') | pandas.to_datetime |
import requests
import os
import json
import pandas as pd
import numpy as np
from requests.exceptions import HTTPError
import re
import matplotlib.pyplot as plt
# Part 1 Get Data With API
## Function 1-Get dataset with specific game "platform" and "type"
def api_game(platform = 'pc', type = 'game'):
"""
    The function returns game giveaway information from the Game Giveaway Tracker API for the requested platform and game type.
Parameters
---
inputs:
        platform: string value of the game platform, e.g. pc, steam, epic-games-store, ubisoft, gog, itchio, ps4, etc.
        type: string value of the game type, e.g. game
Returns
---
output:
game_giveaway: pandas.DataFrame of the API
Columns:
_id: int64
_title: object
_worth: object
_thumbnail: float64
_image: object
_description: object
_instructions: object
_open_giveaway_url: object
_type: object
_platforms: object
_end_date: object
_users: int64
_status: bool
_gamerpower_url: object
_open_giveaway: object
Example
---
>>> df = api_game(type='game', platform= 'PC')
>>> df.shape
(1, 15)
"""
assert isinstance(platform,str) #parameters should be string
assert isinstance(type, str) #parameters should be string
try:
params = {'platform': platform, 'type': type}
r = requests.get('https://www.gamerpower.com/api/giveaways', params = params)
        # If the response was unsuccessful, raise HTTPError before parsing the body
        r.raise_for_status()
        game_json = json.dumps(r.json(), indent=2)
        game_j = json.loads(game_json)
        game_giveaway = pd.DataFrame(game_j)
        return game_giveaway
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
## Function 2: Get full dataset
def api_game_full():
"""
    The function returns all game giveaway information from the Game Giveaway Tracker API.
Parameters
---
Returns
---
output:
game_df: pandas.DataFrame of the API with stated parameters
Columns:
_id: int64
_title: object
_worth: object
_thumbnail: float64
_image: object
_description: object
_instructions: object
_open_giveaway_url: object
_type: object
_platforms: object
_end_date: object
_users: int64
_status: bool
_gamerpower_url: object
_open_giveaway: object
Example
---
>>> df = api_game_full()
>>> df.shape
(84, 15)
"""
try:
r = requests.get('https://www.gamerpower.com/api/giveaways')
        # If the response was unsuccessful, raise HTTPError before parsing the body
        r.raise_for_status()
        game_json = json.dumps(r.json(), indent=2)
        game_j = json.loads(game_json)
        game_df = pd.DataFrame(game_j)
        return game_df
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# Part 2 Functions With Full Dataset
## Function 1: Get descriptive statistics for each game type
def game_type(game_df):
"""
The function is used to clean the data at first.
Then return the new dataframe that contains number of games, user number mean, std, min, 25%, 50%, 75% and max value for each type.
Parameters
---
inputs:
game_df: pandas.DataFrame of the API which is generated in part1 function2.
Returns
---
output:
type_statistics: pandas.DataFrame descriptive statistics for each game type
Example
---
>>> game_df = api_game_full()
>>> type_statistics=game_type(game_df)
>>> type_statistics.shape
(3, 8)
"""
assert isinstance(game_df, pd.core.frame.DataFrame) # make sure input should be dataframe
df=game_df.drop(['thumbnail', 'image', 'description','open_giveaway_url', 'end_date'], axis=1) # drop column and clean data
g_type=df['type']
g_users=df['users']
g_list={'type': g_type, 'number of users': g_users}
gt_df= pd.DataFrame(g_list)
type_statistics=gt_df.groupby('type').describe()
type_statistics=type_statistics.rename(columns={'count': 'number of games for this type'})
return type_statistics
## Function 2: Get Z score and its graph for users for each type
def zscore_plot(game_df):
"""
The function is used to calculate Z score for number of users for each game type
Then get histogram graph for Z score
Parameters
---
inputs:
game_df: pandas.DataFrame of the API which is generated in part1 function2.
Returns
---
output:
a histogram plot for Z score of user number for each game type
Example
---
>>> game_df = api_game_full()
>>> zscore_plot(game_df)
a histogram plot shows up
<matplotlib.axes._subplots.AxesSubplot at 0x11bca3ac0>
"""
assert isinstance(game_df, pd.core.frame.DataFrame) # make sure input should be dataframe
g_type=game_df['type']
g_users=game_df['users']
g_list={'type': g_type, 'number of users': g_users}
gt_df= | pd.DataFrame(g_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys
import os
import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
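# Component: pull the last n_days of daily Naver market snapshots (KRX business
# days) from the AWS MySQL mirror and write them out as a CSV dataset.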
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_market_info(
# top30_univ_dataset: Output[Dataset],
market_info_dataset: Output[Dataset],
today: str,
n_days: int
) -> str:
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay
from trading_calendars import get_calendar
import functools
import pickle
import logging
import networkx as nx
import os
from sqlalchemy import create_engine
# today = pd.Timestamp.now('Asia/Seoul').strftime('%Y%m%d')
# today = '20210809'
cal_KRX = get_calendar('XKRX')
custombd_KRX = CustomBusinessDay(holidays=cal_KRX.precomputed_holidays)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# Preference
#-----------------------------------------------------------------------------
AWS_DB_ID = 'gb_master'
AWS_DB_PWD = 'qwert12345'
AWS_DB_ADDRESS = 'kwdb-daily.cf6e7v8fhede.ap-northeast-2.rds.amazonaws.com'
AWS_DB_PORT = '3306'
DB_DATABASE_NAME_daily_naver = 'daily_naver'
PROJECT_ID = 'dots-stock'
db_daily_naver_con = create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'
.format(AWS_DB_ID, AWS_DB_PWD, AWS_DB_ADDRESS, AWS_DB_PORT, DB_DATABASE_NAME_daily_naver),
encoding='utf8',
echo=False)
# @functools.lru_cache()
def get_market_from_naver_aws(date_ref):
'''
        Fetch the parsed daily all-stock table as-is from the daily_naver DB.
'''
with db_daily_naver_con.connect() as conn:
table_name = f'{date_ref}_daily_allstock_naver'
            str_sql = f'select * from {table_name} order by 등락률 DESC'
df = pd.read_sql_query(str_sql, conn) # self.get_db_daily_naver_con())
            df = df.reset_index().rename(columns={'index':'순위_상승률', 'N':'순위_시가총액'})
            df['순위_상승률'] = df.순위_상승률 + 1
return df
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
# 1. Market data
#------------------------------------------------------------------------------
def get_markets_aws(date_ref, n_days):
dates_n_days_ago = get_krx_on_dates_n_days_ago(date_ref, n_days)
df_market = pd.DataFrame()
for date in dates_n_days_ago:
df_ = get_market_from_naver_aws(date)
# logger.debug(f'date : {date} and df_.shape {df_.shape}' )
df_market = df_market.append(df_)
return df_market
df_market = get_markets_aws(date_ref=today, n_days=n_days)
df_market.to_csv(market_info_dataset.path)
return today
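# Component: keep the top-30 gainers per date from the market snapshot as the
# base item list.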
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_base_item(
market_info_dataset: Input[Dataset],
base_item_dataset: Output[Dataset]
):
import pandas as pd
# helper function
def get_top30_list(df_market):
        cols_out = ['날짜','종목코드','종목명']
        return (df_market
                .sort_values(['날짜','등락률'], ascending=False)
                .groupby('날짜')
.head(30)[cols_out])
df_market = pd.read_csv(market_info_dataset.path)
df_base_item = get_top30_list(df_market)
df_base_item.to_csv(base_item_dataset.path)
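# Component: query correlated stock pairs from BigQuery for several lookback
# periods, keep cliques of three or more tickers, and save the edge list.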
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_bros(
today: str,
n_days: int,
bros_univ_dataset: Output[Dataset]
) -> str :
'''
Returns:
list
'''
import pandas as pd
import pandas_gbq
import networkx as nx
from trading_calendars import get_calendar
PROJECT_ID = 'dots-stock'
cal_KRX = get_calendar('XKRX')
# helper functions
#-----------------------------------------------------------------------------
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_corr_pairs_gbq(date_ref, period):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
sql = f'''
SELECT
DISTINCT source,
target,
corr_value,
period,
date
FROM
`dots-stock.krx_dataset.corr_ohlc_part1`
WHERE
date = "{date_ref_}"
AND period = {period}
ORDER BY
corr_value DESC
LIMIT
1000'''
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID)
return df
def find_bros(date_ref, period):
'''clique over 3 nodes '''
df_edgelist = get_corr_pairs_gbq(date_ref, period)
g = nx.from_pandas_edgelist(df_edgelist, edge_attr=True)
bros_ = nx.find_cliques(g)
bros_3 = [bros for bros in bros_ if len(bros) >=3]
set_bros = set([i for l_i in bros_3 for i in l_i])
g_gang = g.subgraph(set_bros)
df_gangs_edgelist = nx.to_pandas_edgelist(g_gang)
return df_gangs_edgelist
def find_gang(date_ref):
df_gang = pd.DataFrame()
for period in [20, 40, 60, 90, 120]:
df_ = find_bros(date_ref, period=period)
df_gang = df_gang.append(df_)
return df_gang
# jobs
dates = get_krx_on_dates_n_days_ago(date_ref=today, n_days=n_days)
df_bros = pd.DataFrame()
for date in dates:
df = find_gang(date_ref=date)
df_bros = df_bros.append(df)
df_bros.to_csv(bros_univ_dataset.path)
return 'OK'
@component(
base_image="amancevice/pandas:1.3.2-slim"
)
def get_univ_for_price(
# date_ref: str,
base_item_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
univ_dataset: Output[Dataset],
):
import pandas as pd
import logging
import json
logger = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
# base item
df_top30s = pd.read_csv(base_item_dataset.path,
index_col=0,
dtype={'๋ ์ง': str}).reset_index(drop=True)
# load edge_list to make bros
df_ed = pd.read_csv(bros_dataset.path, index_col=0).reset_index(drop=True)
df_ed_r = df_ed.copy()
df_ed_r.rename(columns={'target':'source', 'source':'target'}, inplace=True)
df_ed2 = df_ed.append(df_ed_r, ignore_index=True)
df_ed2['date'] = pd.to_datetime(df_ed2.date).dt.strftime('%Y%m%d')
dic_univ = {}
for date, df in df_top30s.groupby('๋ ์ง'):
logger.debug(f'date: {date}')
l_top30 = df.종목코드.to_list()
l_bro = df_ed2[(df_ed2.date == date) &
(df_ed2.source.isin(l_top30))].target.unique().tolist()
dic_univ[date] = list(set(l_top30 + l_bro ))
with open(univ_dataset.path, 'w', encoding='utf8') as f:
json.dump(dic_univ, f)
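# For reference, the JSON written above has the shape below (dates and codes are
# placeholders, not actual output):
#
#   {
#     "20210902": ["005930", "000660", ...],   # top-30 codes plus their bros
#     "20210903": ["035720", ...]
#   }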
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# packages_to_install = ["tables", "pandas_gbq", "finance-datareader", "bs4", "pickle5"] # add 20210715 FIX pipeline
)
def get_adj_prices(
today: str,
dic_univ_dataset: Input[Dataset],
adj_price_dataset: Output[Dataset]
) -> str:
import json
import FinanceDataReader as fdr
from ae_module.ae_logger import ae_log
import pandas as pd
# with open(dic_univ_dataset.path, 'rb') as f:
# dic_univ = pickle.load(f)
with open(dic_univ_dataset.path, 'r') as f:
dic_univ = json.load(f)
codes_stock = []
for v in dic_univ.values():
codes_stock.extend(v)
# drop duplicates
codes_stock = list(set(codes_stock))
def get_price_adj(code, start, end):
return fdr.DataReader(code, start=start, end=end)
def get_price(l_univ, date_start, date_end):
df_price = pd.DataFrame()
for code in l_univ :
df_ = get_price_adj(code, date_start, date_end)
df_['code'] = code
# df_['price'] = df_['Close'] / df_.Close.iloc[0]
df_price = df_price.append(df_)
return df_price
ae_log.debug(f'codes_stock {len(codes_stock)}')
date_start = '20210101'
date_end = today
df_adj_price = get_price(codes_stock, date_start=date_start, date_end=date_end)
df_adj_price.to_csv(adj_price_dataset.path)
ae_log.debug(df_adj_price.shape)
return 'good'
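# Note on the price source (based on the usual FinanceDataReader behaviour; treat
# the exact column set as an assumption): fdr.DataReader(code, start, end) returns
# a date-indexed frame with columns such as Open/High/Low/Close/Volume/Change,
# which the downstream components lower-case and consume, e.g.:
#
#   fdr.DataReader('005930', '20210101', '20210105').columns
#   # -> ['Open', 'High', 'Low', 'Close', 'Volume', 'Change']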
@component(
# base_image="gcr.io/deeplearning-platform-release/sklearn-cpu"
base_image="amancevice/pandas:1.3.2-slim"
)
def get_target(
df_price_dataset: Input[Dataset],
df_target_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
def make_target(df):
df_ = df.copy()
df_.sort_values(by='date', inplace=True)
df_['high_p1'] = df_.high.shift(-1)
df_['high_p2'] = df_.high.shift(-2)
df_['high_p3'] = df_.high.shift(-3)
df_['close_p1'] = df_.close.shift(-1)
df_['close_p2'] = df_.close.shift(-2)
df_['close_p3'] = df_.close.shift(-3)
df_['change_p1'] = (df_.close_p1 - df_.close) / df_.close
df_['change_p2'] = (df_.close_p2 - df_.close) / df_.close
df_['change_p3'] = (df_.close_p3 - df_.close) / df_.close
df_['change_p1_over5'] = df_['change_p1'] > 0.05
df_['change_p2_over5'] = df_['change_p2'] > 0.05
df_['change_p3_over5'] = df_['change_p3'] > 0.05
df_['change_p1_over10'] = df_['change_p1'] > 0.1
df_['change_p2_over10'] = df_['change_p2'] > 0.1
df_['change_p3_over10'] = df_['change_p3'] > 0.1
df_['close_high_1'] = (df_.high_p1 - df_.close) / df_.close
df_['close_high_2'] = (df_.high_p2 - df_.close) / df_.close
df_['close_high_3'] = (df_.high_p3 - df_.close) / df_.close
df_['close_high_1_over10'] = df_['close_high_1'] > 0.1
df_['close_high_2_over10'] = df_['close_high_2'] > 0.1
df_['close_high_3_over10'] = df_['close_high_3'] > 0.1
df_['close_high_1_over5'] = df_['close_high_1'] > 0.05
df_['close_high_2_over5'] = df_['close_high_2'] > 0.05
df_['close_high_3_over5'] = df_['close_high_3'] > 0.05
df_['target_over10'] = np.logical_or.reduce([
df_.close_high_1_over10,
df_.close_high_2_over10,
df_.close_high_3_over10])
df_['target_over5'] = np.logical_or.reduce([
df_.close_high_1_over5,
df_.close_high_2_over5,
df_.close_high_3_over5])
df_['target_close_over_10'] = np.logical_or.reduce([
df_.change_p1_over10,
df_.change_p2_over10,
df_.change_p3_over10])
df_['target_close_over_5'] = np.logical_or.reduce([
df_.change_p1_over5,
df_.change_p2_over5,
df_.change_p3_over5])
df_['target_mclass_close_over10_under5'] = \
np.where(df_['change_p1'] > 0.1,
1, np.where(df_['change_p1'] > -0.05, 0, -1))
df_['target_mclass_close_p2_over10_under5'] = \
np.where(df_['change_p2'] > 0.1,
1, np.where(df_['change_p2'] > -0.05, 0, -1))
df_['target_mclass_close_p3_over10_under5'] = \
np.where(df_['change_p3'] > 0.1,
1, np.where(df_['change_p3'] > -0.05, 0, -1))
df_.dropna(subset=['high_p3'], inplace=True)
return df_
def get_target_df(df_price):
df_price.reset_index(inplace=True)
df_price.columns = df_price.columns.str.lower()
df_target = df_price.groupby('code').apply(lambda df: make_target(df))
df_target = df_target.reset_index(drop=True)
# df_target['date'] = df_target.date.str.replace('-', '')
return df_target
df_price = pd.read_csv(df_price_dataset.path)
df_target = get_target_df(df_price=df_price)
df_target.to_csv(df_target_dataset.path)
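# Illustrative sketch of the labelling above (toy numbers): shift(-1) aligns each
# row with the next day's value, so the "over 5%" flags compare today's close
# with future closes/highs.
#
#   s = pd.Series([100, 103, 112, 108])
#   (s.shift(-1) - s) / s   # -> [0.030, 0.087, -0.036, NaN]; only 0.087 > 0.05
#
# Rows whose 3-day lookahead is undefined are dropped via dropna(subset=['high_p3']).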
@component(
base_image="gcr.io/deeplearning-platform-release/sklearn-cpu",
packages_to_install=["stockstats"]
)
def get_techindi(
df_price_dataset: Input[Dataset],
df_techini_dataset: Output[Dataset]
):
TECHNICAL_INDICATORS_LIST = ['macd',
'boll_ub',
'boll_lb',
'rsi_30',
'dx_30',
'close_30_sma',
'close_60_sma']
from stockstats import StockDataFrame as Sdf
from sklearn.preprocessing import MaxAbsScaler
import pandas as pd
class FeatureEngineer:
"""Provides methods for preprocessing the stock price data
Attributes
----------
use_technical_indicator : boolean
use technical indicators or not
tech_indicator_list : list
a list of technical indicator names (modified from config.py)
user_defined_feature : boolean
use user defined features or not
Methods
-------
preprocess_data()
main method to do the feature engineering
"""
def __init__(
self,
use_technical_indicator=True,
tech_indicator_list=TECHNICAL_INDICATORS_LIST,
user_defined_feature=False,
):
self.use_technical_indicator = use_technical_indicator
self.tech_indicator_list = tech_indicator_list
self.user_defined_feature = user_defined_feature
def preprocess_data(self, df):
"""main method to do the feature engineering
:param df: source dataframe
:return: a pandas DataFrame with cleaned prices and the added feature columns
"""
#clean data
df = self.clean_data(df)
# add technical indicators using stockstats
if self.use_technical_indicator == True:
df = self.add_technical_indicator(df)
print("Successfully added technical indicators")
# add user defined feature
if self.user_defined_feature == True:
df = self.add_user_defined_feature(df)
print("Successfully added user defined features")
# fill the missing values at the beginning and the end
df = df.fillna(method="bfill").fillna(method="ffill")
return df
def clean_data(self, data):
"""
clean the raw data
deal with missing values
reasons: stocks could be delisted, or not yet listed at a given time step
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df=df.sort_values(['date','tic'],ignore_index=True)
df.index = df.date.factorize()[0]
merged_closes = df.pivot_table(index = 'date',columns = 'tic', values = 'close')
merged_closes = merged_closes.dropna(axis=1)
tics = merged_closes.columns
df = df[df.tic.isin(tics)]
return df
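# Sketch of what clean_data keeps (toy values): pivoting close by date x tic and
# dropping NaN columns retains only tickers quoted on every date in the window.
#
#   prices = pd.DataFrame({'date': ['d1', 'd1', 'd2'],
#                          'tic':  ['A',  'B',  'A'],
#                          'close': [1.0,  2.0,  1.1]})
#   prices.pivot_table(index='date', columns='tic', values='close').dropna(axis=1)
#   # -> only column 'A' survives; 'B' has no close on d2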
def add_technical_indicator(self, data):
"""
calculate technical indicators
use stockstats package to add technical inidactors
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
stock = Sdf.retype(df.copy())
unique_ticker = stock.tic.unique()
for indicator in self.tech_indicator_list:
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)
temp_indicator['tic'] = unique_ticker[i]
temp_indicator['date'] = df[df.tic == unique_ticker[i]]['date'].to_list()
indicator_df = indicator_df.append(
temp_indicator, ignore_index=True
)
except Exception as e:
print(e)
df = df.merge(indicator_df[['tic','date',indicator]],on=['tic','date'],how='left')
df = df.sort_values(by=['date','tic'])
return df
def add_user_defined_feature(self, data):
"""
add user defined features
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df["daily_return"] = df.close.pct_change(1)
df['bb_u_ratio'] = df.boll_ub / df.close
df['bb_l_ratio'] = df.boll_lb / df.close
df['max_scale_MACD'] = MaxAbsScaler().fit_transform(df[['macd']])
# df['return_lag_1']=df.close.pct_change(2)
# df['return_lag_2']=df.close.pct_change(3)
# df['return_lag_3']=df.close.pct_change(4)
# df['return_lag_4']=df.close.pct_change(5)
return df
df_price = pd.read_csv(df_price_dataset.path)
df_price.columns = df_price.columns.str.lower()
df_price.rename(columns={'code':'tic'}, inplace=True)
fe = FeatureEngineer(user_defined_feature=True)
df_process = fe.preprocess_data(df_price)
df_process.rename(columns={'tic':'code'}, inplace=True)
df_process.to_csv(df_techini_dataset.path)
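# Background on the indicator step (assuming the usual stockstats column-on-access
# API): Sdf.retype() wraps a lower-cased OHLCV frame so indicator columns are
# computed lazily the first time they are requested, e.g.:
#
#   stock = Sdf.retype(df_price.copy())
#   stock['macd']           # computes and returns the MACD column
#   stock['close_30_sma']   # 30-period simple moving average of close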
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# packages_to_install = ["tables", "pandas_gbq", "finance-datareader", "bs4", "pickle5"] # add 20210715 FIX pipeline
)
def get_features(
# today: str,
dic_univ_dataset: Input[Dataset],
market_info_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
base_item_dataset : Input[Dataset],
features_dataset: Output[Dataset]
):
import json
# import FinanceDataReader as fdr
# from ae_module.ae_logger import ae_log
import pandas as pd
import numpy as np
from collections import Counter
from pandas.tseries.offsets import CustomBusinessDay
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
custombd_KRX = CustomBusinessDay(holidays=cal_KRX.precomputed_holidays)
def get_krx_on_dates_start_end(start, end):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(start=start,
end=end, freq='C',
holidays=cal_KRX.precomputed_holidays)
]
dates_krx_on = get_krx_on_dates_start_end('20210104', '20211231')
# load dic_univ
with open(dic_univ_dataset.path, 'r') as f:
dic_univ = json.load(f)
print('dic_univ', dic_univ.keys())
# load df_market (market info)
df_market = pd.read_csv(market_info_dataset.path,
index_col=0,
dtype={'๋ ์ง':str}
).reset_index(drop=True)
print('df_market', df_market.shape)
# load df_base_item
df_base_item = pd.read_csv(base_item_dataset.path,
index_col=0).reset_index(drop=True)
# load df_ed (bros edge list)
df_ed = | pd.read_csv(bros_dataset.path, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 23:51:40 2020
@author: Narendrakumar
"""
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = | pd.read_csv('Churn_Modelling.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
scenario_filenames = ["OUTPUT_110011_20201117123025"]
scenario_labels =["Lockdown enabled,Self Isolation,Mask Compliance (0.5)"]
MAX_DAY = 250#250#120
POPULATION = 10000.0
FIGSIZE = [20,10]
plt.rcParams.update({'font.size': 22})
#### comparison of infections
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100*dfg["Infected_count"].values/POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Infected_count"])))+[MAX_DAY],list(100*dfg["Infected_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Infected (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/infected_count_comparison.png")
#### comparison of deaths
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100 * dfg["Death_count"].values / POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Death_count"])))+[MAX_DAY],list(100*dfg["Death_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Deceased (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/death_count_comparison.png")
#### comparison of recoveries
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100 * dfg["Recovered_count"].values / POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Recovered_count"])))+[MAX_DAY],list(100*dfg["Recovered_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Recovered (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/recovered_count_comparison.png")
#### comparison of number of notifications
try:
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100*dfg["notified_count"].values/POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["notified_count"])))+[MAX_DAY],list(100*dfg["notified_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("% of population notified to isolate")
plt.subplots_adjust(right=0.98, left=0.08)
plt.savefig("analyze_simulation_output/notified_count_comparison.png")
except Exception as e:
pass
# compare locked zones
try:
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (dfg["locked_zones"].values)[-1]
plt.plot(list(np.arange(len(dfg["locked_zones"])))+[MAX_DAY],list(dfg["locked_zones"].values)+[last_val],label=scenario_labels[i])
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Zone ID")
plt.subplots_adjust(right=0.98, left=0.08)
plt.savefig("analyze_simulation_output/locked_zones_comparison.png")
except Exception as e:
pass
# number of entities per zone:
try:
simulation_file = "simulation_output/" + scenario_filenames[0]
df = pd.read_csv(simulation_file+"_overall_agent_status.csv")
df["Date_Time"]=pd.to_datetime(df["Date_Time"])
dfg = df.query("Date_Time >'2020-01-01' and Date_Time <'2020-01-03'").drop_duplicates("currentLocationID")
residential_counts_zone = dfg.query("currentLocationType == 'residential'").groupby("zone_id").count()["id"].values
employment_counts_zone = dfg.query("currentLocationType == 'employment'").groupby("zone_id").count()["id"].values
school_counts_zone = dfg.query("currentLocationType == 'school'").groupby("zone_id").count()["id"].values
shopping_mall = dfg.query("currentLocationType == 'shopping_mall'").groupby("zone_id").count()["id"].values
zone_counts = np.vstack((residential_counts_zone,
employment_counts_zone,
shopping_mall,
school_counts_zone)).T
zone_counts = pd.DataFrame(zone_counts,columns=["residential","employment","shopping_mall","school"])
zone_counts.to_csv("analyze_simulation_output/locations_per_zone.csv")
except Exception as e:
print("error:",e)
# analyse individual user dataset
SHOPPING_MALL_VISITS = []
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
simulation_file = "simulation_output/" + scenario_filenames[i]
df1=pd.read_csv(simulation_file+"_overall_agent_status.csv")
df1["Date_Time"]= | pd.to_datetime(df1["Date_Time"]) | pandas.to_datetime |