prompt | completion | api
---|---|---
string, 19 to 1.03M chars | string, 4 to 2.12k chars | string, 8 to 90 chars
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau_colors import colors_all
from biopsykit.utils.datatype_helper import AccDataFrame, GyrDataFrame, ImuDataFrame, SleepEndpointDict
_sleep_imu_plot_params = {
"background_color": ["#e0e0e0", "#9e9e9e"],
"background_alpha": [0.3, 0.3],
}
_bbox_default = dict(
fc=(1, 1, 1, plt.rcParams["legend.framealpha"]),
ec=plt.rcParams["legend.edgecolor"],
boxstyle="round",
)
def sleep_imu_plot(
data: Union[AccDataFrame, GyrDataFrame, ImuDataFrame],
datastreams: Optional[Union[str, Sequence[str]]] = None,
sleep_endpoints: Optional[SleepEndpointDict] = None,
downsample_factor: Optional[int] = None,
**kwargs,
) -> Tuple[plt.Figure, Iterable[plt.Axes]]:
"""Draw plot to visualize IMU data during sleep, and, optionally, add sleep endpoints information.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to plot. Data must either be acceleration data (:obj:`~biopsykit.utils.datatype_helper.AccDataFrame`),
gyroscope data (:obj:`~biopsykit.utils.datatype_helper.GyrDataFrame`), or IMU data
(:obj:`~biopsykit.utils.datatype_helper.ImuDataFrame`).
datastreams : str or list of str, optional
list of datastreams indicating which type of data should be plotted or ``None`` to only plot acceleration data.
        If more than one type of datastream is specified, each datastream is plotted row-wise in its own subplot.
Default: ``None``
sleep_endpoints : :obj:`~biopsykit.utils.datatype_helper.SleepEndpointDict`
dictionary with sleep endpoints to add to plot or ``None`` to only plot IMU data.
downsample_factor : int, optional
downsample factor to apply to raw input data before plotting or ``None`` to not downsample data before
plotting (downsample factor 1). Default: ``None``
**kwargs
optional arguments for plot configuration.
To configure which type of sleep endpoint annotations to plot:
        * ``plot_sleep_onset``: whether to plot sleep onset annotations or not. Default: ``True``
        * ``plot_wake_onset``: whether to plot wake onset annotations or not. Default: ``True``
        * ``plot_bed_start``: whether to plot bed interval start annotations or not. Default: ``True``
        * ``plot_bed_end``: whether to plot bed interval end annotations or not. Default: ``True``
        * ``plot_sleep_wake``: whether to plot vspans of detected sleep/wake phases or not. Default: ``True``
To style general plot appearance:
* ``axs``: pre-existing axes for the plot. Otherwise, a new figure and axes objects are created and
returned.
* ``figsize``: tuple specifying figure dimensions
* ``palette``: color palette to plot different axes from input data
To style axes:
* ``xlabel``: label of x axis. Default: "Time"
* ``ylabel``: label of y axis. Default: "Acceleration :math:`[m/s^2]`" for acceleration data and
"Angular Velocity :math:`[°/s]`" for gyroscope data
To style legend:
* ``legend_loc``: location of legend. Default: "lower left"
* ``legend_fontsize``: font size of legend labels. Default: "smaller"
Returns
-------
fig : :class:`~matplotlib.figure.Figure`
figure object
axs : list of :class:`~matplotlib.axes.Axes`
list of subplot axes objects
"""
axs: List[plt.Axes] = kwargs.pop("ax", kwargs.pop("axs", None))
sns.set_palette(kwargs.get("palette", sns.light_palette(getattr(colors_all, "fau"), n_colors=4, reverse=True)[:-1]))
if datastreams is None:
datastreams = ["acc"]
if isinstance(datastreams, str):
# ensure list
datastreams = [datastreams]
fig, axs = _sleep_imu_plot_get_fig_axs(axs, len(datastreams), **kwargs)
downsample_factor = _sleep_imu_plot_get_downsample_factor(downsample_factor)
if len(datastreams) != len(axs):
raise ValueError(
"Number of datastreams to be plotted must match number of provided subplots! Expected {}, got {}.".format(
len(datastreams), len(axs)
)
)
for ax, ds in zip(axs, datastreams):
_sleep_imu_plot(
data=data,
datastream=ds,
downsample_factor=downsample_factor,
sleep_endpoints=sleep_endpoints,
ax=ax,
**kwargs,
)
fig.tight_layout()
fig.autofmt_xdate(rotation=0, ha="center")
return fig, axs
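# A minimal usage sketch (illustrative only): `imu_data` is assumed to be an
# ImuDataFrame and `endpoints` a SleepEndpointDict that were loaded elsewhere.
#
#     fig, axs = sleep_imu_plot(
#         imu_data,
#         datastreams=["acc", "gyr"],
#         sleep_endpoints=endpoints,
#         downsample_factor=10,
#         figsize=(12, 6),
#     )
#     fig.savefig("sleep_imu.pdf")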
def _sleep_imu_plot_get_fig_axs(axs: List[plt.Axes], nrows: int, **kwargs):
figsize = kwargs.get("figsize", None)
if isinstance(axs, plt.Axes):
# ensure list (if only one Axes object is passed to sleep_imu_plot() instead of a list of Axes objects)
axs = [axs]
if axs is None:
fig, axs = plt.subplots(figsize=figsize, nrows=nrows)
else:
fig = axs[0].get_figure()
if isinstance(axs, plt.Axes):
# ensure list (if nrows == 1 only one axes object will be created, not a list of axes)
axs = [axs]
return fig, axs
def _sleep_imu_plot_get_downsample_factor(downsample_factor: int):
if downsample_factor is None:
downsample_factor = 1
# ensure int
downsample_factor = int(downsample_factor)
if downsample_factor < 1:
raise ValueError("'downsample_factor' must be >= 1!")
return downsample_factor
def _sleep_imu_plot(
data: pd.DataFrame,
datastream: str,
downsample_factor: int,
sleep_endpoints: SleepEndpointDict,
ax: plt.Axes,
**kwargs,
):
legend_loc = kwargs.get("legend_loc", "lower left")
legend_fontsize = kwargs.get("legend_fontsize", "smaller")
ylabel = kwargs.get("ylabel", {"acc": "Acceleration [$m/s^2$]", "gyr": "Angular Velocity [$°/s$]"})
xlabel = kwargs.get("xlabel", "Time")
if isinstance(data.index, pd.DatetimeIndex):
plt.rcParams["timezone"] = data.index.tz.zone
data_plot = data.filter(like=datastream)[::downsample_factor]
data_plot.plot(ax=ax)
if sleep_endpoints is not None:
kwargs.setdefault("ax", ax)
_sleep_imu_plot_add_sleep_endpoints(sleep_endpoints=sleep_endpoints, **kwargs)
if isinstance(data_plot.index, pd.DatetimeIndex):
# TODO add axis style for non-Datetime axes
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
ax.xaxis.set_minor_locator(mticks.AutoMinorLocator(6))
ax.set_ylabel(ylabel[datastream])
ax.set_xlabel(xlabel)
ax.legend(loc=legend_loc, fontsize=legend_fontsize, framealpha=1.0)
def _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints: SleepEndpointDict, **kwargs):
bed_start = pd.to_datetime(sleep_endpoints["bed_interval_start"])
bed_end = pd.to_datetime(sleep_endpoints["bed_interval_end"])
    sleep_onset = pd.to_datetime(sleep_endpoints["sleep_onset"])  # api: pandas.to_datetime
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from classifiers.gaussian_bayesian import BayesGaussian
from classifiers.parzen_bayesian import KDEClassifier
from classifiers.ensemble import Ensemble
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import csv
from evaluation import Evaluation
from data_reader import DataReader
from clustering import FuzzyClustering
import time
X, y = DataReader().get_preprocessed_data() # get_data()
n_samples = X.shape[0]
number_of_clusters = 10
fcm_results = {
'mpc': [],
'p_entropy': [],
'cr': [],
'acc': [],
'f_measure': []
}
t1 = time.time()
best_J = 9999999
best_partition = []
best_prototypes = []
best_U = None
# parameters
# c = 10
# m = [1.1, 1.6, 2.0]
# T = 150
# e = 10**-10
m_candi = [1.1, 1.6, 2.0]
for mi in m_candi:
fcm = FuzzyClustering(T=150, c=number_of_clusters, e=10**-10, m=mi)
for it in range(0, 100):
print(f'Iteration {it} of 100')
J, partition, prototypes, U = fcm.fit_predict(X)
metrics = Evaluation(y, partition, U, number_of_clusters, n_samples)
mpc, p_entropy, acc, f_measure, corrected_rand = metrics.get_clustering_results()
fcm_results['mpc'].append(mpc)
fcm_results['p_entropy'].append(p_entropy)
fcm_results['cr'].append(corrected_rand)
fcm_results['acc'].append(acc)
fcm_results['f_measure'].append(f_measure)
if J is not None and J < best_J:
best_J = J
best_partition = partition
best_prototypes = prototypes
best_U = U
t2 = time.time()
print("Total model execution time: %f" % (t2 - t1))
df = pd.DataFrame(fcm_results)  # api: pandas.DataFrame
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from numpy import linalg
from src import configs
def do_preprocessing(debug=False, save=True):
    train = pd.read_csv(configs.train, index_col='id')  # api: pandas.read_csv
# -*- coding: utf-8 -*-
import os
import pandas as pd
from .material_properties import MaterialProperties
from .material_transport_properties import MaterialTransportProperties
from .time_series import TimeSeries
__all__ = ['write_hot_start_file', 'read_bc_file',
'write_bc_file']
def write_hot_start_file(file_name, hot_start_list):
"""
Writes the ADH hot start file from a list of hot start data sets
Args:
file_name: file name for *.hot file
hot_start_list: list of HotStartDataSet classes
"""
with open(file_name, 'w') as mesh_file:
for ht in hot_start_list:
mesh_file.write('DATASET\nOBJTYPE "mesh2d"\n')
if len(ht.values.columns) > 1:
mesh_file.write('BEGVEC\n')
else:
mesh_file.write('BEGSCL\n')
mesh_file.write('ND {}\n'.format(len(ht.values)))
mesh_file.write('NC {}\n'.format(ht.number_of_cells))
mesh_file.write('NAME "{}"\n'.format(ht.name))
mesh_file.write('TS 0 0\n')
mesh_file.write(ht.values.to_csv(sep=' ', index=False, header=False).replace('\r\n', '\n'))
mesh_file.write('ENDDS\n')
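# Usage sketch (hypothetical names): `ht_depth` and `ht_velocity` are assumed to be
# HotStartDataSet-like objects exposing the `values`, `number_of_cells`, and `name`
# attributes used above.
#
#     write_hot_start_file('project.hot', [ht_depth, ht_velocity])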
def read_bc_file(file_name, bc_class):
"""
Reads the *.bc file and fills the AdhModel class
Args:
file_name: File name of the *.bc file
bc_class: Boundary Condition class
"""
# set all not required to deactivated
bc_class.operation_parameters.set_not_required(False)
bc_class.constituent_properties.set_not_required(False)
bc_class.model_constants.set_not_required(False)
bc_string_cards = {'NDS', 'EGS', 'MDS', 'MTS'}
bc_cards = {'NB', 'DB', 'BR', 'OB', 'OFF', 'WER', 'WRS', 'FLP', 'FGT', 'SLUICE', 'SLS'}
xy_series_cards = {'XY1', 'XY2', 'XYC', 'SERIES'}
pc_cards = {'PC', 'OC', 'OS', 'FLX', 'SOUT', 'FOUT'}
temp_data = {}
xy_data_list = []
with open(file_name, "r") as file:
for line_number, line in enumerate(file):
# remove new line character
line = line.rstrip()
line_split = line.split()
# remove blank strings
line_split[:] = (part for part in line_split if part != '')
# skip blank line, comment line
if len(line_split) == 0 or line_split[0] == '' or line_split[0][0] == '!':
continue
try:
if line_split[0] == 'OP':
read_op_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'IP':
read_ip_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'CN':
read_cn_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'MP':
read_mp_cards(line_split, bc_class)
elif line_split[0] in bc_string_cards:
read_bc_string_cards(line_split, temp_data)
elif line_split[0] in xy_series_cards:
read_xy_cards(line_split, temp_data)
elif line_split[0] == 'FR':
read_fr_cards(line_split, temp_data)
elif line_split[0] in pc_cards:
read_pc_cards(line_split, bc_class, temp_data)
elif line_split[0] in bc_cards:
read_bc_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'TC':
read_tc_cards(line_split, bc_class)
elif 'xy_type' in temp_data:
xyt = temp_data['xy_type']
if xyt == 'SERIES AWRITE':
labels = ['START_TIME', 'END_TIME', 'TIME_STEP_SIZE', 'UNITS']
xy_data_list.append([float(line_split[0]), float(line_split[1]), float(line_split[2]),
int(line_split[3])])
elif xyt == 'SERIES WIND' or xyt == 'SERIES WAVE':
labels = ['X', 'Y', 'Y2']
xy_data_list.append([float(line_split[0]), float(line_split[1]), float(line_split[2])])
else:
labels = ['X', 'Y']
xy_data_list.append([float(line_split[0]), float(line_split[1])])
# set the time step option in the output control if we read 'SERIES DT'
if xyt == 'SERIES DT':
bc_class.time_control.time_step_option = 'Time step series (SERIES DT)'
bc_class.time_control.max_time_step_size_time_series = temp_data['xy_id']
if len(xy_data_list) == temp_data['xy_number_points']:
ts = TimeSeries()
ts.series_type = xyt
if xyt == 'SERIES AWRITE':
# objs = list(bc_class.output_control.param.output_control_option.get_range())
bc_class.output_control.output_control_option = 'Specify autobuild (SERIES AWRITE)'
ts.units = temp_data['xy_units']
ts.output_units = temp_data['xy_output_units']
ts.time_series = pd.DataFrame.from_records(xy_data_list, columns=labels)
if 'xy_x_location' in temp_data:
ts.x_location = temp_data['xy_x_location']
ts.y_location = temp_data['xy_y_location']
temp_data.pop('xy_x_location')
temp_data.pop('xy_y_location')
xy_data_list = []
# set time series ID as both the key and in the ID column
ts.series_id = temp_data['xy_id']
bc_class.time_series[temp_data['xy_id']] = ts
# empty out temp_data #todo poor practice
temp_data.pop('xy_number_points')
temp_data.pop('xy_id')
temp_data.pop('xy_type')
temp_data.pop('xy_units')
temp_data.pop('xy_output_units')
except:
msg = 'Error reading line {} of file: {}.\nLine: {}'.format(line_number+1,
os.path.basename(file_name), line)
raise IOError(msg)
lists_to_data_frames(bc_class, temp_data)
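# Usage sketch (illustrative): `bc` is assumed to be the boundary-condition object
# described in the docstring, with the attributes accessed above already initialized.
#
#     read_bc_file('project.bc', bc)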
def lists_to_data_frames(bc_class, temp_data):
"""
Converts temporary lists to DataFrames in the AdhModel class
Args:
bc_class: The ADH boundary condition class that holds the data
temp_data: Dictionary of data that is not stored in the ADH simulation but is needed while reading the file
"""
if 'bc_string_list' in temp_data:
labels = ['CARD', 'ID', 'ID_0', 'ID_1']
df = pd.DataFrame.from_records(temp_data['bc_string_list'], columns=labels)
for x in range(1, len(labels)):
df[labels[x]] = df[labels[x]].astype(dtype='Int64')
bc_class.boundary_strings = df
if 'bc_list' in temp_data:
labels = ['CARD', 'CARD_2', 'STRING_ID', 'XY_ID1', 'XY_ID2', 'XY_ID3']
df = pd.DataFrame.from_records(temp_data['bc_list'], columns=labels)
for x in range(2, len(labels)):
df[labels[x]] = df[labels[x]].astype(dtype='Int64')
bc_class.solution_controls = df
if 'nb_sdr_list' in temp_data:
labels = ['CARD', 'CARD_1', 'S_ID', 'COEF_A', 'COEF_B', 'COEF_C', 'COEF_D', 'COEF_E']
        df = pd.DataFrame.from_records(temp_data['nb_sdr_list'], columns=labels)  # api: pandas.DataFrame.from_records
# generator functions to simplify and streamline signal injection and recovery of filterbanks
import setigen as stg
import numpy as np
import pandas as pd
import os
import astropy.units as u
from turbo_seti.find_doppler.find_doppler import FindDoppler
from turbo_seti.find_event.find_event import read_dat
from blimpy import Waterfall
import math
import matplotlib.pyplot as plt
from setificient import get_test_files, get_data_files, generate_test_file
import logging
logger_name = 'multigen'
logger = logging.getLogger(logger_name)
import multigen_analysis
def blueprint():
s = """#waterfalls = stg.split_waterfall_generator()
# opt for one small portion of a waterfall file to make this faster!!
parameters = get_parameters(f_range, d_range, s_range, w_range) DONE
# waterfalls = (slice_waterfall(w, params) for params in parameters)
frames = (inject_frame(p) for p in parameters) DONE
finddoppler_results = (find_doppler_frame(frame) for frame in frames) DONE
#results = (get_results(fd) for fd in finddopplers)
final_results = pd.concat(finddoppler_results)
"""
print(s)
#def uniform_dist(v0, v1, n):
# """
# Returns generator of a uniform distribution
# """
# return (v for v in np.linspace(v0, v1, n))
def all_dist(
nsamples,
f_min,
f_max,
d_min=-10.0,
d_max=10.0,
s_min=30.0,
s_max=30.0,
w_min=4.0,
w_max=4.0,
nsignals=2):
"""
Generates values by stepping through the
input parameter ranges. Ensures the entire
range is spanned.
"""
counter = 0
while counter < nsamples*nsignals:
f = np.linspace(f_min, f_max, nsamples*(nsignals))[counter:counter+nsignals]
d = np.linspace(d_min, d_max, nsamples*(nsignals))[counter:counter+nsignals]
s = np.linspace(s_min, s_max, nsamples*(nsignals))[counter:counter+nsignals]
w = np.linspace(w_min, w_max, nsamples*(nsignals))[counter:counter+nsignals]
#f = np.linspace(f_min, f_max, nsamples*nsignals)[nsamples:nsamples-nsignals]
#d = np.linspace(d_min, d_max, nsamples*nsignals)[nsamples:nsamples-nsignals]
#s = np.linspace(s_min, s_max, nsamples*nsignals)[nsamples:nsamples-nsignals]
#w = np.linspace(w_min, w_max, nsamples*nsignals)[nsamples:nsamples-nsignals]
yield (f, d, s, w)
#nsamples -= nsignals
counter += nsignals
def parameters_generator(
nsamples,
f_min,
f_max,
d_min=-10.0,
d_max=10.0,
s_min=20.0,
s_max=40.0,
w_min=1.0,
w_max=40.0,
f_dist='uniform',
d_dist='uniform',
s_dist='uniform',
w_dist='uniform',
nsignals=2,
dt=0.0,
fchans=1024,
tchans=16):
"""
Generator of parameter values for signal injection
Parameters
----------
nsamples : int
Number of times to sample the parameter distributions
f_min : float
Minimum injection frequency
f_max : float
Maximum injection frequency
d_min : float, optional
Minimum injection drift
d_max : float, optional
Maximum injection drift
s_min : float, optional
Minimum injection SNR
s_max : float, optional
Maximum injection SNR
w_min : float, optional
Minimum injection width
w_max : float, optional
Maximum injection width
f_dist : str, optional
Injection frequency distribution ('uniform', 'gaussian', 'random')
d_dist : str, optional
Injection drift distribution ('uniform', 'gaussian', 'random')
s_dist : str, optional
Injection SNR distribution ('uniform', 'gaussian', 'random')
w_dist : str, optional
Injection width distribution ('uniform', 'gaussian', 'random')
nsignals : int, optional
Number of signals to inject
Notes
-----
You can replace any distribution given in numpy.random
for the frequency, drift rate, snr, and/or width distributions.
"""
#random.uniform(-1, 1)*(drift*frame.dt*frame.tchans
#separation =
    f_dist, d_dist, s_dist, w_dist = [getattr(np.random, dist) for dist in [f_dist, d_dist, s_dist, w_dist]]
while nsamples > 0:
f_vals = f_dist(low=f_min, high=f_max, size=nsignals)
d_vals = d_dist(low=d_min, high=d_max, size=nsignals)
s_vals = s_dist(low=s_min, high=s_max, size=nsignals)
w_vals = w_dist(low=w_min, high=w_max, size=nsignals)
if nsignals > 1:
f_vals[1] = f_vals[0] + 5000 + np.random.uniform(-2,2)*1000#(np.random.uniform(-1,1)*(d_vals[0]*dt*tchans))
d_vals[1] = d_vals[0] + (np.random.uniform(-1,1)*5.0)
yield (
f_vals,
d_vals,
s_vals,
w_vals
)
nsamples -= 1
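# Example of consuming this generator (a sketch with made-up values; in the pipeline
# below, f_min/f_max, dt, fchans and tchans are taken from the filterbank file itself):
#
#     params = parameters_generator(5, f_min, f_max, nsignals=2, dt=18.25, fchans=1024, tchans=16)
#     for f_vals, d_vals, s_vals, w_vals in params:
#         print(f_vals, d_vals, s_vals, w_vals)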
def Dtest_generator(
nsamples_per_drift,
f_min,
f_max,
d_min=-10.0,
d_max=10.0,
s_min=30.0,
s_max=30.0,
w_min=4.0,
w_max=4.0,
f_dist='uniform',
d_dist='uniform',
s_dist='uniform',
w_dist='uniform',
nsignals=1):
"""
Generates parameters for drift rate tests
"""
f = float(np.mean([f_min, f_max]))
s = float(np.mean([s_min, s_max]))
w = float(np.mean([w_min, w_max]))
d_vals = np.array([np.full(nsamples_per_drift, i) for i in np.arange(d_min, d_max, 0.5)]).flatten()
nsamples = len(d_vals)
counter=0
while counter < nsamples:
yield (f, d_vals[counter], s, w)
counter += 1
def Stest_generator(
nsamples_per_drift,
f_min,
f_max,
d_min=0.0,
d_max=0.0,
s_min=0.5,
s_max=100.0,
w_min=4.0,
w_max=4.0,
f_dist='uniform',
d_dist='uniform',
s_dist='uniform',
w_dist='uniform',
nsignals=1):
"""
Generates parameters for SNR tests
"""
f = float(np.mean([f_min, f_max]))
d = float(np.mean([d_min, d_max]))
w = float(np.mean([w_min, w_max]))
#d_vals = np.array([np.full(nsamples_per_drift, i) for i in np.arange(d_min, d_max, 0.5)]).flatten()
s_vals = np.array([np.full(nsamples_per_drift, i) for i in np.arange(s_min, s_max, 0.5)]).flatten()
nsamples = len(s_vals)
counter=0
while counter < nsamples:
yield (f, d, s_vals[counter], w) #d_vals[counter], s, w)
counter += 1
def inject_frame(
filename,
parameters,
nsignals):
"""
Returns a frame object with injected signals and
a tuple of injected injected signal parameters
Parameters
----------
filename : str
Path to filterbank file
parameters : tuple
Tuple of injection parameter values
nsignals : int, optional
Number of signals to inject into data
Notes
-----
For each signal the SNR is reduced
"""
#while niterations > 0:
frame = stg.Frame(filename)
frame.add_metadata({'signals':[]})
frequency, drift_rate, snr, width = parameters
#print(parameters)
#print(np.shape(parameters))
#if len(list(np.shape(parameters))) > 1:
if nsignals > 1:
for i in range(nsignals):
#for i in range(np.shape(parameters)[-1]):
#if len(list(np.shape(parameters)))==1:
#else:
f = float(frequency[i])
d = float(drift_rate[i])
s = float(snr[i])
w = float(width[i])
#logger.debug("f = ",f)
#logger.debug("d = ",d)
#logger.debug("s = ",s)
#logger.debug("w = ",w)
#print(len(list(np.shape(parameters))))
#if i > 0 and len(list(np.shape(parameters)))>1:
s *= .5
parameters[2][1] = s
fexs = f - np.copysign(1, d)*w
fex = (f + (d * frame.dt * frame.tchans)) + np.copysign(1, d)*w
#logger.debug("fexs = ",fexs)
#logger.debug("fex = ",fex)
frame.add_signal(
stg.constant_path(
f_start=f,
drift_rate=d*u.Hz/u.s),
stg.constant_t_profile(level=frame.get_intensity(snr=s)),
stg.gaussian_f_profile(width=w*u.Hz),
stg.constant_bp_profile(level=1),
bounding_f_range=(min(fexs, fex), max(fexs, fex))
)
frame.metadata['signals'].append([f, d, s, w])
else:
f = float(frequency)
d = float(drift_rate)
s = float(snr)
w = float(width)
fexs = f - np.copysign(1, d)*w
fex = (f + (d * frame.dt * frame.tchans)) + np.copysign(1, d)*w
frame.add_signal(
stg.constant_path(
f_start=f,
drift_rate=d*u.Hz/u.s),
stg.constant_t_profile(level=frame.get_intensity(snr=s)),
stg.gaussian_f_profile(width=w*u.Hz),
stg.constant_bp_profile(level=1),
bounding_f_range=(min(fexs, fex), max(fexs, fex))
)
frame.metadata['signals'].append([f, d, s, w])
return frame, parameters
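# Sketch of a single injection (hypothetical file name; `p` is one tuple yielded by one
# of the parameter generators above):
#
#     frame, p = inject_frame('observation.fil', p, nsignals=2)
#     frame.save_hdf5('injected.h5')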
def apply_find_doppler(frame, parameters, nsignals):
"""
Returns results of FindDopplering a frame object
Parameters
----------
frame : stg.Frame
Frame with injected signals
parameters : tuple
Parameters of injected signals
nsignals : int
Number of injected signals per frame
"""
#DATA
#0. num_recovered
#1. num_inserted
#2. injected frequency
#3. injected drift rate
#4. injected snr
#5. injected width
#6. [find_doppler data]
#7. [2nd sig freq, 2nd sig drift, 2nd sig, 2nd sig snr, 2nd sig, width]
#RESULTS
#ratio captured = (0) / (1)
#injected frequency = (2)
#detected (captured) frequency = (6)(0)(0)(1)
control = read_dat('testing_spliced_blc00010203040506o7o0111213141516o021222324252627_guppi_58806_43185_TIC458478250_0127.gpuspec.0000.dat')
def compare(df_1, df_2, col_):
"""
Compares DataFrame column values
"""
def vcompare(df1, df2, col):
return df1[col] != df2[col]
#comp = np.vectorize(vcompare)
#return comp(df_1, df_2, col_)
return vcompare(df_1, df_2, col_)
frame.save_hdf5('frame.h5')
FindDoppler('frame.h5', max_drift=11.0, snr=10).search()
hits = read_dat('frame.dat')
os.remove('frame.h5')
os.remove('frame.dat')
os.remove('frame.log')
# remove hits from known RFI
RFI = read_dat('_fchans_4096_testing_spliced_blc00010203040506o7o0111213141516o021222324252627_guppi_58806_43864_TIC154089169_0129.gpuspec.0000.dat')
print(hits.columns)
print(RFI.columns)
#try:
# hits = hits.loc[(hits['DriftRate'].eq(RFI['DriftRate'])) & (hits['SNR'].eq(RFI['SNR']))]
#except:
# pass
#hits = hits.loc[compare(hits, control, 'Freq') & compare(hits, control, 'DriftRate') & compare(hits, control, 'SNR')]
results = {}
#try:
results['nInjected'] = [len(frame.metadata['signals'])]
results['nDetected'] = [len(hits)]
results['RatioCaptured']=[len(hits) / len(frame.metadata['signals'])]#[len(hits) / np.shape(parameters)[-1]]
#except:
# results['RatioCaptured']=[len(hits) / nsignals]
if len(hits)==0:
#hits = {}
hits['Freq']=[0.0]
hits['DriftRate']=[0.0]
hits['SNR']=[0.0]
#if len(list(np.shape(parameters))) > 1:
if nsignals > 1:
results['injFreq'] = [parameters[0][0]/1.0e6]
else:
results['injFreq'] = [parameters[0] / 1.0e6]
#THREW AN ERROR WHEN MORE THAN ONE SIGNAL WAS DETECTED
#results['detFreq'] = [float(hits['Freq'])]
results['detFreq'] = [float(hits['Freq'][0])]
#if len(list(np.shape(parameters))) > 1:
if nsignals > 1:
results['injDrift'] = [parameters[1][0]]
else:
results['injDrift'] = [parameters[1]]
results['detDrift'] = [float(hits['DriftRate'][0])]#.tolist()
if nsignals > 1:#len(list(np.shape(parameters))) > 1:
results['injSNR'] = [parameters[2][0]]
else:
results['injSNR'] = [parameters[2]]
results['detSNR'] = [float(hits['SNR'][0])]
if nsignals > 1:#len(list(np.shape(parameters))) > 1:
results['injWidth'] = [parameters[3][0]]
else:
results['injWidth'] = [parameters[3]]
#results['Separation'] = [(float(np.diff(parameters[0]))*u.MHz).to_value('kHz')]
if nsignals > 1:#len(list(np.shape(parameters))) > 1:
results['Separation'] = [float(np.diff(parameters[0]))]#/1.0e3]
results['secFreq'] = [parameters[0][1]/1.0e6]
results['secDrift'] = [parameters[1][1]]
results['secSNR'] = [parameters[2][1]]
results['secWidth'] = [parameters[3][1]]
if len(hits)>1:
results['detsecFreq'] = [float(hits['Freq'][1])]
results['detsecDrift'] = [float(hits['DriftRate'][1])]
results['detsecSNR'] = [float(hits['SNR'][1])]
results['diffFreq'] = float(results['detFreq'][0]) - float(results['injFreq'][0])#.to_numpy()
results['absdiffFreq'] = abs(results['diffFreq'])
results['diffDrift'] = results['detDrift'][0]-results['injDrift'][0]
results['absdiffDrift'] = abs(results['diffDrift'])
results['diffSNR'] = results['detSNR'][0] - results['injSNR'][0]
results['absdiffSNR'] = abs(results['diffSNR'])
results_df = pd.DataFrame.from_dict(results)
#results_df['injFreq'] = [float(f[1:-1]) for f in results_df['injFreq']]
#results_df['injDrift'] = [float(d[1:-1]) for d in result_df['injDrift']]
#results_df['injSNR'] = [float(s[1:-1]) for s in df['injSNR']]
#results_df['injWidth'] = [float(w[1:-1]) for w in df['injWidth']]
#results_df = pd.concat([results_df, hits], axis=1)
#for col in ['Hit_ID','status','in_n_ons','RFI_in_range']:
# del results_df[col]
return results_df
def efficiency_pipeline(
filename,
niterations,
nsignals=2,
d_min=0.0,
d_max=0.0,
s_min=0.0,
s_max=100.0,
w_min=4.0,
w_max=4.0,
f_dist='uniform',
d_dist='uniform',
s_dist='uniform',
w_dist='uniform',
loglevel=logging.INFO,
dev=False):
"""
Top-level efficiency test routine
Parameters
----------
filename : str
Path to filterbank file
nsignals : int, optional
Number of signals to inject per frame
niterations : int, optional
Number of iterations to perform efficiency test
"""
logger.setLevel(loglevel)
    # Load the filterbank once and reuse its metadata instead of re-reading the file
    base_frame = stg.Frame(Waterfall(filename, max_load=20))
    tchans = base_frame.tchans
    dt = base_frame.dt
    fchans = base_frame.fchans
    f_min, f_max = base_frame.fmin, base_frame.fmax
if dev:
frames = (inject_frame(filename, p, nsignals) for p in Stest_generator(
niterations,
f_min,
f_max,
d_min=d_min,
d_max=d_max,
s_min=s_min,
s_max=s_max,
w_min=w_min,
w_max=w_max,
nsignals=1))
#frames = (inject_frame(filename, p) for p in all_dist(
# niterations,
# f_min,
# f_max,
# d_min=d_min,
# d_max=d_max,
# s_min=s_min,
# s_max=s_max,
# w_min=w_min,
# w_max=w_max,
# nsignals=nsignals,
# dt=dt,
# fchans=fchans,
# tchans=tchans))
else:
frames = (inject_frame(filename, p, nsignals) for p in parameters_generator(
niterations,
f_min,
f_max,
d_min=d_min,
d_max=d_max,
s_min=s_min,
s_max=s_max,
w_min=w_min,
w_max=w_max,
f_dist=f_dist,
d_dist=d_dist,
s_dist=s_dist,
w_dist=w_dist,
nsignals=nsignals,
dt=dt,
fchans=fchans,
tchans=tchans))
results = (apply_find_doppler(fr[0], fr[-1], nsignals) for fr in frames)
    results_df = pd.concat(results, ignore_index=True)  # api: pandas.concat
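# Top-level usage sketch (hypothetical filterbank path; ranges are illustrative):
#
#     results = efficiency_pipeline(
#         'observation.fil',
#         niterations=50,
#         nsignals=2,
#         d_min=-5.0, d_max=5.0,
#         s_min=10.0, s_max=50.0,
#     )
#     results.to_csv('efficiency_results.csv', index=False)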
import datetime as dt
import numpy as np
import pandas as pd
import pytest
from dutil.transform import ht
@pytest.mark.parametrize(
"data, expected",
[
((0, 1, 3, 5, -1), (0, 1, 5, -1)),
([0, 1, 3, 5, -1], [0, 1, 5, -1]),
([0, 1.0, 3232.22, 5.0, -1.0, np.nan], [0, 1.0, -1.0, np.nan]),
(np.array([0, 1, 3, 5, -1]), np.array([0, 1, 5, -1])),
(
np.array([0, 1.0, 3232.22, 5.0, -1.0, np.nan]),
np.array([0, 1.0, -1.0, np.nan]),
),
(pd.Series([0, 1, 3, 5, -1]), pd.Series([0, 1, 5, -1], index=[0, 1, 3, 4])),
(
            pd.Series([0, 1.0, 3232.22, -1.0, np.nan]),  # api: pandas.Series
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date_with_mixed_end_dates():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
    malformed_C = np.random.binomial(2, p=0.8, size=T.shape[0])  # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
assert_frame_equal(censoredw, censored)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
        assert_frame_equal(df, expected, check_like=True)  # api: pandas.testing.assert_frame_equal
# -*- coding: utf-8 -*-
"""Add model years to an existing Scenario."""
# Sections of the code:
#
# I. Required python packages are imported
# II. Generic utilities for dataframe manipulation
# III. The main function, add_year()
# IV. Function add_year_set() for adding and modifying the sets
# V. Function add_year_par() for copying and modifying each parameter
# VI. Two utility functions, interpolate_1d() and interpolate_2d(), for
# calculating missing values
# %% I) Importing required packages
import numpy as np
import pandas as pd
# %% II) Utility functions for dataframe manupulation
def intpol(y1, y2, x1, x2, x):
"""Interpolate between (*x1*, *y1*) and (*x2*, *y2*) at *x*.
Parameters
----------
y1, y2 : float or pd.Series
x1, x2, x : int
"""
if x2 == x1 and y2 != y1:
        print('>>> Warning <<<: No difference between x1 and x2, '
              'returned empty!!!')
return []
elif x2 == x1 and y2 == y1:
return y1
else:
y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1)
return y
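# Worked example (linear interpolation): a value for 2015 from data points at 2010 and
# 2020 follows y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1):
#
#     >>> intpol(10.0, 20.0, 2010, 2020, 2015)
#     15.0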
def slice_df(df, idx, level, locator, value):
"""Slice a MultiIndex DataFrame and set a value to a specific level.
Parameters
----------
df : pd.DataFrame
idx : list of indices
level: str
locator : list
value : int or str
"""
if locator:
df = df.reset_index().loc[df.reset_index()[level].isin(locator)].copy()
else:
df = df.reset_index().copy()
if value:
df[level] = value
return df.set_index(idx)
def mask_df(df, index, count, value):
"""Create a mask for removing extra values from *df*."""
df.loc[index, df.columns > (df.loc[[index]].notnull().cumsum(
axis=1) == count).idxmax(axis=1).values[0]] = value
def unit_uniform(df):
"""Make units in *df* uniform."""
column = [x for x in df.columns if x in ['commodity', 'emission']]
if column:
com_list = set(df[column[0]])
for com in com_list:
df.loc[df[column[0]] == com, 'unit'] = df.loc[
df[column[0]] == com, 'unit'].mode()[0]
else:
df['unit'] = df['unit'].mode()[0]
return df
# %% III) The main function
def add_year(sc_ref, sc_new, years_new, firstyear_new=None, lastyear_new=None,
macro=False, baseyear_macro=None, parameter='all', region='all',
rewrite=True, unit_check=True, extrapol_neg=None,
bound_extend=True):
"""Add years to *sc_ref* to produce *sc_new*.
:meth:`add_year` does the following:
1. calls :meth:`add_year_set` to add and modify required sets.
2. calls :meth:`add_year_par` to add new years and modifications to each
parameter if needed.
Parameters
-----------
sc_ref : ixmp.Scenario
Reference scenario.
sc_new : ixmp.Scenario
New scenario.
yrs_new : list of int
New years to be added.
firstyear_new : int, optional
New first model year for new scenario.
macro : bool
Add new years to parameters of the MACRO model.
baseyear_macro : int
New base year for the MACRO model.
parameter: list of str or 'all'
Parameters for adding new years.
rewrite: bool
Permit rewriting a parameter in new scenario when adding new years.
check_unit: bool
Harmonize the units for each commodity, if there is inconsistency
across model years.
extrapol_neg: float
When extrapolation produces negative values, replace with a multiple of
the value for the previous timestep.
bound_extend: bool
Duplicate data from the previous timestep when there is only one data
point for interpolation (e.g., permitting the extension of a bound to
2025, when there is only one value in 2020).
"""
# III.A) Adding sets and required modifications
years_new = sorted([x for x in years_new if str(x)
not in set(sc_ref.set('year'))])
add_year_set(sc_ref, sc_new, years_new, firstyear_new, lastyear_new,
baseyear_macro)
# -------------------------------------------------------------------------
# III.B) Adding parameters and calculating the missing values for the
# additonal years
if parameter in ('all', ['all']):
par_list = sorted(sc_ref.par_list())
elif isinstance(parameter, list):
par_list = parameter
elif isinstance(parameter, str):
par_list = [parameter]
else:
print('Parameters should be defined in a list of strings or as'
' a single string!')
if 'technical_lifetime' in par_list:
par_list.insert(0, par_list.pop(par_list.index('technical_lifetime')))
if region in ('all', ['all']):
reg_list = sc_ref.set('node').tolist()
elif isinstance(region, list):
reg_list = region
elif isinstance(region, str):
reg_list = [region]
else:
print('Regions should be defined in a list of strings or as'
' a single string!')
# List of parameters to be ignored (even not copied to the new
# scenario)
par_ignore = ['duration_period']
par_list = [x for x in par_list if x not in par_ignore]
if not macro:
par_macro = ['demand_MESSAGE', 'price_MESSAGE', 'cost_MESSAGE',
'gdp_calibrate', 'historical_gdp', 'MERtoPPP', 'kgdp',
'kpvs', 'depr', 'drate', 'esub', 'lotol', 'p_ref', 'lakl',
'prfconst', 'grow', 'aeei', 'aeei_factor', 'gdp_rate']
par_list = [x for x in par_list if x not in par_macro]
if not sc_new.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_new = sc_new.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_new = min([int(x) for x in sc_new.set('year').tolist()])
if not sc_ref.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_ref = sc_ref.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_ref = firstyear_new
for parname in par_list:
# For historical parameters extrapolation permitted (e.g., from
# 2010 to 2015)
if 'historical' in parname:
extrapol = True
yrs_new = [x for x in years_new if x < int(firstyear_new)]
elif int(firstyear_ref) > int(firstyear_new):
extrapol = True
yrs_new = years_new
else:
extrapol = False
yrs_new = years_new
if 'bound' in parname:
bound_ext = bound_extend
else:
bound_ext = True
year_list = [x for x in sc_ref.idx_sets(parname) if 'year' in x]
if len(year_list) == 2 or parname in ['land_output']:
# The loop over "node" is only for reducing the size of tables
for node in reg_list:
add_year_par(sc_ref, sc_new, yrs_new, parname, [node],
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
else:
add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list,
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
sc_new.set_as_default()
print('> All required parameters were successfully '
'added to the new scenario.')
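# Usage sketch (illustrative): `sc_ref` is an existing ixmp/message_ix Scenario and
# `sc_new` an empty Scenario (e.g. a fresh clone) to be populated with the new years.
#
#     add_year(sc_ref, sc_new, years_new=[2025, 2035, 2045], firstyear_new=2025)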
# %% Submodules needed for running the main function
# IV) Adding new years to sets
def add_year_set(sc_ref, sc_new, years_new, firstyear_new=None,
lastyear_new=None, baseyear_macro=None):
"""Add new years to sets.
:meth:`add_year_set` adds additional years to an existing scenario, by
starting to make a new scenario from scratch. After modification of the
year-related sets, all other sets are copied from *sc_ref* to *sc_new*.
See :meth:`add_year` for parameter descriptions.
"""
# IV.A) Treatment of the additional years in the year-related sets
# A.1. Set - year
yrs_old = list(map(int, sc_ref.set('year')))
horizon_new = sorted(yrs_old + years_new)
sc_new.add_set('year', [str(yr) for yr in horizon_new])
# A.2. Set _ type_year
yr_typ = sc_ref.set('type_year').tolist()
sc_new.add_set('type_year', sorted(yr_typ + [str(yr) for yr in years_new]))
# A.3. Set _ cat_year
yr_cat = sc_ref.set('cat_year')
# A.4. Changing the first year if needed
if firstyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'] = firstyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['firstmodelyear', firstyear_new]
if lastyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear',
'year'] = lastyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['lastmodelyear', lastyear_new]
# A.5. Changing the base year and initialization year of macro if a new
# year specified
if baseyear_macro:
if not yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'] = baseyear_macro
if not yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'] = baseyear_macro
yr_pair = []
for yr in years_new:
yr_pair.append([yr, yr])
yr_pair.append(['cumulative', yr])
yr_cat = yr_cat.append(pd.DataFrame(yr_pair,
columns=['type_year', 'year']),
ignore_index=True
).sort_values('year').reset_index(drop=True)
# A.6. Changing the cumulative years based on the new first model year
if 'firstmodelyear' in set(yr_cat['type_year']):
firstyear_new = int(yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'])
yr_cat = yr_cat.drop(yr_cat.loc[(yr_cat['type_year'] == 'cumulative'
) & (yr_cat['year'] < firstyear_new)
].index)
sc_new.add_set('cat_year', yr_cat)
# IV.B) Copying all other sets
set_list = [s for s in sc_ref.set_list() if 'year' not in s]
# Sets with one index set
index_list = [x for x in set_list if not isinstance(sc_ref.set(x),
pd.DataFrame)]
for set_name in index_list:
if set_name not in sc_new.set_list():
sc_new.init_set(set_name, idx_sets=None, idx_names=None)
sc_new.add_set(set_name, sc_ref.set(set_name).tolist())
# The rest of the sets
for set_name in [x for x in set_list if x not in index_list]:
new_set = [x for x in sc_ref.idx_sets(set_name
) if x not in sc_ref.set_list()]
if set_name not in sc_new.set_list() and not new_set:
sc_new.init_set(set_name,
idx_sets=sc_ref.idx_sets(set_name),
idx_names=sc_ref.idx_names(set_name))
sc_new.add_set(set_name, sc_ref.set(set_name))
sc_new.commit('sets added!')
print('> All the sets updated and added to the new scenario.')
# %% V) Adding new years to parameters
def add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list, firstyear_new,
extrapolate=False, rewrite=True, unit_check=True,
extrapol_neg=None, bound_extend=True):
"""Add new years to parameters.
This function adds additional years to a parameter. The value of the
parameter for additional years is calculated mainly by interpolating and
extrapolating data from existing years.
See :meth:`add_year` for parameter descriptions.
"""
# V.A) Initialization and checks
par_list_new = sc_new.par_list()
idx_names = sc_ref.idx_names(parname)
horizon = sorted([int(x) for x in list(set(sc_ref.set('year')))])
node_col = [x for x in idx_names if x in ['node', 'node_loc', 'node_rel']]
year_list = [x for x in idx_names if x in ['year', 'year_vtg', 'year_act',
'year_rel']]
if parname not in par_list_new:
sc_new.check_out()
sc_new.init_par(parname, idx_sets=sc_ref.idx_sets(parname),
idx_names=sc_ref.idx_names(parname))
sc_new.commit('New parameter initiated!')
if node_col:
par_old = sc_ref.par(parname, {node_col[0]: reg_list})
par_new = sc_new.par(parname, {node_col[0]: reg_list})
sort_order = [node_col[0], 'technology',
'commodity', 'mode', 'emission'] + year_list
nodes = par_old[node_col[0]].unique().tolist()
else:
par_old = sc_ref.par(parname)
par_new = sc_new.par(parname)
sort_order = ['technology', 'commodity'] + year_list
nodes = ['N/A']
if not par_new.empty and not rewrite:
print('> Parameter "' + parname + '" already has data in new scenario'
' and left unchanged for node/s: {}.'.format(reg_list))
return
if par_old.empty:
print('> Parameter "' + parname + '" is empty in reference scenario'
' for node/s: {}!'.format(reg_list))
return
    # Sorting the data to make it ready for dataframe manipulation
sort_order = [x for x in sort_order if x in idx_names]
if sort_order:
par_old = par_old.sort_values(sort_order).reset_index(drop=True)
rem_idx = [x for x in par_old.columns if x not in sort_order]
par_old = par_old.reindex(columns=sort_order + rem_idx)
sc_new.check_out()
if not par_new.empty and rewrite:
print('> Parameter "' + parname + '" is being removed from new'
' scenario to be updated for node/s in {}...'.format(nodes))
sc_new.remove_par(parname, par_new)
# A uniform "unit" for values in different years
if 'unit' in par_old.columns and unit_check:
par_old = unit_uniform(par_old)
# ---------------------------------------------------------------------------
# V.B) Adding new years to a parameter based on time-related indexes
# V.B.1) Parameters with no time index
if len(year_list) == 0:
sc_new.add_par(parname, par_old)
sc_new.commit(parname)
print('> Parameter "' + parname + '" just copied to new scenario '
              'since it has no time-related entries.')
# V.B.2) Parameters with one index related to time
elif len(year_list) == 1:
year_col = year_list[0]
df = par_old.copy()
df_y = interpolate_1d(df, yrs_new, horizon, year_col, 'value',
extrapolate, extrapol_neg, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(' ')
print('> Parameter "{}" copied and new years'
' added for node/s: "{}".'.format(parname, nodes))
# V.B.3) Parameters with two indexes related to time (such as 'input')
elif len(year_list) == 2:
year_col = 'year_act'
year_ref = [x for x in year_list if x != year_col][0]
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
year_diff = [x for x in horizon[1:-1] if f(horizon, horizon.index(x))]
print('> Parameter "{}" is being added for node/s'
' "{}"...'.format(parname, nodes))
# Flagging technologies that have lifetime for adding new timesteps
yr_list = [int(x) for x in set(sc_new.set('year')
) if int(x) > int(firstyear_new)]
min_step = min(np.diff(sorted(yr_list)))
par_tec = sc_new.par('technical_lifetime', {'node_loc': nodes})
# Technologies with lifetime bigger than minimum time interval
par_tec = par_tec.loc[par_tec['value'] > min_step]
df = par_old.copy()
if parname == 'relation_activity':
tec_list = []
else:
tec_list = [t for t in (set(df['technology'])
) if t in list(set(par_tec['technology']))]
df_y = interpolate_2d(df, yrs_new, horizon, year_ref, year_col,
tec_list, par_tec, 'value', extrapolate,
extrapol_neg, year_diff, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(parname)
print('> Parameter "{}" copied and new years added'
' for node/s: "{}".'.format(parname, nodes))
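# Illustrative sketch added for clarity (never called): the "year_diff" heuristic
# used above and in interpolate_2d below flags transition years after which the
# model period length grows, e.g. 5-year steps switching to 10-year steps. The
# horizon below is a made-up example, not data from any scenario.
def _example_year_diff():
    horizon = [2010, 2015, 2020, 2030, 2040, 2050]
    def f(x, i):
        return x[i + 1] - x[i] > x[i] - x[i - 1]
    # returns [2020]: the step after 2020 (10 years) exceeds the step before it (5 years)
    return [x for x in horizon[1:-1] if f(horizon, horizon.index(x))]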
# %% VI) Required functions
def interpolate_1d(df, yrs_new, horizon, year_col, value_col='value',
extrapolate=False, extrapol_neg=None, bound_extend=True):
"""Interpolate data with one year dimension.
    This function receives parameter data as a dataframe, and adds new data
    for the additional years by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
bound_extend : bool
Allow extrapolation of bounds for new years
"""
horizon_new = sorted(horizon + yrs_new)
idx = [x for x in df.columns if x not in [year_col, value_col]]
if not df.empty:
df2 = df.pivot_table(index=idx, columns=year_col, values=value_col)
# To sort the new years smaller than the first year for
# extrapolation (e.g. 2025 values are calculated first; then
# values of 2015 based on 2020 and 2025)
year_before = sorted([x for x in yrs_new if x < min(df2.columns
)], reverse=True)
if year_before and extrapolate:
for y in year_before:
yrs_new.insert(len(yrs_new), yrs_new.pop(yrs_new.index(y)))
for yr in yrs_new:
if yr > max(horizon):
extrapol = True
else:
extrapol = extrapolate
            # a) If this new year is greater than the modeled years, extrapolate
if yr > max(df2.columns) and extrapol:
if yr == horizon_new[horizon_new.index(max(df2.columns)) + 1]:
year_pre = max([x for x in df2.columns if x < yr])
if len([x for x in df2.columns if x < yr]) >= 2:
year_pp = max([x for x in df2.columns if x < year_pre])
df2[yr] = intpol(df2[year_pre], df2[year_pp],
year_pre, year_pp, yr)
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_pre] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_pre] >= 0),
yr] = df2.loc[(df2[yr] < 0
) & (df2[year_pre] >= 0),
year_pre] * extrapol_neg
else:
df2[yr] = df2[year_pre]
# b) If the new year is smaller than modeled years, extrapolate
elif yr < min(df2.columns) and extrapol:
year_next = min([x for x in df2.columns if x > yr])
# To make sure the new year is not two steps smaller
cond = (year_next == horizon_new[horizon_new.index(yr) + 1])
if len([x for x in df2.columns if x > yr]) >= 2 and cond:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = intpol(df2[year_next], df2[year_nn],
year_next, year_nn, yr)
df2[yr][np.isinf(df2[year_next])] = df2[year_next]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
elif bound_extend and cond:
df2[yr] = df2[year_next]
            # c) Otherwise, interpolate between existing years
elif yr > min(df2.columns) and yr < max(df2.columns):
year_pre = max([x for x in df2.columns if x < yr])
year_next = min([x for x in df2.columns if x > yr])
df2[yr] = intpol(df2[year_pre], df2[year_next],
year_pre, year_next, yr)
# Extrapolate for new years if the value exists for the
# previous year but not for the next years
# TODO: here is the place that should be changed if the
# new year should go to the time step before the existing one
if [x for x in df2.columns if x > year_next]:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = df2[yr].fillna(intpol(df2[year_next],
df2[year_nn], year_next,
year_nn, yr))
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
df2 = pd.melt(df2.reset_index(), id_vars=idx,
value_vars=[x for x in df2.columns if x not in idx],
var_name=year_col, value_name=value_col
).dropna(subset=[value_col]).reset_index(drop=True)
df2 = df2.sort_values(idx).reset_index(drop=True)
else:
print('+++ WARNING: The submitted dataframe is empty, so returned'
' empty results!!! +++')
df2 = df
return df2
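# A minimal usage sketch of interpolate_1d above, wrapped in a function so nothing
# runs on import. The tiny frame and the 'coal_ppl'/'USD/kW' labels are invented
# for illustration; the call relies on the intpol() helper referenced above, which
# is defined elsewhere in this module.
def _example_interpolate_1d():
    df = pd.DataFrame({'technology': ['coal_ppl', 'coal_ppl'],
                       'year_vtg': [2020, 2030],
                       'value': [100.0, 80.0],
                       'unit': ['USD/kW', 'USD/kW']})
    # Adding 2025 between two existing years yields the linear midpoint (90.0).
    return interpolate_1d(df, yrs_new=[2025], horizon=[2020, 2030],
                          year_col='year_vtg', value_col='value')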
# %% VI.B) Interpolating parameters with two dimensions related to time
def interpolate_2d(df, yrs_new, horizon, year_ref, year_col, tec_list, par_tec,
value_col='value', extrapolate=False, extrapol_neg=None,
year_diff=None, bound_extend=True):
"""Interpolate parameters with two dimensions related year.
    This function receives a dataframe that has two time-related columns (e.g.,
    "input" or "relation_activity"), and adds new data for the additional years
in both time-related columns by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_ref : str
The header of the first column to which the new years should be added,
e.g. `'year_vtg'`.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
tec_list : list of str
List of technologies in the parameter ``technical_lifetime``.
par_tec : pandas.DataFrame
Parameter ``technical_lifetime``.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
year_diff : list of int
List of model years with different time intervals before and after them
bound_extend : bool
Allow extrapolation of bounds for new years based on one data point
"""
def idx_check(df1, df2):
return df1.loc[df1.index.isin(df2.index)]
    if df.empty:
        print('+++ WARNING: The submitted dataframe is empty, so'
              ' returned empty results!!! +++')
        return df
df_tec = df.loc[df['technology'].isin(tec_list)]
idx = [x for x in df.columns if x not in [year_col, value_col]]
df2 = df.pivot_table(index=idx, columns=year_col, values='value')
df2_tec = df_tec.pivot_table(index=idx, columns=year_col, values='value')
# -------------------------------------------------------------------------
# First, changing the time interval for the transition period
# (e.g., year 2010 in old R11 model transits from 5 year to 10 year)
horizon_new = sorted(horizon + [x for x in yrs_new if x not in horizon])
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
yr_diff_new = [x for x in horizon_new[1:-1] if f(horizon_new,
horizon_new.index(x))]
# Generating duration_period_sum matrix for masking
df_dur = pd.DataFrame(index=horizon_new[:-1], columns=horizon_new[1:])
for i in df_dur.index:
for j in [x for x in df_dur.columns if x > i]:
df_dur.loc[i, j] = j - i
# Adding data for new transition year
if yr_diff_new and tec_list and year_diff not in yr_diff_new:
yrs = [x for x in horizon if x <= yr_diff_new[0]]
year_next = min([x for x in df2.columns if x > yr_diff_new[0]])
df_yrs = slice_df(df2_tec, idx, year_ref, yrs, [])
if yr_diff_new[0] in df2.columns:
            df_yrs = df_yrs.loc[~pd.isna(df_yrs[yr_diff_new[0]])]
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 17:30:42 2020
@author: bruger
This module grabs the age distribution and saves the dataframe
"""
import pandas as pd
import requests
from pathlib import Path
try:
    0/0  # force an exception so the data is re-fetched from github (comment this line out to use the cached file)
agedistribution_df = pd.read_excel('data/agedistribution.xlsx',index_col=0)
print('Agedistribution read from file')
except:
ageurl = 'https://raw.githubusercontent.com/neherlab/covid19_scenarios/master/src/assets/data/ageDistribution.json'
agelist = requests.get(ageurl).json()['all']
agedic = {d['name'] : {'POP__'+str(age['ageGroup']): float(age['population']) for age in d['data']}
for d in agelist }
    agedistribution_df = pd.DataFrame(agedic)
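    # Hedged addition: the module docstring says the dataframe is saved, and the
    # save step is assumed to mirror the read path used in the try-branch above.
    agedistribution_df.to_excel('data/agedistribution.xlsx')
    print('Agedistribution downloaded from github and cached to file')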
# GNU Lesser General Public License v3.0 only
# Copyright (C) 2020 Artefact
# <EMAIL>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from unittest.mock import MagicMock, patch
try:
import dask.bag as db
import dask.dataframe as dd
except ImportError:
raise ImportError("please install dask: pip install dask[complete]")
try:
import pandas as pd
except ImportError:
raise ImportError("please install pandas: pip install pandas")
import pytest
from nlpretext.preprocessor import Preprocessor
from nlpretext.textloader import TextLoader
from pandas.testing import assert_frame_equal
# pylint: disable=protected-access
@patch("dask.bag.read_text")
def test__read_text_txt(mock_read_text):
# Given
files_path = "some_path/to_read.txt"
file_format = "txt"
encoding = "utf-8"
text_column = "text"
mock_read_text.return_value = db.from_sequence(["This is a text \n", "This is another text \n"])
expected_result = dd.from_pandas(
pd.DataFrame({text_column: ["This is a text", "This is another text"]}), npartitions=2
)
# When
dummy_instance = TextLoader(file_format=file_format, encoding=encoding, text_column=text_column)
actual_result = dummy_instance._read_text_txt(files_path)
# Then
mock_read_text.assert_called_once_with(files_path, encoding=encoding)
assert_frame_equal(expected_result.compute(), actual_result.compute().reset_index(drop=True))
@patch("dask.dataframe.read_json")
def test__read_text_json(mock_read_json):
# Given
files_path = "some_path/to_read.json"
file_format = "json"
encoding = "utf-8"
text_column = "text"
text_ddf = dd.from_pandas(
        pd.DataFrame({text_column: ["This is a text", "This is another text"]}), npartitions=2
    )
from json_extract import flatten_json
import requests
from tabulate import tabulate
import math
import numpy
import pandas
import urllib.request, urllib.parse
import json
source_csv = pandas.read_csv("~/Downloads/ncvoter_statewide_latsandlongs_copy.csv", sep="\t")
count = 0
missing = 0
successful = 0
source_csv['latlong_found'] = source_csv['latitude']
source_csv['latlong_found'] = numpy.where(pandas.isna(source_csv.latitude),'N', 'Y')
print(tabulate(source_csv.groupby(['latlong_found', 'removed']).size().reset_index().rename(columns={0:'count'}), headers='keys', tablefmt='psql', showindex=False))
print("processing...")
for index, row in source_csv.iterrows():
if str(row['api_failed_to_resolve']) == "Y":
continue # we have previously tried to request this and it was unable to be resolved
if count == 49900:
break # reached our daily limit
if (str(row['removed']) == 'N' and pandas.isna(row['latitude'])):
adminDistrict = "NC"
postalCode = str(int(row['res_zipcode'])) if not pandas.isna(row['res_zipcode']) else '-'
locality = str(row['res_city_desc']) if not pandas.isna(row['res_city_desc']) else '-'
addressLine = str(row['res_street_address']) if not pandas.isna(row['res_street_address']) else '-'
maxResults = 1
BingMapsAPIKey = ""
# You can substitute a hyphen (-) for any structured URL parameter when there is no value.
url_header = f'http://dev.virtualearth.net/REST/v1/Locations/?countryRegion=US&adminDistrict={adminDistrict}&locality={locality}&postalCode={postalCode}&addressLine={addressLine}&maxResults={maxResults}&key={BingMapsAPIKey}'
# url_header_encoded = urllib.parse.quote(url_header)
# print(url_header)
response = requests.get(url_header)
count += 1
if response.status_code == 200:
successful += 1
coords = flatten_json(response.json())
latitude = coords['resourceSets_0_resources_0_point_coordinates_0']
longitude = coords['resourceSets_0_resources_0_point_coordinates_1']
# print(str(latitude) + ", " + str(longitude))
source_csv.loc[index, 'latitude'] = latitude
source_csv.loc[index, 'longitude'] = longitude
source_csv.loc[index, 'api_failed_to_resolve'] = "N"
source_csv.loc[index, 'geolocation_source'] = "bing"
else:
missing += 1
source_csv.loc[index, 'api_failed_to_resolve'] = "Y"
source_csv['latlong_found_after'] = source_csv['latitude']
source_csv['latlong_found_after'] = numpy.where(pandas.isna(source_csv.latitude), 'N', 'Y')
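# Hedged sketch of the script tail (assumed, not part of the code above): report
# the after-run resolution counts, mirroring the summary printed earlier, and
# persist the updated rows; overwriting the input file is an assumption.
print(tabulate(source_csv.groupby(['latlong_found_after', 'removed']).size().reset_index().rename(columns={0:'count'}), headers='keys', tablefmt='psql', showindex=False))
print(str(count) + " API calls made: " + str(successful) + " resolved, " + str(missing) + " failed")
source_csv.to_csv("~/Downloads/ncvoter_statewide_latsandlongs_copy.csv", sep="\t", index=False)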
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
        assert (repr(Timestamp(2015, 11, 12)) ==
                repr(Timestamp('20151112')))
import numpy as np
import pandas as pd
import pickle5 as pkl
def read(model, class_name, method):
with open(f"/disks/bigger/xai_methods/distances/dataframes/{class_name}/{model}/{method}.pkl", "rb") as f:
data = pkl.load(f).values
return data
def main():
hermitries = []
for model in ["densenet121_unnormalized", "mnasnet1.0_unnormalized", "resnet50_unnormalized"]:
for class_name in ["chocolatesauce", "printer", "tennisball"]:
validation_distances = read(model, class_name, "validation_features")
hermitry_threshold = np.percentile(validation_distances, 95)
for method in ["AnchorLime", "KernelShap", "Lime", "Occlusion"]:
d = read(model, class_name, method)
hermits = np.sum(d > hermitry_threshold)
hermitry = hermits / len(d)
hermitries.append({"Hermitry": hermitry,
"XAI Method": method,
"Model": model,
"Class Name": class_name})
    df = pd.DataFrame(hermitries)
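# Hedged sketch of the usual entry point for a script structured like this one;
# any further processing of `df` inside main() is assumed rather than known.
if __name__ == "__main__":
    main()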
from unittest import TestCase, main
import os
import dask.dataframe as dd
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import skbio
from qiime2 import Metadata
from qiime2.plugin.testing import TestPluginBase
from q2_sidle import (KmerMapFormat,
KmerAlignFormat,
SidleReconFormat,
ReconSummaryFormat
)
import q2_sidle._transformer as t
class TestTransform(TestCase):
def setUp(self):
### The IO transformations get tested in the plugin set up and in the
### format testing. So, I just want to make sure that I can import/
### export formats correcting and I get those transformations correct.
self.base_dir = \
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'files/types')
def test_kmer_map_to_dataframe(self):
known = pd.DataFrame(
data=[['Batman', 'Batman', 'Gotham', 'WANTCAT', 'CATCATCAT', 50],
['Superman', 'Superman', 'Metropolis', 'CATDAD', 'DADCAT',
50]],
columns=['seq-name', 'kmer', 'region', 'fwd-primer', 'rev-primer',
'kmer-length'],
index=pd.Index(['Batman', 'Superman'], name='db-seq')
)
filepath = os.path.join(self.base_dir, 'kmer-map.tsv')
format = KmerMapFormat(filepath, mode='r')
test = t._1(format)
self.assertTrue(isinstance(test, pd.DataFrame))
        pdt.assert_frame_equal(known, test)
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import argparse
import os
import pandas as pd
import numpy as np
import pickle
from econ.econ_predictor import econ_predictor
# import econ.econ_utils as econ_utils
#
# import os,sys,inspect
# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # .../covid-xprize-comp/ongoing/predictors
# parentdir = os.path.dirname(currentdir) # .../covid-xprize-comp/ongoing
# sys.path.insert(0,parentdir)
# print(sys.path)
from tempgeolstm.tempgeolstm_predictor import tempGeoLSTMPredictor
from tempgeolgbm.tempgeolgbm_predictor import tempGeoLGBMPredictor
ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # ..../covid-xprize-comp/ongoing/predictors
print(ROOT_DIR)
ALPHA = 0.50 # 0.50 seems to be the optimal value
# LSTM weights
# If you'd like to use a model, copy it to "trained_model_weights.h5"
# or change this MODEL_FILE path to point to your model.
MODEL_WEIGHTS_FILE = os.path.join(ROOT_DIR, "tempgeolstm", "models", "model_alldata.h5")
# LGBM weights
MODEL_FILE = os.path.join(ROOT_DIR, "tempgeolgbm", "models", "model_alldata.pkl")
ECON_MODEL_FILE = os.path.join(ROOT_DIR, 'econ', 'models', 'econ_models_1.pkl')
COUNTRIES_FILE = os.path.join(ROOT_DIR, "models", "countries.txt")
DATA_DIR = os.path.join(ROOT_DIR, os.pardir, 'data')
DATA_FILE = os.path.join(DATA_DIR, "OxCGRT_latest.csv")
# print(os.path.abspath(DATA_FILE)) # sanity check
TEMPERATURE_DATA_FILE = os.path.join(DATA_DIR, "temperature_data.csv")
NPI_COLUMNS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing',
'H6_Facial Coverings']
# --start_date 2020-12-01 --end_date 2020-12-31 --interventions_plan data/future_ip.csv --output_file 2020-12-01_2020_12_31.csv
def predict(start_date: str,
end_date: str,
path_to_ips_file: str,
output_file_path) -> None:
"""
Generates and saves a file with daily new cases predictions for the given countries, regions and intervention
plans, between start_date and end_date, included.
:param start_date: day from which to start making predictions, as a string, format YYYY-MM-DDD
:param end_date: day on which to stop making predictions, as a string, format YYYY-MM-DDD
:param path_to_ips_file: path to a csv file containing the intervention plans between inception date (Jan 1 2020)
and end_date, for the countries and regions for which a prediction is needed
:param output_file_path: path to file to save the predictions to
:return: Nothing. Saves the generated predictions to an output_file_path CSV file
with columns "CountryName,RegionName,Date,PredictedDailyNewCases"
"""
# !!! YOUR CODE HERE !!!
# Generate the predictions
start_date_dt = pd.to_datetime(start_date, format='%Y-%m-%d')
end_date_dt = pd.to_datetime(end_date, format='%Y-%m-%d')
npis_df = pd.read_csv(path_to_ips_file,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str,
"RegionCode": str},
error_bad_lines=False)
# GeoID is CountryName / RegionName
# np.where usage: if A then B else C
npis_df["GeoID"] = np.where(npis_df["RegionName"].isnull(),
npis_df["CountryName"],
npis_df["CountryName"] + ' / ' + npis_df["RegionName"])
# Fill any missing NPIs by assuming they are the same as previous day
for npi_col in NPI_COLUMNS:
npis_df.update(npis_df.groupby(['CountryName', 'RegionName'])[npi_col].ffill().fillna(0))
predictors = ['econ', "LSTM", "LGBM"]
for model in predictors:
if model == "LSTM":
# predictor = tempGeoLSTMPredictor(path_to_model_weights=MODEL_WEIGHTS_FILE, path_to_geos=COUNTRIES_FILE)
predictor = tempGeoLSTMPredictor(path_to_model_weights=MODEL_WEIGHTS_FILE, use_embedding=False)
lstm_predictions_df = get_predictions(predictor, model, npis_df, start_date_dt, end_date_dt, output_file_path)
elif model == "LGBM":
# Load LGBM
predictor = tempGeoLGBMPredictor()
with open(MODEL_FILE, 'rb') as model_file:
predictor.predictor = pickle.load(model_file)
lgbm_predictions_df = get_predictions(predictor, model, npis_df, start_date_dt, end_date_dt, output_file_path)
elif model == 'econ':
# econ prediction try-catch loop
try:
# get econ_predictions
econ_df = econ_predictor(
start_date_str=start_date,
end_date_str=end_date,
DATA_DIR=DATA_DIR,
MODEL_FILE=ECON_MODEL_FILE,
path_to_hist_ips_file=os.path.join(DATA_DIR, "2020-09-30_historical_ip.csv"),
path_to_future_ips_file=path_to_ips_file)
print('econ pred success')
except:
print('econ pred fail')
continue
ensemble_predictions = get_ensemble_pred(ALPHA, lstm_predictions_df, lgbm_predictions_df)
# econ csv try-catch
try:
        ensemble_predictions['QuarterEnd'] = ensemble_predictions['Date'] + pd.tseries.offsets.QuarterEnd()
# READ/WRITE REPORTS AS JSON
import json
import pandas as pd
from pandas.io.json import json_normalize
from swmmio.utils import spatial
from swmmio.graphics import swmm_graphics as sg
def decode_report(rpt_path):
#read report from json into a dict
with open(rpt_path, 'r') as f:
read_rpt = json.loads(f.read())
#parse the geojson
def df_clean(uncleandf):
cleaned_cols = [x.split('.')[-1] for x in uncleandf.columns]
uncleandf.columns = cleaned_cols
clean_df = uncleandf.rename(columns={'coordinates':'coords'}).drop(['type'], axis=1)
clean_df = clean_df.set_index(['Name'])
return clean_df
#parse conduit data into a dataframe
conds_df = json_normalize(read_rpt['conduits']['features'])
conds_df = df_clean(conds_df)
#parse node data into a dataframe
nodes_df = json_normalize(read_rpt['nodes']['features'])
nodes_df = df_clean(nodes_df)
#parse parcel data into a dataframe
pars_df = json_normalize(read_rpt['parcels']['features'])
pars_df = df_clean(pars_df)
rpt_dict = {'conduits':conds_df, 'nodes':nodes_df, 'parcels':pars_df}
rpt_dict.update()
return {'conduits':conds_df, 'nodes':nodes_df, 'parcels':pars_df}
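# Small usage sketch (hypothetical path, kept inside a function so nothing runs
# on import): decode a previously written report and peek at the conduit table.
def _example_decode_report(rpt_path='report.json'):
    rpt = decode_report(rpt_path)
    return rpt['conduits'].head()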
def encode_report(rpt, rpt_path):
rpt_dict = {}
#write parcel json files
parcels = spatial.read_shapefile(sg.config.parcels_shapefile)
parcels = parcels[['PARCELID', 'coords']] #omit 'ADDRESS', 'OWNER1'
flooded = rpt.alt_report.parcel_flooding #proposed flooding condition
flooded = pd.merge(flooded, parcels, right_on='PARCELID', left_index=True)
rpt_dict['parcels'] = spatial.write_geojson(flooded, geomtype='polygon')
#non null delta category parcels
    delta_parcels = rpt.flood_comparison.loc[pd.notnull(rpt.flood_comparison.Category)]
# -*- coding: utf-8 -*-
from typing import List, Union, Mapping, Dict, Tuple, Callable
import yaml
import os, sys, time
from shutil import copyfile, copy
import glob
import numpy as np
import pandas as pd
from ...model.core_model import AbstractCoreModel
from ...scope.scope import Scope
from ...database.database import Database
from ...util.loggers import get_module_logger
from ...util.docstrings import copydoc
from ...exceptions import *
from .parsers import *
_logger = get_module_logger(__name__)
def copy_model_outputs_1(
local_model,
remote_repository,
file
):
copyfile(
os.path.join(local_model, "Outputs", file),
os.path.join(remote_repository, "Outputs", file)
)
def copy_model_outputs_ext(
local_model,
remote_repository,
basename,
ext=('.bin', '.dcb')
):
for x in ext:
copy_model_outputs_1(
local_model,
remote_repository,
os.path.splitext(basename)[0] + x
)
ALL = slice(None)
class FilesCoreModel(AbstractCoreModel):
"""
Setup connections and paths to a file reading core model
Args:
configuration:
The configuration for this
core model. This can be passed as a dict, or as a str
which gives the filename of a YAML file that will be
loaded.
scope:
The exploration scope, as a Scope object or as
a str which gives the filename of a YAML file that will be
loaded.
safe:
Load the configuration YAML file in 'safe' mode.
This can be disabled if the configuration requires
custom Python types or is otherwise not compatible with
safe mode. Loading configuration files with safe mode
off is not secure and should not be done with files from
untrusted sources.
db:
An optional Database to store experiments and results.
name:
A name for this model, given as an alphanumeric string.
The name is required by ema_workbench operations.
If not given, "FilesCoreModel" is used.
"""
def __init__(self,
configuration: Union[str, Mapping],
scope: Union[Scope, str],
safe: bool = True,
db: Database = None,
name: str = 'FilesCoreModel',
):
super().__init__(
configuration=configuration,
scope=scope,
safe=safe,
db=db,
name=name,
metamodel_id=0,
)
self.model_path = self.config['model_path']
"""Path: The directory of the 'live' model instance."""
self.rel_output_path = self.config.get('rel_output_path', 'Outputs')
"""Path: The path to 'live' model outputs, relative to `model_path`."""
self.archive_path = self.config['model_archive']
"""Path: The directory where archived models are stored."""
self.allow_short_circuit = self.config.get('allow_short_circuit', True)
"""Bool: Allow model runs to be skipped if measures already appear in the database."""
self._parsers = []
def add_parser(self, parser):
"""
Add a FileParser to extract performance measures.
Args:
parser (FileParser): The parser to add.
"""
if not isinstance(parser, FileParser):
raise TypeError("parser must be an instance of FileParser")
self._parsers.append(parser)
def model_init(self, policy):
super().model_init(policy)
def run_model(self, scenario, policy):
"""
Runs an experiment through core model.
This method overloads the `run_model` method given in
the EMA Workbench, and provides the correct execution
of the GBNRTC model within that framework.
For each experiment, the core model is called to:
1. set experiment variables
2. run the experiment
3. run post-processors associated with specified
performance measures
4. (optionally) archive model outputs
5. record performance measures to database
Note that this method does *not* return any outcomes.
Outcomes are instead written into self.outcomes_output,
and can be retrieved from there.
Args:
scenario (Scenario): A dict-like object that
has key-value pairs for each uncertainty.
policy (Policy): A dict-like object that
has key-value pairs for each lever.
Raises:
UserWarning: If there are no experiments associated with
this type.
"""
_logger.debug("run_core_model read_experiment_parameters")
experiment_id = self.db.read_experiment_id(self.scope.name, None, scenario, policy)
if experiment_id is not None and self.allow_short_circuit:
# opportunity to short-circuit run by loading pre-computed values.
precomputed = self.db.read_experiment_measures(
self.scope.name,
design=None,
experiment_id=experiment_id,
)
if not precomputed.empty:
self.outcomes_output = dict(precomputed.iloc[0])
return
if experiment_id is None:
experiment_id = self.db.write_experiment_parameters_1(
self.scope.name, 'ad hoc', scenario, policy
)
xl = {}
xl.update(scenario)
xl.update(policy)
m_names = self.scope.get_measure_names()
m_out = pd.DataFrame()
_logger.debug(f"run_core_model setup {experiment_id}")
self.setup(xl)
_logger.debug(f"run_core_model run {experiment_id}")
self.run()
_logger.debug(f"run_core_model post_process {experiment_id}")
self.post_process(xl, m_names)
_logger.debug(f"run_core_model wrap up {experiment_id}")
measures_dictionary = self.load_measures(m_names)
        m_df = pd.DataFrame(measures_dictionary, index=[experiment_id])
"""
ReadData
========
Converts the data from matlab to an HDF5 data structure.
Data is stored in row-major order, where each row is one sample.
"""
import deepdish as dd
import numpy as np
import scipy.io as sio
import glob
import os
from collections import Counter
import pandas as pd
def Load_Rest():
"""Load bands data processed by kasie.
First dimension is electrode (59),
Second dimension is band ('all_spec','theta','alpha', 'smr','beta12-22', 'beta15-22', 'beta22-30', 'trained', 'ratio')
Third dimension is before/after (2)
Returns
-------
DataFrame:
tidy long format
"""
in_path = '/Users/ryszardcetnarski/Desktop/Nencki/Badanie_NFB/Dane/pasma_rest/'
filtering_var = 'Abs_amp_OO'
all_subjects = mat2py_read(in_path = in_path, filtering_var = filtering_var, reshape = False)
#Select electrodes
channels = pd.read_csv('/Users/ryszardcetnarski/Desktop/Nencki/Badanie_NFB/Dane/channels.csv')
ch_idx = channels[channels['Channel'].isin(['F3','F4','P3', 'P4'])].index.tolist()
bands_dict = ['all_spec','theta','alpha', 'smr','beta12-22', 'beta15-22', 'beta22-30', 'trained', 'ratio']
bands_dict.extend(bands_dict)
#Add conditions info
conditions_info = pd.read_csv('/Users/ryszardcetnarski/Desktop/Nencki/Badanie_NFB/Dane/subjects_conditions.csv')
tmp = []
period =['before' for i in range(9)]
period.extend(['after' for i in range(9)])
for name, subject in all_subjects.items():
        # Use F as the flatten argument to ravel along columns rather than rows
bands = subject[ch_idx,:,:].mean(axis =0).flatten('F')
df = pd.DataFrame({'band_values': bands, 'band_names': bands_dict})
df['subject'] =name[11::]
df['period'] = period
# print(name[11::])
#TODO, make the selection nicer, why cant I access the value (the loc returns a pd.Series)
condition = conditions_info.loc[conditions_info['subject'] == name[11::]]['condition'].values[0]
df['condition'] =condition
tmp.append(df)
return pd.concat(tmp, ignore_index = True)
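# Illustrative sketch (never called) of how the tidy long-format frame returned
# above is typically summarised: mean band value per band, condition and period.
# It assumes the local data files that Load_Rest() reads are available.
def _example_rest_summary():
    rest = Load_Rest()
    return (rest.groupby(['band_names', 'condition', 'period'])['band_values']
                .mean()
                .unstack('period'))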
def Load_Rest_Signal(filtering_var):
in_path = '/Users/ryszardcetnarski/Desktop/Nencki/Badanie_NFB/Dane/sygnal_rest/mat_format/'
all_subjects = mat2py_read(in_path = in_path, filtering_var = filtering_var, reshape = False)
    channels = pd.read_csv('/Users/ryszardcetnarski/Desktop/Nencki/Badanie_NFB/Dane/channels.csv')
# -*- coding: UTF-8 -*-
# create_paper_figures.py
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.lines import Line2D
try:
import cantera as ct
except:
raise Exception("I am not seeing cantera installed. Find more information about installing it on https://www.cantera.org/.")
try:
import cantera_tools as ctt
import analysis_methods as am
except:
raise Exception("I am having trouble loading special modules. Make sure you run this script from within the 'code' folder.")
image_path = '../results'
if not os.path.exists(image_path):
os.makedirs(image_path)
# set plot style
sns.set_palette('colorblind',n_colors=4)
sns.set_style('white')
sns.set_context('paper',font_scale=1.5)
sns.set_style('ticks',{'ytick.direction': 'in','xtick.direction': 'in'})
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
# get data for models
# cluster number
model_molecule_to_cluster_number = {'full': {'propane':26, 'ethyl':25,'methyl':24,
'ethene':22,'H-atom':23,'n-propyl':20,
'methane':19,'ethane':18,'ethenyl':17,
'ethyne':16,'propene':14,},
'drg' : {'propane':8, 'ethyl':6,'methyl':5,
'ethene':2,'H-atom':4,'n-propyl':3,
'methane':1,'ethane':7,},
'3rxn' : {'propane':5, 'ethyl':3,'methyl':2,
'ethene':1,
'methane':0,'ethane':4,},
'6rxn' : {'propane':7, 'ethyl':5,'methyl':4,
'ethene':2,'H-atom':3,
'methane':1,'ethane':6,},
}
# initial propane isotopologue concentrations
delta_total= -28
psia = 5.4
edge_labeled_delta = delta_total + psia / 3.
center_labeled_delta = delta_total - 2. * psia / 3.
edge_labeled_fraction = am.getEnrichementFractionFromDelta(edge_labeled_delta)
center_labeled_fraction = am.getEnrichementFractionFromDelta(center_labeled_delta)
fraction_propane = 0.0049 # see supplemental
initialMoleFractions={
"CCC": fraction_propane * (1-center_labeled_fraction) * (1-edge_labeled_fraction)**2,
"CCC-2": fraction_propane * center_labeled_fraction * edge_labeled_fraction**2,
"CCC-3": fraction_propane * edge_labeled_fraction**2*(1-center_labeled_fraction),
"CCC-4": fraction_propane * 2*edge_labeled_fraction * (1-edge_labeled_fraction) * center_labeled_fraction,
"CCC-5": fraction_propane * 2*edge_labeled_fraction *(1-center_labeled_fraction) * (1-edge_labeled_fraction),
"CCC-6": fraction_propane * center_labeled_fraction*(1-edge_labeled_fraction)**2,
"[He]": 1-fraction_propane,
}
main_paths = [('full', '../mechanisms/full_model'),
('drg','../mechanisms/drg_model'),
('3rxn','../mechanisms/three_reaction_model'),
('6rxn','../mechanisms/six_reaction_model')]
################################
# Figures 2 and 3
print('creating figures 2 and 3')
enrichment_results = []
concentrations = {}
ethyl_psie_all = pd.DataFrame()
# run all four simulations
for name, mainPath in main_paths:
cluster_info = pd.read_csv(os.path.join(mainPath, 'isotopomer_cluster_info.csv'),index_col='name')
molecule_to_cluster_number = model_molecule_to_cluster_number[name]
temp = 850+273
times = np.linspace(1e-4,95. / temp,100)
solution = ct.Solution(os.path.join(mainPath,'chem.cti'))
conditions = temp, 2e5, initialMoleFractions
output = ctt.run_simulation(solution, times, conditions=conditions,
condition_type = 'constant-temperature-and-pressure',
output_species = True,
output_reactions = False)
species = output['species']
# find enrichments and total concentration
delta_enrichments = pd.DataFrame(columns=list(molecule_to_cluster_number.keys()), index = times)
concentration_data = pd.DataFrame(columns=list(molecule_to_cluster_number.keys()), index = times)
    ethyl_psie = pd.Series()
from model_lstm.utils import data_management as dm
from model_lstm.config import config
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
lstm_pipeline = dm.load_fitted_pipeline()
def predict_many(X):
df = pd.DataFrame({"text":X})
pred = lstm_pipeline.predict(df)
return pred
def predict_one(X, proba=False):
X = str(X)
    df = pd.DataFrame({"text":[X]})
from __future__ import division
import pandas as pd
import numpy as np
from sklearn import datasets, linear_model
class LRPI:
def __init__(self, normalize=False, n_jobs=1, t_value = 2.13144955):
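        # The default t_value (~2.1314) matches the two-sided 95% Student-t
        # critical value for 15 degrees of freedom; it presumably needs to be
        # adjusted to the training sample size when fitting other data.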
self.normalize = normalize
self.n_jobs = n_jobs
self.LR = linear_model.LinearRegression(normalize=self.normalize, n_jobs= self.n_jobs)
self.t_value = t_value
def fit(self, X_train, y_train):
        self.X_train = pd.DataFrame(X_train.values)
import numpy as np
import pandas as pd
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY
# Compute moving averages across a defined window. Used to compute regimes
# INTERPRETATION: The regime is the short MAV minus the long MAV. A positive value indicates
# a bullish trend, so we want to buy as soon as the regime turns positive.
# Therefore, we want to identify in our data window points where the regime
# transitions from negative to positive (to buy) or from positive to negative (to sell)
def compute_mav_regime(short_interval, long_interval, data):
# Labels for new columns
short_label = "%sd_mav" % (str(short_interval))
long_label = "%sd_mav" % (str(long_interval))
# Compute the moving averages
data[short_label] = np.round(data["Close"].rolling(window = short_interval, center = False).mean(), 2)
data[long_label] = np.round(data["Close"].rolling(window = long_interval, center = False).mean(), 2)
# Filter out the empty filler data (i.e. data for days needed to compute MAV_0
# but which itself does not have a MAV value calculated for it)
data = data.dropna(how = "any")
regime = (data[short_label] - data[long_label] > 0).apply(lambda x: 1 if x==True else -1)
return regime
# regime = data[short_label] - data[long_label] > 0
# regime = regime.apply(lambda x: 1 if x==True else -1)
# return regime
# Compute gain/loss days and use to calculate on-balance volume (OBV)
# INTERPRETATION: OBV correlates volume to the stock's ability to appreciate on a day-to-day basis.
# therefore, if we see that OBV is rising and price is not, it's a good time to buy because the rising
# OBV suggests that price is soon to follow.
# Therefore, we want a way to compare OBV and price (maybe MAV?). The higher OBV/MAV, the stronger
# the buy signal is. As that value decreases we will know to sell
def compute_obv(data):
indicator_col = (data["Close"] - data["Open"] > 0).apply(lambda x: 1 if x==True else -1)
obv_col = (data["Volume"]*indicator_col).cumsum()
return obv_col
# Compute moving average convergence-divergence (MACD) as a difference of exponential moving averages
# and also compute signal line, report both signals (MACD sign, as well as MACD against signal line)
# INTERPRETATION: Same as regime, simply using a different scheme of averages
# TODO - Fix these calculations - the EWM return type does not allow for series subtraction
def compute_macd(data):
exp_26 = np.round(data["Close"].ewm(span = 26).mean(), 2)
exp_12 = np.round(data["Close"].ewm(span = 12).mean(), 2)
macd = (exp_12 - exp_26 > 0).apply(lambda x: 1 if x==True else -1)
macd_signal = (macd - macd.ewm(span = 9).mean() > 0).apply(lambda x: 1 if x==True else -1)
return macd_signal
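# Illustrative, self-contained sketch of the indicator helpers above, wrapped in a
# function so it never runs on import. The synthetic upward-trending OHLC frame
# and the 5/20-day windows are arbitrary assumptions for demonstration only.
def _example_signals():
    rng = pd.date_range("2020-01-01", periods=60, freq="B")
    close = pd.Series(np.linspace(100.0, 130.0, 60), index=rng)
    data = pd.DataFrame({"Open": close.shift(1).fillna(close.iloc[0]),
                         "High": close + 1.0,
                         "Low": close - 1.0,
                         "Close": close,
                         "Volume": 1000000}, index=rng)
    regime = compute_mav_regime(5, 20, data.copy())  # +1 once the 5d MAV sits above the 20d MAV
    obv = compute_obv(data)  # cumulative signed volume
    return regime, obv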
################################################
################################################
# TODO: Insert method to do RSI calculations
# See http://www.investopedia.com/terms/r/rsi.asp
################################################
################################################
def pandas_candlestick_ohlc(dat, stick = "day", otherseries = None):
"""
:param dat: pandas DataFrame object with datetime64 index, and float columns "Open", "High", "Low", and "Close", likely created via DataReader from "yahoo"
:param stick: A string or number indicating the period of time covered by a single candlestick. Valid string inputs include "day", "week", "month", and "year", ("day" default), and any numeric input indicates the number of trading days included in a period
:param otherseries: An iterable that will be coerced into a list, containing the columns of dat that hold other series to be plotted as lines
This will show a Japanese candlestick plot for stock data stored in dat, also plotting other series if passed.
"""
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
dayFormatter = DateFormatter('%d') # e.g., 12
# Create a new DataFrame which includes OHLC data for each period specified by stick input
transdat = dat.loc[:,["Open", "High", "Low", "Close"]]
if (type(stick) == str):
if stick == "day":
plotdat = transdat
stick = 1 # Used for plotting
elif stick in ["week", "month", "year"]:
if stick == "week":
transdat["week"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[1]) # Identify weeks
elif stick == "month":
                transdat["month"] = pd.to_datetime(transdat.index).map(lambda x: x.month)  # Identify months
import requests
import time
import pandas as pd
states_list = ['Alaska', 'Alabama', 'Arkansas', 'Arizona', 'California',
'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia',
'Hawaii', 'Iowa', 'Idaho', 'Illinois', 'Indiana', 'Kansas', 'Kentucky',
'Louisiana', 'Massachusetts', 'Maryland', 'Maine', 'Michigan',
'Minnesota', 'Missouri', 'Mississippi', 'Montana', 'North Carolina',
'North Dakota', 'Nebraska', 'New Hampshire', 'New Jersey', 'New Mexico',
'Nevada', 'New York', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania',
'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas',
'Utah', 'Virginia', 'Vermont', 'Washington', 'Wisconsin',
'West Virginia', 'Wyoming']
def eval_candidates(clist, cnames):
out = {}
curr_rep_order = 1000
curr_dem_order = 1000
curr_extra_order = 1000
for candidate_dict in clist:
candidate = candidate_dict['candidate_key']
if candidate in cnames:
party_id = candidate_dict['party_id']
order = candidate_dict['order']
            # keep the lowest-order (top-billed) candidate for each slot
            if (party_id == 'democrat') and (order < curr_dem_order):
                out['dem'] = candidate
                curr_dem_order = order
            elif (party_id == 'republican') and (order < curr_rep_order):
                out['rep'] = candidate
                curr_rep_order = order
            elif order < curr_extra_order:
                out['extra'] = candidate
                curr_extra_order = order
return out
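# Minimal illustration of eval_candidates with a made-up candidate payload; the
# real input is the election-results JSON handled in build_state_dataframe below.
def _example_eval_candidates():
    clist = [{'candidate_key': 'dem-candidate', 'party_id': 'democrat', 'order': 1},
             {'candidate_key': 'rep-candidate', 'party_id': 'republican', 'order': 2},
             {'candidate_key': 'third-party', 'party_id': 'libertarian', 'order': 3}]
    # expected: {'dem': 'dem-candidate', 'rep': 'rep-candidate', 'extra': 'third-party'}
    return eval_candidates(clist, ['dem-candidate', 'rep-candidate', 'third-party'])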
def build_state_dataframe(state_data):
vote_dfs = {}
#first loop gathers presidential race timestamps (and other data). Timestamps
#are needed before running through loops for other races.
for race in state_data:
race_records = []
state = race['race_id'][:2]
race_type = race['race_id'][3:7]
race_id = race['race_id'][:-11]
if race_type in ('G-P-', 'G-S-', 'S-S-', 'G-H-'):
print(race_id)
past_elections_data = {}
if race_type == 'G-P-':
past_elections_data['votes2016'] = race['votes2016']
past_elections_data['margin2016'] = race['margin2016']
past_elections_data['votes2012'] = race['votes2012']
past_elections_data['margin2012'] = race['margin2012']
candidates = {}
N = len(race['timeseries'])
for i, vote_dict in enumerate(race['timeseries']):
if i == 0: #the first timestamp always seems to be out of order and have zero votes
candidate_names = [*vote_dict['vote_shares'].keys()]
candidates = eval_candidates(race['candidates'], candidate_names)
#get the timestamp and check that it is lower than the next value in the timeseries
curr_timestamp = vote_dict['timestamp']
if i < N - 1:
next_timestamp = race['timeseries'][i + 1]['timestamp']
if pd.Timestamp(curr_timestamp) > pd.Timestamp(next_timestamp):
continue
vote_record = {}
vote_record['timestamp'] = curr_timestamp
vote_record['votes2020'] = vote_dict['votes']
vote_record['vf_dem'] = vote_dict['vote_shares'].get(candidates.get('dem'), 0)
vote_record['vf_rep'] = vote_dict['vote_shares'].get(candidates.get('rep'), 0)
vote_record['vf_extra'] = vote_dict['vote_shares'].get(candidates.get('extra'), 0)
vote_record.update(past_elections_data)
race_records.append(vote_record)
if len(race_records) == 0:
continue
            race_df = pd.DataFrame.from_records(race_records)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
INPUT_DIR = "~/data/query-result/"
OUTPUT_DIR = "~/data/summary-stats/"
RES_LIST = ['cpu', 'mem', 'net_send', 'net_receive', 'disk_read', 'disk_write']
METRIC_LIST = ['_util_per_instance_95p', '_util_per_instance_max', '_util_per_pool', '_util_per_pod']
COST_MAP = {'action-classify': 0.248, 'action-gke': 1.22, 'db': 0.663, 'db-preempt': 0.663, 'druid-preempt': 0.663,
'druid-ssd-preempt': 0.704, 'mixed': 0.248, 'mixed-preempt': 0.248, 'nginx': 0.266, 'ping-gke': 0.69}
PERCENTILES = [.5, .95, .99]
END_TIME = 1514995200917
#END_TIME = 1515028900917
class StatsAggregator(object):
def __init__(self, metric_name):
self.metric_name = metric_name
def get_csv_list(self, res_list, data_dir):
csv_list = {}
for res in res_list:
csv_file = data_dir + res + self.metric_name + ".csv"
csv_list[res] = csv_file
print("Constructed list of csv filess:", csv_list)
return csv_list
def process_csv(self, res, csvfile, outfile):
df = pd.read_csv(csvfile, sep=',')
summary_df = pd.DataFrame()
for nodepool in df['node_pool'].unique():
stats_pool = df.loc[(df['node_pool'] == nodepool) & (df['time'] <= END_TIME)]
summary_df[nodepool] = stats_pool.value.describe(PERCENTILES)
print("Summarizing %d data points for resource %s, node pool %s"
%(len(stats_pool), res, nodepool))
fig_name = res + self.metric_name + "_" + nodepool
stats_pool.loc[:, 'time'] = pd.to_datetime(stats_pool['time'], unit='ms')
stats_pool.plot(x='time', y='value', title=fig_name)
plt.ylabel('Percent (%)')
plt.legend().set_visible(False)
plt.savefig(fig_name+".png")
print("\nWriting summary stats of %s resource for all node pools to %s\n" %(res, outfile))
summary_df.to_csv(outfile)
plt.close('all')
def compute_waste_res(self, res, csv_util, csv_num, outfile):
df_util = pd.read_csv(csv_util, sep=',')
df_num = pd.read_csv(csv_num, sep=',')
waste_list = []
for nodepool in df_util['node_pool'].unique():
util_pool = df_util.loc[(df_util['node_pool'] == nodepool) & (df_util['time'] <= END_TIME)][['time', 'value']]
num_pool = df_num.loc[(df_num['node_pool'] == nodepool) & (df_num['time'] <= END_TIME)][['time', 'value']]
num_avg = num_pool.value.mean()
print("Average provisioned instances for nodepool %s: %.1f" %(nodepool, num_avg))
util_pool['time'] = (util_pool['time'] / 1000).astype('int64')
num_pool['time'] = (num_pool['time'] / 1000).astype('int64')
df_joined = util_pool.set_index('time').join(num_pool.set_index('time'), how='inner',
lsuffix='_util', rsuffix='_num')
waste_num = ( (1 - df_joined.value_util/100) * df_joined.value_num ).mean()
waste_cost = waste_num * COST_MAP[nodepool]
waste_list.append({'node pool': nodepool, 'live instances': num_avg,
'unused instances': waste_num, 'wasted cost': waste_cost})
print("Average hourly cost wasted for %s resource in nodepool %s: %.2f" %(res, nodepool, waste_cost))
waste_df = pd.DataFrame(waste_list)
waste_df.to_csv(outfile)
def compute_waste_mixed(self, res_list, csv_list, csv_num, outfile):
if len(res_list) > 2:
print("Cannot combine more than two resources!")
return
df_util1 = pd.read_csv(csv_list[res_list[0]], sep=',')
df_util2 = pd.read_csv(csv_list[res_list[1]], sep=',')
df_num = pd.read_csv(csv_num, sep=',')
"""
Code for transforming EIA data that pertains to more than one EIA Form.
This module helps normalize EIA datasets and infers additional connections
between EIA entities (i.e. utilities, plants, units, generators...). This
includes:
- compiling a master list of plant, utility, boiler, and generator IDs that
appear in any of the EIA 860 or 923 tables.
- inferring more complete boiler-generator associations.
- differentiating between static and time varying attributes associated with
the EIA entities, storing the static fields with the entity table, and the
variable fields in an annual table.
The boiler generator association inference (bga) takes the associations
provided by the EIA 860, and expands on them using several methods which can be
found in :func:`pudl.transform.eia._boiler_generator_assn`.
"""
import importlib.resources
import logging
import networkx as nx
import numpy as np
import pandas as pd
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
def _occurrence_consistency(entity_id, compiled_df, col,
cols_to_consit, strictness=.7):
"""
Find the occurrence of plants & the consistency of records.
We need to determine how consistent a reported value is in the records
across all of the years or tables that the value is being reported, so we
want to compile two key numbers: the number of occurrences of the entity and
the number of occurrences of each reported record for each entity. With that
information we can determine if the reported records are strict enough.
Args:
entity_id (list): a list of the id(s) for the entity. Ex: for a plant
entity, the entity_id is ['plant_id_eia']. For a generator entity,
the entity_id is ['plant_id_eia', 'generator_id'].
compiled_df (pandas.DataFrame): a dataframe with every instance of the
column we are trying to harvest.
col (str): the column name of the column we are trying to harvest.
cols_to_consit (list): a list of the columns to determine consistency.
This either the [entity_id] or the [entity_id, 'report_date'],
depending on whether the entity is static or annual.
strictness (float): How consistent do you want the column records to
be? The default setting is .7 (so 70% of the records need to be
consistent in order to accept harvesting the record).
Returns:
pandas.DataFrame: this dataframe will be a transformed version of
compiled_df with NaNs removed and with new columns with information
about the consistency of the reported values.
"""
# select only the columns you want and drop the NaNs
# we want to drop the NaNs because unreported values should not count toward consistency
col_df = compiled_df[entity_id + ['report_date', col, 'table']].copy()
if pc.column_dtypes["eia"][col] == pd.StringDtype():
nan_str_mask = (col_df[col] == "nan").fillna(False)
col_df.loc[nan_str_mask, col] = pd.NA
col_df = col_df.dropna()
if len(col_df) == 0:
col_df[f'{col}_consistent'] = pd.NA
col_df[f'{col}_consistent_rate'] = pd.NA
col_df['entity_occurences'] = pd.NA
col_df = col_df.drop(columns=['table'])
return col_df
# determine how many times each entity occurs in col_df
occur = (
col_df
.groupby(by=cols_to_consit, observed=True)
.agg({'table': "count"})
.reset_index()
.rename(columns={'table': 'entity_occurences'})
)
# add the occurrences into the main dataframe
col_df = col_df.merge(occur, on=cols_to_consit)
# determine how many instances of each of the records in col exist
consist_df = (
col_df
.groupby(by=cols_to_consit + [col], observed=True)
.agg({'table': 'count'})
.reset_index()
.rename(columns={'table': 'record_occurences'})
)
# now in col_df we have the # of times an entity occurred across the tables
# and we are going to merge in the # of times each value occurred for each
# entity record. When we merge the consistency in with the occurrences, we
# can determine if the records are more than 70% consistent across the
# occurrences of the entities.
col_df = col_df.merge(consist_df, how='outer').drop(columns=['table'])
# change all of the fully consistent records to True
col_df[f'{col}_consistent_rate'] = (
col_df['record_occurences'] / col_df['entity_occurences'])
col_df[f'{col}_consistent'] = (
col_df[f'{col}_consistent_rate'] > strictness)
col_df = col_df.sort_values(f'{col}_consistent_rate')
return col_df
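# Illustrative sketch (added for clarity; not part of the original module): the core
# occurrence/record counting performed above, reduced to a toy frame and stripped of
# the pc dtype handling. The helper name is hypothetical.
def _example_consistency_ratio():
    toy = pd.DataFrame({
        "plant_id_eia": [1, 1, 1],
        "state": ["TX", "TX", "OK"],
        "table": ["generators_eia860", "plants_eia860", "boiler_fuel_eia923"],
    })
    # how many times the entity occurs at all, and how often each value is reported
    occur = toy.groupby("plant_id_eia")["table"].count()
    records = toy.groupby(["plant_id_eia", "state"])["table"].count()
    # "TX" is reported in 2 of 3 records (~0.67), below the 0.7 default strictness,
    # so this toy entity would not be harvested.
    return records.div(occur, level="plant_id_eia")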
def _lat_long(dirty_df, clean_df, entity_id_df, entity_id,
col, cols_to_consit, round_to=2):
"""Harvests more complete lat/long in special cases.
For all of the entities where there is not a consistent enough reported
record for latitude and longitude, this function reduces the precision of
the reported lat/long by rounding down the reported records in order to get
more complete set of consistent records.
Args:
dirty_df (pandas.DataFrame): a dataframe with entity records that have
inconsistently reported lat/long.
clean_df (pandas.DataFrame): a dataframe with entity records that have
consistently reported lat/long.
entity_id_df (pandas.DataFrame): a dataframe with a complete set of
possible entity ids
entity_id (list): a list of the id(s) for the entity. Ex: for a plant
entity, the entity_id is ['plant_id_eia']. For a generator entity,
the entity_id is ['plant_id_eia', 'generator_id'].
col (string): the column name of the column we are trying to harvest.
cols_to_consit (list): a list of the columns to determine consistency.
This either the [entity_id] or the [entity_id, 'report_date'],
depending on whether the entity is static or annual.
round_to (integer): This is the number of decimals places we want to
preserve while rounding down.
Returns:
pandas.DataFrame: a dataframe with all of the entity ids. some will
have harvested records from the clean_df. some will have harvested
records that were found after rounding. some will have NaNs if no
consistently reported records were found.
"""
# grab the dirty plant records, round and get a new consistency
ll_df = dirty_df.round(decimals={col: round_to})
logger.debug(f"Dirty {col} records: {len(ll_df)}")
ll_df['table'] = 'special_case'
ll_df = _occurrence_consistency(entity_id, ll_df, col, cols_to_consit)
# grab the clean plants
ll_clean_df = clean_df.dropna()
# find the new clean plant records by selecting the True consistent records
ll_df = ll_df[ll_df[f'{col}_consistent']].drop_duplicates(subset=entity_id)
logger.debug(f"Clean {col} records: {len(ll_df)}")
# add the newly cleaned records
ll_clean_df = ll_clean_df.append(ll_df,)
# merge onto the plants df w/ all plant ids
ll_clean_df = entity_id_df.merge(ll_clean_df, how='outer')
return ll_clean_df
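# Illustrative sketch (added for clarity; not part of the original module): the
# precision reduction used by _lat_long, shown on a toy frame of slightly
# inconsistent latitude reports. The helper name is hypothetical.
def _example_latitude_rounding():
    toy = pd.DataFrame({"plant_id_eia": [1, 1], "latitude": [40.7128, 40.7131]})
    # after rounding to two decimals both records agree on 40.71, so the value
    # becomes consistent enough to harvest even though the raw reports differ
    return toy.round(decimals={"latitude": 2})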
def _add_timezone(plants_entity):
"""Adds plant IANA timezones from lat / lon.
Args:
plants_entity (pandas.DataFrame): Plant entity table, including columns
named "latitude", "longitude", and optionally "state"
Returns:
:class:`pandas.DataFrame`: A DataFrame containing the same table, with a
"timezone" column added. Timezone may be missing if lat / lon is
missing or invalid.
"""
plants_entity["timezone"] = plants_entity.apply(
lambda row: pudl.helpers.find_timezone(
lng=row["longitude"], lat=row["latitude"],
state=row["state"], strict=False
),
axis=1,
)
return plants_entity
def _add_additional_epacems_plants(plants_entity):
"""Adds the info for plants that have IDs in the CEMS data but not EIA data.
The columns loaded are plant_id_eia, plant_name, state, latitude, and
longitude. Note that a side effect will be resetting the index on
plants_entity, if one exists. If that's a problem, modify the code below.
Note that some of these plants disappear from the CEMS before the
earliest EIA data PUDL processes, so if PUDL eventually ingests older
data, these may be redundant.
The set of additional plants is every plant that appears in the hourly CEMS
data (1995-2017) that never appears in the EIA 923 or 860 data (2009-2017
for EIA 923, 2011-2017 for EIA 860).
Args:
plants_entity (pandas.DataFrame) The plant entity table that will be
appended to
Returns:
pandas.DataFrame: The same plants_entity table, with the addition of
some missing EPA CEMS plants.
"""
# Add the plant IDs that are missing and update the values for the others
# The data we're reading is a CSV in pudl/metadata/
# SQL would call this whole process an upsert
# See also: https://github.com/pandas-dev/pandas/issues/22812
cems_df = pd.read_csv(
importlib.resources.open_text(
'pudl.package_data.epa.cems',
'plant_info_for_additional_cems_plants.csv'),
index_col=["plant_id_eia"],
usecols=["plant_id_eia", "plant_name_eia",
"state", "latitude", "longitude"],
)
plants_entity = plants_entity.set_index("plant_id_eia")
cems_unmatched = cems_df.loc[~cems_df.index.isin(plants_entity.index)]
# update will replace columns and index values that add rows or affect
# non-matching columns. It also requires an index, so we set and reset the
# index as necessary. Also, it only works in-place, so we can't chain.
plants_entity.update(cems_df, overwrite=True)
return plants_entity.append(cems_unmatched).reset_index()
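# Hedged sketch (illustration only; not part of the original module): the
# update-then-append "upsert" pattern used above, shown on a pair of toy frames
# keyed by plant_id_eia. The helper name is hypothetical.
def _example_upsert():
    existing = pd.DataFrame(
        {"plant_id_eia": [1], "state": ["TX"]}).set_index("plant_id_eia")
    new = pd.DataFrame(
        {"plant_id_eia": [1, 2], "state": ["OK", "CA"]}).set_index("plant_id_eia")
    unmatched = new.loc[~new.index.isin(existing.index)]
    existing.update(new, overwrite=True)  # row 1 is overwritten in place with "OK"
    return existing.append(unmatched).reset_index()  # row 2 ("CA") is appended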
def _compile_all_entity_records(entity, eia_transformed_dfs):
"""
Compile all of the entity records from each table they appear in.
Comb through each of the dataframes in the eia_transformed_dfs dictionary
to pull out every instance of the entity id.
"""
# we know these columns must be in the dfs
entity_id = pc.entities[entity][0]
static_cols = pc.entities[entity][1]
annual_cols = pc.entities[entity][2]
base_cols = pc.entities[entity][0] + ['report_date']
# empty list for dfs to be added to for each table below
dfs = []
# for each df in the dict of transformed dfs
for table_name, transformed_df in eia_transformed_dfs.items():
# inside of main() we are going to be adding items into
# eia_transformed_dfs with the name 'annual'. We don't want to harvest
# from our newly harvested tables.
if 'annual' not in table_name:
# if the df contains the desired columns, then grab those columns
if set(base_cols).issubset(transformed_df.columns):
logger.debug(f" {table_name}...")
# create a copy of the df to muck with
df = transformed_df.copy()
# we know these columns must be in the dfs
cols = []
# check whether the columns are in the specific table
for column in static_cols + annual_cols:
if column in df.columns:
cols.append(column)
df = df[(base_cols + cols)]
df = df.dropna(subset=entity_id)
# add a column with the table name so we know its origin
df['table'] = table_name
dfs.append(df)
# remove the static columns, with an exception
if ((entity in ('generators', 'plants'))
and (table_name in ('ownership_eia860',
'utilities_eia860',
'generators_eia860'))):
cols.remove('utility_id_eia')
transformed_df = transformed_df.drop(columns=cols)
eia_transformed_dfs[table_name] = transformed_df
# add those records to the compilation
compiled_df = pd.concat(dfs, axis=0, ignore_index=True, sort=True)
# strip the month and day from the date so we can have annual records
compiled_df['report_date'] = compiled_df['report_date'].dt.year
# convert the year back into a date_time object
year = compiled_df['report_date']
compiled_df['report_date'] = pd.to_datetime({'year': year,
'month': 1,
'day': 1})
logger.debug(' Casting harvested IDs to correct data types')
# most columns become objects (ack!), so assign types
compiled_df = compiled_df.astype(pc.entities[entity][3])
return compiled_df
def _manage_strictness(col, eia860_ytd):
"""
Manage the strictness level for each column.
Args:
col (str): name of column
eia860_ytd (boolean): if True, the etl run is attempting to include
year-to-date updates from EIA 860M.
"""
strictness_default = .7
# the longitude column is very different in the ytd 860M data (it appears
# to have an additional decimal point). Because it shows up in the generator
# table but is a plant-level data point, it mucks up the consistency check
strictness_cols = {
'plant_name_eia': 0,
'utility_name_eia': 0,
'longitude': 0 if eia860_ytd else .7
}
return strictness_cols.get(col, strictness_default)
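# Quick illustration (not part of the original module; hypothetical helper name):
# plant/utility names are always exempt from the consistency requirement, longitude
# only when 860M year-to-date data is included; other columns keep the 0.7 default.
def _example_strictness():
    return (
        _manage_strictness("longitude", eia860_ytd=True),     # 0
        _manage_strictness("capacity_mw", eia860_ytd=False),  # 0.7
    )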
def harvesting(entity, # noqa: C901
eia_transformed_dfs,
entities_dfs,
eia860_ytd=False,
debug=False):
"""Compiles consistent records for various entities.
For each entity (plants, generators, boilers, utilities), this function
finds all the harvestable columns from any table that they show up
in. It then determines how consistent the records are and keeps the values
that are mostly consistent. It compiles those consistent records into
one normalized table.
There are a few things to note here. The first is that we are not expecting
the outcome here to be perfect! We choose to pull the most consistent
record as reported across all the EIA tables and years, but we also
required a "strictness" level of 70% (this is currently a hard coded
argument for _occurrence_consistency). That means at least 70% of the
records must be the same for us to use that value. So if values for an
entity haven't been reported 70% consistently, then it will show up as a
null value. We built in the ability to apply a different method to
special-case columns, but the only ones we added were latitude and
longitude because they are by far the dirtiest.
We have determined which columns should be considered "static" or "annual".
These can be found in constants in the `entities` dictionary. Static means
that the value should not change over time. Annual means there is annual
variability. This distinction was made in part by testing the consistency
and in part by an understanding of how the entities and columns relate in
the real world.
Args:
entity (str): plants, generators, boilers, utilities
eia_transformed_dfs (dict): A dictionary of tbl names (keys) and
transformed dfs (values)
entities_dfs(dict): A dictionary of entity table names (keys) and
entity dfs (values)
eia860_ytd (boolean): if True, the etl run is attempting to include
year-to-date updates from EIA 860M.
debug (bool): If True, this function will also return an additional
dictionary of dataframes that includes the pre-deduplicated
compiled records with the number of occurrences of the entity and
the record to see consistency of reported values.
Returns:
tuple: A tuple containing:
eia_transformed_dfs (dict): dictionary of tbl names (keys) and
transformed dfs (values)
entity_dfs (dict): dictionary of entity table names (keys) and
entity dfs (values)
Raises:
AssertionError: If the consistency of any record value is <90%.
Todo:
* Return to role of debug.
* Determine what to do with null records
* Determine how to treat mostly static records
"""
# we know these columns must be in the dfs
entity_id = pc.entities[entity][0]
static_cols = pc.entities[entity][1]
annual_cols = pc.entities[entity][2]
logger.debug(" compiling plants for entity tables from:")
compiled_df = _compile_all_entity_records(entity, eia_transformed_dfs)
# compile annual ids
annual_id_df = compiled_df[
['report_date'] + entity_id].copy().drop_duplicates()
annual_id_df.sort_values(['report_date'] + entity_id,
inplace=True, ascending=False)
# create the annual and entity dfs
entity_id_df = annual_id_df.drop(
['report_date'], axis=1).drop_duplicates(subset=entity_id)
entity_df = entity_id_df.copy()
annual_df = annual_id_df.copy()
special_case_cols = {'latitude': [_lat_long, 1],
'longitude': [_lat_long, 1]}
consistency = pd.DataFrame(columns=['column', 'consistent_ratio',
'wrongos', 'total'])
col_dfs = {}
# determine how many times each of the columns occur
for col in static_cols + annual_cols:
if col in annual_cols:
cols_to_consit = entity_id + ['report_date']
if col in static_cols:
cols_to_consit = entity_id
strictness = _manage_strictness(col, eia860_ytd)
col_df = _occurrence_consistency(
entity_id, compiled_df, col, cols_to_consit, strictness=strictness)
# pull the correct values out of the df and merge w/ the plant ids
col_correct_df = (
col_df[col_df[f'{col}_consistent']].
drop_duplicates(subset=(cols_to_consit + [f'{col}_consistent']))
)
# we need this to be an empty df w/ columns because we are going to use it in the merges below
if col_correct_df.empty:
col_correct_df = pd.DataFrame(columns=col_df.columns)
if col in static_cols:
clean_df = entity_id_df.merge(
col_correct_df, on=entity_id, how='left')
clean_df = clean_df[entity_id + [col]]
entity_df = entity_df.merge(clean_df, on=entity_id)
if col in annual_cols:
clean_df = annual_id_df.merge(
col_correct_df, on=(entity_id + ['report_date']), how='left')
clean_df = clean_df[entity_id + ['report_date', col]]
annual_df = annual_df.merge(
clean_df, on=(entity_id + ['report_date']))
# get the still dirty records by using the cleaned ids w/null values
# we need the plants that have no 'correct' value so
# we can't just use the col_df records when the consistency is not True
dirty_df = col_df.merge(
clean_df[clean_df[col].isnull()][entity_id])
if col in special_case_cols.keys():
clean_df = special_case_cols[col][0](
dirty_df, clean_df, entity_id_df, entity_id, col,
cols_to_consit, special_case_cols[col][1])
if debug:
col_dfs[col] = col_df
# this next section is used to print and test whether the harvested
# records are consistent enough
total = len(col_df.drop_duplicates(subset=cols_to_consit))
# if the total is 0, the ratio will error, so assign null values.
if total == 0:
ratio = np.NaN
wrongos = np.NaN
logger.debug(f" Zero records found for {col}")
if total > 0:
ratio = (
len(col_df[(col_df[f'{col}_consistent'])].
drop_duplicates(subset=cols_to_consit)) / total
)
wrongos = (1 - ratio) * total
logger.debug(
f" Ratio: {ratio:.3} "
f"Wrongos: {wrongos:.5} "
f"Total: {total} {col}"
)
if ratio < 0.9:
if debug:
logger.error(f'{col} has low consistency: {ratio:.3}.')
else:
raise AssertionError(
f'Harvesting of {col} is too inconsistent at {ratio:.3}.')
# add to a small df to be used in order to print out the ratio of
# consistent records
consistency = consistency.append({'column': col,
'consistent_ratio': ratio,
'wrongos': wrongos,
'total': total}, ignore_index=True)
mcs = consistency['consistent_ratio'].mean()
logger.info(
f"Average consistency of static {entity} values is {mcs:.2%}")
if entity == "plants":
entity_df = _add_additional_epacems_plants(entity_df)
entity_df = _add_timezone(entity_df)
eia_transformed_dfs[f'{entity}_annual_eia'] = annual_df
entities_dfs[f'{entity}_entity_eia'] = entity_df
if debug:
return entities_dfs, eia_transformed_dfs, col_dfs
return (entities_dfs, eia_transformed_dfs)
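# Hedged usage sketch (not part of the original module; hypothetical helper name):
# how harvesting() is typically chained for a single entity. The transformed-dfs
# dictionary is assumed to come from the upstream EIA 860/923 transform step.
def _example_harvest_plants(eia_transformed_dfs):
    entities_dfs = {}
    entities_dfs, eia_transformed_dfs = harvesting(
        "plants", eia_transformed_dfs, entities_dfs, eia860_ytd=False, debug=False
    )
    # the static fields land in the entity table, the time varying ones in the
    # annual table, keyed exactly as written at the end of harvesting() above
    return (entities_dfs["plants_entity_eia"],
            eia_transformed_dfs["plants_annual_eia"])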
def _boiler_generator_assn(
eia_transformed_dfs,
eia923_years=pc.working_partitions['eia923']['years'],
eia860_years=pc.working_partitions['eia860']['years'],
debug=False
):
"""
Creates a set of more complete boiler generator associations.
Creates a unique unit_id_pudl for each collection of boilers and generators
within a plant that have ever been associated with each other, based on
the boiler generator associations reported in EIA860. Unfortunately, this
information is not complete for years before 2014, as the gas turbine
portion of combined cycle power plants in those earlier years was not
reporting its fuel consumption, or its existence as part of the plants.
For years 2014 and on, EIA860 contains a unit_id_eia value, allowing the
combined cycle plant components to be associated with each other. For many
plants not listed in the reported boiler generator associations, it is
nonetheless possible to associate boilers and generators on a one-to-one
basis, as they use identical strings to describe the units.
In the end, between the reported BGA table, the string matching, and the
unit_id_eia values, it's possible to create a nearly complete mapping of
the generation units, at least for 2014 and later.
Args:
eia_transformed_dfs (dict): a dictionary of post-transform dataframes
representing the EIA database tables.
eia923_years (list-like): a list of the years of EIA 923 data that
should be used to infer the boiler-generator associations. By
default it is all the working years of data.
eia860_years (list-like): a list of the years of EIA 860 data that
should be used to infer the boiler-generator associations. By
default it is all the working years of data.
debug (bool): If True, include columns in the returned dataframe
indicating by what method the individual boiler generator
associations were inferred.
Returns:
eia_transformed_dfs (dict): Returns the same dictionary of dataframes
that was passed in, and adds a new dataframe to it representing
the boiler-generator associations as records containing
plant_id_eia, generator_id, boiler_id, and unit_id_pudl
Raises:
AssertionError: If the boiler - generator association graphs are not
bi-partite, meaning generators only connect to boilers, and boilers
only connect to generators.
AssertionError: If all the boilers do not end up with the same unit_id
each year.
AssertionError: If all the generators do not end up with the same
unit_id each year.
"""
# if you're not ingesting both 860 and 923, the bga is not compilable
if not (eia860_years and eia923_years):
return pd.DataFrame()
# compile and scrub all the parts
logger.info("Inferring complete EIA boiler-generator associations.")
bga_eia860 = (
eia_transformed_dfs['boiler_generator_assn_eia860'].copy()
.pipe(_restrict_years, eia923_years, eia860_years)
.astype({
'generator_id': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
"plant_id_eia": int,
})
)
# grab the generation_eia923 table, group annually, generate a new tag
gen_eia923 = eia_transformed_dfs['generation_eia923'].copy()
gen_eia923 = gen_eia923.set_index(pd.DatetimeIndex(gen_eia923.report_date))
gen_eia923 = (
_restrict_years(gen_eia923, eia923_years, eia860_years)
.astype({
'generator_id': pd.StringDtype(),
"plant_id_eia": int,
})
.groupby([pd.Grouper(freq='AS'), 'plant_id_eia', 'generator_id'])
.net_generation_mwh.sum()
.reset_index()
.assign(missing_from_923=False)
)
# compile all of the generators
gens_eia860 = (
eia_transformed_dfs['generators_eia860'].copy()
.pipe(_restrict_years, eia923_years, eia860_years)
.astype({
'generator_id': pd.StringDtype(),
"plant_id_eia": int,
})
)
gens = pd.merge(
gen_eia923,
gens_eia860,
on=['plant_id_eia', 'report_date', 'generator_id'],
how='outer'
)
gens = (
gens[[
'plant_id_eia',
'report_date',
'generator_id',
'unit_id_eia',
'net_generation_mwh',
'missing_from_923'
]]
.drop_duplicates()
.astype({
'generator_id': pd.StringDtype(),
"plant_id_eia": int,
})
)
# create the beginning of a bga compilation w/ the generators as the
# background
bga_compiled_1 = pd.merge(
gens,
bga_eia860,
on=['plant_id_eia', 'generator_id', 'report_date'],
how='outer'
)
# Create a set of bga's that are linked, directly from bga8
bga_assn = bga_compiled_1[bga_compiled_1['boiler_id'].notnull()].copy()
bga_assn.loc[:, 'bga_source'] = 'eia860_org'
# Create a set of bga's that were not linked directly through bga8
bga_unassn = bga_compiled_1[bga_compiled_1['boiler_id'].isnull()].copy()
bga_unassn = bga_unassn.drop(['boiler_id'], axis=1)
# Side note: there are only 6 generators that appear in bga8 that don't
# appear in gens9 or gens8 (must uncomment the og_tag creation above)
# bga_compiled_1[bga_compiled_1['og_tag'].isnull()]
bf_eia923 = (
eia_transformed_dfs['boiler_fuel_eia923'].copy()
.pipe(_restrict_years, eia923_years, eia860_years)
.astype({
"boiler_id": | pd.StringDtype() | pandas.StringDtype |
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import pickle
import nltk
import string
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from scipy import interp
import matplotlib as mpl
import numpy as np
import seaborn as sns
df = pd.read_excel('/Users/dyuwan/Downloads/song_dataset_lyrics.xlsx')
import yfinance as yf
from datetime import date, datetime, timedelta
from pandas.tseries.offsets import BDay
import pandas_market_calendars as mcal
import pandas as pd
import numpy as np
import requests
from lxml import html
import ssl
def get_df_list(sym):
def get_page(url):
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Cache-Control': 'max-age=0',
'Pragma': 'no-cache',
'Referrer': 'https://google.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/77.0.3865.120 Safari/537.36 '
}
return requests.get(url, headers=headers)
def parse_rows(table_rows):
parsed_rows = []
for table_row in table_rows: # iterate all rows in the table of <div class="D(tbr)>
parsed_row = []
el = table_row.xpath("./div") # tree.xpath("./div"), create a list of div rows
none_count = 0
for rs in el: # iterate over all spans in each row
try:
(text,) = rs.xpath('.//span/text()[1]')
parsed_row.append(text)
except ValueError:
parsed_row.append(np.NaN)
none_count += 1
if none_count < 4: # none_count = 5 dates, then no values for the row
parsed_rows.append(parsed_row)
return pd.DataFrame(parsed_rows)
def clean_data(df):
df = df.set_index(0)
df = df.transpose()
cols = list(df.columns)
cols[0] = 'Date' # rename Breakdown col to Date
df = df.set_axis(cols, axis='columns', inplace=False) # update cols names
numeric_columns = list(df.columns)[1::] # all col except date col
try:
for column_name in numeric_columns:
df[column_name] = df[column_name].str.replace(',', '') # Remove the thousands separator
df[column_name] = df[column_name].astype(np.float64) # Convert the column to float
except AttributeError:
pass
return df
def scrape_table(url):
page = get_page(url)
tree = html.fromstring(page.content) # page in tree structure, XPath or CSSSelect;
table_rows = tree.xpath(
"//div[contains(@class, 'D(tbr)')]")
assert len(table_rows) > 0 # ensure rows are found or nothing
df = parse_rows(table_rows)
df = clean_data(df)
return df
def get_df(symbol):
# fiscal year financial
stock = yf.Ticker(symbol)
dff = stock.financials
dfb = stock.balancesheet
dfc = stock.cashflow
df0 = pd.concat([dff, dfb, dfc], axis=0, sort=False) / 10 ** 6
df0 = df0.transpose()
# earning and eps in financial
url_is = "https://finance.yahoo.com/quote/{0}/financials?p={0}".format(symbol)
dfe = scrape_table(url_is)
sharelist = list(dfe['Basic'].dropna())
idx0 = df0.index
df1 = df0.reset_index(drop=True)
df1['Basic'] = pd.Series(sharelist)
df1 = df1.set_index(idx0).sort_index(axis=0)
# calendar year financial
ly = dff.columns.values[0].astype('M8[D]').astype('O')
lyy1 = int(ly.strftime('%Y%m%d'))
lyy2 = ly.year * 10 ** 4 + 12 * 10 ** 2 + 20
if lyy1 > lyy2:
ly1 = ly.year
dly1 = df1.iloc[-1]
dly2 = df1.iloc[-2]
dly3 = df1.iloc[-3]
else:
ly1 = ly.year - 1
lys = str(ly.year) + '01' + '01'
lys = date(year=int(lys[0:4]), month=int(lys[4:6]), day=int(lys[6:8]))
perc = (ly - lys).days + 1
perc = perc / 365
dly1 = df1.iloc[-1] * (1 - perc) + df1.iloc[-2] * perc
dly2 = df1.iloc[-2] * (1 - perc) + df1.iloc[-3] * perc
dly3 = df1.iloc[-3] * (1 - perc) + df1.iloc[0] * perc
ly2 = ly1 - 1
ly3 = ly1 - 2
lyidx = [ly3, ly2, ly1]
dcy = pd.concat([dly3, dly2, dly1], axis=1)
dcy.columns = lyidx
dcy = dcy.transpose()
df1 = pd.concat([df1, dcy], axis=0, sort=False)
# TTM financial
lastfiscalquarter = stock.quarterly_balancesheet.columns.values[0].astype('M8[D]').astype('O')
dffttm = stock.quarterly_financials.sum(axis=1)
dfbttm = stock.quarterly_balancesheet[lastfiscalquarter]
dfcttm = stock.quarterly_cashflow.sum(axis=1)
dfttm0 = pd.concat([dffttm, dfbttm, dfcttm])
dfttm1 = dfttm0.replace({0: np.nan}) / 10 ** 6
dfttm1 = pd.DataFrame(dfttm1, columns=['TTM'])
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Bio import pairwise2
from scipy import interp
from scipy.stats import linregress
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import thoipapy
import thoipapy.validation.bocurve
from thoipapy.utils import make_sure_path_exists
def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets):
"""
Parameters
----------
s
df_set
logging
namedict
predictors
THOIPA_predictor_name
Returns
-------
"""
logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK")
ROC_AUC_df = pd.DataFrame()
PR_AUC_df = pd.DataFrame()
mean_o_minus_r_by_sample_df = pd.DataFrame()
AUBOC_from_complete_data_ser = pd.Series()
AUC_AUBOC_name_list = []
linechar_name_list = []
AUBOC_list = []
df_o_minus_r_mean_df = pd.DataFrame()
roc_auc_mean_list = []
roc_auc_std_list = []
# indiv_validation_dir: Path = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation"
indiv_validation_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/indiv_validation_data.xlsx"
thoipapy.utils.make_sure_path_exists(indiv_validation_data_xlsx, isfile=True)
# if not os.path.isdir(os.path.dirname(BOAUC10_barchart_pdf)):
# os.makedirs(os.path.dirname(BOAUC10_barchart_pdf))
for predictor in predictors:
BO_data_df = pd.DataFrame()
xv_dict = {}
ROC_AUC_dict = {}
PR_AUC_dict = {}
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
auc_pkl = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/roc_auc/{predictor}/ROC_AUC_data.pkl"
BO_curve_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_Curve_data.csv"
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/bocurve_data.xlsx"
BO_linechart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_linechart.png"
BO_barchart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/AUBOC_barchart.png"
df_o_minus_r_mean_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/df_o_minus_r_mean.csv"
thoipapy.utils.make_sure_path_exists(auc_pkl, isfile=True)
thoipapy.utils.make_sure_path_exists(BO_curve_data_csv, isfile=True)
for i in df_set.index:
sys.stdout.write(".")
sys.stdout.flush()
acc = df_set.loc[i, "acc"]
database = df_set.loc[i, "database"]
acc_db = acc + "-" + database
merged_data_csv_path: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/predictions/merged/{database}.{acc}.merged.csv"
merged_data_df = pd.read_csv(merged_data_csv_path, engine="python")
# invert some predictors so that a high number always indicates a predicted interface residue
merged_data_df["LIPS_L*E"] = -1 * merged_data_df["LIPS_L*E"]
merged_data_df["PREDDIMER"] = -1 * merged_data_df["PREDDIMER"]
merged_data_df["TMDOCK"] = -1 * merged_data_df["TMDOCK"]
if database == "crystal" or database == "NMR":
# invert the interface score of structural data so that a high number indicates an interface residue
merged_data_df["interface_score"] = -1 * merged_data_df["interface_score"]
# toggle whether to use boolean (interface) or continuous data (interface_score). Here we want continuous data
experiment_col = "interface_score"
BO_single_prot_df = thoipapy.validation.bocurve.calc_best_overlap_from_selected_column_in_df(acc_db, merged_data_df, experiment_col, predictor)
if BO_data_df.empty:
BO_data_df = BO_single_prot_df
else:
BO_data_df = pd.concat([BO_data_df, BO_single_prot_df], axis=1, join="outer")
df_for_roc = merged_data_df.dropna(subset=[experiment_col, predictor])
fpr, tpr, thresholds = roc_curve(df_for_roc.interface, df_for_roc[predictor], drop_intermediate=False)
precision, recall, thresholds_PRC = precision_recall_curve(df_for_roc.interface, df_for_roc[predictor])
pr_auc = auc(recall, precision)
PR_AUC_dict[acc_db] = pr_auc
roc_auc = auc(fpr, tpr)
ROC_AUC_dict[acc_db] = roc_auc
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
xv_dict[acc_db] = {"fpr": fpr, "tpr": tpr, "roc_auc": roc_auc, "precision": precision, "recall": recall, "pr_auc": pr_auc}
# save dict as pickle
with open(auc_pkl, "wb") as f:
pickle.dump(xv_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
BO_data_df.to_csv(BO_curve_data_csv)
# parse BO data csv
# print out mean values
thoipapy.validation.bocurve.parse_BO_data_csv_to_excel(BO_curve_data_csv, bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging, predictor)
# ROC AUC validation
ROC_AUC_ser = pd.Series(ROC_AUC_dict)
ROC_AUC_ser.sort_values(inplace=True, ascending=False)
roc_auc_mean_list.append(ROC_AUC_ser.mean())
roc_auc_std_list.append(ROC_AUC_ser.std())
# precision-recall AUC validation
PR_AUC_ser = pd.Series(PR_AUC_dict)
PR_AUC_ser.sort_values(inplace=True, ascending=False)
# BO curve AUBOC validation
mean_o_minus_r_by_sample_ser = pd.read_excel(bocurve_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0)["mean_o_minus_r_by_sample"].copy()
df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
df_o_minus_r.columns = pd.Series(df_o_minus_r.columns).replace(namedict)
df_o_minus_r_mean = df_o_minus_r.T.mean()
# df_o_minus_r_mean_df= pd.concat([df_o_minus_r_mean_df,df_o_minus_r_mean],axis=1, join="outer")
df_o_minus_r_mean_df[predictor] = df_o_minus_r_mean
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df_o_minus_r_mean.iloc[:s["n_residues_AUBOC_validation"]]
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
AUBOC_list.append(AUBOC)
AUBOC_from_complete_data_ser[predictor] = AUBOC
linechar_name_list.append(predictor)
AUC_AUBOC_name_list.append("{}-AUC".format(predictor))
AUC_AUBOC_name_list.append("{}-AUBOC".format(predictor))
thoipapy.figs.create_BOcurve_files.save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict,
logging, ROC_AUC_ser)
ROC_AUC_df[predictor] = ROC_AUC_ser
PR_AUC_df[predictor] = PR_AUC_ser
mean_o_minus_r_by_sample_df[predictor] = mean_o_minus_r_by_sample_ser
means_df = pd.DataFrame()
means_df["ROC_AUC"] = ROC_AUC_df.mean()
means_df["PR_AUC"] = PR_AUC_df.mean()
means_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.mean()
means_df["AUBOC_from_complete_data"] = AUBOC_from_complete_data_ser
""" means_df looks like this:
ROC_AUC PR_AUC AUBOC
THOIPA_5_LOO 0.629557 0.505823 1.202355
PREDDIMER 0.566582 0.416761 0.515193
TMDOCK 0.598387 0.421462 0.666720
"""
std_df = pd.DataFrame()
std_df["ROC_AUC"] = ROC_AUC_df.std()
std_df["PR_AUC"] = PR_AUC_df.std()
std_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.std()
SEM_df = pd.DataFrame()
SEM_df["ROC_AUC"] = ROC_AUC_df.std() / np.sqrt(ROC_AUC_df.shape[0])
SEM_df["PR_AUC"] = PR_AUC_df.std() / np.sqrt(PR_AUC_df.shape[0])
SEM_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.std() / np.sqrt(mean_o_minus_r_by_sample_df.shape[0])
with pd.ExcelWriter(indiv_validation_data_xlsx) as writer:
means_df.to_excel(writer, sheet_name="means")
std_df.to_excel(writer, sheet_name="std")
SEM_df.to_excel(writer, sheet_name="SEM")
ROC_AUC_df.to_excel(writer, sheet_name="ROC_AUC_indiv")
PR_AUC_df.to_excel(writer, sheet_name="PR_AUC_indiv")
# mean_o_minus_r_by_sample_df.to_excel(writer, sheet_name="BO_AUBOC_indiv")
mean_o_minus_r_by_sample_df.to_excel(writer, sheet_name="mean_o_minus_r_by_sample")
df_o_minus_r_mean_df.to_excel(writer, sheet_name="BO_o_minus_r")
if "TMDOCK" in PR_AUC_df.columns and "PREDDIMER" in PR_AUC_df.columns:
df_THOIPA_vs_others = pd.DataFrame()
df_THOIPA_vs_others["THOIPA_better_TMDOCK"] = PR_AUC_df[THOIPA_predictor_name] > PR_AUC_df.TMDOCK
df_THOIPA_vs_others["THOIPA_better_PREDDIMER"] = PR_AUC_df[THOIPA_predictor_name] > PR_AUC_df.PREDDIMER
df_THOIPA_vs_others["THOIPA_better_both"] = df_THOIPA_vs_others[["THOIPA_better_TMDOCK", "THOIPA_better_PREDDIMER"]].sum(axis=1) == 2
n_THOIPA_better_both = df_THOIPA_vs_others["THOIPA_better_both"].sum()
logging.info("THOIPA has higher precision-recall AUC than both TMDOCK and PREDDIMER for {}/{} proteins in {}".format(n_THOIPA_better_both, PR_AUC_df.shape[0], s["setname"]))
df_THOIPA_vs_others.to_excel(writer, sheet_name="THOIPA_vs_others")
# #sys.stdout.write(roc_auc_mean_list)
# AUBOC_mean_df = pd.DataFrame.from_records([AUBOC_list], columns=linechar_name_list)
# #AUBOC_mean_df.to_csv(mean_AUBOC_file)
# AUBOC_mean_df.to_excel(writer, sheet_name="AUBOC_mean")
# df_o_minus_r_mean_df.columns = linechar_name_list
# #ROC_AUC_df.columns = AUC_AUBOC_name_list
# ROC_AUC_df.index.name = "acc_db"
# #ROC_AUC_df.to_csv(AUC_AUBOC_file)
# THOIPA_best_set = s["THOIPA_best_set"]
#
# # AUC for barchart, 4 predictors, mean AUC of all proteins in dataset
# #logging.info("_finder : {}".format(mean_roc_auc_barchart_csv))
# AUC_4pred_mean_all_indiv_prot_df = pd.DataFrame(index = linechar_name_list)
# #AUC_4pred_mean_all_indiv_prot_df = pd.DataFrame([roc_auc_mean_list, roc_auc_std_list], index = linechar_name_list, columns=["mean", "std"])
# AUC_4pred_mean_all_indiv_prot_df["roc_auc_mean"] = roc_auc_mean_list
# AUC_4pred_mean_all_indiv_prot_df["roc_auc_std"] = roc_auc_std_list
# AUC_4pred_mean_all_indiv_prot_df["n"] = df_set.shape[0]
# AUC_4pred_mean_all_indiv_prot_df["SEM"] = AUC_4pred_mean_all_indiv_prot_df.roc_auc_std / AUC_4pred_mean_all_indiv_prot_df["n"].apply(np.sqrt)
# #AUC_4pred_mean_all_indiv_prot_df.to_csv(mean_roc_auc_barchart_csv)
# AUC_4pred_mean_all_indiv_prot_df.to_excel(writer, sheet_name="ROC_AUC_mean_indiv")
def create_indiv_validation_figs(s, logging, namedict, predictors, THOIPA_predictor_name, subsets):
perc_interf_vs_PR_cutoff_linechart_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/perc_interf_vs_PR_cutoff_linechart_data.csv"
indiv_validation_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/indiv_validation_data.xlsx"
indiv_validation_figs_dir: Path = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/figs"
make_sure_path_exists(indiv_validation_figs_dir)
indiv_ROC_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_ROC_AUC_barchart.png"
indiv_PR_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_PR_AUC_barchart.png"
AUBOC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_AUBOC_barchart.png"
BOCURVE_linechart_png: Union[Path, str] = indiv_validation_figs_dir / "BOcurve_linechart.png"
mean_ROC_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "mean_ROC_AUC_barchart.png"
mean_PR_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "mean_PR_AUC_barchart.png"
ROC_AUC_vs_PR_AUC_scatter_png: Union[Path, str] = indiv_validation_figs_dir / "ROC_AUC_vs_PR_AUC_scatter.png"
perc_interf_vs_PR_cutoff_linechart_png: Union[Path, str] = indiv_validation_figs_dir / "perc_interf_vs_PR_cutoff_linechart.png"
ROC_AUC_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="ROC_AUC_indiv", index_col=0)
PR_AUC_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="PR_AUC_indiv", index_col=0)
mean_o_minus_r_by_sample_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0)
df_o_minus_r_mean_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="BO_o_minus_r", index_col=0)
create_ROC_AUC_barchart(ROC_AUC_df, indiv_ROC_AUC_barchart_png, namedict, THOIPA_predictor_name)
create_PR_AUC_barchart(PR_AUC_df, indiv_PR_AUC_barchart_png, namedict, THOIPA_predictor_name)
create_barchart_o_minus_r_bocurve_each_TMD_indiv(mean_o_minus_r_by_sample_df, AUBOC_barchart_png, namedict, THOIPA_predictor_name)
create_BOcurve_linechart(df_o_minus_r_mean_df, BOCURVE_linechart_png)
create_mean_ROC_AUC_barchart(ROC_AUC_df, mean_ROC_AUC_barchart_png)
create_mean_PR_AUC_barchart(PR_AUC_df, mean_PR_AUC_barchart_png)
create_scatter_ROC_AUC_vs_PR_AUC(s, predictors, ROC_AUC_vs_PR_AUC_scatter_png)
# for the complete list of proteins
create_linechart_perc_interf_vs_PR_cutoff(s, predictors, perc_interf_vs_PR_cutoff_linechart_png, perc_interf_vs_PR_cutoff_linechart_data_csv)
# for each subset(e.g. ETRA) separately. Saved in "by_subset" subfolder
for subset in subsets:
perc_interf_vs_PR_cutoff_linechart_single_database_png: Union[Path, str] = indiv_validation_figs_dir / f"by_subset/{subset}_perc_interf_vs_PR_cutoff_linechart.png"
perc_interf_vs_PR_cutoff_linechart_single_database_data_csv: Union[Path, str] = Path(
s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/{subset}_perc_interf_vs_PR_cutoff_linechart_data.csv"
create_linechart_perc_interf_vs_PR_cutoff(s, predictors, perc_interf_vs_PR_cutoff_linechart_single_database_png, perc_interf_vs_PR_cutoff_linechart_single_database_data_csv, subset=subset)
logging.info("finished run_indiv_validation_THOIPA_PREDDIMER_TMDOCK")
def precision_recall_curve_rises_above_threshold(precision, recall, threshold=0.5):
"""Determines whether the PR curve rises above a threshold P and R value at any point.
BOTH precision and recall need to simultaneously be above the threshold at some stage, for
the result to be True.
Validation inspired by <NAME>., & <NAME>. (2010). Blind predictions of protein interfaces
by docking calculations in CAPRI. Proteins: Structure, Function and Bioinformatics, 78(15), 3085–3095.
https://doi.org/10.1002/prot.22850
Parameters
----------
precision : np.ndarray
array of precision values in precision-recall curve
recall : np.ndarray
array of recall values in precision-recall curve
threshold : float
minimum value that has to be attained by BOTH precision and recall
Returns
-------
PR_ rises_above_threshold : boolean
whether the thresholds were exceeded at any stage in the curve
Usage
-----
precision, recall, thresholds_PRC = precision_recall_curve(true_interface, prediction)
PR_rises_above_threshold = precision_recall_curve_rises_above_threshold(precision, recall)
"""
df_pr = pd.DataFrame()
df_pr["precision"] = precision
df_pr["recall"] = recall
df_pr = df_pr > threshold
df_pr = df_pr.all(axis=1)
if True in df_pr.tolist():
PR_rises_above_threshold = True
else:
PR_rises_above_threshold = False
return PR_rises_above_threshold
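# Hedged toy check (added for illustration; hypothetical helper name): a curve that
# reaches precision and recall of 0.6 at the same point passes the default 0.5
# threshold, while one capped at 0.4 does not.
def _example_pr_rises_above_threshold():
    passing = precision_recall_curve_rises_above_threshold(
        np.array([1.0, 0.6, 0.3]), np.array([0.2, 0.6, 1.0]), threshold=0.5
    )
    failing = precision_recall_curve_rises_above_threshold(
        np.array([0.4, 0.3]), np.array([0.4, 1.0]), threshold=0.5
    )
    return passing, failing  # (True, False)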
def create_scatter_ROC_AUC_vs_PR_AUC(s, predictors, ROC_AUC_vs_PR_AUC_scatter_png):
fig, ax = plt.subplots(figsize=(8, 8))
for predictor in predictors:
auc_pkl = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/roc_auc/{predictor}/ROC_AUC_data.pkl"
with open(auc_pkl, "rb") as f:
xv_dict = pickle.load(f)
roc_auc_list = []
pr_auc_list = []
for key in xv_dict:
roc_auc_list.append(xv_dict[key]["roc_auc"])
pr_auc_list.append(xv_dict[key]["pr_auc"])
ax.scatter(roc_auc_list, pr_auc_list, alpha=0.75, label=predictor)
lr = linregress(roc_auc_list, pr_auc_list)
yfit = np.array(roc_auc_list) * lr[0] + lr[1]
ax.plot(roc_auc_list, yfit)
ax.legend()
ax.set_xlabel("ROC_AUC")
ax.set_ylabel("PR_AUC")
fig.savefig(ROC_AUC_vs_PR_AUC_scatter_png)
def create_mean_ROC_AUC_barchart(ROC_AUC_df, mean_ROC_AUC_barchart_png):
# plt.close("all")
# # plt.rcParams.update({'font.size': 2})
# mean_roc_auc_name = [linechar_name_list[0],'\n{}\n'.format(linechar_name_list[1]),linechar_name_list[2],'\n{}\n'.format(linechar_name_list[3])]
# figsize = np.array([3.42, 3.42]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
# fig, ax = plt.subplots(figsize=figsize)
# # replace the protein names
# x = y_pos = np.arange(len(linechar_name_list))
# plt.bar(x, roc_auc_mean_list, width=0.6, color = 'rgbk', alpha=0.5)
# plt.xticks(y_pos, mean_roc_auc_name,fontsize=6)
# plt.ylabel("performance value\n(mean auc)")
#
# #ax.set_ylabel("performance value\n(auc)")
# ax.set_ylim(0, 0.70)
# ax.legend() # (["sample size = 5", "sample size = 10"])
#
# fig.tight_layout()
# ax.grid(False)
# fig.savefig(mean_ROC_AUC_barchart_png, dpi=240)
figsize = np.array([3.42, 3.42]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
ROC_AUC_df.mean().plot(kind="bar", ax=ax)
ax.set_ylabel("performance value\n(mean auc)")
# ax.set_ylabel("performance value\n(auc)")
ax.set_ylim(0, 0.70)
ax.legend() # (["sample size = 5", "sample size = 10"])
fig.tight_layout()
ax.grid(False)
fig.savefig(mean_ROC_AUC_barchart_png, dpi=240)
def create_mean_PR_AUC_barchart(PR_AUC_df, mean_PR_AUC_barchart_png):
figsize = np.array([3.42, 3.42]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
PR_AUC_df.mean().plot(kind="bar", ax=ax)
ax.set_ylabel("performance value\n(mean auc)")
# ax.set_ylabel("performance value\n(auc)")
ax.set_ylim(0, 0.70)
ax.legend() # (["sample size = 5", "sample size = 10"])
ax.set_facecolor('white')
fig.tight_layout()
ax.grid(False)
fig.savefig(mean_PR_AUC_barchart_png, dpi=240)
def create_BOcurve_linechart(df_o_minus_r_mean_df, BOCURVE_linechart_png):
# BO_linechart_png
plt.close("all")
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
color_list = 'rgbk'
fig, ax = plt.subplots(figsize=figsize)
# for i,column in enumerate(df_o_minus_r_mean_df.columns):
# # df_o_minus_r_mean_df.plot(ax=ax, color="#0f7d9b", linestyle="-", label="prediction (AUBOC : {:0.2f}".format(AUBOC))
# label_name = "{}(AUBOC:{:.2f})".format(linechar_name_list[i] ,AUBOC_list[i])
# df_o_minus_r_mean_df[column].plot(ax=ax, linestyle="-",label=label_name, color = color_list[i])
df_o_minus_r_mean_df.plot(ax=ax)
ax.plot([1, 10], [0, 0], color="#0f7d9b", linestyle="--", label="random", alpha=0.5)
ax.grid(False)
ax.set_ylabel("fraction of correctly predicted residues\n(observed - random)", color="#0f7d9b")
ax.set_xlabel("number of TMD residues\n(sample size)")
ax.tick_params('y', colors="#0f7d9b")
ax.spines['left'].set_color("#0f7d9b")
ax.legend()
fig.tight_layout()
fig.savefig(BOCURVE_linechart_png, dpi=140)
def create_ROC_AUC_barchart(ROC_AUC_df, ROC_AUC_barchart_png, namedict, THOIPA_predictor_name):
# plt.rcParams.update({'font.size': 8})
# figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
figsize = np.array([9, 6]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
# replace the protein names based on the naming file
ROC_AUC_df.index = pd.Series(ROC_AUC_df.index).replace(namedict)
ROC_AUC_df.sort_values([THOIPA_predictor_name], ascending=False, inplace=True)
ROC_AUC_df.plot(kind="bar", ax=ax, alpha=0.7)
ax.set_ylabel("performance (ROC AUC)")
ax.legend(loc="upper right") # (["sample size = 5", "sample size = 10"])
fig.tight_layout()
ax.grid(False)
fig.savefig(ROC_AUC_barchart_png, dpi=240)
# fig.savefig(ROC_AUC_barchart_png[:-4] + ".pdf")
def create_PR_AUC_barchart(PR_AUC_df, indiv_PR_AUC_barchart_png, namedict, THOIPA_predictor_name):
# replace the protein names based on the naming file
PR_AUC_df.index = pd.Series(PR_AUC_df.index).replace(namedict)
# # plt.rcParams.update({'font.size': 8})
# # figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
# figsize = np.array([9, 6]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
# fig, ax = plt.subplots(figsize=figsize)
# PR_AUC_df.sort_values([THOIPA_predictor_name], ascending=False, inplace=True)
# PR_AUC_df.plot(kind="bar", ax=ax, alpha=0.7)
# ax.set_ylabel("performance (precision recall AUC)")
# ax.legend(loc="upper right") # (["sample size = 5", "sample size = 10"])
# fig.tight_layout()
# ax.grid(False)
# fig.savefig(indiv_PR_AUC_barchart_png, dpi=240)
Width = 0.2
Fontsize = 14
for i in PR_AUC_df.index:
if "X-ray" in i:
PR_AUC_df.loc[i, "sort_list"] = 2
continue
if "NMR" in i:
PR_AUC_df.loc[i, "sort_list"] = 1
continue
if "ETRA" in i:
PR_AUC_df.loc[i, "sort_list"] = 0
continue
else:
raise ValueError("Neither X-ray, NMR, nor ETRA found in protein description within dataframe index.")
PR_AUC_df.sort_values(['sort_list', THOIPA_predictor_name], ascending=[True, False], inplace=True)
PR_AUC_df.rename(columns=lambda x: x.replace(THOIPA_predictor_name, 'THOIPA'), inplace=True)
PR_AUC_df.rename(columns=lambda x: x.replace('LIPS_surface_ranked', 'LIPS'), inplace=True)
PR_AUC_df.drop(['LIPS', "sort_list"], axis=1, inplace=True)
color_list = ["#E95D12", "#0065BD", "k", "#B4B3B3"]
fig, ax = plt.subplots(figsize=(13, 7))
x = list(range(0, len(PR_AUC_df), 1))
m = 0
for i, color in zip(PR_AUC_df.columns, color_list):
y = PR_AUC_df[i].tolist()
plt.bar(x, y, width=Width, label=i, color=color)
x = [i + Width for i in x]
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, ncol=31, loc=2, fontsize=Fontsize, frameon=True, bbox_to_anchor=(-0.0090, 1.15), facecolor='white', edgecolor="k")
plt.rcParams['xtick.labelsize'] = Fontsize
plt.rcParams['ytick.labelsize'] = Fontsize
ax.tick_params(axis='y', labelsize=Fontsize, pad=2)
ax.tick_params(axis='x', labelsize=Fontsize, pad=2)
x_label = list(range(0, len(PR_AUC_df), 1))
x_label = [i + Width for i in x_label]
ax.set_ylabel('performance (precision-recall AUC)', fontsize=Fontsize, labelpad=1)
ax.set_xticks(x_label)
ax.set_xticklabels(PR_AUC_df.index.tolist(), fontsize=Fontsize, rotation=90)
plt.ylim(0, 1.1)
fig.tight_layout()
plt.xlim(-0.5, 54)
plt.savefig(indiv_PR_AUC_barchart_png, bbox_extra_artists=(lgd,), bbox_inches='tight', dpi=300)
plt.savefig(str(indiv_PR_AUC_barchart_png)[:-4] + ".pdf", bbox_extra_artists=(lgd,), bbox_inches='tight', dpi=300)
plt.close()
def create_barchart_o_minus_r_bocurve_each_TMD_indiv(mean_o_minus_r_by_sample_df, AUBOC_barchart_png, namedict, THOIPA_predictor_name):
# AUC_AUBOC_df = AUBOC_df.T.sort_values(by=[THOIPA_predictor_name], ascending=False)
# plt.rcParams.update({'font.size': 8})
# figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
figsize = np.array([9, 6]) # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
## replace the protein names
# AUBOC_df.index = pd.Series(AUBOC_df.index).replace(namedict)
# replace old "crystal" references with "X-ray"
mean_o_minus_r_by_sample_df.index = pd.Series(mean_o_minus_r_by_sample_df.index).replace("crystal", "X-ray")
mean_o_minus_r_by_sample_df.sort_values([THOIPA_predictor_name], ascending=False, inplace=True)
mean_o_minus_r_by_sample_df.plot(kind="bar", ax=ax, alpha=0.7)
ax.set_ylabel("performance (AUBOC))")
ax.legend() # (["sample size = 5", "sample size = 10"])
fig.tight_layout()
ax.grid(False)
fig.savefig(AUBOC_barchart_png, dpi=240)
# fig.savefig(AUBOC_barchart_png[:-4] + ".pdf")
def merge_4_files_alignment_method_deprecated(acc, full_seq, train_data_file, THOIPA_prediction_file, PREDDIMER_prediction_file, TMDOCK_prediction_file, merged_data_xlsx_path, columns_kept_in_combined_file):
"""Deprecated method to merge predictions, with lots of checks to ensure sequences are the same.
Parameters
----------
acc
full_seq
train_data_file
THOIPA_prediction_file
PREDDIMER_prediction_file
TMDOCK_prediction_file
merged_data_xlsx_path
columns_kept_in_combined_file
Returns
-------
"""
all_files_exist = True
for path in [train_data_file, THOIPA_prediction_file, PREDDIMER_prediction_file, TMDOCK_prediction_file]:
if not os.path.isfile(path):
all_files_exist = False
sys.stdout.write("{} does not exist".format(path))
break
if not all_files_exist:
sys.stdout.write("\n{} skipped. Input file missing.".format(acc))
sys.stdout.flush()
# skip this protein
return None, None, None
df_train = pd.read_csv(train_data_file, index_col=0)
df_train.index = range(1, df_train.shape[0] + 1)
df_thoipa = pd.read_csv(THOIPA_prediction_file)
df_preddimer = pd.read_csv(PREDDIMER_prediction_file, index_col=0)
df_tmdock = pd.read_csv(TMDOCK_prediction_file, index_col=0)
if "closedist" in df_preddimer.columns:
df_preddimer.rename(columns={"closedist": "PREDDIMER"}, inplace=True)
if "closedist" in df_tmdock.columns:
df_tmdock.rename(columns={"closedist": "TMDOCK"}, inplace=True)
df_train_seq = df_train["residue_name"].str.cat()
df_thoipa_seq = df_thoipa["residue_name"].str.cat()
df_preddimer_seq = df_preddimer["residue_name"].str.cat()
df_tmdock_seq = df_tmdock["residue_name"].str.cat()
seqlist = [df_train_seq, df_thoipa_seq, df_preddimer_seq, df_tmdock_seq]
for seq in seqlist:
if seq not in full_seq:
sys.stdout.write("Sequence in residue_name column of dataframe is not found in the original df_set sequence."
"acc : {}\nTMD_seq : {}\nfull seq in df_set : {}\nall TM sequences in list : {}".format(acc, seq, full_seq, seqlist))
return None, None, None
df_train = thoipapy.utils.add_res_num_full_seq_to_df(acc, df_train, df_train_seq, full_seq)
df_thoipa = thoipapy.utils.add_res_num_full_seq_to_df(acc, df_thoipa, df_thoipa_seq, full_seq)
df_preddimer = thoipapy.utils.add_res_num_full_seq_to_df(acc, df_preddimer, df_preddimer_seq, full_seq)
df_tmdock = thoipapy.utils.add_res_num_full_seq_to_df(acc, df_tmdock, df_tmdock_seq, full_seq)
dfs = pd.DataFrame()
d = {}
d["df_train"] = df_train_seq
d["df_thoipa"] = df_thoipa_seq
d["df_preddimer"] = df_preddimer_seq
d["df_tmdock"] = df_tmdock_seq
dfs["seq"] = | pd.Series(d) | pandas.Series |
import re
import pandas as pd
def match_variables(variables, pattern, columns, value_name="capacity"):
"""Search through dictionary of variables, extracting data frame of values.
:param dict variables: dictionary, keys are strings, values are dictionaries
with key "Value" and float value.
:param str pattern: regex pattern to use to search for matching variables.
:param iterable columns: names to extract from match to data frame columns.
:param str value_name: define the column name of values, defaults to "capacity".
:return: (*pandas.DataFrame*) -- data frame of matching variables.
"""
prog = re.compile(pattern)
df = pd.DataFrame(
[
{
**{name: m.group(name) for name in columns},
value_name: list(variables[m.group(0)].values())[0],
}
for m in [
prog.match(v) for v in variables.keys() if prog.match(v) is not None
]
]
)
return df
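# Hedged usage sketch (added for illustration; not part of the original module):
# extracting capacity-like variables keyed as "BuildGen[<plant>,<year>]" from a toy
# solution dictionary. The variable name and indices here are assumptions.
def _example_match_variables():
    toy = {
        "BuildGen[g1,2030]": {"Value": 100.0},
        "BuildGen[g2,2030]": {"Value": 50.0},
        "DispatchGen[g1,1]": {"Value": 10.0},  # ignored: does not match the pattern
    }
    return match_variables(
        toy,
        pattern=r"BuildGen\[(?P<plant>.*),(?P<year>.*)\]",
        columns=["plant", "year"],
    )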
def make_plant_indices(plant_ids, storage_candidates=None):
"""Make the indices for existing and hypothetical generators for input to Switch.
:param iterable plant_ids: plant IDs.
:param set storage_candidates: buses at which to enable storage expansion.
:return: (*dict*) -- keys are {'existing', 'expansion', 'storage'}, values are
lists of indices (str) for each sub-type.
"""
indices = {"existing": [f"g{p}" for p in plant_ids]}
indices["expansion"] = [f"{e}i" for e in indices["existing"]]
if storage_candidates is None:
indices["storage"] = []
else:
indices["storage"] = [f"s{b}i" for b in sorted(storage_candidates)]
return indices
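# Example (illustrative; hypothetical helper name): existing plants 3 and 7 with a
# storage expansion candidate at bus 5 yield
# {"existing": ["g3", "g7"], "expansion": ["g3i", "g7i"], "storage": ["s5i"]}.
def _example_make_plant_indices():
    return make_plant_indices([3, 7], storage_candidates={5})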
def load_timestamps_to_timepoints(filename):
"""Read timestamps_to_timepoints csv file from the given file path using pandas.
:param str filename: path to the timestamps_to_timepoints csv.
:return: (*pandas.DataFrame*) -- a dataframe indexed by timestamps, with a
column giving the timepoint assigned to each timestamp.
"""
timestamps_to_timepoints = pd.read_csv(filename, index_col=0)
return timestamps_to_timepoints
def make_branch_indices(branch_ids, dc=False):
"""Make the indices of existing branch for input to Switch.
:param iterable branch_ids: list of original branch ids.
:param bool dc: branch_ids are for dclines or not, defaults to False.
:return: (*list*) -- list of branch indices for input to Switch
"""
return [f"{i}dc" if dc else f"{i}ac" for i in branch_ids]
def parse_timepoints(var_dict, variables, timestamps_to_timepoints, value_name):
"""Takes the solution variable dictionary contained in the output pickle
file of `switch` and un-maps the temporal reduction timepoints back into
a timestamp-indexed dataframe.
:param dict var_dict: a flat dictionary where the keys are a string
containing both variable names and variable indexes and the values
are a dictionary. This dictionary has a single key ("Value" for primal
variables, or "Dual" for dual variables) and the value is the data point for
that combination of variable name and indexes.
:param list variables: a list of timeseries variable strings to parse out
:param pandas.DataFrame timestamps_to_timepoints: data frame indexed by
timestamps with a column of timepoints for each timestamp.
:param str value_name: the name to assign to the variable values column.
:return (*dict*): a dictionary where the keys are the variable name strings
and the values are pandas dataframes. The index of these dataframes
are the timestamps contained in the timestamps_to_timepoints data frame.
The columns of these dataframes are a comma-separated string of the
parameters embedded in the key of the original input dictionary with
the timepoint removed and preserved order otherwise. If no variables
are found in the input dictionary, the value will be None.
"""
# Initialize final dictionary to return
parsed_data = {}
for key in variables:
# Parse out a dataframe for each variable
df = match_variables(
variables=var_dict,
pattern=(key + r"\[(?P<params>.*),(?P<timepoint>.*?)\]"),
columns=["params", "timepoint"],
value_name=value_name,
)
# If no such variable was found, set dataframe to None
if df.empty:
parsed_data[key] = None
continue
# Unstack such that the timepoints are the indices
df = df.set_index(["timepoint", "params"]).unstack()
# Cast timepoints as ints to match timestamps_to_timepoints
df.index = df.index.astype(int)
# Expand rows to all timestamps
df = df.loc[timestamps_to_timepoints["timepoint"]].set_index(
timestamps_to_timepoints.index
)
parsed_data[key] = df
return parsed_data
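# Illustrative sketch (hypothetical variable names; a real call would use the pickled Switch solution):
#   var_dict = {"DispatchGen[g1,1]": {"Value": 2.0}, "DispatchGen[g1,2]": {"Value": 3.0}}
#   t2tp = pd.DataFrame({"timepoint": [1, 1, 2, 2]},
#                       index=pd.date_range("2016-01-01", periods=4, freq="H"))
#   parse_timepoints(var_dict, ["DispatchGen"], t2tp, "capacity")
# returns {"DispatchGen": <4-row frame indexed by the timestamps, values repeated per timepoint>}.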
def recover_plant_indices(switch_plant_ids, ref_last_original_plant_id=None):
"""Recover the plant indices from Switch outputs.
:param iterable switch_plant_ids: Switch plant indices.
:param int ref_last_original_plant_id: last original plant id reference,
the final last original plant id will be the larger one between the last
original plant id from switch_plant_ids and the reference if provided.
:return: (*tuple*) -- a pair of pandas.Series objects for plant and storage
respectively. The plant series is indexed by original plant IDs (with new plants
added), values are Switch plant indices. The storage series is indexed by
the new plant IDs for storage, values are Switch plant indices.
"""
plant_ids, storage_ids = dict(), dict()
for ind in switch_plant_ids[::-1]:
if ind[-1] != "i":
last_original_plant_id = int(ind[1:])
break
if ref_last_original_plant_id is not None:
last_original_plant_id = max(last_original_plant_id, ref_last_original_plant_id)
cnt = 0
for ind in switch_plant_ids:
if ind[-1] != "i":
plant_ids[int(ind[1:])] = ind
else:
cnt += 1
if ind[0] == "s":
storage_ids[last_original_plant_id + cnt] = ind
else:
plant_ids[last_original_plant_id + cnt] = ind
return pd.Series(plant_ids), pd.Series(storage_ids, dtype=str)
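# For example: recover_plant_indices(["g1", "g2", "g2i", "s3i"]) returns a plant series
# {1: "g1", 2: "g2", 3: "g2i"} and a storage series {4: "s3i"}; new IDs continue after the
# last original plant id (here 2).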
def split_plant_existing_expansion(plant_ids):
"""Recover the existing plant indices from Switch outputs which contain both
existing and hypothetical indices.
:param iterable plant_ids: Switch plant indices.
:return: (*tuple*) --
a list of Switch IDs for existing plants.
a list of Switch IDs for expansion plants.
"""
existing_plant_ids = [p for p in plant_ids if re.search(r"g\d+$", p) is not None]
expansion_plant_ids = [p for p in plant_ids if re.search(r"g\d+i", p) is not None]
return existing_plant_ids, expansion_plant_ids
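# For example: split_plant_existing_expansion(["g1", "g2", "g2i"]) returns (["g1", "g2"], ["g2i"]).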
def recover_storage_buses(switch_storage_ids):
"""Recover the storage bus location from Switch storage indices.
:param iterable switch_storage_ids: Switch storage indices.
:return: (*list*) -- list of integers of storage bus IDs.
"""
return [int(re.search(r"s(\d+)i", s).group(1)) for s in switch_storage_ids]
def recover_branch_indices(switch_branch_ids):
"""Recover the branch indices from Switch outputs.
:param iterable switch_branch_ids: Switch branch indices.
:return: (*tuple*) -- a pair of pandas.Series objects for acline and dcline
respectively, which are indexed by original branch ids and values are
corresponding Switch branch indices.
"""
ac_branch_ids = dict()
dc_branch_ids = dict()
for ind in switch_branch_ids:
if ind[-2:] == "ac":
ac_branch_ids[int(ind[:-2])] = ind
else:
dc_branch_ids[int(ind[:-2])] = ind
return pd.Series(ac_branch_ids), | pd.Series(dc_branch_ids, dtype=str) | pandas.Series |
#Project: GBS Tool
# Author: Dr. <NAME>, <EMAIL>, denamics GmbH
# Date: January 16, 2018
# License: MIT License (see LICENSE file of this package for more information)
# Contains the main flow of the optimization as it is to be called from the GBSController.
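# Hypothetical usage sketch (in practice the call is made by the GBSController):
#   op = optimize('MyProject', [])
#   op.doOptimization()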
import os
import time
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
from Analyzer.DataRetrievers.getBasecase import getBasecase
from Analyzer.DataRetrievers.getDataSubsets import getDataSubsets
from Analyzer.DataRetrievers.readNCFile import readNCFile
from Analyzer.PerformanceAnalyzers.getFuelUse import getFuelUse
from Analyzer.PerformanceAnalyzers.getPrimaryREContribution import getPrimaryREContribution
from Model.Operational.generateRuns import generateRuns
from Model.Operational.runSimulation import runSimulation
from Optimizer.FitnessFunctions.getFitness import getFitness
from Optimizer.OptimizationBoundaryCalculators.getOptimizationBoundaries import getOptimizationBoundaries
# DEV imports
class optimize:
'''
Main class of the optimization engine. This creates an object with all major pieces of optimization contained.
The constructor sets up much of the initial steps, such as selecting shorter data streams to work with, estimating
the interval for energy storage power and energy capacity to search for a solution in (if this bounding is desired).
The 'doOptimization' method executes the actual optimization routine (based on the selected configuration). The
results from each iteration are written to a separate folder for later analysis.
'''
def __init__(self, projectName, inputArgs):
'''
Constructor: does all necessary setup for the optimization itself:
        1) it loads the configuration file optimizerConfig<ProjectName>.xml and distributes required information to
pertinent variables.
2) it reads the base case for reference values and to retrieve immutable data streams (firmLoadP)
3) it determines the boundaries of the search space (for power and energy capacity, or as directed in the
config) for the optimization algorithms.
4) it finds shorter time-series representative of the data set to run faster simulations on. The method used is
based on the configuration file.
        5) it calculates the base case's performance with respect to the optimization objective given in the
        configuration. [Note: this is not strictly required for optimization, except that some meta data may be
        retrieved in this step that is later used in the fitness calculations of actual simulation results.]
:param projectName: [String] name of the project, used to locate project folder tree within the GBSProject
folder structure
:param inputArgs: [Array of strings] spare input, currently un-used.
'''
# Setup key parameters
self.thisPath = os.path.dirname(os.path.realpath(__file__))
self.projectName = projectName
self.rootProjectPath = os.path.join(self.thisPath, '../../GBSProjects/', self.projectName) # root path to project files relative to this file location
# Pull in inputArgs for potential later processing
self.inputArgs = inputArgs
# Load configuration from optimizerConfig<ProjectName>.xml
configFileName = 'optimizerConfig' + self.projectName + '.xml'
configPath = os.path.join(self.rootProjectPath, 'InputData/Setup/', configFileName)
configFile = open(configPath, 'r')
configFileXML = configFile.read()
configFile.close()
configSoup = bs(configFileXML, "xml")
self.searchMethod = configSoup.optimizationMethod.get('value')
self.optimizationObjective = configSoup.optimizationObjective.get('value')
self.dataReductionMethod = configSoup.dataReductionMethod.get('value') # should be 'RE-load-one-week'
self.boundaryMethod = configSoup.optimizationEnvelopeEstimator.get('value') # should be 'variableSRC'
# Bins for reference parameters and current best performing
# both will be written below with actual initial values, but are placed here so they don't get lost
self.basePerformance = 0
self.currentBestPerformance = 0
# Retrieve data from base case (Input files)
self.time, self.firmLoadP, self.varLoadP, self.firmGenP, self.varGenP, self.allGen, self.baseComponents = \
getBasecase(self.projectName, self.rootProjectPath)
# Calculate boundaries for optimization search
# Get boundary constraints from config file, since this may vary from method to method, these are then lumped
# into a list 'constraints' that is passed through to the appropriate algorithm
opBndMethodConfig = configSoup.find(self.boundaryMethod + 'Config')
opBndMethodConfigChildren = opBndMethodConfig.findChildren()
constraints = list()
for child in opBndMethodConfigChildren:
constraints.append(child.get('value'))
self.minESSPPa, self.maxESSPPa, self.minESSEPa, self.maxESSEPa = \
getOptimizationBoundaries(self.boundaryMethod, self.time, self.firmLoadP, self.varLoadP, self.firmGenP,
self.varGenP, constraints)
# Get the short test time-series
reductionInput = \
pd.DataFrame({'time':self.time, 'firmLoadP':self.firmLoadP, 'varGenP':self.varGenP})#, index=self.time)
self.abbrevDatasets, self.abbrevDatasetWeights = getDataSubsets(reductionInput, self.dataReductionMethod, otherInputs=[])
# Setup optimization runs
# Branch based on input from 'configSoup'->optimizationObjective
# Get base case KPI based on optimization objective
# Any of the following if-branches needs to write to self.basePerformance with the reference KPI based on the
# optimization objective
# Futurefeature: retrieve basePerformance of abbreviated data sets instead of full base case for direct comparability with optimization iteration outputs.
if self.optimizationObjective == 'maxREContribution':
# Calculate base case RE contribution
self.basePerformance = getPrimaryREContribution(self.time, self.firmLoadP, self.firmGenP, self.varGenP)
elif self.optimizationObjective == 'minFuelUtilization':
# Calculate base case fuel consumption
# Need to load fuel curves for this
genFleetList = list(self.allGen.columns.values)
genFleetList.remove('time')
genFleet = list()
for gen in genFleetList:
genFleet.append(gen[:-1])
self.fuelCurveDataPoints = pd.DataFrame(index = genFleet, columns = ['fuelCurve_pPu','fuelCurve_massFlow','POutMaxPa'])
for genString in genFleet:
genPath = os.path.join(self.rootProjectPath, 'InputData/Components/', genString + 'Descriptor.xml')
genFile = open(genPath, 'r')
genFileXML = genFile.read()
genFile.close()
genSoup = bs(genFileXML, "xml")
self.fuelCurveDataPoints.loc[genString, 'fuelCurve_pPu'] = genSoup.fuelCurve.pPu.get('value')
self.fuelCurveDataPoints.loc[genString, 'fuelCurve_massFlow'] = genSoup.fuelCurve.massFlow.get('value')
self.fuelCurveDataPoints.loc[genString, 'POutMaxPa'] = genSoup.POutMaxPa.get('value')
self.genAllFuelUsedBase, self.fuelStatsBase = getFuelUse(self.allGen, self.fuelCurveDataPoints)
self.basePerformance = self.fuelStatsBase['total']
else:
raise ValueError('Unknown optimization objective, %s, selected.' % self.optimizationObjective)
# Since we do not have a better performing set of sims yet, make basePerformance best performing.
self.currentBestPerformance = self.basePerformance
# Retrieve additional optimization config arguments for the selected algorithm
self.optimizationConfig = dict()
opAlgConfig = configSoup.find(self.searchMethod + 'Config')
opAlgConfigChildren = opAlgConfig.findChildren()
for child in opAlgConfigChildren:
self.optimizationConfig[child.name] = child.get('value')
#print(self.optimizationConfig)
def doOptimization(self):
'''
        Interface to dispatch the specified optimization algorithm. Raises a ValueError if the string in self.searchMethod
        does not match a known optimization method.
Currently, only hillClimber is implemented.
:return:
'''
#print(self.searchMethod)
if self.searchMethod == 'hillClimber':
# call hillClimber function
self.hillClimber()
# FUTUREFEATURE: add further optimization methods here
else:
raise ValueError('Unknown optimization method, %s, selected.' % self.searchMethod)
def hillClimber(self):
'''
Adaptive hill climber method for optimization of EES power and energy capacity.
:return: nothing - writes to object-wide variables.
'''
maxIterNumber = int(float(self.optimizationConfig['maxRunNumber']))
convergenceRepeatNum = int(float(self.optimizationConfig['convergenceRepeatNum']))
convergenceFlag = False
# Select starting configuration at random, within the given bounds.
        # The power level can be chosen freely between the previously determined bounds.
self.essPPa = int(self.minESSPPa + (self.maxESSPPa - self.minESSPPa)*np.random.random_sample())
# The energy capacity must meet at least the minimum duration requirement, and cannot exceed the maximum.
self.essEPa = float(self.essPPa * (self.minESSEPa/self.minESSPPa) +
(self.maxESSEPa - (self.essPPa * (self.minESSEPa/self.minESSPPa))) * np.random.random_sample())
print(['Initial guess: ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(self.essEPa) + ' kWh.'])
        # Since we want to make set numbers congruent with iteration numbers, a unique additional identifier needs to be added to the 'SetX' folder names.
# We'll use current unix time to the second as the unique identifier. Thus, if the 'Set0' directory already
# exists, we will name all directories of this optimization run as 'Set[IterationNumber].[snippetIdx].[identifier]', where
# 'identifier' is the current unix time rounded to the nearest second.
identifier = str(int(time.time()))
# Get the index for the ESS to be added to the system
essIdx = self.essExists()
# Create bins for fitness tracking
self.fitness = None
fitnessLog = pd.DataFrame(index = pd.Index(range(0, maxIterNumber)), columns = ['fitness', 'essPPa', 'essEPa', 'bestFitness', 'bestP', 'bestE'])
for iterIdx in range(0, maxIterNumber):
#time.sleep(1)
if not convergenceFlag:
# Need the abbreviate data designators to retrieve start and stop time stamp indicies
snippetIdx = self.abbrevDatasets.index.levels[0]
setIdx = 0
setPathList = list()
setNameList = list()
firmLoadsDF = pd.DataFrame()
#Create setup file for each of the six simulations
for sIdx in snippetIdx:
# Write the sets attributes, create the directory, etc.
startTimeIdx = self.abbrevDatasets.loc[sIdx].index[0]
endTimeIdx = self.abbrevDatasets.loc[sIdx].index[-1]
setPath, setName = self.setupSet(iterIdx, setIdx, identifier, essIdx, self.essPPa, self.essEPa, startTimeIdx, endTimeIdx)
setPathList.append(setPath)
setNameList.append(setName)
# Generate runs
print('Iteration ' + str(iterIdx) + ', Snippet ' + str(sIdx) + ' simulation dispatched.')
generateRuns(setPath)
# Dispatch simulations
runSimulation(setPath)
# Pass through firm load data
firmLoadsDF['firmLoadP.' + str(setIdx)] = self.abbrevDatasets.loc[sIdx]['firmLoadP'][:-1].values
firmLoadsDF['firmLoadTime.' + str(setIdx)] = self.abbrevDatasets.loc[sIdx]['time'][:-1].values
setIdx = setIdx + 1
print('Iteration '+ str(iterIdx) +', Snippet ' + str(sIdx) + ' completed.')
# Get KPIs
# Collect data: we need to pull all the pertinent data for KPI calculation together from the results
# in the set folders.
self.resultsDF, resultsMetaInfo = self.collectResults(setPathList, setNameList)
self.resultsDF = pd.concat([self.resultsDF, firmLoadsDF], axis = 1)
# Get the new fitness value
newFitness = getFitness(self.optimizationObjective, self.rootProjectPath, self.resultsDF, self.abbrevDatasetWeights, resultsMetaInfo)
# Log progress
fitnessLog['fitness'].loc[iterIdx] = newFitness
fitnessLog['essPPa'].loc[iterIdx] = self.essPPa
fitnessLog['essEPa'].loc[iterIdx] = self.essEPa
# Get KPIs
# TODO complete getRunMetaData(setPath, [0]) once getRunMetaData is completed. This might also be run then
# prior to getFitness and save some effort.
# Ascertain fitness
# First iteration just write the values
if not self.fitness:
self.fitness = newFitness
self.essPPaBest = self.essPPa
self.essEPaBest = self.essEPa
                    # Random next guess: The power level can be chosen freely between the previously determined bounds.
self.essPPa = int(self.minESSPPa + (self.maxESSPPa - self.minESSPPa) * np.random.random_sample())
# The energy capacity must meet at least the minimum duration requirement, and cannot exceed the maximum.
self.essEPa = float(self.essPPa * (self.minESSEPa / self.minESSPPa) +
(self.maxESSEPa - (self.essPPa * (
self.minESSEPa / self.minESSPPa))) * np.random.random_sample())
print(['Iteration: '+ str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Set the improvement tracker
lastImprovement = 1
# Other iterations check if fitness has improved (that is, has gotten smaller!!!)
elif newFitness < self.fitness:
self.fitness = newFitness
self.essPPaBest = self.essPPa
self.essEPaBest = self.essEPa
self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx)
print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Reset the improvement tracker
lastImprovement = 1
# Lastly if nothing has improved search again in the previously defined range.
else:
# Widen the random number deviation
self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx/lastImprovement) #np.sqrt(lastImprovement + 1))
# Increment the improvement tracker
lastImprovement = lastImprovement + 1
# If there's no improvement after X iterations in a row, terminate the algorithm.
# NOTE this can mean two things, either that we have achieved convergence, or that we're stuck somewhere
if lastImprovement > convergenceRepeatNum:
convergenceFlag = True
print('*********************************')
print('Terminated at Iteration: ' + str(iterIdx) + ' with fitness: ' + str(self.fitness))
print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Additional logging
fitnessLog['bestFitness'].loc[iterIdx] = self.fitness
fitnessLog['bestP'].loc[iterIdx] = self.essPPaBest
fitnessLog['bestE'].loc[iterIdx] = self.essEPaBest
self.fl = fitnessLog
def getNextGuess(self, fl, pBest, eBest, iterNumParam):
'''
This method determines the next values for `essPPa` and `essEPa` that are to be tested in an iteration of the
hill climber. It uses the historical fitness values from previous iterations and determines the direction of the
steepest gradient away from the best fitness value. It then biases the random selection for new power and energy
capacity values in the _opposite_ direction of the steepest gradient with the hope that this is the most likely
direction to find a better value pair at. If new selections are outside of the constraints put on the search
space, i.e., maximum and minimum power and energy capacities, and/or minimum duration (at the essPPa selected),
it corrects selections back to the edges of the search envelope as set by the constraints.
        The more iterations in the past the best found fitness lies, the stronger the random element in picking new
        values becomes. The idea is that the algorithm might be stuck and larger jumps might get it unstuck.
**Note:** this approach to a hill climber was tested with several test functions
(found in getFitness.py->getTestFitness). With these test functions the algorithm generally converges well.
The caveat is, that recent results seem to suggest that the actual search space for the optimal GBS may not be
smooth, while the test cases used smooth test functions. This should be investigated further.
:param fl: fitnessLog
:param pBest: essPPaBest: current best power guess for GBS
:param eBest: essEPaBest: current best energy guess for GBS
:param iterNumParam: [float] parameter describing the randomness of the next value pair selection, fraction of
iteration number and count since the last improved fitness value was found.
:return: newESSPPa, newESSEPa: [float] new pair of energy and power capacities to run the next iteration with
'''
# Reduce the data in fl to the necessary columns and usable values
fl = fl[['fitness', 'essPPa', 'essEPa']]
fl = fl.dropna()
# Parameter used to adjust variability/randomization of next guess
# TODO make adjustable parameter
exponent = 0.5
# Calculate distance from best point
fl['Dist'] = pd.Series(np.sqrt(list(np.asarray(fl['essPPa'] - pBest)**2 + np.asarray(fl['essEPa'] - eBest)**2)))
fl = fl.sort_values('Dist')
originFitness = fl['fitness'].iloc[0]
originP = fl['essPPa'].iloc[0]
originE = fl['essEPa'].iloc[0]
print('Origin P: ' + str(originP) + ', Origin E: ' + str(originE))
fl = fl[fl.Dist != 0]
fl['Slope'] = (fl['fitness'] - originFitness)/fl['Dist']
# Get the difference in power-coordinate DOWN the steepest gradient of the four nearest neighbors
if fl.shape[0] == 1:
maxSlopeIdx = fl['Slope'].astype(float).index[0]
elif fl.shape[0] < 3:
maxSlopeIdx = fl['Slope'].astype(float).idxmax()
else:
maxSlopeIdx = fl['Slope'][0:2].astype(float).idxmax()
dx = fl['essPPa'][maxSlopeIdx] - originP
newCoord = originP - dx
# Get random down and up variations from the power-coordinate
rndDown = (newCoord - self.minESSPPa) * np.random.random_sample()/iterNumParam**exponent
rndUp = (self.maxESSPPa - newCoord)*np.random.random_sample()/iterNumParam**exponent
newESSPPa = float(newCoord - rndDown + rndUp)
# Check constraints
if newESSPPa < self.minESSPPa:
newESSPPa = self.minESSPPa
elif newESSPPa > self.maxESSPPa:
newESSPPa = self.maxESSPPa
# Get a random new value of energy storage capacity
# Get the difference in power-coordinate DOWN the steepest gradient
#maxSlopeIdx = fl.index[1]
dy = fl['essEPa'][maxSlopeIdx] - originE
newCoordY = originE - dy
# Get random down and up variations from the power-coordinate
# Note that ess needs to meet minimum duration requirement, so the minimum size is constraint by the currently
# selected power level.
currentESSEMin = newESSPPa * (self.minESSEPa/self.minESSPPa)
rndDown = (newCoordY - currentESSEMin) * np.random.random_sample() / iterNumParam**exponent
rndUp = (self.maxESSEPa - newCoordY) * np.random.random_sample() / iterNumParam**exponent
newESSEPa = float(newCoordY - rndDown + rndUp)
# Check constraints
if newESSEPa < currentESSEMin:
newESSEPa = currentESSEMin
elif newESSEPa > self.maxESSEPa:
newESSEPa = self.maxESSEPa
return newESSPPa, newESSEPa
def setupSet(self, iterIdx, setIdx, identifier, eesIdx, eesPPa, eesEPa, startTimeIdx, endTimeIdx):
'''
Generates the specific projectSetAttributes.xml file, and the necessary folder in the project's output folder.
Returns the name of the specific set and it's absolute path. Set naming follows the convention of
'Set[iterationNumber].[snippetNumber].[currentUNIXEpoch]', where iterationNumber is the current iteration of the
of the optimizer, snippetNumber is the numerical identifier of the abbreviated data snippet, and the
currentUNIXEpoch is the current local machine unix time to the second in int format.
:param iterIdx: [int] current iteration of optimization algorithm
:param setIdx: [int] numerical identifier of the snippet of time-series to be run here.
:param identifier: [int] current local machine UNIX time to the second, could be any other integer
:param eesIdx: [int] index of the ees to be added to the system, e.g., ees0. This is necessary should the system
already have an ees that is not part of the optimization.
:param eesPPa: [float] nameplate power capacity of the ees, assumed to be symmetrical in and out.
:param eesEPa: [float] nameplate energy capacity of the ees, necessary to calculate ratedDuration, which is the
actual parameter used in the setup.
:param startTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data starts
that is to be run here.
:param endTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data ends that
is to be run here.
:return setPath: [os.path] path to the set folder
:return setName: [String] name of the set
'''
# Get the current path to avoid issues with mkdir
here = os.path.dirname(os.path.realpath(__file__))
# * Create the 'SetAttributes' file from the template and the specific information given
# Load the template
setAttributeTemplatePath = os.path.join(here, '../GBSModel/Resources/Setup/projectSetAttributes.xml')
setAttributeTemplateFile = open(setAttributeTemplatePath, 'r')
setAttributeTemplateFileXML = setAttributeTemplateFile.read()
setAttributeTemplateFile.close()
setAttributeSoup = bs(setAttributeTemplateFileXML, 'xml')
# Write the project name
setAttributeSoup.project['name'] = self.projectName
# Write the power levels and duration
compNameVal = 'ees' + str(eesIdx) + ' ees' + str(eesIdx) + ' ees' + str(eesIdx)
compTagVal = 'PInMaxPa POutMaxPa ratedDuration'
compAttrVal = 'value value value'
rtdDuration = int(3600*(eesEPa/eesPPa))
compValueVal = str(eesPPa) + ' PInMaxPa.value ' + str(rtdDuration)
setAttributeSoup.compAttributeValues.compName['value'] = compNameVal
setAttributeSoup.compAttributeValues.find('compTag')['value'] = compTagVal # See issue 99 for explanation
setAttributeSoup.compAttributeValues.compAttr['value'] = compAttrVal
setAttributeSoup.compAttributeValues.compValue['value'] = compValueVal
# Write additional information regarding run-time, time resolution, etc.
setupTagVal = 'componentNames runTimeSteps timeStep'
setupAttrVal = 'value value value'
componentNamesStr = 'ees' + str(eesIdx) + ',' + ','.join(self.baseComponents)
setupValueVal = componentNamesStr + ' ' + str(startTimeIdx) + ',' + str(endTimeIdx) + ' ' + str(1)
setAttributeSoup.setupAttributeValues.find('setupTag')['value'] = setupTagVal
setAttributeSoup.setupAttributeValues.setupAttr['value'] = setupAttrVal
setAttributeSoup.setupAttributeValues.setupValue['value'] = setupValueVal
# Make the directory for this set
setName = 'Set' + str(iterIdx) + '.' + str(setIdx) + '.' + str(identifier)
setPath = os.path.join(self.rootProjectPath, 'OutputData/' + setName)
os.mkdir(setPath)
filename = self.projectName + setName + 'Attributes.xml'
setPathName = os.path.join(setPath, filename)
with open(setPathName, 'w') as xmlfile:
xmlfile.write(str(setAttributeSoup))
xmlfile.close()
return setPath, setName
def essExists(self):
'''
Checks if the system setup already contains one or more ESS components; looks for the largest index of those
components, and returns it as the index for the ESS used in optimization.
:return: essIdx
'''
# We also need to determine the unique name for the ess. Normally, this should be ess0. However, in the rare
# situation that ess0 (and essX for that matter) already exists, we need to make sure we pick an available
# numeric identifier
# Load configuration from optimizerConfig<ProjectName>.xml
setupFileName = self.projectName + 'Setup.xml'
setupPath = os.path.join(self.rootProjectPath, 'InputData/Setup/', setupFileName)
setupFile = open(setupPath, 'r')
setupFileXML = setupFile.read()
setupFile.close()
setupSoup = bs(setupFileXML, "xml")
components = setupSoup.componentNames.get('value').split()
essComps = [comp for comp in components if comp.startswith('ees')]
essNum = []
for num in essComps:
essNum.append(int(num[3:]))
if not essNum:
essNumMax = 0
else:
essNumMax = max(essNum)
essIdx = essNumMax
return essIdx
def collectResults(self, setPathList, setNameList):
'''
        Collects the power time-series of all components from the netCDF output files of each completed set and
        compiles them, together with the meta information needed for fitness calculations.
        :param setPathList: [list of os.path] paths to the set folders to collect results from
        :param setNameList: [list of String] names of the sets to collect results from
        :return resultsDF: power and time channels for each component and set
        :return resultsMetaInfo: set paths, set names and generator list (needed for fuel calculations)
'''
# Get the current path to avoid issues with file locations
#here = os.path.dirname(os.path.realpath(__file__))
resultsDF = pd.DataFrame()
for setIdx in range(0, len(setPathList)):
# Get power channels for all components in the configuration
# Get the component list from Attributes.xml file
setAttrFile = open(os.path.join(setPathList[setIdx], self.projectName + setNameList[setIdx] + 'Attributes.xml'), 'r')
setAttrXML = setAttrFile.read()
setAttrFile.close()
setAttrSoup = bs(setAttrXML, 'xml')
setAttrVal = setAttrSoup.setupAttributeValues.setupValue.get('value')
components = setAttrVal.split(' ')[0].split(',')
for component in components:
try:
ncChannel = readNCFile(os.path.join(setPathList[setIdx], 'Run0/OutputData/', component + 'P' + setNameList[setIdx] + 'Run0.nc'))
resultsDF[component + 'Time' + '.' + str(setIdx)] = pd.Series(np.asarray(ncChannel.time))
resultsDF[component + 'P' + '.' + str(setIdx)] = pd.Series(np.asarray(ncChannel.value))
except Exception:
pass
            # We'll also extract the list of generators used from the component list (needed for fuel calcs)
genList = list()
for component in components:
if component[0:3] == 'gen':
genList.append(component)
resultsMetaInfo = pd.DataFrame()
resultsMetaInfo['setPathList'] = setPathList
resultsMetaInfo['setNameList'] = setNameList
resultsMetaInfo['genList'] = | pd.Series(genList) | pandas.Series |
import os
import gc
import argparse
import pandas as pd
import numpy as np
import keras.backend as K
from keras.preprocessing.image import Iterator
from src.data.category_idx import map_categories
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import merge
from keras.models import Model
from keras.initializers import Ones
from keras.optimizers import Adam
from keras.models import load_model
from keras.constraints import non_neg
N_CATEGORIES = 5270
CATEGORIES_SPLIT = 2000
MODEL_FILE = 'model.h5'
VALID_PREDICTIONS_FILE = 'valid_predictions.csv'
TOP_K = 10
class SpecialIterator(Iterator):
def __init__(self, images, categories, n_models, batch_size=32, shuffle=True, seed=None):
self.x = images
self.products = images[['product_id', 'img_idx']].drop_duplicates().sort_values(['product_id', 'img_idx'])
self.categories = categories.sort_index()
self.num_classes = N_CATEGORIES
self.samples = self.products.shape[0]
self.n_models = n_models
super(SpecialIterator, self).__init__(self.samples, batch_size, shuffle, seed)
def next(self):
index_array = next(self.index_generator)[0]
prods = self.products.iloc[index_array]
        # map (product_id, img_idx) pairs to their row position in this batch
        prod_pos = {(row.product_id, row.img_idx): i for i, row in enumerate(prods.itertuples())}
        cats = self.categories.loc[prods.product_id]
        images = prods.merge(self.x, on=['product_id', 'img_idx'], how='left')
        p = np.zeros((len(index_array), self.num_classes, self.n_models), dtype=np.float32)
        for row in images.itertuples():
            p[prod_pos[(row.product_id, row.img_idx)], row.category_idx, row.model] = 0 if np.isnan(row.prob) else row.prob
        return [np.repeat(np.arange(self.n_models).reshape(1, self.n_models), len(index_array), axis=0),
                p[:, :CATEGORIES_SPLIT, :], p[:, CATEGORIES_SPLIT:, :]], cats['category_idx'].values
def train_ensemble_nn(preds_csv_files, prod_info_csv, category_idx_csv, model_dir, lr, seed, batch_size, epochs):
prod_info = pd.read_csv(prod_info_csv)
category_idx = pd.read_csv(category_idx_csv)
all_preds = []
model_inx = {}
for i, csv in enumerate(preds_csv_files):
preds = pd.read_csv(csv)
preds['model'] = i
model_inx[i] = csv
all_preds.append(preds)
print('Assigned indexes to models: ', model_inx)
all_preds = pd.concat(all_preds)
n_models = len(preds_csv_files)
categories = prod_info[prod_info.product_id.isin(all_preds.product_id.unique())][['product_id', 'category_id']]
categories['category_idx'] = map_categories(category_idx, categories.category_id)
categories = categories[['product_id', 'category_idx']]
categories = categories.set_index('product_id')
it = SpecialIterator(all_preds, categories, n_models, batch_size=batch_size, seed=seed, shuffle=True)
model_file = os.path.join(model_dir, MODEL_FILE)
if os.path.exists(model_file):
model = load_model(model_file)
else:
model_inp = Input(shape=(n_models,), dtype='int32')
preds_cat1_inp = Input((CATEGORIES_SPLIT, n_models))
preds_cat2_inp = Input((N_CATEGORIES - CATEGORIES_SPLIT, n_models))
mul_cat1 = Embedding(n_models, 1, input_length=n_models, embeddings_initializer=Ones(),
embeddings_constraint=non_neg())(model_inp)
mul_cat1 = Flatten()(mul_cat1)
mul_cat2 = Embedding(n_models, 1, input_length=n_models, embeddings_initializer=Ones(),
embeddings_constraint=non_neg())(model_inp)
mul_cat2 = Flatten()(mul_cat2)
def op(x):
z_left = x[0].dimshuffle(1, 0, 2) * x[1]
z_right = x[2].dimshuffle(1, 0, 2) * x[3]
z = K.concatenate([z_left, z_right], axis=0)
v = K.sum(z, axis=-1)
p = K.sum(v, axis=-2)
return (v / p).dimshuffle(1, 0)
x = merge([preds_cat1_inp, mul_cat1, preds_cat2_inp, mul_cat2], mode=op, output_shape=(N_CATEGORIES,))
model = Model([model_inp, preds_cat1_inp, preds_cat2_inp], x)
np.random.seed(seed)
model.compile(optimizer=Adam(lr=lr), loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
model.fit_generator(it, steps_per_epoch=it.samples / it.batch_size, epochs=epochs)
print('First {} categories model weights:'.format(CATEGORIES_SPLIT))
print(model.get_layer('embedding_1').get_weights())
    print('Remaining categories (from {} up) model weights:'.format(CATEGORIES_SPLIT))
print(model.get_layer('embedding_2').get_weights())
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
model.save(os.path.join(model_dir, MODEL_FILE))
def predict_valid(preds_csv_files, prod_info_csv, category_idx_csv, model_dir, batch_size):
model_file = os.path.join(model_dir, MODEL_FILE)
if os.path.exists(model_file):
model = load_model(model_file)
else:
raise ValueError("Model doesn't exist")
prod_info = pd.read_csv(prod_info_csv)
category_idx = | pd.read_csv(category_idx_csv) | pandas.read_csv |
#!/usr/bin/python3
from argparse import ArgumentParser
import urllib.request
import pandas as pd
import re
import os
import datetime
from time import sleep
def make_dir():
if not os.path.exists('DATABASE'):
os.makedirs('DATABASE')
def get_content(url, year):
try:
request = urllib.request.Request(url)
response = urllib.request.urlopen(request, timeout=20)
if year >= 2018:
content = response.read().decode('utf-8')
else:
content = response.read().decode('windows-1252')
except Exception as e:
print(e)
sleep(10)
content = ''
return content
def parse_content(content, name, label, year):
author_list = []
abstract_list = []
pattern1 = re.compile(r'<dt class="ptitle"><br><a href="(.*?)">(.*?)</a></dt>')
pattern2 = re.compile(r'\[<a href="(.*?)">pdf</a>\]')
items1 = re.findall(pattern1, content)
items2 = re.findall(pattern2, content)
df1 = | pd.DataFrame(items1, columns=['html', 'title']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This script runs the data extraction, data cleaning, calculations and visualization
scripts used in "Regionalized footprints of battery electric vehicles in Europe"
Users can run two types of experiments concurrently; the electricity sampling period
('el_experiments'), and vehicle parameters ('BEV_experiments')
"""
import logging
from datetime import datetime, timezone, timedelta
import pytz
import pandas as pd
import openpyxl
from openpyxl.styles import Font, Alignment
import numpy as np
import itertools
import bentso_extract
import entso_data_clean
import hybridized_impact_factors as hybrid
import BEV_footprints_calculations as calc
import BEV_visualization as viz
logname = 'run ' + datetime.now().strftime('%d-%m-%y, %H-%M') + '.log'
logging.basicConfig(filename=logname,
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
if __name__ == "__main__":
# Define experiment running parameters
new_query = 0 # trigger for re-querying ENTSO-E database (takes time!)
calc_hybrid = 1 # trigger for re-calculating hybridized impact factors (takes time!); 'fresh start' runs need value 1
segments = ['A', 'C', 'JC', 'JE', 'mini', 'medium', 'compact SUV', 'mid-size SUV'] # valid entries for segment
segment_dict = {'mini': 'A', 'medium':'C', 'compact SUV':'JC', 'mid-size SUV':'JE'} # long-form to short form segments
# definition of minimum data for each experiment type
exp_type_dict = {'fullyear':set(['year']),
'subyear': set(['year', 'start', 'end']),
'country_fp': set(['year', 'start', 'country', 'segment'])
}
# define time period for experiment sampling (for ENTSO-E querying)
# provide start and end period for subannual analysis, or just start for a country footprint
start = datetime(2021, 5, 14, 22, 44, tzinfo=pytz.timezone('Europe/Berlin')) # year, month, day, hour, minute, timezone
end = datetime(2021, 5, 14, 23, 00, tzinfo=pytz.timezone('Europe/Berlin'))
if start.year != end.year:
logging.error('Cannot do cross-year comparisons!')
# define electricity experiments (dict of dicts: {experiment_name: {experiment_params}})
# accepted forms for experiment_params in el experiments (examples below):
# -- 'year': single-element list with the full year of analysis
# -- 3-item dict with year, start and stop periods as datetime objects
# -- 4-item dict with year, time of assessment (datetime object), country and vehicle segment of desired footprint
el_experiments = {'2020': {'year': 2020},
# '2021': {'year': start.year, 'start': start, 'end': end}, # example subannual period analysis
# '2021fp': {'year': start.year, 'start':start, 'country':'FR', 'segment':'C'} # example, single-country footprint (Figure 1)
}
# define BEV experiments and parameter values (dict of dicts: {experiment_name: {experiment_params}})
# accepted values for experiment_params:
# BEV_life: int/float. Lifetime of BEV vehicles, in km
# ICE_life: int/float. Lifetime of ICEV vehicles, in km
# flowtrace: True/False. Assume flowtrace electricity trade assumption or grid average assumption (False)
# allocation: True/False. Perform allocation to EOL of batteries where battery life (20% capacity fade) exceeds that of vehicle
# energy_scen: str or False. Use different assumption for battery production electricity demand (False is default). str is experiment name, defined in Excel sheet.
BEV_experiments = {'baseline': {'BEV_life':180000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':False},
'long_BEV_life': {'BEV_life':250000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':False},
'short_BEV_life': {'BEV_life':150000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':False},
'grid_avg': {'BEV_life':180000, 'ICE_life':180000, 'flowtrace':False, 'allocation':True, 'energy_scen':False},
'long_BEV_life_ga': {'BEV_life':250000, 'ICE_life':180000, 'flowtrace':False, 'allocation':True, 'energy_scen':False},
'short_BEV_life_ga': {'BEV_life':150000, 'ICE_life':180000, 'flowtrace':False, 'allocation':True, 'energy_scen':False},
'baseline_energy': {'BEV_life':180000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':'Gigafactory'},
'long_BEV_life_energy': {'BEV_life':250000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':'Gigafactory'},
'short_BEV_life_energy': {'BEV_life':150000, 'ICE_life':180000, 'flowtrace':True, 'allocation':True, 'energy_scen':'Gigafactory'},
'baseline_alloc': {'BEV_life':180000, 'ICE_life':180000, 'flowtrace':True, 'allocation':False, 'energy_scen':False},
'long_BEV_life_alloc': {'BEV_life':250000, 'ICE_life':180000, 'flowtrace':True, 'allocation':False, 'energy_scen':False},
'short_BEV_life_alloc': {'BEV_life':150000, 'ICE_life':180000, 'flowtrace':True, 'allocation':False, 'energy_scen':False},
}
# this dict defines the groups of BEV_experiments to plot together in sensitivity analysis (Figure 6).
# list items must therefore match BEV_experiment keys (experiment_name)
sensitivity_groups = {'baseline':['baseline', 'long_BEV_life', 'short_BEV_life'],
'grid_average': ['grid_avg', 'long_BEV_life_ga', 'short_BEV_life_ga'],
'batt_energy': ['baseline_energy', 'long_BEV_life_energy', 'short_BEV_life_energy'],
'no_alloc': ['baseline_alloc', 'long_BEV_life_alloc', 'short_BEV_life_alloc']
}
all_exp_results = {} # holds all results for sensitivity analysis
el_methodlist = []
for exp, params in BEV_experiments.items():
if BEV_experiments[exp]['flowtrace']:
el_methodlist.append('flowtrace')
elif not BEV_experiments[exp]['flowtrace']:
el_methodlist.append('gridaverage')
el_methodlist = list(set(el_methodlist))
el_keys = list(itertools.product(el_experiments.keys(), el_methodlist))
el_figs = {key: True for key in el_keys} # keeps track if electricity figures have already been plotted for the electriicty scenario
# Begin running electricity experiments
# Default is to recalculate everything
for el_exp, params in el_experiments.items():
logging.info(f'Starting el_experiment {el_exp}')
if ('year' not in params.keys()) and ('start' in params.keys()):
            # manually populate the 'year' entry if missing
            params['year'] = params['start'].year
if set(params.keys()) == exp_type_dict['fullyear']:
experiment_type = 'fullyear'
elif set(params.keys()) == exp_type_dict['subyear']:
experiment_type = 'subyear'
elif set(params.keys()) == exp_type_dict['country_fp']:
if params['segment'] in segments:
experiment_type = 'country_fp'
if params['segment'] in segment_dict.keys():
params['segment'] = segment_dict[params['segment']] # convert to letter segment classification
else:
logging.error(f'Incorrect specification of desired vehicle segment')
else:
logging.error('Invalid electricity experiment input')
# case for sub-annual analysis period (or single-country footprint)
if (experiment_type == 'subyear') or (experiment_type == 'country_fp'):
if new_query:
bentso_extract.bentso_query(params['year']) # can alternatively specify year of analysis here
logging.info(f'year: {params["year"]}')
if experiment_type == 'subyear':
# for sub-annual analysis period
logging.info(f'subperiod start: {params["start"]}')
logging.info(f'subperiod end: {params["end"]}')
ei_countries = entso_data_clean.clean_entso(year=params['year'],
start=params['start'],
end=params['end'])
else:
# For country-footprint experiment (snapshot of time and segment)
# identical to subannual analysis; only visualization changes
if new_query:
# NB: we can use full_year = True here, but query will be rounded to closest hour
# (and which is averaged out if sub-hourly samples are available)
bentso_extract.bentso_query(params['year'], full_year=False)
logging.info(f'country footprint: {params["country"]}')
logging.info(f'segment footprint: {params["segment"]}')
ei_countries, timestamp = entso_data_clean.clean_entso(year=params['year'],
start=params['start'],
end=params['start'] + timedelta(hours=1),
country=params['country'])
if np.abs(timestamp.to_pydatetime() - params['start']) > timedelta(hours=12):
print('Warning: Large discrepancy between sampled time and user defined time. Try running a new query.')
print('Completing analysis on closest time available')
# case for full-year analysis
elif experiment_type == 'fullyear':
if new_query:
bentso_extract.bentso_query(params['year']) # can alternatively specify year of analysis here
logging.info(f'year: {params["year"]}')
ei_countries = entso_data_clean.clean_entso(year=params['year'])
print(f'Using ecoinvent factors for {ei_countries} \n')
# Calculate hybridized impact factors for electricity
logging.info('Starting calculation of hybridized emission factors \n')
# ef_countries, no_ef, countries_missing_ef = hybrid.hybrid_emission_factors(ei_countries, params['year'])
countries_missing_ef = hybrid.clean_impact_factors(params['year'], ei_countries, calc_hybrid)
# Begin BEV experiments - calculation and visualization
for BEV_exp, BEV_params in BEV_experiments.items():
print('\n **********')
print(f'Performing experiment {BEV_exp} with electricity experiment {el_exp}')
logging.info(f'Starting BEV experiment {BEV_exp}')
if BEV_params['flowtrace']:
el_method = 'flowtrace'
elif not BEV_params['flowtrace']:
el_method = 'gridaverage'
all_exp_results[BEV_exp], ICEV_prodEOL_impacts, ICEV_op_int, SI_fp_temp = calc.run_calcs(run_id=BEV_exp+'_'+el_exp,
year = params['year'],
no_ef_countries = countries_missing_ef,
BEV_lifetime=BEV_params['BEV_life'],
ICEV_lifetime=BEV_params['ICE_life'],
flowtrace_el=BEV_params['flowtrace'],
allocation = BEV_params['allocation'],
production_el_intensity=679,
export_data=True,
include_TD_losses=True,
incl_ei=False,
energy_sens=BEV_params['energy_scen']
)
if BEV_exp == 'baseline':
SI_fp = SI_fp_temp # only need to update the main SI file (that is submitted with article)
logging.info('Visualizing results...')
if (experiment_type == 'fullyear') or (experiment_type == 'subyear'):
viz.visualize(BEV_exp+'_'+el_exp, export_figures=True, include_TD_losses=True, plot_el=el_figs[(el_exp, el_method)])
if el_figs[(el_exp, el_method)]:
el_figs[(el_exp, el_method)] = False
elif experiment_type == 'country_fp':
viz.country_footprint(BEV_exp+'_'+el_exp, params, timestamp, export_figures=True)
logging.info(f'Completed experiment electricity {el_exp}, BEV scenario {BEV_exp}')
# Check we have all the necessary experiments to run a sensitivity analysis
run_sensitivity = False
for sens, exp in sensitivity_groups.items():
if len(set(exp) - set(exp).intersection(set(BEV_experiments.keys()))) == 0:
run_sensitivity = True
# Compile BEV intensities from all experiments for sensitivity analysis
if run_sensitivity and (experiment_type != 'country_fp'):
mi = pd.MultiIndex.from_product([all_exp_results.keys(), all_exp_results['baseline'].columns.tolist()])
results = pd.DataFrame(index=all_exp_results['baseline'].index, columns=mi) # countries x all experiments
for exp in all_exp_results.keys():
results[exp] = all_exp_results[exp]
# Compile ICEV intensities for different lifetimes; these are constant for all scenarios
tmp_ICEV_fp = ICEV_prodEOL_impacts.to_frame().T.reindex(results.index)
ind = pd.MultiIndex.from_product([['ICEV 250k', 'ICEV 200k', 'ICEV 180k'], tmp_ICEV_fp.columns.tolist()])
tmp_ICEV_fp = tmp_ICEV_fp.reindex(ind, axis=1)
ICEV_dict = {'ICEV 250k': 250000,
'ICEV 200k': 200000,
'ICEV 180k': 180000}
for ICE, dist in ICEV_dict.items():
for seg in tmp_ICEV_fp[ICE].columns:
tmp_ICEV_fp.loc[:, (ICE, seg)] = ((ICEV_prodEOL_impacts / dist * 1e6).add(ICEV_op_int, axis=0)).loc[seg]
# concatenate all results and visualize sensitivity analysis
results = pd.concat([results, tmp_ICEV_fp], axis=1)
results = results.loc[~results.index.isin(ei_countries)]
viz.plot_fig6(results, sensitivity_groups, export_figures=True)
        results = results.reindex(columns=list(BEV_experiments.keys()) + ['ICEV 250k', 'ICEV 200k', 'ICEV 180k'], level=0)
#-- Begin export to SI
# separate each 'set' of BEV_experiments for exporting to SI
results_lifetime_sensitivity = results.iloc[:, np.r_[0:12, -12:0]].copy()
if results.shape[1] > 24:
results_ga = results.iloc[:, np.r_[12:24]].copy()
# rename columns for human-readability
new_ind = pd.MultiIndex.from_product([['Baseline (180k)', 'Long BEV life (250k)', 'Short BEV life (150k)', 'ICEV 250k','ICEV 200k','ICEV 180k'], ['A','C','JC','JE']])
results_lifetime_sensitivity.columns = new_ind
results_ga.columns = new_ind[0:12]
# make custom dataframe for low-manufacturing-energy sensitivity
df = (all_exp_results['baseline_energy'] - all_exp_results['baseline']) / all_exp_results['baseline']
sens_batt_energy = pd.concat([all_exp_results['baseline_energy'].round(0), df], axis=1)
sens_batt_energy.columns = pd.MultiIndex.from_product([['Footprint g CO2e/km', '% change from baseline'], df.columns])
# dict of captions for each sheet
excel_dict2 = {'Table S12': 'Sensitivity with alternative battery production electricity. Footprint with lower electricity demand and % change from baseline',
'Table S13': 'Data for Figure 7. Sensitivity with differing vehicle lifetimes for BEVs and ICEVs. Lifecycle carbon intensity, in g CO2e/vkm.',
'Table S14': 'BEV footprints using grid-average approach from Moro and Lonza (2018) to calculate electricity mix footprint, in g CO2e/vkm'}
book = openpyxl.load_workbook(SI_fp)
with | pd.ExcelWriter(SI_fp, engine="openpyxl") | pandas.ExcelWriter |
# This file is called separately from the rest of the program. This file takes the original data and creates cleaner csvs for app.py to use
import gsw
import numpy as np
import pandas as pd
# all of the parameters from the full data: 'yyyy-mm-ddThh:mm:ss.sss', 'Longitude [degrees_east]', 'Latitude [degrees_north]',
# 'PRESSURE [dbar]', 'DEPTH [m]', 'CTDTMP [deg C]', 'CTDSAL', 'SALINITY_D_CONC_BOTTLE', 'SALINITY_D_CONC_PUMP',
# 'SALINITY_D_CONC_FISH', 'SALINITY_D_CONC_UWAY', 'NITRATE_D_CONC_BOTTLE [umol/kg]', 'NITRATE_D_CONC_PUMP [umol/kg]',
# 'NITRATE_D_CONC_FISH [umol/kg]', 'NITRATE_D_CONC_UWAY [umol/kg]', 'NITRATE_LL_D_CONC_BOTTLE [umol/kg]',
# 'NITRATE_LL_D_CONC_FISH [umol/kg]', 'NO2+NO3_D_CONC_BOTTLE [umol/kg]', 'NO2+NO3_D_CONC_FISH [umol/kg]',
# 'Fe_D_CONC_BOTTLE [nmol/kg]', 'Fe_D_CONC_FISH [nmol/kg]', 'Fe_II_D_CONC_BOTTLE [nmol/kg]', 'Fe_II_D_CONC_FISH [nmol/kg]',
# 'Fe_S_CONC_BOTTLE [nmol/kg]', 'Fe_S_CONC_FISH [nmol/kg]'
# averages data with the exact same depth.
def average_data(cruise_data):
# from https://stackoverflow.com/questions/48830324/pandas-average-columns-with-same-value-in-other-columns
cruise_data = cruise_data.groupby(
["Latitude", "Longitude", "Station", "Depth"], as_index=False
).mean()
return cruise_data
# removes stations with specifically empty iron data.
def remove_empty_data(cruise_data):
grouped_data = cruise_data.groupby(["Latitude", "Longitude", "Station"])
for name, group in grouped_data:
if group["Iron"].isna().values.all():
cruise_data = cruise_data.drop(grouped_data.get_group(name).index)
return cruise_data
# gets the average nitrate values that are used to get ratio data.
def get_nitrate(cruise_data, index, row):
current_depth = row["Depth"]
min = None
max = None
if row["Depth"] <= 100: # for under 100m, we average nitrates between +/- 5m
min, max = current_depth - 5, current_depth + 5
elif row["Depth"] > 100: # for over 100m, we average nitrates between +/- 10m
min, max = current_depth - 10, current_depth + 10
lon = row["Longitude"]
lat = row["Latitude"]
avg_nitrate = cruise_data["Nitrate"][
(
(cruise_data.Depth <= max)
& (cruise_data.Depth >= min)
& (cruise_data.Longitude == lon)
& (cruise_data.Latitude == lat)
)
].mean()
return avg_nitrate
# create the ratio data
def add_ratio_data(cruise_data):
averaged_nitrate = []
# get averaged nitrate data at each point
for index, row in cruise_data.iterrows():
nitrate = get_nitrate(cruise_data, index, row)
averaged_nitrate.append(nitrate)
ratio = (
np.array(averaged_nitrate) / cruise_data["Iron"]
) # calculate ratio by dividing averaged nitrate by iron
cruise_data[
"Averaged Nitrate"
] = averaged_nitrate # add a column of averaged nitrate
cruise_data["Ratio"] = ratio # add the ratio column
# add the column of density data
def add_density_data(cruise_data):
# Uses the gsw library: http://www.teos-10.org/pubs/gsw/html/gsw_sigma0.html
practical_salinity = cruise_data["Salinity"]
pressure = cruise_data["Pressure"]
longitude = cruise_data["Longitude"]
latitude = cruise_data["Latitude"]
absolute_salinity = gsw.SA_from_SP(
practical_salinity, pressure, longitude, latitude
)
temperature = cruise_data["Temperature"]
sigma0 = gsw.sigma0(absolute_salinity, temperature)
cruise_data["Density"] = sigma0
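# Note: gsw.sigma0 returns the potential density anomaly referenced to 0 dbar, i.e. potential density
# minus 1000 kg/m^3, so the "Density" column holds sigma-0 values. Hypothetical sanity check:
#   gsw.sigma0(gsw.SA_from_SP(35.0, 0.0, -30.0, 40.0), 10.0)  # roughly 27 kg/m^3
# Strictly, gsw.sigma0 expects Conservative Temperature; the in-situ CTD temperature is used here
# as an approximation.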
# read in original data
GA03_data = pd.read_csv("./data/GA03w.csv")
GIPY05_data = pd.read_csv("./data/GIPY05e.csv")
GP02_data = pd.read_csv("./data/GP02w.csv")
GIPY04_data = pd.read_csv("./data/GIPY04.csv")
# the headers for our clean data
headers = [
"Station",
"Date",
"Latitude",
"Longitude",
"Depth",
"Temperature",
"Salinity",
"Nitrate",
"Iron",
"Pressure",
]
# make GA03 dataframe and csv
data = [
GA03_data["Station"],
GA03_data["yyyy-mm-ddThh:mm:ss.sss"],
GA03_data["Latitude [degrees_north]"],
GA03_data["Longitude [degrees_east]"],
GA03_data["DEPTH [m]"],
GA03_data["CTDTMP [deg C]"],
GA03_data["CTDSAL"],
GA03_data["NITRATE_D_CONC_BOTTLE [umol/kg]"],
GA03_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
GA03_data["PRESSURE [dbar]"],
]
GA03 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats
GA03 = GA03[
((GA03.Longitude <= 360 - 60) & (GA03.Longitude >= 360 - 65))
| (GA03.Longitude >= 360 - 25)
]
# GA03 = average_data(GA03)
add_ratio_data(GA03)
add_density_data(GA03)
GA03 = remove_empty_data(GA03) # remove empty iron data
GA03 = GA03[(GA03.Depth <= 500)] # only keep data less than 500m depth
GA03["Date"] = GA03.Date.str.split("T").str[
0
] # only keep the day,month,year of the date
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude < 310), "Station"] = (
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude < 310), "Station"].astype(
str
)
+ "W"
)
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude < 310), "Station"] = (
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude < 310), "Station"].astype(
str
)
+ "W"
)
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude > 310), "Station"] = (
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude > 310), "Station"].astype(
str
)
+ "E"
)
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude > 310), "Station"] = (
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude > 310), "Station"].astype(
str
)
+ "E"
)
stations = []
positions = []
for i in range(len(GA03)):
station = GA03["Station"].values[i]
lat = GA03["Latitude"].values[i]
lon = GA03["Longitude"].values[i]
if len(positions) == 0 or [lat, lon] != positions[-1]:
positions.append([lat, lon])
stations.append(station)
# print(stations)
for i in [4]: # choosing specific profiles
GA03 = GA03.drop(
GA03[
(GA03.Latitude == positions[i][0]) & (GA03.Longitude == positions[i][1])
].index
)
GA03.to_csv("./data/GA03_filtered.csv", index=False)
# make GIPY05 dataframe and csv
data = [
GIPY05_data["Station"],
GIPY05_data["yyyy-mm-ddThh:mm:ss.sss"],
GIPY05_data["Latitude [degrees_north]"],
GIPY05_data["Longitude [degrees_east]"],
GIPY05_data["DEPTH [m]"],
GIPY05_data["CTDTMP [deg C]"],
GIPY05_data["CTDSAL"],
GIPY05_data["NO2+NO3_D_CONC_BOTTLE [umol/kg]"],
GIPY05_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
GIPY05_data["PRESSURE [dbar]"],
]
GIPY05 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats
GIPY05 = GIPY05[(GIPY05.Latitude >= -45) | (GIPY05.Latitude <= -65)]
# GIPY05 = average_data(GIPY05)
add_ratio_data(GIPY05)
add_density_data(GIPY05)
GIPY05 = remove_empty_data(GIPY05)
GIPY05 = GIPY05[(GIPY05.Depth <= 500)]
GIPY05["Date"] = GIPY05.Date.str.split("T").str[0]
positions = []
stations = []
for i in range(len(GIPY05)):
station = GIPY05["Station"].values[i]
lat = GIPY05["Latitude"].values[i]
lon = GIPY05["Longitude"].values[i]
if len(positions) == 0 or [lat, lon] != positions[-1]:
positions.append([lat, lon])
stations.append(station)
# print(stations)
for i in [0]: # choosing specific profiles
GIPY05 = GIPY05.drop(
GIPY05[
(GIPY05.Latitude == positions[i][0]) & (GIPY05.Longitude == positions[i][1])
].index
)
GIPY05.to_csv("./data/GIPY05_filtered.csv", index=False)
# make GP02 dataframe and csv
data = [
GP02_data["Station"],
GP02_data["yyyy-mm-ddThh:mm:ss.sss"],
GP02_data["Latitude [degrees_north]"],
GP02_data["Longitude [degrees_east]"],
GP02_data["DEPTH [m]"],
GP02_data["CTDTMP [deg C]"],
GP02_data["CTDSAL"],
GP02_data["NO2+NO3_D_CONC_BOTTLE [umol/kg]"],
GP02_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
GP02_data["PRESSURE [dbar]"],
]
GP02 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats
GP02 = GP02[(GP02.Longitude <= 155) | (GP02.Longitude >= 180)]
# GP02 = average_data(GP02)
add_ratio_data(GP02)
add_density_data(GP02)
GP02 = remove_empty_data(GP02)
GP02 = GP02[(GP02.Depth <= 500)]
GP02["Date"] = GP02.Date.str.split("T").str[0]
positions = []
stations = []
for i in range(len(GP02)):
station = GP02["Station"].values[i]
lat = GP02["Latitude"].values[i]
lon = GP02["Longitude"].values[i]
if len(positions) == 0 or [lat, lon] != positions[-1]:
positions.append([lat, lon])
stations.append(station)
# print(stations)
# for i in []: #choosing specific profiles
# GP02 = GP02.drop(GP02[(GP02.Latitude == positions[i][0]) & (GP02.Longitude == positions[i][1])].index)
GP02.to_csv("./data/GP02_filtered.csv", index=False)
# make GIPY04 dataframe and csv
data = [
GIPY04_data["Station"],
GIPY04_data["yyyy-mm-ddThh:mm:ss.sss"],
GIPY04_data["Latitude [degrees_north]"],
GIPY04_data["Longitude [degrees_east]"],
GIPY04_data["DEPTH [m]"],
GIPY04_data["CTDTMP [deg C]"],
GIPY04_data["CTDSAL"],
GIPY04_data["NITRATE_D_CONC_BOTTLE [umol/kg]"],
GIPY04_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
GIPY04_data["PRESSURE [dbar]"],
]
GIPY04 = | pd.concat(data, axis=1, keys=headers) | pandas.concat |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
Series(range(3)).rolling(1).apply(len, raw=bad_raw)
def test_rolling_apply_out_of_bounds(engine_and_raw):
# gh-1850
engine, raw = engine_and_raw
vals = Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
expected = Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
# 5071
df = DataFrame(
{"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)},
index=date_range("20130101", periods=5, freq="s"),
)
# we have an equal spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with tm.external_error_raised(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_apply(engine_and_raw):
engine, raw = engine_and_raw
expected = Series([], dtype="float64")
result = expected.rolling(10).apply(lambda x: x.mean(), engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
    s = Series([None, None, None])
import requests
import pandas as pd
import json
# Assumes you've gotten a HUD census key at ./config/hudkey
# from https://www.huduser.gov/hudapi/public/register?comingfrom=1
# Supports Zip->Tract or Tract->Zip
request_url = 'https://www.huduser.gov/hudapi/public/register?comingfrom=1'
class HUDCall():
def __init__(
self,
geo_value: str,
hud_apikey : str,
geotype : str
):
self.geotype = geotype
self.geo_value = geo_value
self.request = self.call_api(hud_apikey)
print(self.request.status_code)
if self.request.status_code ==200:
self.json = json.loads(self.request.text)
self.pandas = self._toPandas()
        else:
            raise RuntimeError(f'Request invalid. Status code {self.request.status_code} received.')
def help(self):
'''Tag to keep crosswalks straight'''
print(f'General info: Queries HUD API with available key registered from {request_url}')
if self.geotype =='zip':
print('Using zip-tract crosswalk:','Identifies what proportion of a zipcode lies across census tracts.',
sep='\n\t')
if self.geotype =='tract':
print('Using tract-zip crosswalk:','Identifies what proportion of a tract lies within a zipcode.',
sep='\n\t')
print('For more information, please see https://www.huduser.gov/portal/dataset/uspszip-api.html')
def call_api(self,apikey ):
baseurl = 'https://www.huduser.gov/hudapi/public/usps'
if self.geotype == 'zip': _type = 1
elif self.geotype =='tract': _type = 6
if (len(self.geo_value) != 5 and _type ==1):
raise ValueError('Only 5 digit zipcode supported. Check input.')
elif (len(self.geo_value) !=11 and _type==6):
raise ValueError('Census tracts are 11 digits. Check input.')
call = baseurl + f'?type={_type}&query={self.geo_value}'
request_header = {'Authorization': f'Bearer {apikey}'}
r = requests.get(call, headers=request_header)
return r
def _toPandas(self):
j = self.json['data']
_tuple = (
( j['year'], j['quarter'], j['input'], j['crosswalk_type'], self.geotype,
j['results'][n]['geoid']) for n, _ in enumerate(j['results']))
_dict = {}
for n, t in enumerate(_tuple):
tmp_val = j['results'][n]
_dict[t] = {x:tmp_val[x] for x in tmp_val.keys() if 'ratio' in x}
        df = pd.DataFrame.from_dict(_dict, orient='index')
        return df
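# Illustrative usage sketch: the key file path and ZIP code below are placeholders,
# not values from the original source.
if __name__ == '__main__':
    with open('./config/hudkey') as key_file:
        hud_key = key_file.read().strip()
    crosswalk = HUDCall(geo_value='22031', hud_apikey=hud_key, geotype='zip')
    crosswalk.help()
    print(crosswalk.pandas)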
# Python 3 server example
from http.server import BaseHTTPRequestHandler, HTTPServer
from keras.models import Sequential
from keras import layers
import time
import math
import keras
import json
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
import os
import sqlite3
from os import path
from keras.utils import np_utils
import csv
from keras.models import Sequential
from keras import layers
import datetime
from numpy.random import seed
from sklearn.feature_extraction.text import CountVectorizer
dataset = "None"
model = "None"
modelPath = "./models/cnn_model1"
dbPath = "dataset.db"
ammountCat ={}
parameters = {}
conn = ""
serverStatus = "UP"
# Utility function to keep track of target (class) value counts
def checkKey(dict, key):
if key in dict.keys():
ammountCat[dict[key]] = ammountCat[dict[key]] + 1
return key
else:
dict[key] = len(dict) + 1
ammountCat[dict[key]]= 1
return key
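# Behavior sketch: starting from an empty dict and empty ammountCat, checkKey(classDict, 'A')
# registers 'A' -> 1, sets ammountCat[1] = 1 and returns 'A'; repeat calls bump ammountCat[1].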
# Function to build the word embedding matrix from a pretrained embedding file
def create_embedding_matrix(filepath, word_index, embedding_dim):
vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index
embedding_matrix = np.zeros((vocab_size, embedding_dim))
with open(filepath,'r', encoding='UTF8') as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)[:embedding_dim]
return embedding_matrix
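# Illustrative call (assumes a GloVe-style text file of "word v1 v2 ..." rows and a
# fitted Keras tokenizer, as built in loadDataSet below; the file name is a placeholder):
# embedding_matrix = create_embedding_matrix('glove.6B.50d.txt', tokenizer.word_index, 50)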
#load parameters from json file
def loadParameters(file):
param = {}
with open(file) as f:
param = json.load(f)
return param
def Nmaxelements(list1, N):
    final_list = []
    for i in range(0, N):
        max1 = 0
        k = 0
        jval = 0
        for j in range(len(list1)):
            if list1[j][1] > max1:
                max1 = list1[j][1]
                k = list1[j][0]
                jval = j
        del list1[jval]
        final_list.append(k)
    return final_list
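# e.g. Nmaxelements([('a', 3), ('b', 9), ('c', 5)], 2) returns ['b', 'c']; note that the
# input list is mutated (the selected elements are deleted from it).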
#load data on startup
def loadDataSet():
#connect to database
conn = sqlite3.connect(dbPath)
cur = conn.cursor()
#select values used in model
cur.execute("SELECT * FROM dataset WHERE used_in_model = 1")
rows = cur.fetchall()
column = {}
classDespDict = {}
encodingDict = {}
column["Class"] = []
column["Item Description"] = []
column["Class Description"] = []
for row in rows:
column['Item Description'].append(str(row[1]))
column['Class'].append(str(row[2]))
column['Class Description'].append(row[3])
encodingDict[row[4]] = row[2]
classDespDict[row[2]] = row[3]
d = {'label': column['Class'], 'sentence': column['Item Description']}
categories = len(classDespDict)
df_yelp = pd.DataFrame(data=d)
sentences = df_yelp['sentence'].values
y = df_yelp['label'].values
for i in range(len(y)):
y[i] = str(y[i])
#create encoder for predictions
encoder = LabelEncoder()
encoder.fit(y)
encoded_Y = encoder.transform(y)
y = np_utils.to_categorical(encoded_Y)
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=parameters["sampleParameters"]["testSize"], random_state=1000)
#create tokenizer for predictions
tokenizer = Tokenizer(num_words=parameters["sampleParameters"]["numWords"])
tokenizer.fit_on_texts(sentences)
return {"status": "LoadDataSet", "conn": cur, "tokenizer": tokenizer, "sentences_train": sentences_train,
"sentences_test": sentences_test, "y_train": y_train, "y_test": y_test, "y": y,
"classDescripDict": classDespDict, "encodingDict":encodingDict,'categorySize': categories,"weights":[], "db":conn}
#function to retrain model
def retrain(dataset):
global serverStatus
serverStatus = "DOWN"
conn = dataset["conn"]
conn.execute("SELECT DISTINCT class FROM dataset")
rows = conn.fetchall()
classes = []
column = {}
classDespDict = {}
encodingDict = {}
encodingDictRev= {}
column["Class"] = []
column["Item Description"] = []
column["Class Description"] = []
column["index_key"] = []
column["weight"] = []
classAmmount = {}
totalNumber = 0
#load data from database
for row in rows:
classCountSql = "SELECT count(class) FROM dataset WHERE class = %s" % row[0]
conn.execute(classCountSql)
classCount = conn.fetchone()
if classCount[0] > parameters['cutoutThreshold']:
classes.append(row[0])
classAmmount[row[0]]= classCount[0]
totalNumber = totalNumber + classCount[0]
for key, value in classAmmount.items():
classAmmount[key] = int(math.ceil((classAmmount[key]/totalNumber)*parameters["totalNumberOfSamples"]))
    # for each class above the threshold, load its samples
for i in range(len(classes)):
selectSql = "SELECT index_key, item_description, class , class_description , weight FROM dataset WHERE class = %s ORDER by weight DESC, timestamp DESC LIMIT %d" % (classes[i], classAmmount[classes[i]])
conn.execute(selectSql)
rows = conn.fetchall()
for row in rows:
column["index_key"].append(row[0])
column["Item Description"].append(row[1])
column["Class"].append(row[2])
column["Class Description"].append(row[3])
column["weight"].append(row[4])
classDespDict[row[2]] = row[3]
categories = len(list(classAmmount.keys()))
d = {'label': column['Class'], 'sentence': column['Item Description'], 'weight': column["weight"] }
df_yelp = pd.DataFrame(data=d)
sentences = df_yelp['sentence'].values
weights = df_yelp['weight'].values
y = df_yelp['label'].values
for i in range(len(y)):
y[i] = str(y[i])
encoder = LabelEncoder()
encoder.fit(y)
encoded_Y = encoder.transform(y)
eCount = 0
classWeight = {}
for en in encoded_Y:
encodingDict[en.item()] = y[eCount]
encodingDictRev[y[eCount]] = en.item()
eCount = eCount+1
if en.item() in classWeight.keys():
classWeight[en.item()] = classWeight[en.item()] + 1
else:
classWeight[en.item()] = 1
for key, value in classWeight.items():
weight = value / parameters['totalNumberOfSamples']
if weight < .001:
weight = .5
else:
weight = 1
classWeight[key] = weight
y = np_utils.to_categorical(encoded_Y)
sentences_train, sentences_test, y_train, y_test,weights_train,weights_test = train_test_split(
sentences, y,weights, test_size=parameters["sampleParameters"]["testSize"], random_state=1000)
tokenizer = Tokenizer(num_words=parameters["sampleParameters"]["numWords"])
tokenizer.fit_on_texts(sentences_train)
conn.execute("UPDATE dataset SET used_in_model = 0, encoding_val = -999")
for i in range(len(column['Class'])):
sqlUpdate = "UPDATE dataset SET used_in_model = %d, encoding_val = %d WHERE index_key = %d" % (1,encodingDictRev[column['Class'][i]],column['index_key'][i])
conn.execute(sqlUpdate)
return {"status": "newDataSet", "conn": conn, "tokenizer": tokenizer,
"sentences_train": sentences_train, "sentences_test": sentences_test, "y_train": y_train,
"y_test": y_test, "y": y, "classDescripDict": classDespDict, "encodingDict": encodingDict,
'categorySize': categories, "weights":weights_train, "db":dataset["db"], "classWeight":classWeight}
def createNewDataSet():
conn =""
if path.exists(dbPath) == True:
os.remove(dbPath)
c = sqlite3.connect(dbPath)
conn = c.cursor()
conn.execute('''CREATE TABLE dataset ( index_key integer PRIMARY KEY autoincrement, item_description string, class string,
class_description string, encoding_val integer, weight integer, used_in_model integer,timestamp timestamp)''')
conn.execute('''CREATE TABLE entry (entry_key integer PRIMARY KEY autoincrement, new_entries integer) ''')
f = open(parameters['sampleFile'], 'r', encoding='UTF8')
reader = csv.reader(f)
headers = next(reader, None)
column = {}
columnIndex = {}
classDict = {}
classDespDict = {}
encodingDict = {}
encodingDictRev= {}
count = 0
for h in headers:
columnIndex[h] = count
count = count + 1
column["Class"] = []
column["Item Description"] = []
column["Class Description"] = []
trainingData = []
count = 0
for row in reader:
if row[columnIndex[parameters["yVal"]]] == "":
continue
blankVal = False
itemDescrip= ''
for i in parameters['xVals']:
if row[columnIndex[i]] == "":
blankVal = True
break
itemDescrip = itemDescrip +' '+ row[columnIndex[i]]
if blankVal == True:
continue
column['Class'].append(checkKey(classDict, row[columnIndex[parameters['yVal']]]))
column['Item Description'].append(itemDescrip)
column['Class Description'].append(row[columnIndex[parameters["yDescription"]]])
classDespDict[row[columnIndex[parameters['yVal']]]] = row[columnIndex[parameters["yDescription"]]]
count = count + 1
if count > parameters['totalNumberOfSamples']:
break
highest = 0
for key, value in ammountCat.items():
if value > highest:
highest = value
if value > parameters['cutoutThreshold']:
for i in range(len(column['Class'])):
if classDict[column['Class'][i]] == key:
trainingData.append(i)
total = 0
categories = 0
for key, value in ammountCat.items():
if value > parameters['cutoutThreshold']:
categories = categories + 1
total = value + total
print(len(column['Class']), len(column['Item Description']), total)
d = {'label': column['Class'], 'sentence': column['Item Description']}
    df_yelp = pd.DataFrame(data=d)
"""
This module tests high level dataset API functions which require entire datasets, indices, etc
"""
from collections import OrderedDict
import pandas as pd
import pandas.testing as pdt
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
def test_dataset_get_indices_as_dataframe_partition_keys_only(
dataset_with_index, store_session
):
expected = pd.DataFrame(
OrderedDict([("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
result = ds.get_indices_as_dataframe(columns=dataset_with_index.partition_keys)
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe(dataset_with_index, store_session):
expected = pd.DataFrame(
OrderedDict([("L", [1, 2]), ("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
ds = ds.load_index("L", store_session)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_duplicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict(
[
("p_external_code", ["1", "1", "2", "2"]),
("l_external_code", ["1", "2", "1", "2"]),
]
),
index=pd.Index(["part1", "part1", "part2", "part2"], name="partition"),
)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_predicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict([("p_external_code", ["1"])]),
index=pd.Index(["part1"], name="partition"),
)
result = ds.get_indices_as_dataframe(
columns=["p_external_code"], predicates=[[("p_external_code", "==", "1")]]
)
    pdt.assert_frame_equal(result, expected)
"""
"""
import numpy as np
import pandas as pd
import itertools
from scipy.sparse import csr_matrix
import networkx as nx
def make_mapping(unique_individuals):
"""
    Create a mapping between a set of unique individual IDs and indices into
    co-occurrence and distance matrices.
    Parameters
----------
unique_individuals : array
Contains the individual IDs of all the members of the data-frame
Returns
-------
dict
A mapping between the IDs and ordinals [0,..., n], where n is
the number of unique IDs.
"""
mapping = dict(zip(unique_individuals,
np.arange(unique_individuals.shape[0])))
return mapping
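# e.g. make_mapping(np.array(['p1', 'p2', 'p3'])) maps 'p1' -> 0, 'p2' -> 1, 'p3' -> 2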
def groups_co_occurrence(df, individual_var, group_var, T=None,
mapping=None, sparse=None):
"""
Count the co-occurrence of individuals in a group.
Parameters
----------
df : DataFrame
The data-frame with individual records to cluster.
individual_var : string
The variable (column) that identifies individuals.
group_var: string
The variable (column) that identifies groups. This is the clustering
variable.
T : ndarray, optional
If provided, this is a matrix that defines the unweighted graph of
connections between individuals. Default: None, which implies that
a matrix of zeros is initialized.
mapping : dict, optional
If provided, defines a mapping between individual identifiers and
indices in the T array. Default: None, which implies this dict
is generated on the fly.
sparse : bool, optional
Whether to use a sparse CSR matrix to represent the graph.
Returns
-------
Matrix with integer values that indicates the number of times individuals
(mapped through mapping and inv_mapping) have appeared together in the
same group.
"""
unique_individuals = df[individual_var].unique()
if T is None:
if not sparse:
T = np.zeros((unique_individuals.shape[0],
unique_individuals.shape[0]))
if mapping is None:
mapping = make_mapping(unique_individuals)
else:
mapping = mapping
gb = df.groupby(group_var)
if sparse:
rows = []
cols = []
for gid, group in gb:
ids = group[individual_var].unique()
pairs = itertools.permutations(ids, 2)
for pair in pairs:
rows.append(mapping[pair[0]])
cols.append(mapping[pair[1]])
T = csr_matrix((np.ones(len(cols)), (rows, cols)))
else:
for gid, group in gb:
ids = group[individual_var].unique()
pairs = itertools.permutations(ids, 2)
rows = []
cols = []
for pair in pairs:
rows.append(mapping[pair[0]])
cols.append(mapping[pair[1]])
T[rows, cols] = T[rows, cols] + 1
return T
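# Minimal illustration (hypothetical data):
# df = pd.DataFrame({'pid': ['a', 'b', 'a', 'c'], 'grp': [1, 1, 2, 2]})
# mapping = make_mapping(df['pid'].unique())
# T = groups_co_occurrence(df, 'pid', 'grp', mapping=mapping)
# T[mapping['a'], mapping['b']] == 1  # 'a' and 'b' shared group 1
# T[mapping['a'], mapping['c']] == 1  # 'a' and 'c' shared group 2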
def time_co_occurrence(df, individual_var, time_var, time_unit='ns',
time_delta=0, T=None, mapping=None):
"""
Group by co-occurrence of the times of enrollment (entry, exit).
Parameters
----------
time_var : list
A list of all the time-variables to consider for co-occurrence
time_unit : string
What unit is used to represent time? (default: 'ns')
time_delta : float or int
How many of the time-unit is still considered "co-occurrence"?
(default: 0).
"""
unique_individuals = df[individual_var].unique()
if T is None:
T = np.zeros((unique_individuals.shape[0],
unique_individuals.shape[0]))
if mapping is None:
mapping = make_mapping(unique_individuals)
else:
mapping = mapping
# We'll identify differences as things smaller than this:
dt0 = np.timedelta64(time_delta, time_unit)
for tv in time_var:
df[tv]
# Broadcast and get pairwise time-differences:
diff = np.array(df[tv])[:, None] - np.array(df[tv])[:, None].T
# Anything larger than the time_delta would do here:
        # NaT differences should never count as co-occurrence (assumed completion of the
        # truncated source line):
        diff[pd.isnull(diff)] = dt0 + np.timedelta64(1, time_unit)
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 19:28:11 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
###chapter5
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
PREVIOUS_MAX_ROWS = pd.options.display.max_rows
pd.options.display.max_rows = 20
np.set_printoptions(precision=4, suppress=True)
### Series
obj = pd.Series([4, 7, -5, 3])
obj_array = obj.values
obj_range = obj.index
obj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
obj2_array = obj2.values
obj2_range = obj2.index
obj3 = obj2[['a','c','d']]
obj3_array = obj3.values
obj3_range = obj3.index
obj4 = obj2[obj2>0]
obj5 = obj2*2
obj6 = np.exp(obj2)
#print('b' in obj2)
#print('e' in obj2)
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj7 = pd.Series(sdata)
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj8 = pd.Series(sdata, index=states)
#print(pd.isnull(obj8))
#print(pd.notnull(obj8))
obj9 = obj7 + obj8
obj8.name = 'population'
obj8.index.name = 'state'
####DataFrame
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002, 2003],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
print(frame.state)
#print(frame.head())
#print(frame.columns)
frame = pd.DataFrame(data, columns=['year', 'state', 'pop'])
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],
index=['one', 'two', 'three', 'four',
'five', 'six'])
fc1 = frame2['state']
fc2 = frame2.state
#print(fc1==fc2)
#print(id(fc1)==id(fc2))
fr1 = frame2.loc['two']
#print(fr1)
frame2['debt'] = np.arange(6.)
#print(frame2)
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
#print(frame2)
frame2['eastern'] = frame2.state == 'Ohio'
del frame2['eastern']
pop = {'Nevada': {2001: 2.4, 2002: 2.9},
'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame3 = pd.DataFrame(pop)
#print(frame3.T)
frame4 = pd.DataFrame(pop, index=[2001, 2002, 2003])
pdata = {'Ohio': frame3['Ohio'][:-1],
'Nevada': frame3['Nevada'][:2]}
frame5 = pd.DataFrame(pdata)
#!/usr/bin/python3
#-*- coding: UTF-8 -*-
#import os
import sys
import logging
import argparse
import time
import re
import json
import xml.etree.ElementTree
from xml.dom import minidom
import numpy as np
import pandas as pd
def parse_args():
"""Parse the command line for options."""
parser = argparse.ArgumentParser(description='LR fixer')
parser.add_argument('-i', '--input_file_name', default='template.csv', help='Input csv')
parser.add_argument('-o', '--output_file_name', default='object.xlsx', help='Output file name')
options = parser.parse_args()
return options
def run_as_script():
options = parse_args()
tab1content = {'col_a': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_b': pd.Series(data=np.random.randint(10, 100, 3), index=['a', 'b', 'c']),
'col_c': pd.Series(data=np.random.randint(10, 100, 3), index=['a', 'b', 'c'])}
tab2content = {'col_a': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_b': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_c': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c'])}
tab3content = {'col_a': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_b': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_c': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c'])}
tab4content = {'col_a': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_b': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c']),
'col_c': pd.Series(data=np.random.randint(10, 100, 3),index=['a', 'b', 'c'])}
tab1 = pd.DataFrame(data=tab1content)
    tab2 = pd.DataFrame(data=tab2content)
import robinhoodwrapper
import logging
import inspect
import pandas as pd
import commonqueries
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas_market_calendars as mcal
import pytz
import os
import configwrapper
class TradeRobinhood():
def __init__(self,config_file):
self.config = configwrapper.ConfigWrapper(config_file=config_file)
data_collections=self.build_collections('FINANCIALDATA_COLLECTIONS')
user_collections=self.build_collections('USERS_COLLECTIONS')
self.data_collections=data_collections
self.user_collections=user_collections
self.data_cq=commonqueries.CommonQueries(port=self.config.get_int('FINANCIALDATA_MONGO','port'),host=self.config.get_string('FINANCIALDATA_MONGO','host'), username=self.config.get_string('FINANCIALDATA_MONGO','username'), password=self.config.get_string('FINANCIALDATA_MONGO','password'), dbname=self.config.get_string('FINANCIALDATA_MONGO','dbname'),collections=data_collections)
self.user_cq=commonqueries.CommonQueries(port=self.config.get_int('USERS_MONGO','port'),host=self.config.get_string('USERS_MONGO','host'), username=self.config.get_string('USERS_MONGO','username'), password=self.config.get_string('USERS_MONGO','password'), dbname=self.config.get_string('USERS_MONGO','dbname'),collections=user_collections)
return
def get_trade_now(self):
x=mcal.get_calendar('NYSE').schedule(start_date=datetime.now().date()-relativedelta(days=7),end_date=datetime.now().date()+relativedelta(days=7))
now = pytz.utc.localize(datetime.utcnow())
today=now.date()
x=x[pd.to_datetime(x['market_open'])>=now]
time_until_market_open=float((x['market_open'].iloc[0]-now).total_seconds())
max_time_between_close_and_open=float(17.5*60*60) #4:00pm until 9:30 the next day, is 7.5 hours
tradenow=True
if time_until_market_open>max_time_between_close_and_open:
logging.info('more than 7.5 hours until the next market open, not trading now')
tradenow=False
return tradenow
def build_collections(self,section='FINANCIALDATA_COLLECTIONS'):
self.user_collections={}
for option in self.config.get_options(section):
self.user_collections[option]=self.config.get_string(section,option)
return self.user_collections
def trade_robinhood(self):
recommended_portfolio=pd.DataFrame(list(self.data_cq.mongo.db[self.data_collections['quantative_value_recommended']].find({},{'_id':0})))
        # calculate each company's empercentage
for row, company in recommended_portfolio.iterrows():
empercentage=self.data_cq.get_percent_greater_than(self.data_collections['metrics'],self.data_cq.ticker2cik(company['ticker']),'emyield')
recommended_portfolio.loc[row,'empercentage']=1-empercentage
min_empercentage=float(recommended_portfolio['empercentage'].min()) #the value where we will sell any stock less than this number
user_df = pd.DataFrame(list(self.user_cq.mongo.db[self.user_collections['robinhood_users']].find()))
user_df = user_df.sort_values('username')
user_df = user_df.drop_duplicates('username') # has the usernames and passwords of all robinhood users
rh_generic = robinhoodwrapper.RobinHoodWrapper(instruments=self.data_cq.get_robinhood_instruments())
for row, data in recommended_portfolio.iterrows():
recommended_portfolio.loc[row, 'robinhood_price'] = rh_generic.get_last_price(data['ticker'])
recommended_portfolio.loc[row, 'instrument'] = rh_generic.symbol2instrument(data['ticker'])
if (recommended_portfolio['price'] != recommended_portfolio['robinhood_price']).any():
logging.error('pricemismatch')
logging.error(str(recommended_portfolio[recommended_portfolio['price'] != recommended_portfolio['robinhood_price']]))
recommended_portfolio.to_csv('recommended_portfolio.csv')
if len(recommended_portfolio[recommended_portfolio['price'] != recommended_portfolio['robinhood_price']]) >= .1 * float(len(recommended_portfolio)): # if more than 10% of the companies dont match
logging.error('more than 10 percent of the companies dont match, dont trade, something is wrong')
return
recommended_portfolio=recommended_portfolio[pd.notnull(recommended_portfolio['price'])]
recommended_portfolio=recommended_portfolio[pd.notnull(recommended_portfolio['robinhood_price'])]
recommended_portfolio['price']=recommended_portfolio['price'].round(2)
recommended_portfolio['robinhood_price']=recommended_portfolio['robinhood_price'].round(2)
recommended_portfolio['weight']=recommended_portfolio['weight']/(recommended_portfolio['weight'].sum())
recommended_portfolio=recommended_portfolio.set_index('ticker',drop=False)
if len(recommended_portfolio)==0:
logging.error('empty trade dataframe')
return
recommended_portfolio_orig = recommended_portfolio.copy(deep=True)
for index,account in user_df.iterrows():
rh_user=robinhoodwrapper.RobinHoodWrapper(username=account['username'],password=account['password'],instruments=self.data_cq.get_robinhood_instruments())
#get all the options from the user
user_trade_options=account['trade']
should_trade_now=self.get_trade_now()
live_trade=user_trade_options['live_trade']
options_trade=user_trade_options['options_trade']
can_trade_options=rh_user.can_trade_options()
master_options_trade=self.config.get_bool('TRADING','trade_options')
master_live_trade=self.config.get_bool('TRADING','live_trade')
if master_options_trade is False or not can_trade_options or not options_trade:
options_trade=False
if not live_trade or not should_trade_now or master_live_trade is False:
live_trade=False
if float(rh_user.get_accounts()[0]['cash'])==0:
logging.info('we have no money to trade today')
continue
#FIRST WE DO THE BUYS
recommended_portfolio=recommended_portfolio_orig.copy(deep=True)
#filter out wash sale symbols, this way we are always fully invested as we are able
washsalesymboles=rh_user.get_wash_sale_symbols()
recommended_portfolio=recommended_portfolio[~recommended_portfolio['ticker'].isin(washsalesymboles)]
recommended_portfolio['weight']=recommended_portfolio['weight']/(recommended_portfolio['weight'].sum())
current_positions=rh_user.get_positions()
recommended_portfolio['desired_value']=recommended_portfolio['weight']*(float(rh_user.get_total_portfolio_value())+float(rh_user.get_accounts()[0]['cash']))
current_positions=current_positions[current_positions['instrument'].isin(recommended_portfolio['instrument'])] #filter our current positions so we only look at positions we have that we also want to buy
recommended_portfolio['current_value']=float(0)
for index,row in current_positions.iterrows():
recommended_portfolio.loc[rh_user.instrument2symbol(row['instrument']),'current_value']=float(row['quantity'])*float(row['last_trade_price'])
#we need to see if we have any current put option positions and take this into account and modify the current_value
if options_trade is True:
current_options_positions=rh_user.get_options_positions()
if current_options_positions is not None and len(current_options_positions)>0:
                    #todo 6/28 we still need to adjust the current value of positions with outstanding options, both call and put
current_options_positions=current_options_positions[current_options_positions['type']=='put']
if len(current_options_positions)>0:
logging.error('we need to do something with the optoins we have in our account because we now actually have put options')
current_options_positions.to_csv('current_options_positions.csv')
exit()
recommended_portfolio['new_value']=recommended_portfolio['desired_value']-recommended_portfolio['current_value']
recommended_portfolio=recommended_portfolio[recommended_portfolio['new_value']>0] #we only take buys, we dont worry about that we are overallocated to
recommended_portfolio['new_weight']=recommended_portfolio['new_value']/(recommended_portfolio['new_value'].sum())
recommended_portfolio['today_value_add']=recommended_portfolio['new_weight']*float(rh_user.get_accounts()[0]['cash'])
recommended_portfolio['shares']=recommended_portfolio['today_value_add']/(recommended_portfolio['price'])
recommended_portfolio['max_shares']=np.floor(recommended_portfolio['new_value']/(recommended_portfolio['price'])) #the maximum number of shares we would want to purchase today
recommended_portfolio=recommended_portfolio.sort_values('shares',ascending=False)
while any(recommended_portfolio['shares']<1) and len(recommended_portfolio)>0:
recommended_portfolio=recommended_portfolio[:-1]
recommended_portfolio['new_weight']=recommended_portfolio['today_value_add']/(recommended_portfolio['today_value_add'].sum())
recommended_portfolio['today_value_add']=recommended_portfolio['new_weight']*float(rh_user.get_accounts()[0]['cash'])
recommended_portfolio['shares']=recommended_portfolio['today_value_add']/(recommended_portfolio['price']) #we will only purchase at this limit price
recommended_portfolio=recommended_portfolio.sort_values('shares',ascending=False)
if len(recommended_portfolio)==0:
logging.info('empty recommended df after filtering for shares')
continue
recommended_portfolio['shares']=np.floor(recommended_portfolio['shares'])
recommended_portfolio['shares']=recommended_portfolio[['shares','max_shares']].min(axis=1) #take the minimum of what we are going to by, and the max we should, this will ensure that we never overallocate
if live_trade:
rh_user.cancel_all_orders() #ONLY REMOVE THE open stock orders, we really should not NEED to cancel, we can work it into our calculations
if options_trade is True:
rh_user.cancel_all_options_orders() #removes all current option orders
logging.info(recommended_portfolio)
for symbol,order in recommended_portfolio.iterrows():
if options_trade is True:
if float(order['shares'])>100:
option_chain=self.data_cq.convert_option_chain_rh2td(symbol=symbol,stock_price=rh_user.get_last_price(symbol),option_chain=rh_user.get_options_instrument_data(symbol=symbol))
best_put_to_sell=self.data_cq.get_best_put_to_sell(symbol,option_chain=option_chain,exercise_fee=0,trading_fee=0,contract_fee=0)
                        if pd.notnull(best_put_to_sell):
                            pass  # source truncated here; the put-selling order placement would follow
import re
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork import DataColumn, DataTable
from woodwork.datatable import _check_unique_column_names
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
LogicalType,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
Timedelta,
ZIPCode
)
from woodwork.tests.testing_utils import (
check_column_order,
mi_between_cols,
to_pandas,
validate_subset_dt
)
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
dask_delayed = import_or_none('dask.delayed')
ks = import_or_none('databricks.koalas')
def test_datatable_df_property(sample_df):
dt = DataTable(sample_df)
assert dt.df is sample_df
pd.testing.assert_frame_equal(to_pandas(dt.df), to_pandas(sample_df))
def test_datatable_with_numeric_datetime_time_index(time_index_df):
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': Datetime})
error_msg = 'Time index column must contain datetime or numeric values'
with pytest.raises(TypeError, match=error_msg):
DataTable(time_index_df, name='datatable', time_index='strs', logical_types={'strs': Datetime})
assert dt.time_index == 'ints'
assert dt.to_dataframe()['ints'].dtype == 'datetime64[ns]'
def test_datatable_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
dt = DataTable(time_index_df, time_index='ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Integer
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Specify logical type for time index on init
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': 'Double'})
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Change time index to normal datetime time index
dt = dt.set_time_index('times')
date_col = dt['ints']
assert dt.time_index == 'times'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'numeric'}
# Set numeric time index after init
dt = DataTable(time_index_df, logical_types={'ints': 'Double'})
dt = dt.set_time_index('ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
def test_datatable_adds_standard_semantic_tags(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={
'id': Categorical,
'age': Integer,
})
assert dt.semantic_tags['id'] == {'category'}
assert dt.semantic_tags['age'] == {'numeric'}
def test_check_unique_column_names(sample_df):
if ks and isinstance(sample_df, ks.DataFrame):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if dd and isinstance(sample_df, dd.DataFrame):
duplicate_cols_df = dd.concat([duplicate_cols_df, duplicate_cols_df['age']], axis=1)
else:
duplicate_cols_df.insert(0, 'age', [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(IndexError, match='Dataframe cannot contain duplicate columns names'):
_check_unique_column_names(duplicate_cols_df)
def test_datatable_types(sample_df):
new_dates = ["2019~01~01", "2019~01~02", "2019~01~03", "2019~01~04"]
if dd and isinstance(sample_df, dd.DataFrame):
sample_df['formatted_date'] = pd.Series(new_dates)
else:
sample_df['formatted_date'] = new_dates
ymd_format = Datetime(datetime_format='%Y~%m~%d')
dt = DataTable(sample_df, logical_types={'formatted_date': ymd_format})
returned_types = dt.types
assert isinstance(returned_types, pd.DataFrame)
assert 'Physical Type' in returned_types.columns
assert 'Logical Type' in returned_types.columns
assert 'Semantic Tag(s)' in returned_types.columns
assert returned_types.shape[1] == 3
assert len(returned_types.index) == len(sample_df.columns)
assert all([dc.logical_type in ww.type_system.registered_types or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
'formatted_date': ymd_format
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(returned_types['Logical Type'])
for tag in returned_types['Semantic Tag(s)']:
assert isinstance(tag, str)
def test_datatable_typing_info_with_col_names(sample_df):
dt = DataTable(sample_df)
typing_info_df = dt._get_typing_info(include_names_col=True)
assert isinstance(typing_info_df, pd.DataFrame)
assert 'Data Column' in typing_info_df.columns
assert 'Physical Type' in typing_info_df.columns
assert 'Logical Type' in typing_info_df.columns
assert 'Semantic Tag(s)' in typing_info_df.columns
assert typing_info_df.shape[1] == 4
assert typing_info_df.iloc[:, 0].name == 'Data Column'
assert len(typing_info_df.index) == len(sample_df.columns)
assert all([dc.logical_type in LogicalType.__subclasses__() or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(typing_info_df['Logical Type'])
for tag in typing_info_df['Semantic Tag(s)']:
assert isinstance(tag, str)
correct_column_names = pd.Series(list(sample_df.columns),
index=list(sample_df.columns))
assert typing_info_df['Data Column'].equals(correct_column_names)
def test_datatable_head(sample_df):
dt = DataTable(sample_df, index='id', logical_types={'email': 'EmailAddress'}, semantic_tags={'signup_date': 'birthdat'})
head = dt.head()
assert isinstance(head, pd.DataFrame)
assert isinstance(head.columns, pd.MultiIndex)
if dd and isinstance(sample_df, dd.DataFrame):
assert len(head) == 2
else:
assert len(head) == 4
for i in range(len(head.columns)):
name, dtype, logical_type, tags = head.columns[i]
dc = dt[name]
# confirm the order is the same
assert dt._dataframe.columns[i] == name
# confirm the rest of the attributes match up
assert dc.dtype == dtype
assert dc.logical_type == logical_type
assert str(list(dc.semantic_tags)) == tags
shorter_head = dt.head(1)
assert len(shorter_head) == 1
assert head.columns.equals(shorter_head.columns)
def test_datatable_repr(small_df):
dt = DataTable(small_df)
dt_repr = repr(dt)
expected_repr = ' Physical Type Logical Type Semantic Tag(s)\nData Column \nsample_datetime_series datetime64[ns] Datetime []'
assert dt_repr == expected_repr
dt_html_repr = dt._repr_html_()
expected_repr = '<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>Physical Type</th>\n <th>Logical Type</th>\n <th>Semantic Tag(s)</th>\n </tr>\n <tr>\n <th>Data Column</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>sample_datetime_series</th>\n <td>datetime64[ns]</td>\n <td>Datetime</td>\n <td>[]</td>\n </tr>\n </tbody>\n</table>'
assert dt_html_repr == expected_repr
def test_datatable_repr_empty(empty_df):
dt = DataTable(empty_df)
assert repr(dt) == 'Empty DataTable'
assert dt._repr_html_() == 'Empty DataTable'
assert dt.head() == 'Empty DataTable'
def test_set_types_combined(sample_df):
dt = DataTable(sample_df, index='id', time_index='signup_date')
assert dt['signup_date'].semantic_tags == set(['time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Integer
assert dt['is_registered'].semantic_tags == set()
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == NaturalLanguage
assert dt['phone_number'].logical_type == NaturalLanguage
semantic_tags = {
'signup_date': ['test1'],
'age': [],
'is_registered': 'test2'
}
logical_types = {
'email': 'EmailAddress',
'phone_number': PhoneNumber,
'age': 'Double'
}
dt = dt.set_types(logical_types=logical_types, semantic_tags=semantic_tags)
assert dt['signup_date'].semantic_tags == set(['test1', 'time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Double
assert dt['is_registered'].semantic_tags == set(['test2'])
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == EmailAddress
assert dt['phone_number'].logical_type == PhoneNumber
def test_new_dt_from_columns(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
empty_dt = dt._new_dt_from_cols([])
assert len(empty_dt.columns) == 0
just_index = dt._new_dt_from_cols(['id'])
assert just_index.index == dt.index
assert just_index.time_index is None
validate_subset_dt(just_index, dt)
just_time_index = dt._new_dt_from_cols(['signup_date'])
assert just_time_index.time_index == dt.time_index
assert just_time_index.index is None
validate_subset_dt(just_time_index, dt)
transfer_schema = dt._new_dt_from_cols(['phone_number'])
assert transfer_schema.index is None
assert transfer_schema.time_index is None
validate_subset_dt(transfer_schema, dt)
def test_pop(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={'age': Integer},
semantic_tags={'age': 'custom_tag'},
use_standard_tags=True)
datacol = dt.pop('age')
assert isinstance(datacol, DataColumn)
assert 'custom_tag' in datacol.semantic_tags
assert all(to_pandas(datacol.to_series()).values == [33, 25, 33, 57])
assert datacol.logical_type == Integer
assert 'age' not in dt.to_dataframe().columns
assert 'age' not in dt.columns
assert 'age' not in dt.logical_types.keys()
assert 'age' not in dt.semantic_tags.keys()
def test_shape(categorical_df):
dt = ww.DataTable(categorical_df)
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 5)
assert dt_shape == df_shape
dt.pop('ints')
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 4)
assert dt_shape == df_shape
def test_select_invalid_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
err_msg = "Invalid selector used in include: 1 must be either a string or LogicalType"
with pytest.raises(TypeError, match=err_msg):
dt.select(['boolean', 'index', Double, 1])
dt_empty = dt.select([])
assert len(dt_empty.columns) == 0
def test_select_single_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d')
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth'
})
dt_ltype_string = dt.select('full_name')
assert len(dt_ltype_string.columns) == 1
assert 'full_name' in dt_ltype_string.columns
dt_ltype_obj = dt.select(Integer)
assert len(dt_ltype_obj.columns) == 2
assert 'age' in dt_ltype_obj.columns
assert 'id' in dt_ltype_obj.columns
dt_tag_string = dt.select('index')
assert len(dt_tag_string.columns) == 1
assert 'id' in dt_tag_string.columns
dt_tag_instantiated = dt.select('Datetime')
assert len(dt_tag_instantiated.columns) == 1
assert 'signup_date' in dt_tag_instantiated.columns
def test_select_list_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d'),
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth',
'email': 'tag2',
'is_registered': 'category'
})
dt_just_strings = dt.select(['FullName', 'index', 'tag2', 'boolean'])
assert len(dt_just_strings.columns) == 4
assert 'id' in dt_just_strings.columns
assert 'full_name' in dt_just_strings.columns
assert 'email' in dt_just_strings.columns
assert 'is_registered' in dt_just_strings.columns
dt_mixed_selectors = dt.select([FullName, 'index', 'time_index', Integer])
assert len(dt_mixed_selectors.columns) == 4
assert 'id' in dt_mixed_selectors.columns
assert 'full_name' in dt_mixed_selectors.columns
assert 'signup_date' in dt_mixed_selectors.columns
assert 'age' in dt_mixed_selectors.columns
dt_common_tags = dt.select(['category', 'numeric', Boolean, Datetime])
assert len(dt_common_tags.columns) == 3
assert 'is_registered' in dt_common_tags.columns
assert 'age' in dt_common_tags.columns
assert 'signup_date' in dt_common_tags.columns
def test_select_instantiated():
ymd_format = Datetime(datetime_format='%Y~%m~%d')
df = pd.DataFrame({
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd': ["2019~01~01", "2019~01~02", "2019~01~03"],
})
dt = DataTable(df,
logical_types={'ymd': ymd_format,
'dates': Datetime})
dt = dt.select('Datetime')
assert len(dt.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
dt.select(ymd_format)
def test_select_maintain_order(sample_df):
dt = DataTable(sample_df, logical_types={col_name: 'NaturalLanguage' for col_name in sample_df.columns})
new_dt = dt.select('NaturalLanguage')
check_column_order(dt, new_dt)
def test_filter_cols(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
filtered = dt._filter_cols(include='email', col_names=True)
assert filtered == ['email']
filtered_log_type_string = dt._filter_cols(include='NaturalLanguage')
filtered_log_type = dt._filter_cols(include=NaturalLanguage)
assert filtered_log_type == filtered_log_type_string
filtered_semantic_tag = dt._filter_cols(include='numeric')
assert filtered_semantic_tag == ['age']
filtered_multiple = dt._filter_cols(include=['numeric'])
expected = ['phone_number', 'age']
for col in filtered_multiple:
assert col in expected
filtered_multiple_overlap = dt._filter_cols(include=['NaturalLanguage', 'email'], col_names=True)
expected = ['full_name', 'phone_number', 'email']
for col in filtered_multiple_overlap:
assert col in expected
def test_datetime_inference_with_format_param():
df = pd.DataFrame({
'index': [0, 1, 2],
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd_special': ["2019~01~01", "2019~01~02", "2019~01~03"],
'mdy_special': pd.Series(['3~11~2000', '3~12~2000', '3~13~2000'], dtype='string'),
})
dt = DataTable(df,
name='dt_name',
logical_types={'ymd_special': Datetime(datetime_format='%Y~%m~%d'),
'mdy_special': Datetime(datetime_format='%m~%d~%Y'),
'dates': Datetime},
time_index='ymd_special')
assert dt.time_index == 'ymd_special'
assert dt['dates'].logical_type == Datetime
assert isinstance(dt['ymd_special'].logical_type, Datetime)
assert isinstance(dt['mdy_special'].logical_type, Datetime)
dt = dt.set_time_index('mdy_special')
assert dt.time_index == 'mdy_special'
df = pd.DataFrame({
'mdy_special': pd.Series(['3&11&2000', '3&12&2000', '3&13&2000'], dtype='string'),
})
dt = DataTable(df)
dt = dt.set_types(logical_types={'mdy_special': Datetime(datetime_format='%m&%d&%Y')})
dt.time_index = 'mdy_special'
assert isinstance(dt['mdy_special'].logical_type, Datetime)
assert dt.time_index == 'mdy_special'
def test_natural_language_inference_with_config_options():
dataframe = pd.DataFrame({
'index': [0, 1, 2],
'values': ["0123456", "01234567", "012345"]
})
ww.config.set_option('natural_language_threshold', 5)
dt = DataTable(dataframe, name='dt_name')
assert dt.columns['values'].logical_type == NaturalLanguage
ww.config.reset_option('natural_language_threshold')
def test_describe_dict(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_dict = dt.describe_dict()
index_order = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
stats_dict_to_df = pd.DataFrame(stats_dict).reindex(index_order)
stats_df = dt.describe()
pd.testing.assert_frame_equal(stats_df, stats_dict_to_df)
def test_describe_does_not_include_index(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_df = dt.describe()
assert 'index_col' not in stats_df.columns
def test_datatable_describe_method(describe_df):
categorical_ltypes = [Categorical,
CountryCode,
Ordinal(order=('yellow', 'red', 'blue')),
SubRegionCode,
ZIPCode]
boolean_ltypes = [Boolean]
datetime_ltypes = [Datetime]
formatted_datetime_ltypes = [Datetime(datetime_format='%Y~%m~%d')]
timedelta_ltypes = [Timedelta]
numeric_ltypes = [Double, Integer]
natural_language_ltypes = [EmailAddress, Filepath, FullName, IPAddress,
PhoneNumber, URL]
latlong_ltypes = [LatLong]
expected_index = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
# Test categorical columns
category_data = describe_df[['category_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'object'
else:
expected_dtype = 'category'
for ltype in categorical_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'category', 'custom_tag'},
'count': 7,
'nunique': 3,
'nan_count': 1,
'mode': 'red'}, name='category_col')
dt = DataTable(category_data, logical_types={'category_col': ltype}, semantic_tags={'category_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'category_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['category_col'].dropna())
# Test boolean columns
boolean_data = describe_df[['boolean_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'bool'
else:
expected_dtype = 'boolean'
for ltype in boolean_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 8,
'nan_count': 0,
'mode': True,
'num_true': 5,
'num_false': 3}, name='boolean_col')
dt = DataTable(boolean_data, logical_types={'boolean_col': ltype}, semantic_tags={'boolean_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'boolean_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['boolean_col'].dropna())
# Test datetime columns
datetime_data = describe_df[['datetime_col']]
for ltype in datetime_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': pd.Timestamp('2020-01-19 09:25:42.857142784'),
'mode': pd.Timestamp('2020-02-01 00:00:00'),
'min': pd.Timestamp('2020-01-01 00:00:00'),
            'max': pd.Timestamp('2020-02-02 18:00:00')}, name='datetime_col')
import pandas
import lib.file as file
import lib.text as text
def getCombined(corpora, targetCorpus, shouldEnhance=False):
"""
@param corpora:
@param targetCorpus:
@param shouldEnhance:
@return:
@rtype: DataFrame
"""
metadataList = []
for corpus in corpora:
if ((targetCorpus == 'all') or (targetCorpus and corpus['name'] == targetCorpus)):
targetMetadata = file.readMetadata(corpus.get('metadataFilename'))
if (shouldEnhance):
textFilesFolder = corpus.get('textFilesFolder')
enhancedMetadata = text.enhanceMetadata(textFilesFolder, metadata=targetMetadata, detectPublishedYear=False,
calculateTokens=False)
metadataList.append(enhancedMetadata)
else:
metadataList.append(targetMetadata)
    return pandas.concat(metadataList)
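# Illustrative call (the corpus dict keys are inferred from the accesses above; names
# and paths are placeholders):
# corpora = [{'name': 'corpusA',
#             'metadataFilename': 'corpusA/metadata.csv',
#             'textFilesFolder': 'corpusA/texts'}]
# combined = getCombined(corpora, targetCorpus='corpusA', shouldEnhance=False)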
import pandas as pd
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
import numpy as np
import math
import sys
import argparse
import json
import html
def read_excel(path):
df = pd.read_excel(path, sheet_name=0, header=None, index_col=None)
r_count = len(df.index)
c_count = len(df.columns)
map = {}
for i in range(0, c_count):
label = df.iloc[1, i]
map[i] = label
print(map)
data = []
search = []
uri_context = "https://webpark5032.sakura.ne.jp/tmp/sat/context.json"
context = {
"@context" : [
{
"ex" : "http://example.org/",
"data" : "https://nakamura196.github.io/sat/data",
"keiten" : "https://nakamura196.github.io/sat/経典番号/"
}
]
}
with open("../static/context.json", 'w') as f:
json.dump(context, f, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': '))
for j in range(2, r_count):
id = df.iloc[j, 0]
id = str(id).zfill(5)
print(id)
if id == "00000":
continue
経典番号 = df.iloc[j, 1]
uri = "data:"+id+".json"
uri_経典番号 = "keiten:"+経典番号+".json"
枝番 = df.iloc[j, 9] if not pd.isnull(df.iloc[j, 9]) else ""
# print(df.iloc[j, 8], df.iloc[j, 9], df.iloc[j, 10], df.iloc[j, 11], df.iloc[j, 12], df.iloc[j, 13])
uri_sat = "https://21dzk.l.u-tokyo.ac.jp/SAT2018/"+df.iloc[j, 8]+枝番+"_."+str(df.iloc[j, 10]).zfill(2)+"."+str(df.iloc[j, 11]).zfill(4)+df.iloc[j, 12]+str(df.iloc[j, 13]).zfill(2)+".html"
# --------
texts_k = []
for c in range(0, 5):
start = c * 10
if not pd.isnull(df.iloc[j, 22+start]):
obj = {
"@id": uri+"#テキスト"+str(c+1)+"(勘同目録)",
"ex:標準名称" : df.iloc[j, 22+start],
}
if not pd.isnull(df.iloc[j, 23+start]):
obj["ex:巻"] = df.iloc[j, 23+start]
if not pd.isnull(df.iloc[j, 24+start]):
obj["ex:国"] = df.iloc[j, 24+start]
if not pd.isnull(df.iloc[j, 25+start]):
obj["ex:時代"] = df.iloc[j, 25+start]
if not pd.isnull(df.iloc[j, 26+start]):
obj["ex:年"] = df.iloc[j, 26+start]
if not pd.isnull(df.iloc[j, 27+start]):
obj["ex:~年"] = df.iloc[j, 27+start]
if not pd.isnull(df.iloc[j, 28+start]):
obj["ex:刊写者"] = df.iloc[j, 28+start]
if not pd.isnull(df.iloc[j, 29+start]):
obj["ex:刊写形態"] = df.iloc[j, 29+start]
if not pd.isnull(df.iloc[j, 30+start]):
obj["ex:関与者"] = df.iloc[j, 30+start]
if not pd.isnull(df.iloc[j, 31+start]):
obj["ex:関与形態"] = df.iloc[j, 31+start]
texts_k.append(obj)
# --------
holds_k = []
for c in range(0, 2):
start = c * 2
if not pd.isnull(df.iloc[j, 72+start]):
obj = {
"@id": uri+"#所蔵者"+str(c+1)+"(勘同目録)",
"ex:国" : df.iloc[j, 72+start],
}
if not pd.isnull(df.iloc[j, 73+start]):
obj["ex:所蔵者"] = df.iloc[j, 73+start]
holds_k.append(obj)
# --------
obj_k = {
"@id" : uri+"#勘同目録",
"ex:texts" : texts_k,
"ex:所蔵者" : holds_k
}
if not pd.isnull(df.iloc[j, 14]):
obj_k["ex:底本/校本"] = df.iloc[j, 14]
if not pd.isnull(df.iloc[j, 15]):
obj_k["ex:❹"] = df.iloc[j, 15]
if not pd.isnull(df.iloc[j, 16]):
obj_k["ex:❼"] = df.iloc[j, 16]
        if not pd.isnull(df.iloc[j, 17]):
            pass  # source truncated here; presumably another obj_k["ex:..."] field assignment follows
import pandas as pd
import numpy as np
df= pd.read_csv('..//Datos//Premios2020.csv', encoding='ISO-8859-1')
opciones = pd.value_counts(df['genre1'])
import sys, os
sys.path.append("../ern/")
sys.path.append("../..dies/dies/")
sys.path.append(os.path.expanduser("~/workspace/prophesy_code/"))
import pandas as pd
import numpy as np
import glob, argparse, copy, tqdm
from ern.shift_features import ShiftFeatures
from ern.utils import to_short_name
import pathlib
from ern.utils_data import (
create_consistent_number_of_sampler_per_day,
get_data_with_intersected_timestamps,
)
import prophesy
from prophesy.utils.utils import get_blacklist
def get_df(file):
df = pd.read_csv(file, sep=";")
create_timestampindex(df)
df = create_consistent_number_of_sampler_per_day(df, num_samples_per_day=24 * 4)
cols_to_drop = [c for c in df.columns if "sin" in c.lower() or "cos" in c.lower()]
df.drop(cols_to_drop, inplace=True, axis=1)
df["Hour"] = df.index.hour
df["DayOfYear"] = df.index.dayofyear
return df
def create_timestampindex(df):
df.PredictionTimeUTC = pd.to_datetime(
df.PredictionTimeUTC, infer_datetime_format=True, utc=True
)
df.rename({"PredictionTimeUTC": "TimeUTC"}, inplace=True)
df.set_index("PredictionTimeUTC", inplace=True)
def main(data_folder, data_type):
files = glob.glob(data_folder + "/*.csv")
output_folder = f"data/{data_type}_cosmo/"
pathlib.Path(output_folder).mkdir(exist_ok=True, parents=True)
bl = get_blacklist(data_type)
min_samples = 22.5 * 30 * 24 * 3.565
if "pv" in data_type:
min_samples = 12 * 30 * 24 * 4 * 1.125
dfs = {}
for f in tqdm.tqdm(files):
df = get_df(f)
sn = to_short_name(f)
if sn in bl:
print("skipped")
continue
time_diff_min = (df.index[11] - df.index[10]).seconds / 60.0
if time_diff_min != 15:
print(
f"WARNING: Skipping file due to time difference is {time_diff_min} instead of 15 mins."
)
            return pd.DataFrame()
import numpy as np
import utils.gen_cutouts as gc
from sklearn import metrics
import pandas as pd
import ipdb
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['mathtext.fontset'] = 'stixsans'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
MEAN_TEMP = 2.726 * (10**6)
DEFAULT_FONT = 24
import os
from global_settings import DATA_PATH, FULL_DATA_PATH, FULL_DATA_LABEL_PATH, CNN_MODEL_OUTPUT_DIR, CACHE_FULLDF, CACHE_MAPPED_HALOS, CACHE_FULLDF_DIST2EDGE_CAL
def prepare_data_class(dir_test, num_frequency=3, get_all_components=False, label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
#y_data = np.load(dir_test + "1025_hashalo_freq%03i.npy"%148) # y data (labels)
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
#samples has CMB+TSZ
try:
com = ['samples','ksz','ir_pts','rad_pts','dust']
x_data_all['base'] = _load_help("1025_samples_freq%03i{}.npy".format(suffix))
ksz_comp = _load_help("1025_ksz_freq%03i{}.npy".format(suffix))
x_data_all['ksz'] = [x_data_all['base'][i] + ksz_comp[i] for i in range(3)]
ir_comp = _load_help("1025_ir_pts_freq%03i{}.npy".format(suffix))
x_data_all['ir'] = [x_data_all['ksz'][i] + ir_comp[i] for i in range(3)]
rad_comp = _load_help("1025_rad_pts_freq%03i{}.npy".format(suffix))
x_data_all['rad'] = [x_data_all['ir'][i] + rad_comp[i] for i in range(3)]
dust_comp = _load_help("1025_dust_freq%03i{}.npy".format(suffix))
x_data_all['dust'] = [x_data_all['rad'][i] + dust_comp[i] for i in range(3)]
except Exception as err:
print("error: ", err)
print("reading only the composite")
x_data_all['dust'] = _load_help("1025_skymap_freq%03i{}.npy".format(suffix))
#return x_data_all['dust'], y_data
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
return x_data if get_all_components else x_data['dust'], y_data[idx], idx
return x_data if get_all_components else x_data['dust'], y_data
def prepare_data_class2(dir_test, num_frequency=3, component="skymap", label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
use_noise=True,
get_test_idx=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
else:
noises = [0., 0., 0.]
#samples has CMB+TSZ
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
splits = np.asarray([0.8, 0.2])
splits = np.round(splits / splits.sum() * nsamples).astype(int).cumsum()
split_idx = np.split(np.arange(nsamples),splits[:-1])
x_data, x_test = {k: x_data[k][split_idx[0]] for k in x_data.keys()}, {k: x_data[k][split_idx[-1]] for k in x_data.keys()}
y_data, y_test = y_data[split_idx[0]], y_data[split_idx[-1]]
nsamples = len(y_data)
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
if get_test_idx: return x_data[component], y_data[idx], x_test[component], y_test, idx, split_idx[-1]
return x_data[component], y_data[idx], x_test[component], y_test, idx
if get_test_idx:
return x_data[component], y_data, x_test[component], y_test, split_idx[-1]
return x_data[component], y_data, x_test[component], y_test
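
# Usage sketch for prepare_data_class2 (illustrative only): `sim_dir` is a placeholder
# for the directory holding the simulated 1025_* cutout .npy files, and the printed
# shapes follow the (nsamples, num_frequency, 10, 10) layout assembled above.
def _example_prepare_data_class2(sim_dir="/path/to/sim_cutouts"):
    x_train, y_train, x_test, y_test = prepare_data_class2(
        sim_dir, num_frequency=3, component="skymap", balanced=False, use_noise=True)
    print("train:", x_train.shape, "positives:", int(y_train.sum()))
    print("test:", x_test.shape, "positives:", int(y_test.sum()))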
class DataHolder:
def __init__(self, data, label, idx):
self.data = data
self.label = label
self.idx = idx
def get(self, which, ratio=None, incl_idx=False):
curr_idx = self.idx[which]
y_data = self.label[curr_idx]
if ratio is not None:
n_pos = int(y_data.sum())
idx = np.arange(len(y_data))
idx = np.concatenate([idx[y_data == 0.0][:int(ratio * n_pos)], idx[y_data == 1.0]])
curr_idx = curr_idx[idx]
if incl_idx:
return self.data[curr_idx], self.label[curr_idx], curr_idx
return self.data[curr_idx], self.label[curr_idx]
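
# Minimal illustration of DataHolder on made-up toy arrays (not project data): `idx`
# maps split names to index arrays, mirroring what DataGetter.split_idx provides.
def _example_dataholder():
    rng = np.random.RandomState(0)
    data = rng.rand(20, 3, 10, 10)
    label = rng.randint(0, 2, size=20).astype(float)
    idx = {"train": np.arange(0, 14), "valid": np.arange(14, 17), "test": np.arange(17, 20)}
    holder = DataHolder(data, label, idx)
    x_tr, y_tr = holder.get("train")
    x_bal, y_bal, used_idx = holder.get("train", ratio=1.0, incl_idx=True)
    print(x_tr.shape, y_tr.mean(), x_bal.shape, used_idx[:5])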
class DataGetter:
WO_DUST_MAPPING = ("dust", ['samples', 'ksz', 'ir_pts', 'rad_pts'])
def __init__(self, dir_test, overlap=False):
self.dir_test = dir_test
self.overlap = overlap
self.halocounter = gc.HalosCounter(overlap=overlap)
df = self.halocounter.get_complete_df()
if overlap:
df = df.reset_index().rename(columns={"index": "cutout_id"})
test_idx = df[(df['cutout_ra'] >= 0.5 * 90) & (df['cutout_dec'] > 0.5 * 90)].index
train_idx = df[~df.index.isin(test_idx)].index
n_samples = len(train_idx)
splits = np.asarray([0.65, 0.1])
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
#print(splits)
#print(train_idx, len(train_idx))
split_idx = np.split(train_idx, splits[:-1])
split_idx = [split_idx[0], split_idx[1], test_idx]
#print(len(split_idx[0]), len(split_idx[1]), len(split_idx[2]))
#print(split_idx[0], split_idx[1], split_idx[2])
else:
n_samples = df.shape[0]
splits = np.asarray([0.7, 0.1, 0.2]) # (train ratio, valid ratio, test ratio)
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
split_idx = np.split(np.arange(n_samples), splits[:-1])
#print(list(map(len, split_idx)), df.shape)
self.split_idx = {"train":split_idx[0], 'valid':split_idx[1], 'test':split_idx[2]}
pass
def get_labels(self, thres=5e13, which='full'):
if isinstance(thres, float) or isinstance(thres, int):
thres = ("%0.0e"%(thres)).replace("+", "")
label_fname = {"5e13": "m5e13_z0.25_y.npy", "2e14":"m2e14_z0.5_y.npy"}[thres]
y_data = np.load(os.path.join(self.dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
if which == 'full': return y_data
return y_data[self.split_idx[which]]
def get_data(self, component, thres=5e13, use_noise=False, num_frequency=3):
suffix = "_overlap" if self.overlap else ""
freqs = [90, 148, 219]
def _load_help(name_format):
paths = [os.path.join(self.dir_test, name_format % freq) for freq in freqs]
return [np.load(p) for p in paths]
y_data = self.get_labels(thres, which='full')
nsamples = len(y_data)
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
# load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(self.dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in
["_90", "_150", "_220"]]
noises = [noises[0] * 2.8, noises[1] * 2.6, noises[2] * 6.6]
else:
noises = [0., 0., 0.]
# samples has CMB+TSZ
if isinstance(component, str):
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
elif isinstance(component,tuple):
component, lc = component
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(lc[0], suffix))
for cc in lc[1:]:
tx = _load_help("1025_{}_freq%03i{}.npy".format(cc, suffix))
assert len(tx) == len(x_data_all[component])
x_data_all[component] = [x_data_all[component][i] + tx[i] for i in range(len(tx))]
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples, num_frequency, 10, 10), dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:, i, :, :] = np.squeeze(x_data_all[com1][i] * k2uk * Tcmb) + noises[i]
else:
x_data[com1][:, 0, :, :] = -np.squeeze(x_data_all[com1][2] * k2uk * Tcmb) - noises[2]
x_data[com1][:, 0, :, :] += np.squeeze(x_data_all[com1][1] * k2uk * Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:, 1, :, :] = -np.squeeze(x_data_all[com1][2] * k2uk * Tcmb) - noises[2]
x_data[com1][:, 1, :, :] += np.squeeze(x_data_all[com1][0] * k2uk * Tcmb) + noises[0]
return DataHolder(x_data[component], y_data, self.split_idx)
def get_full_df(self):
df = self.halocounter.get_complete_df().reset_index().rename(columns={"index":"cutout_id"})
for k, idx in self.split_idx.items():
df.loc[idx, "which_set"] = k
return df
class IndexMapper:
#map cutout_id to the index location
def __init__(self, overlap=False):
self.overlap = overlap
        o = DataGetter(DATA_PATH, overlap=overlap)
self.split_idx = o.split_idx
self.full_idx = gc.HalosCounter(overlap=overlap).get_complete_df().index
self.split_idx = {"train":self.full_idx[self.split_idx['train']],
"valid":self.full_idx[self.split_idx['valid']],
"test":self.full_idx[self.split_idx['test']],
"full":self.full_idx}
self.reverse_idx = {'train':{}, 'valid':{}, 'test':{}, 'full':{}}
for k in self.split_idx.keys():
idx = self.split_idx[k]
for i, d in enumerate(idx):
self.reverse_idx[k][d] = i
def get(self, i, which='test'):
return self.reverse_idx[which][i]
def eval(models, get_test_func, model_weight_paths=None, pred_only=False):
y_prob_avg = None
y_probs = []
x_test, y_test = get_test_func()
num_nets = len(models)
for i in range(num_nets):
model = models[i]
if model_weight_paths is not None:
model.load_weights(model_weight_paths[i])
y_prob = model.predict(x_test)
y_probs.append(y_prob.squeeze())
y_prob_avg = y_prob if y_prob_avg is None else y_prob + y_prob_avg
y_probs = np.stack(y_probs, 0)
y_prob_avg /= float(num_nets)
y_pred = (y_prob_avg > 0.5).astype('int32').squeeze() # binary classification
if pred_only:
return y_prob_avg
return summary_results_class(y_probs, y_test), y_pred, y_prob_avg, y_test, x_test, models
def summary_results_class(y_probs, y_test, threshold=0.5, log_roc=False, show_f1=True):
"""
y_probs: a list of independent predictions
y_test: true label
threshold: predict the image to be positive when the prediction > threshold
"""
# measure confusion matrix
if show_f1:
threshold, maxf1 = get_F1(y_probs.mean(0),y_test)
threshold = threshold - 1e-7
cm = pd.DataFrame(0, index=['pred0','pred1'], columns=['actual0','actual1'])
cm_std = pd.DataFrame(0, index=['pred0', 'pred1'], columns=['actual0', 'actual1'])
#memorizing the number of samples in each case (true positive, false positive, etc.)
tp_rate, tn_rate = np.zeros(len(y_probs)), np.zeros(len(y_probs))
for actual_label in range(2):
for pred_label in range(2):
cnt = np.zeros(len(y_probs))
for i in range(len(y_probs)):
cnt[i] = np.sum(np.logical_and(y_test == actual_label, (y_probs[i] > threshold) == pred_label))
cm.loc["pred%d"%pred_label,"actual%d"%actual_label] = cnt.mean()
cm_std.loc["pred%d" % pred_label, "actual%d" % actual_label] = cnt.std()
print("Confusion matrix (cnts)",cm)
print("Confusion matrix (stdev of cnts)", cm_std)
#Measuring the true positive and negative rates,
#since the false positive/negative rates are always 1 minus these,
#they are not printed and have the same standard deviation
for i in range(len(y_probs)):
pred_i = y_probs[i] > threshold
tp_rate[i] = np.sum(np.logical_and(y_test==1, pred_i==1)) / np.sum(pred_i==1)
tn_rate[i] = np.sum(np.logical_and(y_test==0, pred_i==0)) / np.sum(pred_i == 0)
print("True Positive (rate): {0:0.4f} ({1:0.4f})".format(tp_rate.mean(), tp_rate.std()))
print("True Negative (rate): {0:0.4f} ({1:0.4f})".format(tn_rate.mean(), tn_rate.std()))
def vertical_averaging_help(xs, ys, xlen=101):
"""
Interpolate the ROC curves to the same grid on x-axis
"""
numnets = len(xs)
xvals = np.linspace(0,1,xlen)
yinterp = np.zeros((len(ys),len(xvals)))
for i in range(numnets):
yinterp[i,:] = np.interp(xvals, xs[i], ys[i])
return xvals, yinterp
fprs, tprs = [], []
for i in range(len(y_probs)):
fpr, tpr, _ = metrics.roc_curve(y_test, y_probs[i], pos_label=1)
fprs.append(fpr)
tprs.append(tpr)
new_fprs, new_tprs = vertical_averaging_help(fprs, tprs)
# measure Area Under Curve (AUC)
    y_prob_mean = y_probs.mean(0)
try:
auc = metrics.roc_auc_score(y_test, y_prob_mean)
print()
print("AUC:", auc)
except Exception as err:
print(err)
auc = np.nan
    #Take the percentiles of the ROC curves at each point
new_tpr_mean, new_tpr_5, new_tpr_95 = new_tprs.mean(0), np.percentile(new_tprs, 95, 0), np.percentile(new_tprs, 5, 0)
# plot ROC curve
plt.figure(figsize=[12,8])
lw = 2
plt.plot(new_fprs, new_tpr_mean, color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % metrics.auc(new_fprs, new_tpr_mean))
if len(y_probs) > 1:
plt.plot(new_fprs, new_tpr_95, color='yellow',
lw=lw, label='ROC curve 5%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_95)))
plt.plot(new_fprs, new_tpr_5, color='yellow',
lw=lw, label='ROC curve 95%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_5)))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right", fontsize=16)
plt.grid()
plt.show()
#If log flag is set, plot also the log of the ROC curves within some reasonable range
if log_roc:
# plot ROC curve
plt.figure(figsize=[12,8])
lw = 2
plt.plot(np.log(new_fprs), np.log(new_tpr_mean), color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % metrics.auc(new_fprs, new_tpr_mean))
if len(y_probs) > 1:
plt.plot(np.log(new_fprs), np.log(new_tpr_95), color='yellow',
lw=lw, label='ROC curve 5%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_95)))
plt.plot(np.log(new_fprs), np.log(new_tpr_5), color='yellow',
lw=lw, label='ROC curve 95%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_5)))
#plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-5, -3])
plt.ylim([-1, 0.2])
plt.xlabel('Log False Positive Rate', fontsize=16)
plt.ylabel('Log True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right", fontsize=16)
plt.grid()
plt.show()
return (auc,maxf1) if show_f1 else auc, (tp_rate.mean(),tn_rate.mean()), new_fprs, new_tprs
#=======================================================Prediction criteria here
import numpy as np
import pickle
import pandas as pd
from scipy.optimize import minimize
def _get_Fbeta(y, yhat, beta=1., debug=False, get_raw=False):
TP = ((y == 1) & (yhat == 1)).sum()
FP = ((y == 0) & (yhat == 1)).sum()
TN = ((y == 0) & (yhat == 0)).sum()
FN = ((y == 1) & (yhat == 0)).sum()
if debug: print("TP: {}, FP:{}, TN:{}, FN:{}".format(TP, FP, TN, FN))
if FP+TP == 0 or TP + FN==0 or TP == 0: return -1.
precision = (TP) / (FP + TP).astype(float)
recall = (TP) / (TP + FN).astype(float)
if debug:
print("TP={}; FP={}; TN={}; FN={}; precision={};recall={}".format(((y == 1) & (yhat == 1)).sum(),
((y == 0) & (yhat == 1)).sum(),
((y == 0) & (yhat == 0)).sum(),
((y == 1) & (yhat == 0)).sum(), precision,
recall))
    if get_raw: return precision, recall, (1 + beta ** 2) * (precision * recall) / (beta ** 2 * precision + recall)
    return (1 + beta ** 2) * (precision * recall) / (beta ** 2 * precision + recall)
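
# Worked toy example of _get_Fbeta (values are illustrative): with these labels and
# predictions, TP=2, FP=1, FN=1, so precision = recall = 2/3 and F1 = 2/3.
def _example_fbeta():
    y_true = np.array([1, 1, 1, 0, 0, 0])
    y_hat = np.array([1, 1, 0, 1, 0, 0])
    print("F1 =", _get_Fbeta(y_true, y_hat, beta=1.))  # ~0.667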
def get_F1(y_pred, y, xlim=None, method='cnn', mass_thresh='5e13', plot=True,
save_path=None, xlabel=None, get_raw=False, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
if xlim is None:
xlim = (0, 0.997)
x = np.linspace(xlim[0], xlim[1])
elif isinstance(xlim, tuple):
x = np.linspace(xlim[0], xlim[1])
else:
x = xlim
    Fscore = lambda thresh: _get_Fbeta(y, (y_pred > thresh).astype(int))
    f1_scores = np.asarray([Fscore(xx) for xx in x]).clip(0.)
    if plot:
        f = plt.figure(figsize=(8, 5))
        plt.plot(x, f1_scores)
        plt.xlim(x[0], x[-1])
        if xlabel:
            plt.xlabel(xlabel)
        else:
            plt.xlabel('%s Threshold' % ("CNN Prob" if method == 'cnn' else "MF S/N"))
        plt.ylabel('F1 Score', fontsize=font)
        if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches='tight')
        plt.show(block=True)
    if get_raw: return x, f1_scores
    return x[np.argmax(f1_scores)], np.max(f1_scores)
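
# Usage sketch for get_F1 on synthetic scores (illustrative): the function sweeps
# thresholds over xlim and returns the threshold that maximizes the F1 score.
def _example_get_f1():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=500)
    y_score = 0.3 * rng.rand(500) + 0.5 * y_true  # noisy but informative scores
    best_thr, best_f1 = get_F1(y_score, y_true, plot=False)
    print("best threshold %.3f gives F1 %.3f" % (best_thr, best_f1))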
def stack_F1(xmf, ymf, xcnn, ycnn, save_path=None, font=DEFAULT_FONT, title="", hist={}, nxbins=20):
lns = []
fig = plt.figure(figsize=(8,5))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
plt.rcParams.update({'font.size': font})
lns.extend(ax1.plot(xmf, ymf, label='MF', color='purple'))
ax1.set_xlabel("MF S/N Ratio")
ax1.set_ylabel("F1 Score")
lns.extend(ax2.plot(xcnn, ycnn, label='CNN', color='black'))
ax2.set_xlabel("CNN Prob")
import matplotlib.patches as mpatches
if 'MF' in hist:
assert 'CNN' in hist
ax12 = ax1.twinx()
bins = np.linspace(*ax1.get_xlim(), num=nxbins)
#ax12.set_ylim(0, 100000)
ax12.hist(hist['MF'][~pd.isnull(hist['MF'])], alpha=0.3, bins=bins, color='purple',
#weights=np.ones(len(hist['MF']))/len(hist['MF'])
)
lns.append(mpatches.Patch(color='purple', label='MF Score Dist.', alpha=0.3))
ax12.set_yscale('log')
ax22 = ax2.twinx()
bins = np.linspace(*ax2.get_xlim(), num=nxbins)
#ax22.set_ylim(0, 100000)
ax22.hist(hist['CNN'], alpha=0.3, bins=bins, color='black',
#weights=np.ones(len(hist['CNN']))/len(hist['CNN'])
)
lns.append(mpatches.Patch(color='black', label='CNN Score Dist.', alpha=0.3))
ax22.set_yscale('log')
if ax12.get_ylim()[1] > ax22.get_ylim()[1]:
ax22.set_ylim(ax12.get_ylim())
else:
ax12.set_ylim(ax22.get_ylim())
#ylim1 = ax12.get_ylim()
#ylim2 = ax22.get_ylim()
#ylim = (min(ylim1[0], ylim2[0]), max(ylim1[1], ylim2[1]))
#print(ylim)
#for _temp in [ax12, ax22]:
#_temp.set_ylim(ylim)
#_temp.set_yscale('log')
ax12.set_ylabel("Counts", fontsize=font)
#ax12.set_yscale('log')
labs = [l.get_label() for l in lns]
plt.title(title)
plt.legend(lns, labs, loc='lower center', prop={"size":font-8})
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
return
def get_F1_CNN_and_MF(vdf, col_cnn='score_wdust (trained>%s)', col_mf ='mf_peaksig', col_label='Truth(>%s)',
mass_thresh='5e13', method='and', save_path=None, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
import itertools
if mass_thresh == '5e13':
cnn_range = (0, 0.997)
mf_range = (3, 15)
else:
#cnn_range = (0.4, 0.8)
#mf_range = (3, 15)
cnn_range = (0.2, 0.9)
mf_range = (3, 25)
cnn_range = np.linspace(cnn_range[0], cnn_range[1])
mf_range = np.linspace(mf_range[0], mf_range[1])
#criteria = itertools.product(cnn_range, mf_range)
criteria = [(c,m) for c in cnn_range for m in mf_range]
if method == 'or':
Fscore = lambda cc, mc: _get_Fbeta(vdf[col_label], ((vdf[col_cnn] > cc) | (vdf[col_mf] > mc)).astype(int))
elif method == 'and':
Fscore = lambda cc, mc: _get_Fbeta(vdf[col_label], ((vdf[col_cnn] > cc) & (vdf[col_mf] > mc)).astype(int))
elif method == 'rankproduct':
rnk_cnn = vdf[col_cnn].rank() / len(vdf)
rnk_mf = vdf[col_mf].rank() / float(vdf[col_mf].count())
return get_F1(rnk_cnn * rnk_mf, vdf[col_label], xlim=(0.7, .985), xlabel='rank product', save_path=save_path, font=font)
cnn_x = np.asarray([c[0] for c in criteria])
mf_y = np.asarray([c[1] for c in criteria])
vals = np.asarray([Fscore(cc,mc) for cc,mc in criteria])
cm = plt.cm.get_cmap('YlGn')
sc = plt.scatter(cnn_x, mf_y, c=vals, cmap=cm)
plt.scatter(*criteria[np.argmax(vals)], s=100, c='black', marker='x', linewidth=3)
cbar = plt.colorbar(sc)
cbar.set_label("F1 Score", rotation=270, labelpad=20)
plt.xlabel("CNN Threshold")
plt.ylabel("MF Threshold")
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
return criteria[np.argmax(vals)], np.max(vals)
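
# Toy demonstration of get_F1_CNN_and_MF (column names and values are made up): with
# method='and', the combined criterion requires both the CNN probability and the MF
# S/N to exceed their respective thresholds.
def _example_f1_cnn_and_mf():
    rng = np.random.RandomState(0)
    vdf = pd.DataFrame({
        "cnn_prob": rng.rand(300),
        "mf_peaksig": rng.rand(300) * 20,
        "has_halo": rng.randint(0, 2, 300).astype(bool),
    })
    return get_F1_CNN_and_MF(vdf, col_cnn="cnn_prob", col_mf="mf_peaksig",
                             col_label="has_halo", method="and", mass_thresh="5e13")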
import glob
import ipdb
class PytorchResultReader(object):
def __init__(self, data_dir = CNN_MODEL_OUTPUT_DIR,
exp_name="ratio1-20_convbody=R-50-C4_SGD_lr=0.005_wd=0.003_steps=1000-4000_comp=skymap",
xlim=(0.5, 0.7)):
self.data_dir = data_dir
if exp_name is None:
exp_name = [f for f in os.listdir(self.data_dir) if "_lr" in f and '_wd' in f]
self.exp_names = [exp_name] if isinstance(exp_name,str) else exp_name
labels = pd.read_pickle(FULL_DATA_LABEL_PATH)
test_labels, val_labels = labels[labels['which'] == 'test'], labels[labels['which'] == 'valid']
np.random.seed(0)
self.labels = test_labels.iloc[np.random.permutation(len(test_labels))]
np.random.seed(0)
self.val_labels = val_labels.iloc[np.random.permutation(len(val_labels))]
self.xlim = xlim
def _read_one(self, exp_name, xlim=None):
dir_path = os.path.join(self.data_dir, exp_name)
fs = sorted(glob.glob(os.path.join(dir_path, 'results', 'epoch*.pkl')),
key=lambda x: int(x.replace(".pkl", "").split("epoch")[1]))
if len(fs) ==0: return None, None, None
dfs = {w: pd.DataFrame(columns=['acc', 'loss', 'F1', "F1_thres"], dtype=float) for w in ['test', 'val_ret']}
preds, val_preds = {}, {}
iter_ser = {}
for f in fs:
epoch = int(os.path.basename(f).replace(".pkl", "").split("epoch")[1])
res = pd.read_pickle(f)
for which in dfs.keys():
for key in ['acc', 'loss']:
dfs[which].loc[epoch, key] = res[which][key]
y_pred, y = res[which]['y_pred'], res[which]['y']
dfs[which].loc[epoch, 'F1_thres'], dfs[which].loc[epoch, 'F1'] = get_F1(y_pred, y, plot=False, xlim=xlim)
if f == fs[-1]:
for which in ['train', 'val']: dfs[which] = pd.DataFrame(res[which]).astype(float)
if 'true' not in preds:
preds['true'] = res['test']['y']
val_preds['true'] = res['val_ret']['y']
preds[epoch] = res['test']['y_pred']
val_preds[epoch] = res['val_ret']['y_pred']
iter_ser[epoch] = pd.DataFrame(res['train']).index.max()
min_len = min(len(res['test']['y']), len(self.labels))
assert min_len == (res['test']['y'][:min_len] == self.labels['y'].values[:min_len]).sum()
preds = pd.DataFrame(preds).iloc[:min_len]
preds.index = self.labels.index[:min_len]
preds['which'] = 'test'
val_min_len = min(len(res['val_ret']['y']), len(self.val_labels))
assert val_min_len == (res['val_ret']['y'][:val_min_len] == self.val_labels['y'].values[:val_min_len]).sum()
val_preds = pd.DataFrame(val_preds).iloc[:val_min_len]
val_preds.index = self.val_labels.index[:val_min_len]
val_preds['which'] = 'valid'
preds = pd.concat([preds, val_preds])
preds.index.name = 'cutout_id'
return dfs, preds, iter_ser
def get_all(self):
results = {exp: self._read_one(exp, self.xlim) for exp in self.exp_names}
return results
def get_best(self):
best_test_results, best_test_F1 = {}, -1.
best_val_results, best_val_F1 = {}, -1.
for exp in self.exp_names:
res = self._read_one(exp, self.xlim)
if res[0] is None: continue
best_epoch = res[0]['test'].sort_values('F1', ascending=False).index[0]
if res[0]['test'].loc[best_epoch, 'F1'] > best_test_F1:
best_test_F1 = res[0]['test'].loc[best_epoch, 'F1']
best_test_results['pred'] = res[1].reindex(columns=['true', best_epoch, 'which'])
best_test_results['stat'] = res[0]['test'].loc[best_epoch]
best_test_results['name'] = exp
best_epoch = res[0]['val_ret'].sort_values('F1', ascending=False).index[0]
if res[0]['val_ret'].loc[best_epoch, 'F1'] > best_val_F1:
best_val_F1 = res[0]['val_ret'].loc[best_epoch, 'F1']
best_val_results['pred'] = res[1].reindex(columns=['true', best_epoch, 'which'])
best_val_results['stat'] = res[0]['val_ret'].loc[best_epoch]
best_val_results['name'] = exp
return best_val_results, best_test_results
import time, sys
class ProgressBar:
def __init__(self, iterable, taskname=None, barLength=40, stride = 50):
self.l = iterable
try:
self.n = len(self.l)
except TypeError:
self.l = list(self.l)
self.n = len(self.l)
self.cur = 0
self.starttime = time.time()
self.barLength = barLength
self.taskname = taskname
self.last_print_time = time.time()
self.stride = stride
def __iter__(self):
return self
def _update_progress(self):
status = "Done...\r\n" if self.cur == self.n else "\r"
progress = float(self.cur) / self.n
curr_time = time.time()
block = int(round(self.barLength * progress))
text = "{}Percent: [{}] {:.2%} Used Time:{:.2f} seconds {}".format("" if self.taskname is None else "Working on {}. ".format(self.taskname),
"#" * block + "-"*(self.barLength - block),
progress, curr_time - self.starttime, status)
sys.stdout.write(text)
sys.stdout.flush()
def __next__(self):
if self.cur % self.stride == 0:
self._update_progress()
if self.cur >= self.n:
raise StopIteration
else:
self.cur += 1
return self.l[self.cur - 1]
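
# Small usage sketch for ProgressBar: wrap any sized iterable and a textual progress
# bar is printed every `stride` items (here every 100 of 1000 dummy steps).
def _example_progressbar():
    total = 0
    for value in ProgressBar(range(1000), taskname="demo", stride=100):
        total += value
    return total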
def match(mf_df, dist=1, data_dir="../data/", cache_dir=None):
"""
Clean up the matches such that we assign the largest (highest mass) halo to the MF signal
:param mf_df:
MF
:param dist:
:param data_dir:
:param cache_dir:
:return:
"""
from astropy.coordinates import SkyCoord
from astropy import units as units
if cache_dir is None: cache_dir = os.path.join(data_dir, 'cache')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
run_min_sig = 3 if np.min(mf_df['mf_peaksig']) < 3.5 else 4
MF_match_cache_path = os.path.join(cache_dir, "MF_match_{}sig_{}arcmin.pkl".format(run_min_sig, dist))
if not os.path.isfile(MF_match_cache_path):
        halo_data = pd.read_csv(data_dir + 'halo_sz.ascii', sep=r'\s+', header=None,
                                names=['redshift', 'ra', 'dec', 'Mvir', 'M500'], usecols=[0, 1, 2, 10, 32])
halo_coord = SkyCoord(ra=halo_data['ra'].values * units.degree, dec=halo_data['dec'].values * units.degree)
mf_coord = SkyCoord(ra=mf_df['mf_ra'].values * units.degree, dec=mf_df['mf_dec'].values * units.degree)
#idx, sep_2d, dist_3d = mf_coord.match_to_catalog_sky(halo_coord)
idxmf, idxhalo, sep_2d, dist_3d = halo_coord.search_around_sky(mf_coord, dist * units.arcmin)
bad_idxmf = mf_df.index.difference(idxmf)
mf_df.shape, len(np.unique(idxmf)) + len(bad_idxmf)
mf_df = mf_df.reindex(columns=['mf_peaksig', 'mf_dec', 'mf_ra'])
print(mf_df.dropna().shape)
n_mf = len(mf_df)
matched_halos = set()
match_list = []
for ii in ProgressBar(range(n_mf - 1, -1, -1)):
idxhalo_this = idxhalo[idxmf == ii]
halos_match = halo_data.iloc[idxhalo_this].copy()
while not halos_match.empty:
idx_mostmass = halos_match['Mvir'].idxmax()
if idx_mostmass in matched_halos and len(halos_match) > 1:
halos_match.drop(idx_mostmass, inplace=True)
continue
matched_halos.add(idx_mostmass)
match_list.append(
np.concatenate(([idx_mostmass], halos_match.loc[idx_mostmass].values, [ii], mf_df.loc[ii].values)))
break
mfhalo_df = pd.DataFrame(match_list,
columns=['halo_id', 'redshift', 'ra_halo', 'dec_halo', 'Mvir', 'M500',
'mf_id', 'StoN', 'mf_dec', 'mf_ra'])
mfhalo_df = pd.concat([mfhalo_df, mf_df.reindex(bad_idxmf).rename(columns={"mf_peaksig": "StoN"})])
mfhalo_df.to_pickle(MF_match_cache_path)
mfhalo_df = pd.read_pickle(MF_match_cache_path)
return mfhalo_df
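
# Input sketch for match() (illustrative values): MF detections are expected as a
# DataFrame with 'mf_peaksig', 'mf_ra' and 'mf_dec' columns on a plain integer index,
# and 'halo_sz.ascii' must exist under data_dir for the call to actually run.
def _example_match(data_dir="../data/"):
    mf_df = pd.DataFrame({
        "mf_peaksig": [5.2, 3.8, 7.1],
        "mf_ra": [10.1, 42.7, 63.3],   # degrees
        "mf_dec": [12.4, 55.0, 71.9],  # degrees
    })
    return match(mf_df, dist=1, data_dir=data_dir)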
def show_cutouts(df, get_x_func, n=5, cutout_size=8./60, show_freq=True, save_path=None, font=DEFAULT_FONT):
f = plt.figure(figsize=(15, 5 * n))
for i in range(n):
r = df.iloc[i]
print("Cutout {} has features: Mvir={:.1e} redshift={} rvir={}".format(r['cutout_id'], r['Mvir'], r['redshift'],
r['rvir']))
img = get_x_func(r['cutout_id'])
for c in range(3):
f.add_subplot(n, 3, 3 * i + c + 1)
#plt.imshow(img[:,:,c], cmap='gray')
im = plt.imshow(img[:, :, c], cmap='gray', extent=[cutout_size / 2, -cutout_size / 2, cutout_size / 2, -cutout_size / 2])
plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
plt.scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200, c='red', marker='x', linewidth=3)
plt.title("freq %d GHz" % ({0:90,1:148,2:219}[c]), fontsize=font)
if c == 0: plt.ylabel("img %d" % r['cutout_id'], fontsize=font)
#plt.scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']])
#plt.xlim(cutout_size / 2, -cutout_size / 2)
#plt.ylim(cutout_size / 2, -cutout_size / 2)
#plt.xlabel("ra")
#plt.ylabel("dec")
#plt.title("position in the cutout", fontsize=16)
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
plt.show(block=True)
def _get_cax_for_colobar(ax, fig):
return fig.add_axes([ax.get_position().x1 + 0.01, ax.get_position().y0, 0.02, ax.get_position().height])
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import ImageGrid
def show_cutout_full(r, get_comp_func, cutout_size=8./60, save_path=None, font=DEFAULT_FONT, separate_cbar=None, mark=True,
normalization=None,
components = ['samples', 'ksz', 'ir_pts', 'rad_pts', 'dust'],
override_name_map={},
adjust=None, width_multiple=1.1):
assert normalization is None or normalization in {"log"}
plt.rcParams.update({'font.size': font})
#get_x_func = lambda c: ..
#components = ['samples', 'ksz', 'ir_pts', 'rad_pts', 'dust']#, 'skymap']
name_map = {"samples":"CMB + tSZ", "ksz":"+kSZ", "ir_pts":"+Infrared Galaxies", "rad_pts":"+Radio Galaxies", "dust":"+Galactic Dust"}
name_map.update(override_name_map)
nc = len(components)
fig = plt.figure(figsize=(4 * nc * width_multiple, 4 * 3))
#fig, axes = plt.subplots(nrows=3, ncols=nc, figsize=(4 * nc, 4 * 3))
comps = {c: get_comp_func(c) for c in components}
if separate_cbar is None:
kwargs = [{"vmin": min([comps[c].min() for c in components]),
"vmax": max([comps[c].max() for c in components]) }for _ in range(3)]
raise NotImplementedError
grid_kwargs = {}
elif separate_cbar == 'row':
kwargs = [{"vmin": min([comps[c][:,:,freq].min() for c in components]),
"vmax": max([comps[c][:,:,freq].max() for c in components])} for freq in range(3)]
grid_kwargs = {"cbar_mode":"edge", "direction":"row"}
default_adjust = lambda f: None
grid = ImageGrid(fig, 111,
nrows_ncols=(3, nc),
axes_pad=0.2,
share_all=True,
cbar_location="right",
cbar_size="4%",
cbar_pad=0.1,
**grid_kwargs) # https://stackoverflow.com/questions/45396103/imagegrid-with-colorbars-only-on-some-subplots
get_ax = lambda ic, ir: grid[nc * ir + ic]
elif separate_cbar == 'col':
raise NotImplementedError
else:
assert separate_cbar == 'each'
kwargs = [{},{},{}]
#grid_kwargs = {"cbar_mode": "each"}
default_adjust = lambda f: f.subplots_adjust(hspace=-0.1, wspace=0.25)
get_ax = lambda ic, ir: fig.add_subplot(3, nc, nc * ir + ic + 1)
if adjust is None: adjust = default_adjust
if normalization == 'log':
assert separate_cbar == 'row'
offset = {}
for i, kk in enumerate(kwargs):
vmin, vmax = kk['vmin'], kk['vmax']
offset[i] = vmin - 1
kk['vmin'] -= offset[i]
kk['vmax'] -= offset[i]
def transform(img, freq):
return img[:, :, freq] - offset[freq]
def set_cbar_ticks(cbar, freq):
vmin, vmax = kwargs[freq]['vmin'], kwargs[freq]['vmax']
bias = offset[freq]
ticks = np.logspace(np.log10(vmin), np.log10(vmax), 5)
ticklabels = ["%.1f"%(t + bias) for t in ticks]
cbar.ax.set_yscale('log')
cbar.ax.set_yticks(ticks)
cbar.ax.set_yticklabels(ticklabels)
else:
def transform(img, freq):
return img[:, :, freq]
import matplotlib.colors as colors
import matplotlib.ticker as ticker
for i, c in enumerate(components):
img = get_comp_func(c)
for freq in range(3):
cplt = get_ax(i, freq)
norm = colors.LogNorm(clip=True, **kwargs[freq]) if normalization == 'log' else None
im = cplt.imshow(transform(img, freq), cmap='gray', norm=norm,
extent=[cutout_size / 2, -cutout_size / 2, cutout_size / 2, -cutout_size / 2],
**kwargs[freq])
cplt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
if mark: cplt.scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200, c='red', marker='x', linewidth=3)
if separate_cbar == 'each':
divider = make_axes_locatable(cplt)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='vertical')
elif c == components[-1] and separate_cbar == 'row':
if normalization == 'log':
cbar = cplt.cax.colorbar(im, norm=norm, format=ticker.LogFormatterMathtext())
set_cbar_ticks(cbar, freq)
else:
cbar = cplt.cax.colorbar(im, norm=norm)
else:
cbar = None
if cbar is not None:
cbar.ax.tick_params(labelsize=font-5)
if freq == 0: cbar.ax.set_title('$\mu$K', fontsize=font - 4)
if freq == 0: cplt.set_title(name_map[c]) # , fontsize=font)
if i == 0: cplt.set_ylabel("%d GHz"%{0:90,1:148,2:219}[freq])#, fontsize=font)
continue
if not separate_cbar:
# put colorbar at desire position
divider = make_axes_locatable(cplt)
fig.subplots_adjust(right=0.95)
cbar_ax = fig.add_axes([0.96, 0.15, 0.03, 0.7])
cbar = fig.colorbar(im, cax=cbar_ax)
#cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('$\mu$K', rotation=270, labelpad=15)#, fontsize=font)
if save_path is not None: plt.savefig(save_path, dpi=500,bbox_inches="tight")
#fig.tight_layout()
#fig.subplots_adjust(top=1.00, bottom=0.)
#fig.subplots_adjust(hspace=-0.1, wspace=0.25)
adjust(fig)
plt.show()
def show_range(df, get_x_func, n=5, cnn_prob=(0.6, 1.0), mf_sn=(3., 5.), which='test', tp=True,
CNN_col='pred', MF_col='StoN', label_col='y', cutout_size=8./60, save_path=None):
ss = "Sampling cutouts such that"
if cnn_prob is not None:
ss = ss + " CNN prob in ({},{}),".format(cnn_prob[0], cnn_prob[1])
df = df[(df[CNN_col] > cnn_prob[0]) & (df[CNN_col] < cnn_prob[1])]
if mf_sn is not None:
ss = ss + " MF S/N in ({}, {}),".format(mf_sn[0], mf_sn[1])
df = df[(df[MF_col] > mf_sn[0]) & (df[MF_col] < mf_sn[1])]
if tp is not None:
ss = ss + (" has halo" if tp else " does not have halo")
df = df[df[label_col]] if tp else df[~df[label_col]]
print(ss)
print("there are {} such cutouts ".format(len(df)))
if get_x_func is not None:
show_cutouts(df, get_x_func, n,cutout_size, save_path=save_path)
return df
def false_color(x, params=None):
for i in range(3):
try:
_min = params[i]['min']
_max = params[i]['max']
except:
_min = np.min(x[:,:,i])
_max = np.max(x[:,:,i])
x[:, :, i] = x[:, :, i].clip(_min, _max)
x[:,:,i] = (x[:,:,i] - _min)/(_max - _min) * 255
return np.round(x).astype(int)#[:,:,0]
def false_color_log(x, params=None):
for i in range(3):
try:
_min = params[i]['min']
_max = params[i]['max']
except:
_min = np.min(x[:,:,i])
_max = np.max(x[:,:,i])
x[:, :, i] = np.log(x[:,:,i] - _min + 1.)
x[:, :, i] = (x[:, :, i] - np.min(x[:, :, i])) / (np.max(x[:, :, i]) - np.min(x[:, :, i])) * 255
#x[:,:,i] = (x[:,:,i] - _min)/(_max - _min) * 255
return np.round(x).astype(int)#[:,:,0]
def single_channel_log(x, params=None):
try:
_min = params['min']
_max = params['max']
except:
_min = np.min(x)
_max = np.max(x)
x = np.log(x - _min + 1.)
return x
def make_false_color_params1(cutouts):
params = [{} for i in range(3)]
for k in cutouts.keys():
for i in range(3):
try:
params[i]['max'] = max(params[i]['max'], np.max(cutouts[k][:,:,i]))
except:
params[i]['max'] = np.max(cutouts[k][:, :, i])
try:
params[i]['min'] = min(params[i]['min'], np.min(cutouts[k][:,:,i]))
except:
params[i]['min'] = np.min(cutouts[k][:, :, i])
print(params)
return params
def make_false_color_params(cutouts):
channels = [[], [], []]
for k in cutouts.keys():
for i in range(3):
channels[i].append(cutouts[k][:,:,i].flatten())
channels = [np.concatenate(x) for x in channels]
params = [{"max": np.percentile(channels[i], 99.5),
"min": np.percentile(channels[i], 0.5)} for i in range(3)]
print("max pixel val = %f, min=%f; 148 max=%f, min=%f; 219 max=%f, min=%f"%(params[0]['max'], params[0]['min'], params[1]['max'], params[1]['min'],params[2]['max'], params[2]['min']))
return params
def make_false_color_params_bad(cutouts):
channels = [[], [], []]
for k in cutouts.keys():
for i in range(3):
channels[i].append(cutouts[k][:,:,i].flatten())
channels = [np.concatenate(x) for x in channels]
params = [{"max": np.percentile(channels[i], 99.5),
"min": np.percentile(channels[i], 0.5)} for i in range(3)]
print("max pixel val = %f, min=%f; 148 max=%f, min=%f; 219 max=%f, min=%f"%(params[0]['max'], params[0]['min'], params[1]['max'], params[1]['min'],params[2]['max'], params[2]['min']))
return [{"max": max([params[i]['max'] for i in range(3)]), "min": min([params[i]['min'] for i in range(3)])}] * 3
def show_range_by_feature(df, CNN_pred, MF_pred, get_x_func, n=5, feature='redshift',
ycol ='y', save_path=None):
import matplotlib.pyplot as plt
if isinstance(CNN_pred,str): CNN_pred = df[CNN_pred]
if isinstance(MF_pred, str): MF_pred = df[MF_pred]
percs = np.linspace(0., 1., n+1)[:n] + 0.5 / n
cols = ["MF_TP", "MF_FN", "CNN_TP", "CNN_FN"]
col2idxs = {"MF_TP": df[df[ycol] & MF_pred].index, "MF_FN": df[df[ycol] & ~MF_pred].index,
"CNN_TP": df[df[ycol] & CNN_pred].index, "CNN_FN": df[df[ycol] & ~CNN_pred].index}
rows = ["%.2f%%-tile"%(p * 100) for p in percs]
import ipdb
#ipdb.set_trace()
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(3 * len(cols), 3 * len(rows)))
for ax, col in zip(axes[0], cols):
ax.set_title(col)
for ax, row in zip(axes[:,0], rows):
ax.set_ylabel(row, rotation=90, size='large')
for row_i, row in enumerate(rows):
for col_i, col in enumerate(cols):
#print(row_i, col_i)
tdf =df.reindex(col2idxs[col])
curr_percentile_val = tdf[feature].quantile(percs[row_i])
r = tdf.loc[tdf[feature].map(lambda x: (x - curr_percentile_val) ** 2).idxmin()]
print("Cutout {} has features: Mvir={:.1e} redshift={} rvir={}".format(r['cutout_id'], r['Mvir'], r['redshift'],
r['rvir']))
axes[row_i, col_i].imshow(false_color(get_x_func(r['cutout_id'])), cmap='gray')
axes[row_i, col_i].set_xlabel("%s=%f"%(feature, r[feature]))
fig.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500,bbox_inches="tight")
plt.show()
def show_range_by_feature2(df, pred_cols, get_x_func, n=5, feature='redshift', use_log=False,
ycol='y', save_path=None):
import matplotlib.pyplot as plt
print("There are %d such cutouts in total"%(len(df)))
print(
"Below are examples whose %s are at different percentiles (shown on the left side of the left-most column) within these %d cutouts." % (
feature, len(df)))
print("%s increases from top to bottom" % feature)
percs = np.linspace(0., 1., n + 1)[:n] + 0.5 / n
cols , col2idxs = [], {}
for k in pred_cols.keys():
if isinstance(pred_cols[k], str): pred_cols[k] = df[pred_cols[k]]
cols.extend(["%s_TP"%k, "%s_FN"%k])
col2idxs['%s_TP'%k] = df[df[ycol] & (pred_cols[k])].index
col2idxs['%s_FN' % k] = df[df[ycol] & (~pred_cols[k])].index
if feature not in {"redshift", "tSZ", "Mvir", "rvir"}:
cols.extend(["%s_FP" % k, "%s_TN" % k])
col2idxs['%s_FP' % k] = df[(~df[ycol]) & pred_cols[k]].index
col2idxs['%s_TN' % k] = df[(~df[ycol]) & ~pred_cols[k]].index
#else:
#cols.extend(["%s_FP" % k, "%s_TN" % k])
old_cols = cols.copy()
cols = []
for k in old_cols:
if len(col2idxs[k]) > 0:
cols.append(k)
else:
col2idxs.pop(k)
rows = ["%.2f%%-tile" % (p * 100) for p in percs]
import ipdb
# ipdb.set_trace()
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(3 * len(cols), 3 * len(rows)))
print("from left to right we have in each column {}".format(", ".join(cols)))
for ax, col in zip(axes[0], cols):
ax.set_title(col)
for ax, row in zip(axes[:, 0], rows):
ax.set_ylabel(row, rotation=90, size='large')
cutout_size = 8. / 60
cutouts, rs = {}, {}
for row_i, row in enumerate(rows):
for col_i, col in enumerate(cols):
# print(row_i, col_i)
tdf = df.reindex(col2idxs[col])
curr_percentile_val = tdf[feature].quantile(percs[row_i])
r = tdf.loc[tdf[feature].map(lambda x: (x - curr_percentile_val) ** 2).idxmin()]
#print("Cutout {} has features: Mvir={:.1e} redshift={} rvir={}".format(r['cutout_id'], r['Mvir'], r['redshift'], r['rvir']))
cutouts[(row_i, col_i)], rs[(row_i, col_i)] = get_x_func(r['cutout_id']), r
if use_log:
fc = lambda x: false_color_log(x)
else:
_fc_params = make_false_color_params(cutouts)
fc = lambda x: false_color(x, _fc_params)
for row_i, row in enumerate(rows):
for col_i, col in enumerate(cols):
# print(row_i, col_i)
r = rs[(row_i, col_i)]
axes[row_i, col_i].imshow(fc(cutouts[(row_i, col_i)]), extent=[cutout_size/2, -cutout_size/2, cutout_size/2, -cutout_size/2])
axes[row_i, col_i].tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
if not pd.isnull(r['CL_ra']):
axes[row_i, col_i].scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200, edgecolors='red', marker='o', facecolors='none')
if feature in ['rvir' ,'redshift', 'Mvir', 'tSZ']:
axes[row_i, col_i].set_xlabel("%s=%.2E" % (feature, r[feature]))
else:
axes[row_i, col_i].set_xlabel("%s=%f" % (feature, r[feature]) + ", Mass=%.2E" % ("Mvir", r["Mvir"]))
fig.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
plt.show()
def show_false_color_2x2(rows, get_x_func, save_path=None, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
cutout_size = 8. / 60
cutouts = {k: get_x_func(rows[k].loc['cutout_id']) for k in rows.keys()}
_fc_params = make_false_color_params(cutouts)
fc = lambda x: false_color(x, _fc_params)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(3 * 2, 3 * 2))
for row_i, CNN_class in enumerate(['TP', 'FN']):
for col_i, MF_class in enumerate(['TP', 'FN']):
key = 'MF %s & CNN %s' % (MF_class, CNN_class)
cplt, r = axes[row_i, col_i], rows[key]
cplt.imshow(fc(cutouts[key]),
extent=[cutout_size / 2, -cutout_size / 2, cutout_size / 2, -cutout_size / 2])
cplt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
if not pd.isnull(r['CL_ra']):
cplt.scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200, c='red', marker='x', linewidth=3)
if col_i == 0: cplt.set_ylabel('CNN %s'%CNN_class)
if row_i == 0: cplt.set_title('MF %s' % MF_class)
cplt.set_xlabel("S/N=%.2f CNN Prob=%.2f\n Mvir=%.2e $M_\\odot$"%(r['pred_MF'], r['pred'], r['Mvir']), fontsize=font-4)
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
#plt.show()
def show_examples_breakchannels(df, title, get_x_func, n=5, feature='redshift', use_log=False,
font=DEFAULT_FONT, save_path=None, additional_df=None):
#df should be filterd already
print("%d cutouts are '%s'"%(len(df), _translate_one(title)))
print("Below are examples whose %s are at different percentiles (shown on the left side of the left-most column) within these %d cutouts."%(feature, len(df)))
print("%s increases from top to bottom"%feature)
#print("Also showing all cutouts information sequntially below:\n")
percs = np.linspace(0., 1., n + 1)[:n] + 0.5 / n
    cols = ['90GHz', '148GHz', '219GHz', 'false color combined']
ncols = 4
rows = ["%.2f%%-tile" % (p * 100) for p in percs]
fig, axes = plt.subplots(nrows=len(rows), ncols=ncols, figsize=(3 * ncols, 3 * len(rows)))
for ax, row in zip(axes[:, 0], rows):
ax.set_ylabel(row, rotation=90, size='large')
cutout_size = 8. / 60
cutouts, rs = {}, {}
for row_i, row in enumerate(rows):
curr_percentile_val = df[feature].quantile(percs[row_i])
r = df.loc[df[feature].map(lambda x: (x - curr_percentile_val) ** 2).idxmin()]
print("Cutout {} has features: Mvir={:.1e} redshift={} rvir={}".format(r['cutout_id'], r['Mvir'], r['redshift'], r['rvir']))
cutouts[row_i], rs[row_i] = get_x_func(r['cutout_id']), r
if use_log:
fc = lambda x: false_color_log(x)
sc = lambda x: single_channel_log(x)
else:
print("When not using log scale (now), all false coloring in the same matrix is on the same scale")
_fc_params = make_false_color_params(cutouts)
fc = lambda x: false_color(x, _fc_params)
sc = lambda x: x
for row_i, row in enumerate(rows):
x, r = cutouts[row_i], rs[row_i]
for col_i, col in enumerate(cols):
cplt = axes[row_i, col_i]
if col_i == 3:
cplt.imshow(fc(x), extent=[cutout_size/2, -cutout_size/2, cutout_size/2, -cutout_size/2])
else:
_min = x[:,:,col_i].min()
im = cplt.imshow(sc(x[:,:,col_i]), extent=[cutout_size/2, -cutout_size/2, cutout_size/2, -cutout_size/2], cmap='gray')
cbar = plt.colorbar(im, ax=cplt)
                cbar.ax.set_ylabel("ln(x%+.2f)" % (1. - _min))
cplt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False,
left=False, labelleft=False)
if not pd.isnull(r['CL_ra']):
cplt.scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=400,
edgecolors='red' if col_i != 3 else 'black',
marker='o', facecolors='none')
if additional_df is not None:
tdf = additional_df[additional_df['cutout_id'] == r['cutout_id']].dropna(subset=['CL_ra'])
if len(tdf) > 1:
for _idx in tdf.index:
_r = tdf.loc[_idx]
if _r['halo_id'] == r['halo_id']: continue
cplt.scatter([_r['CL_ra'] - _r['cutout_ra']], [_r['CL_dec'] - _r['cutout_dec']], s=200,
edgecolors='red' if col_i != 3 else 'black',
marker='o', facecolors='none')
if row_i == 0: cplt.set_title(col, fontsize=font)
if feature in ['rvir' ,'redshift', 'Mvir', 'tSZ']:
cplt.set_xlabel("%s=%.2E" % (feature, r[feature]))
else:
cplt.set_xlabel("%s=%f" % (feature, r[feature]) + ("" if pd.isnull(r["Mvir"]) else ", Mass=%.2E" % (r["Mvir"]) ))
fig.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
plt.show()
def show_range_by_feature_sep(df, pred_cols, get_x_func, n=5, feature='redshift',
ycol='y', save_path=None):
import matplotlib.pyplot as plt
print("There are %d such cutouts" % (len(df)))
percs = np.linspace(0., 1., n + 1)[:n] + 0.5 / n
cols, col2idxs = [], {}
for k in pred_cols.keys():
if isinstance(pred_cols[k], str): pred_cols[k] = df[pred_cols[k]]
cols.extend(["%s_TP" % k, "%s_FN" % k])
col2idxs['%s_TP' % k] = df[df[ycol] & (pred_cols[k])].index
col2idxs['%s_FN' % k] = df[df[ycol] & (~pred_cols[k])].index
if feature not in {"redshift", "tSZ", "Mvir", "rvir"}:
cols.extend(["%s_FP" % k, "%s_TN" % k])
col2idxs['%s_FP' % k] = df[(~df[ycol]) & pred_cols[k]].index
col2idxs['%s_TN' % k] = df[(~df[ycol]) & ~pred_cols[k]].index
# else:
# cols.extend(["%s_FP" % k, "%s_TN" % k])
old_cols = cols.copy()
cols = []
for k in old_cols:
if len(col2idxs[k]) > 0:
cols.append(k)
else:
col2idxs.pop(k)
rows = ["%.2f%%-tile" % (p * 100) for p in percs]
import ipdb
# ipdb.set_trace()
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(3 * len(cols), 3 * len(rows)))
for ax, col in zip(axes[0], cols):
ax.set_title(col)
for ax, row in zip(axes[:, 0], rows):
ax.set_ylabel(row, rotation=90, size='large')
cutout_size = 8. / 60
cutouts, rs = {}, {}
for row_i, row in enumerate(rows):
for col_i, col in enumerate(cols):
# print(row_i, col_i)
tdf = df.reindex(col2idxs[col])
curr_percentile_val = tdf[feature].quantile(percs[row_i])
r = tdf.loc[tdf[feature].map(lambda x: (x - curr_percentile_val) ** 2).idxmin()]
print("Cutout {} has features: Mvir={:.1e} redshift={} rvir={}".format(r['cutout_id'], r['Mvir'],
r['redshift'],
r['rvir']))
cutouts[(row_i, col_i)], rs[(row_i, col_i)] = get_x_func(r['cutout_id']), r
_fc_params = make_false_color_params(cutouts)
fc = lambda x: false_color(x, _fc_params)
for row_i, row in enumerate(rows):
for col_i, col in enumerate(cols):
# print(row_i, col_i)
r = rs[(row_i, col_i)]
axes[row_i, col_i].imshow(fc(cutouts[(row_i, col_i)]),
extent=[cutout_size / 2, -cutout_size / 2, cutout_size / 2, -cutout_size / 2])
axes[row_i, col_i].tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False,
right=False, left=False, labelleft=False)
if not pd.isnull(r['CL_ra']):
axes[row_i, col_i].scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200,
edgecolors='red', marker='o', facecolors='none')
if feature in ['rvir', 'redshift', 'Mvir', 'tSZ']:
axes[row_i, col_i].set_xlabel("%s=%.2E" % (feature, r[feature]))
else:
axes[row_i, col_i].set_xlabel("%s=%f" % (feature, r[feature]) + ", Mass=%.2E" % ("Mvir", r["Mvir"]))
fig.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
plt.show()
MARKER_STYLE = dict(color='tab:blue', linestyle=':', marker='o',
markersize=15, markerfacecoloralt='tab:red')
def show_range_by_pred_value(df, get_x_func, nrow=4, ncol=4, feature='redshift', save_path=None, extra_info=[]):
import matplotlib.pyplot as plt
print("There are %d such cutouts, ordered by %s"%(len(df), feature))
n = nrow * ncol
percs = np.linspace(0., 1., n + 1)[:n] + 0.5 / n
import ipdb
# ipdb.set_trace()
fig, axes = plt.subplots(nrows=nrow, ncols=ncol, figsize=(3 * ncol, 3 * nrow))
def _format_row(r):
ss = "%s=%.2f"%(feature, r[feature])
for _f in extra_info:
ss += "\n%s=%.2f"%(_f, r[_f])
return ss
def _format_row_halo(r):
#ss = "log(M)=%.2f, log(tSZ)=%.3f, z=%.2f"%(np.log(r['Mvir'])/np.log(10), np.log(r['tSZ'])/np.log(10), r['redshift'])
ss = "Mass=%.2E, tSZ=%.2E, z=%.2f" % (r['Mvir'], r['tSZ'], r['redshift'])
return ss
cutout_size = 8. / 60
cutouts = {}
rs = {}
for row_i in range(nrow):
for col_i in range(ncol):
tdf = df.reindex()
curr_percentile_val = tdf[feature].quantile(percs[ncol * row_i + col_i])
r = tdf.loc[tdf[feature].map(lambda x: (x - curr_percentile_val) ** 2).idxmin()]
cutouts[(row_i, col_i)] = get_x_func(r['cutout_id'])
rs[(row_i, col_i)] = r
_fc_params = make_false_color_params(cutouts)
fc = lambda x: false_color(x, _fc_params)
for row_i in range(nrow):
for col_i in range(ncol):
r = rs[(row_i, col_i)]
axes[row_i, col_i].imshow(fc(cutouts[(row_i, col_i)]), extent=[cutout_size/2, -cutout_size/2, cutout_size/2, -cutout_size/2])
axes[row_i, col_i].tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
if not pd.isnull(r['CL_ra']):
axes[row_i, col_i].scatter([r['CL_ra'] - r['cutout_ra']], [r['CL_dec'] - r['cutout_dec']], s=200, edgecolors='red', marker='o', facecolors='none')#, linewidth='3')
axes[row_i, col_i].set_xlabel(_format_row(r))
axes[row_i, col_i].set_title(_format_row_halo(r))
fig.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
plt.show()
def plot_features(df, x='redshift', y='rvir', c='tSZ', font=DEFAULT_FONT, ranges={}):
plt.rcParams.update({'font.size': font})
col_to_name = {"rvir": "Virial Radius (Mpc)", "Mvir": "Virial Mass ($M_\\odot$)",
"tSZ":"tSZ (arcmin$^2$)", "redshift":"Redshift"}
cm = plt.cm.get_cmap('RdYlBu')
if c in ranges:
sc = plt.scatter(df[x], df[y], c=df[c], cmap=cm, vmax=ranges[c][1], vmin=ranges[c][0])
else:
sc = plt.scatter(df[x], df[y], c=df[c], cmap=cm)
ylab = col_to_name.get(y,y)
plt.ylabel(ylab)
if y in ranges: plt.ylim(*ranges[y])
xlab = col_to_name.get(x,x)
plt.xlabel(xlab)
if x in ranges: plt.xlim(*ranges[x])
# legend
cbar = plt.colorbar(sc)
#cbar.set_label(c, rotation=270, horizontalalignment='right')
cbar.ax.set_title(col_to_name.get(c,c), fontsize=font - 4)
#cbar.set_title(col_to_name.get(c,c), rotation=270, labelpad=14)
# plt.show()
pass
def plot_relative_loc(df, cutout_size=8./60, font=DEFAULT_FONT):
df['rel_ra'] = df['CL_ra'] - df['cutout_ra']
df['rel_dec'] = df['CL_dec'] - df['cutout_dec']
plt.scatter(df['rel_ra'], df['rel_dec'])
plt.xlabel("Relative RA (degree)", fontsize=font)
plt.xlim((-cutout_size / 2, cutout_size/2))
plt.ylabel("Relative Dec (degree)", fontsize=font, labelpad=0)
plt.ylim((-cutout_size / 2, cutout_size / 2))
#plt.title("Halo dist. within cutout")
def plot_all(df, cutout_size=8./60,save_path=None, font=DEFAULT_FONT, ranges = {}):
# features, relative locations,
f, axes = plt.subplots(1, 2, figsize=(12.5, 5), gridspec_kw={'width_ratios': [1.5, 1]})
plt.rcParams.update({'font.size': font})
"""
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(3 * len(cols), 3 * len(rows)))
fig.tight_layout()
"""
#f.add_subplot(1, 2, 1)
plt.sca(axes[0])
plot_features(df, font=font, ranges=ranges)
#plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
#f.add_subplot(1, 2, 2)
plt.sca(axes[1])
plot_relative_loc(df, cutout_size=cutout_size)
f.tight_layout()
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
pass
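
# Toy demonstration of plot_all (illustrative values only): the DataFrame mimics the
# halo-catalog columns used above (redshift, rvir, tSZ and cutout/halo coordinates).
def _example_plot_all():
    rng = np.random.RandomState(0)
    n = 50
    df = pd.DataFrame({
        "redshift": rng.uniform(0.1, 1.5, n),
        "rvir": rng.uniform(0.5, 2.5, n),
        "tSZ": rng.uniform(1e-6, 1e-4, n),
        "CL_ra": rng.uniform(-0.05, 0.05, n),
        "CL_dec": rng.uniform(-0.05, 0.05, n),
        "cutout_ra": np.zeros(n),
        "cutout_dec": np.zeros(n),
    })
    plot_all(df)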
CB_color_cycle = [
#'377eb8',
#'ff7f00',
#'4daf4a',
#'f781bf',
#'a65628',
#'984ea3',
#'999999',
#'e41a1c',
'crimson',
'cyan',
'silver',
'peru',
'dodgerblue',
'purple',
'black'
]
def plot_training_process(result, ratio_map, key='acc', save_path=None, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
colors = CB_color_cycle.copy()
full_spelling = {"acc":"Accuracy", "acc_adj":"Accuracy Adjusted", "loss": "Loss"}
ratio_map_raw = ratio_map.copy()
ratio_map = pd.Series(index=range(max(ratio_map_raw.values()) + 1))
for k in sorted(ratio_map_raw.keys()):
start_idx = 0 if k == 0 else ratio_map_raw[k - 1]
ratio_map[(ratio_map.index < ratio_map_raw[k]) & (ratio_map.index >= start_idx)] = k + 1
if key.startswith("acc"):
acc_adj = key.endswith("adj")
key = 'acc'
        val_ser = pd.DataFrame(result['val'])
'''
LICENSE: MIT license
This module helps us figure out whom to ask, and when, if we get stuck on
buggy code while solving problems.
'''
from asyncio import gather, get_event_loop
from pandas import DataFrame, set_option
from online_judge import Online_Judge
loop = get_event_loop()
set_option('display.max_colwidth', None)
class Scoreboard:
    '''Handles a dataframe to build up a scoreboard.
    Attributes:
        problems: (list) A list of problem ids which we are tracking.
        scoreboard: (DataFrame) A pandas.DataFrame that stores user attempts,
            indexed by student id.
        online_judge: (Online_Judge) An FOJ API wrapper.
    '''
def __init__(self, token, problems, problem_name):
self.problems = problems
self.problem_name = problem_name
self.online_judge = Online_Judge(token)
        self.scoreboard = DataFrame()
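
# Usage sketch (hypothetical values): the token below is a placeholder for a real FOJ
# API token, and the problem ids / display names are arbitrary examples.
def _example_scoreboard(token='<your FOJ API token>'):
    board = Scoreboard(
        token,
        problems=[101, 102],
        problem_name={101: 'A. Warmup', 102: 'B. Graphs'},
    )
    return board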
from bs4 import BeautifulSoup
import requests
import pandas as pd
from datetime import datetime
import time
# change url
url = "https://www.sec.gov/cgi-bin/current?q1=0&q2=0&q3=4"
#url = 'https://www.sec.gov/edgar/searchedgar/companysearch.html'
page = requests.get(url)
data = page.text
soup = BeautifulSoup(data, "lxml")
days_url = []
appended_dataframes = []
for link in soup.find_all('a', href=True):
    if 'index' in link['href']:
        url_to_save = link['href']
        days_url.append(url_to_save)
for item in days_url:
time.sleep(1)
index ='https://www.sec.gov'+item
    index = pd.read_html(index)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 17:10:19 2016
@author: tkc
"""
import pandas as pd
import numpy as np
import sys, glob
import scipy.stats
import matplotlib.pyplot as plt
import os
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Modules' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Modules')
import Auger_smdifquant_functions as AESsmquant
import Auger_quantmap_functions as QM
from Auger_utility_functions import pickelemsGUI
import Auger_utility_functions as AESutils
from scipy.signal import medfilt
os.chdir('C:\\Temp\\AugerQM')
#%% CREATE PHI FILES FOR AUTOTOOL, SPATIAL AREAS, MULTIPLEX CONDITIONS
# A few of these are also stored in Auger import main (to allow QM data combination prior to quant)
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv', encoding='utf-8')
AugerParamLog = pd.read_csv('Augerparamlog.csv', encoding='cp437')
# Pylint is complaining about duplicated lines, but they are all imports
# pylint: disable=duplicate-code
import shutil
from pathlib import Path
from data_pipeline_api import standard_api
from simple_network_sim import inference
import pandas as pd
import pytest
# Path to directory containing test files for fixtures
FIXTURE_DIR = Path(__file__).parents[0] / "test_data"
def _data_api(base_data_dir, config): # pylint: disable=redefined-outer-name
try:
with standard_api.StandardAPI.from_config(str(base_data_dir / config), uri="", git_sha="") as store:
yield store
finally:
# TODO; remove this once https://github.com/ScottishCovidResponse/SCRCIssueTracking/issues/505 is in prod
try:
(base_data_dir / "access.log").unlink()
except FileNotFoundError:
pass
@pytest.fixture
def short_simulation_dates():
return pd.DataFrame({"Parameter": ["start_date", "end_date"], "Value": ["2020-03-16", "2020-04-16"]})
@pytest.fixture
def data_api(base_data_dir): # pylint: disable=redefined-outer-name
yield from _data_api(base_data_dir, "config.yaml")
@pytest.fixture
def data_api_stochastic(base_data_dir): # pylint: disable=redefined-outer-name
yield from _data_api(base_data_dir, "config_stochastic.yaml")
@pytest.fixture
def base_data_dir():
yield FIXTURE_DIR / "data_pipeline_inputs"
@pytest.fixture
def locations():
yield FIXTURE_DIR / "sampleNodeLocations.json"
@pytest.fixture(autouse=True, scope="session")
def teardown_remove_data():
"""Remove test output created during testing.
Datasets defined in data_pipeline_inputs/config.yaml can't be handled with
pytest's tmp_path, so are cleaned up here. Change these locations as necessary
when the config file changes.
"""
yield
shutil.rmtree(FIXTURE_DIR / "data_pipeline_inputs" / "output", ignore_errors=True)
# Tests may drop access*.yaml files in the fixtures directory
for logpath in (FIXTURE_DIR / "data_pipeline_inputs").glob("access*.yaml"):
shutil.rmtree(logpath, ignore_errors=True)
# Tests may drop access*.log files in the fixtures directory
for logpath in (FIXTURE_DIR / "data_pipeline_inputs").glob("access*.log"):
shutil.rmtree(logpath, ignore_errors=True)
@pytest.fixture
def abcsmc(data_api): # pylint: disable=redefined-outer-name
yield inference.ABCSMC(
data_api.read_table("human/abcsmc-parameters", "abcsmc-parameters"),
data_api.read_table("human/historical-deaths", "historical-deaths"),
data_api.read_table("human/compartment-transition", "compartment-transition"),
data_api.read_table("human/population", "population"),
data_api.read_table("human/commutes", "commutes"),
data_api.read_table("human/mixing-matrix", "mixing-matrix"),
| pd.DataFrame([{"Date": "2020-01-01", "Value": 0.5}]) | pandas.DataFrame |
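# --- Editorial aside on the fixture row above: pd.DataFrame accepts a list of records
# (dicts), with keys becoming columns. The extra row and the variable name below are
# purely illustrative assumptions.
import pandas as pd

movement_multipliers = pd.DataFrame([
    {"Date": "2020-01-01", "Value": 0.5},
    {"Date": "2020-02-01", "Value": 0.7},  # hypothetical second record
])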
import matplotlib as mpl
import warnings
warnings.simplefilter(action='ignore', category=mpl.MatplotlibDeprecationWarning)
################################################################################
# System dependencies
################################################################################
import powerlaw
import numpy as np
import pandas as pd
import collections
import networkx as nx
import seaborn as sns
from matplotlib import rc
from functools import reduce
from itertools import cycle
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from powerlaw import plot_pdf, Fit, pdf
from palettable.colorbrewer.diverging import BrBG_11
from palettable.colorbrewer.diverging import BrBG_5
from palettable.colorbrewer.sequential import Blues_3, Reds_3, Greens_3
from palettable.colorbrewer.qualitative import Paired_11, Set2_8, Set1_9, Set3_8, Accent_8, Dark2_8, Set1_6
#https://jiffyclub.github.io/palettable/colorbrewer/sequential/
#https://jiffyclub.github.io/palettable/tableau/#tableaumedium_10
#https://jiffyclub.github.io/palettable/cartocolors/qualitative/
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
################################################################################
# Local dependencies
################################################################################
from org.gesis.lib import utils
from org.gesis.lib import graph
################################################################################
# Constants
################################################################################
MAIN_COLORS = {'min':'#ec8b67', 'maj':'#6aa8cb'}
################################################################################
# Plot setup
################################################################################
def plot_setup(latex=True):
mpl.rcParams.update(mpl.rcParamsDefault)
sns.set_style("white")
if latex:
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.unicode'] = True
lw = 0.8
sns.set_context("paper", rc={"lines.linewidth": lw})
else:
sns.set_context('paper', font_scale=1.2)
################################################################################
# Distributions
################################################################################
def plot_degree_distribution(G):
degree_sequence = sorted([d for n, d in G.degree()], reverse=True) # degree sequence
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
fig, ax = plt.subplots()
plt.bar(deg, cnt, width=0.80, color="b")
plt.title("Degree Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")
ax.set_xticks([d + 0.4 for d in deg])
ax.set_xticklabels(deg, rotation=90)
def plot_degree_powerlaw(G):
fig,axes = plt.subplots(1,2,figsize=(10,3))
colors = ['blue','orange']
labels = ['Majority','minority']
titles = ['Outdegree', 'Indegree']
for d in [0,1]: #in/ out degree (columns)
for k in [0,1]: #min vs maj
if d:
data = [i for n,i in G.in_degree() if G.nodes[n][G.graph['label']]==k]
else:
data = [o for n,o in G.out_degree() if G.nodes[n][G.graph['label']]==k]
fit = powerlaw.Fit(data, discrete=True)
fig = fit.plot_pdf(linewidth=3, color=colors[k], label=labels[k], ax=axes[d])
fit.power_law.plot_pdf(ax=axes[d], color=colors[k], linestyle='--',
label='Power law fit ({})'.format(round(fit.power_law.alpha,1)))
fig.set_ylabel(u"p(X≥x)")
fig.set_xlabel(titles[d])
handles, lbs = fig.get_legend_handles_labels()
fig.legend(handles, lbs, loc=3)
def plot_degree_powerlaw_fit(df_metadata):
metrics = ['outdegree','indegree']#,'pagerank']
fig,axes = plt.subplots(1,len(metrics),figsize=(len(metrics)*4,3))
colors = ['blue','orange']
labels = ['Majority','minority']
title = 'Power-law degree distributions'
for c,metric in enumerate(metrics):
discrete = metric!='pagerank'
for m in df_metadata.minority.unique():
data_emp = df_metadata.query("kind=='empirical' & minority==@m")[metric].values
data_fit = df_metadata.query("kind!='empirical' & minority==@m")[metric].values
ax = axes[c]
emp = powerlaw.Fit(data_emp, discrete=discrete)
emp.power_law.plot_ccdf(ax=ax, color=colors[m], linestyle='solid', label='Empirical {}'.format(round(emp.power_law.alpha,1)))
fit = powerlaw.Fit(data_fit, discrete=discrete)
fit.power_law.plot_ccdf(ax=ax, color=colors[m], linestyle='dotted', label='Model {}'.format(round(fit.power_law.alpha,1)))
ax.set_xlabel(metric)
leg1 = ax.legend(loc=3)
hs = []
for i,color in enumerate(colors):
hs.append(mpatches.Patch(color=color, label=labels[i]))
leg2 = plt.legend(handles=hs, loc=1)
axes[-1].add_artist(leg1)
axes[0].set_ylabel(u"p(X≥x)")
plt.suptitle(title)
def plot_lorenz_curve(df_metadata):
metrics = ['pagerank']
fig,ax = plt.subplots(1,1,figsize=(3,3))
title = 'Vertical Inequality\n(Individual level)'
colors = ['green','lightgreen']
labels = ['Empirical','Model']
markers = ['-','--']
for m, metric in enumerate(metrics):
data_emp = df_metadata.query("kind=='empirical'")[metric].values
lc_emp = utils.lorenz_curve(data_emp)
ax.plot(np.arange(lc_emp.size)/(lc_emp.size-1), lc_emp, linestyle=markers[m], color=colors[0])
data_fit = df_metadata.query("kind!='empirical'")[metric].values
lc_fit = utils.lorenz_curve(data_fit)
ax.plot(np.arange(lc_fit.size)/(lc_fit.size-1), lc_fit, linestyle=markers[m], color=colors[1])
# baseline: equality
ax.plot([0,1], [0,1], linestyle='--', color='grey')
# legend 1: empirical vs model (colors)
hs = []
for i,color in enumerate(colors):
hs.append(mpatches.Patch(color=color, label=labels[i]))
leg1 = plt.legend(handles=hs, loc=2)
# legend 2: metric (marker)
lines = [Line2D([0], [0], color='black', linewidth=1, linestyle=markers[m]) for m,metric in enumerate(metrics)]
leg2 = plt.legend(lines, metrics, loc=3)
ax.add_artist(leg1)
return
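# --- Hedged sketch of the two helpers imported from org.gesis.lib.utils and used above.
# Their real implementations are not shown in this file; these minimal versions only
# document the expected behaviour and are assumptions, not the library code.
def _lorenz_curve_sketch(values):
    """Cumulative share of the total held by the bottom-k of the sorted population."""
    x = np.sort(np.asarray(values, dtype=float))
    cum = np.cumsum(x)
    return cum / cum[-1]

def _gini_sketch(values):
    """Gini coefficient via the standard discrete formula on sorted values."""
    x = np.sort(np.asarray(values, dtype=float))
    n = x.size
    idx = np.arange(1, n + 1)
    return (2 * np.sum(idx * x) - (n + 1) * np.sum(x)) / (n * np.sum(x))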
def plot_fraction_minorities(df_rank):
fg = sns.catplot(data=df_rank, x='rank', y='fmt', hue='kind',
kind='point', palette='BrBG', height=3, aspect=1, legend_out=False,
estimator=np.mean, ci='sd')
fg.ax.axhline(df_rank.query("kind=='empirical' & rank==100").fmt.values[0], ls='--', c='grey', lw=1.0)
def plot_distribution_min_maj(datae, datam, dataset, model, metric):
colors = ['red','blue'] #sns.color_palette("colorblind")
fig,axes = plt.subplots(1,2,figsize=(10,4))
for minority in [0,1]:
##############
# empirical
##############
# scatter
tmpe = datae.query("dataset==@dataset & minority==@minority").copy().loc[:,metric].astype(int).values
x, y = pdf(tmpe[tmpe>0], linear_bins=False)
ind = y>0
y = y[ind]
x = x[:-1]
x = x[ind]
axes[minority].scatter(x, y, color=colors[0], label='Empirical')
# pdf
plot_pdf(tmpe[tmpe>0], ax=axes[minority], color=colors[0], linewidth=2, label='pdf')
# pdf power-law fit
if dataset not in ['seventh']:
fit = Fit(tmpe, discrete=True)
fit.power_law.plot_pdf(ax=axes[minority], linestyle='--', color=colors[0], label='power-law')
posy = min(y)
axes[minority].text(color=colors[0], x=1, y=posy, s='Empirical = {} ({},{})'.format(round(fit.power_law.alpha,2), int(fit.power_law.xmin), fit.power_law.xmax ))
##############
# model
##############
# scatter
tmpm = datam.query("dataset==@dataset & kind==@model & minority==@minority").copy().loc[:,metric].astype(int).values
x, y = pdf(tmpm[tmpm>0], linear_bins=False)
ind = y>0
y = y[ind]
x = x[:-1]
x = x[ind]
axes[minority].scatter(x, y, color=colors[1], label=model)
# pdf
plot_pdf(tmpm[tmpm>0], ax=axes[minority], color=colors[1], linewidth=2, label='pdf')
# pdf power-law fit
if dataset not in ['seventh']:
fit = Fit(tmpm, discrete=True)
fit.power_law.plot_pdf(ax=axes[minority], linestyle='--', color=colors[1], label='power-law')
posy *= 10
axes[minority].text(color=colors[1], x=1, y=posy, s='{} = {} ({},{})'.format(model,round(fit.power_law.alpha,2), int(fit.power_law.xmin), fit.power_law.xmax ))
axes[0].set_title("Majorities")
axes[1].set_title("minorities")
plt.suptitle(dataset.upper() + '-' + metric.lower())
plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
plt.show()
plt.close()
def plot_degree_distributions_groups_fit(df_summary_empirical, df_metadata_empirical, df_metadata_fit, model='DPAH', forcepl=False, fn=None):
plt.close()
### main data
metrics = ['indegree', 'outdegree']
discrete = True
labels = {0:'Majority', 1:'minority'}
#datasets = reduce(np.intersect1d, (df_summary_empirical.dataset.unique(),
# df_metadata_empirical.dataset.unique(),
# df_metadata_fit.dataset.dropna().unique()))
datasets = df_summary_empirical.dataset.unique()
### main plot
nrows = len(metrics)
ncols = len(datasets)
fig, axes = plt.subplots(nrows, ncols, figsize=(ncols * 2.6, 5), sharey=False, sharex=False) #3, 4.5
### subplots
colors = sns.color_palette("colorblind")
for col, dataset in enumerate(datasets):
axes[0, col].set_title(dataset)
xye = {}
xym = {}
for row, metric in enumerate(metrics):
### Power-law fit
txt_emp = "Empirical:" + "\n" + r"$\gamma_{M}=$" + "<maj>" + "\n" + r"$\gamma_{m}=$" + "<min>"
txt_fit = model + "\n" + r"$\gamma_{M}=$" + "<maj>" + "\n" + r"$\gamma_{m}=$" + "<min>"
for minority in sorted(df_metadata_fit.minority.unique()):
sum_emp = df_summary_empirical.query("dataset.str.lower()[email protected]()").iloc[0]
data_emp = df_metadata_empirical.query("dataset.str.lower()==@dataset.lower() & minority==@minority")[metric].values.astype(float)
data_fit = df_metadata_fit.query("dataset.str.lower()==@dataset.lower() & minority==@minority & kind==@model")[metric].values.astype(float)
#DPAH
# outdegree | indegree
# APS 4 30 | 9 180
# BLOGS 16 180 | 35 600
# HATE 14 400 | 5 120
# SEVENTH
# WIKIPEDIA 5 30 | 10 200
#EMPIRICAL
# outdegree | indegree
# APS 4 14 | 1 20
# BLOGS 6 70 | 3 120
# HATE 4 150 | 1 50
# SEVENTH
# WIKIPEDIA 10 50 | 7 200
minmax = {'empirical':
{'indegree':
{'aps':{'xmin':1, 'xmax':20},
'blogs':{'xmin':3, 'xmax':120},
'hate':{'xmin':1, 'xmax':50},
'wikipedia':{'xmin':7, 'xmax':200}},
'outdegree':
{'aps':{'xmin':4, 'xmax':14},
'blogs':{'xmin':6, 'xmax':70},
'hate':{'xmin':4, 'xmax':150},
'wikipedia':{'xmin':10, 'xmax':50}}},
'DPAH':
{'indegree':
{'aps':{'xmin':9, 'xmax':180},
'blogs':{'xmin':35, 'xmax':600},
'hate':{'xmin':5, 'xmax':120},
'wikipedia':{'xmin':10, 'xmax':200}},
'outdegree':
{'aps':{'xmin':4, 'xmax':30},
'blogs':{'xmin':16, 'xmax':180},
'hate':{'xmin':14, 'xmax':400},
'wikipedia':{'xmin':5, 'xmax':30}}}}
### Empirical:
try:
label = '{} empirical'.format(labels[minority])
if forcepl and 'empirical' in minmax and dataset.lower() in minmax['empirical'][metric]:
xmin = minmax[model][metric][dataset.lower()]['xmin']
xmax = minmax[model][metric][dataset.lower()]['xmax']
fit_emp = graph.fit_power_law_force(data_emp, discrete=discrete, xmin=xmin, xmax=xmax)
fit_emp.power_law.plot_pdf(ax=axes[row, col], linestyle='-', color=colors[minority], label=label)
else:
fit_emp = graph.fit_power_law(data_emp, discrete=discrete)
fit_emp.power_law.plot_pdf(ax=axes[row, col], linestyle='-', color=colors[minority], label=label)
txt_emp = txt_emp.replace("<min>" if minority else "<maj>", str(round(fit_emp.power_law.alpha,1)))
except Exception as ex:
print(ex)
print('?')
pass
### Model:
try:
if data_fit.shape[0] > 0:
label = '{} {}'.format(labels[minority], model)
if forcepl and model in minmax and dataset.lower() in minmax[model][metric]:
xmin = minmax[model][metric][dataset.lower()]['xmin']
xmax = minmax[model][metric][dataset.lower()]['xmax']
fit_mod = graph.fit_power_law_force(data_fit, discrete=discrete, xmin=xmin, xmax=xmax)
fit_mod.power_law.plot_pdf(ax=axes[row, col], linestyle='--', color=colors[minority], label=label)
else:
fit_mod = graph.fit_power_law(data_fit, discrete=discrete)
fit_mod.power_law.plot_pdf(ax=axes[row, col], linestyle='--', color=colors[minority], label=label)
txt_fit = txt_fit.replace("<min>" if minority else "<maj>", str(round(fit_mod.power_law.alpha, 1)))
except:
pass
### Exponents
if row == 0:
# indegree
xye[metric] = {'aps': (40, 0.5),
'hate': (30, 0.5),
'blogs': (150, 0.05),
'wikipedia': (40, 0.4)}
xym[metric] = {'aps': (2, 0.0002),
'hate': (3, 0.001),
'blogs': (32, 0.00025),
'wikipedia': (2, 0.0002)}
else:
# outdegree
xye[metric] = {'aps': (25, 0.7),
'hate': (50, 0.4),
'blogs': (80, 0.25),
'wikipedia': (25, 0.6)}
xym[metric] = {'aps': (4, 0.0001),
'hate': (2, 0.000015),
'blogs': (18, 0.0001),
'wikipedia': (7, 0.0002)}
### Column name (dataset)
axes[row, col].text(s=txt_emp, x=xye[metric][dataset.lower()][0], y=xye[metric][dataset.lower()][1], horizontalalignment='left', va='top')
axes[row, col].text(s=txt_fit, x=xym[metric][dataset.lower()][0], y=xym[metric][dataset.lower()][1], horizontalalignment='left', va='top')
### y-label right
if col == ncols - 1:
xt = axes[row, col].get_xticks()
yt = axes[row, col].get_yticks()
axes[row, col].text(s=metric,
x=700 if row == 0 else 78,
y=0.003 , rotation=-90, va='center')
### legend
width = 4*1.1
row = 0
col = int(axes.shape[1] / 2)
axes[row,col].legend(loc='lower left',
bbox_to_anchor=(width/-1.8, 1.12, width, 0.2), mode='expand',
ncol=4, handletextpad=0.3, frameon=False)
### ylabel left
ylabel = 'P(x)'
row = int(axes.shape[0] / 2)
col = 0
if nrows % 2 != 0:
axes[row, col].set_ylabel(ylabel)
else:
xt = axes[row, col].get_xticks()
yt = axes[row, col].get_yticks()
axes[row, col].text(min(xt) * 10,
max(yt) / 15,
ylabel, {'ha': 'center', 'va': 'center'}, rotation=90)
### xlabel
xlabel = 'Degree'
row = -1
col = int(axes.shape[1] / 2)
if ncols % 2 != 0:
axes[row, col].set_xlabel(xlabel)
else:
xt = axes[row, col].get_xticks()
yt = axes[row, col].get_yticks()
axes[row, col].text(min(xt) * 10,
min(yt) * 4.7,
xlabel, {'ha': 'center', 'va': 'center'}, rotation=0)
### space between subplots
plt.subplots_adjust(hspace=0.2, wspace=0.32)
### Save fig
if fn is not None:
fig.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
###
plt.show()
plt.close()
def plot_gini_density_distribution(df, title, fn=None):
x = 'd'
y = 'gini'
metrics = ['PageRank','WTF']
colors = ['red', 'blue']
bps = []
fig,ax = plt.subplots(1,1,figsize=(6,4))
for metric,color in zip(*(metrics,colors)):
tmp = df.query("metric==@<EMAIL>() & rank==100", engine='python').copy()
labels, data = zip(*[(name, tmp[y]) for name, tmp in tmp.groupby(x)])
tmp = ax.boxplot(data)
for box in tmp['boxes']:
box.set(color=color, linewidth=3)
bps.append(tmp)
### details
ax.set_title(title)
ax.set_xlabel('Edge density')
ax.set_ylabel('Inequality\n(Gini coef. of entire rank distribution)')
ax.set_xticklabels(labels)
ax.set_ylim((0-0.03,1+0.03))
ax.legend([bp['boxes'][0] for bp in bps], metrics, loc='upper right')
### horizontal baselines (Gini reference levels)
ax.axhline(y=0.3, ls='--', color='darkgrey', lw=0.5)
ax.axhline(y=0.6, ls='--', color='darkgrey', lw=0.5)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fig.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
###
plt.show()
plt.close()
################################################################################
# Ranking Empirical
################################################################################
def plot_vh_inequalities_empirical(df_rank, graph_fnc=None, datapath=None, vtype='mae', metric=None, fn=None):
tmp = df_rank.query("kind=='empirical' & metric in ['pagerank','wtf']").copy()
if metric != 'all':
tmp = df_rank.query("metric==@metric").copy()
### plot setup
r = 3 # rows
c = tmp.dataset.nunique() # columns (datasets)
w = 2.2 # width cell
h = 2 # height cell
lw = 1 # line width plot
blw = 0.8 # line width baselines
### graph setup
nsize = 1 # node size
ecolor = '#c8cacc' # edge color
ewidth = 0.1 # thickness of edges
asize = 1 # size of edge arrow (viz)
### https://xkcd.com/color/rgb/
colors = sns.xkcd_palette(["medium green", "medium purple"])
lightcolors = sns.xkcd_palette(["light green", "light purple"])
### plot
fig, axes = plt.subplots(r,c,figsize=(c*w, r*h),sharex=False, sharey=False)
counter = 0
for mc, met in enumerate(tmp.metric.unique()):
counter -= 0.15
### graph, individual and vertical inequalities (row 0, row 1, row 2)
for i, (dataset, df_group) in enumerate(tmp.query("metric==@met").groupby("dataset")):
# graph
if mc == 0:
try:
g = graph_fnc(datapath,dataset)
g = g.subgraph(max(nx.connected_components(g.to_undirected()), key=len))
ncolor = [MAIN_COLORS['min'] if obj[g.graph['label']] else MAIN_COLORS['maj']
for n,obj in g.nodes(data=True)]
nx.draw(g,
pos = nx.nx_pydot.graphviz_layout(g, prog='neato'),
edge_color=ecolor,
node_size=nsize,
node_color=ncolor,
width=ewidth,
arrows=True,
arrowsize=asize,
with_labels=False,
ax=axes[0,i])
except Exception as ex:
print(ex)
axes[0,i].axis('off')
axes[0,i].set_title(dataset.title() if dataset!='aps' else 'APS')
# inequalities
df_group = df_group.sort_values("rank")
axes[1,i].plot(df_group['rank'], df_group['gt'], color=colors[mc], label=met, linewidth=lw) # individual: gini
axes[2,i].plot(df_group['rank'], df_group['fmt'], color=colors[mc], label=met, linewidth=lw) # group: % of min
# baseline (gini_all)
#axes[1,i].axhline(df_group['gini'].unique(), c=lightcolors[mc], ls='--', lw=blw)
axes[1,i].text(x=10, y=1.0+counter, s="Gini$_{}={}$".format('{all}',round(df_group['gini'].unique()[0],2)), color=colors[mc], zorder=100)
# baseline (fraction of min in network)
axes[2,i].axhline(df_group['fm'].unique(), c='grey', ls='--', lw=blw)
axes[2,i].text(x=50, y=1.0+counter, s="{}: {}".format(vtype.upper(),round(df_group[vtype].unique()[0],2)), color=colors[mc], zorder=100)
axes[1,0].set_ylabel("Gini\nin top-k\%")
axes[2,0].set_ylabel("\% of minorities\nin top-k\%")
axes[1,c-1].legend(loc='lower right')
# Labels
for i in np.arange(0,c):
axes[1,i].set_xlabel('')
axes[1,i].set_xticklabels("")
axes[1,i].set_ylim((-0.1,1.1))
axes[2,i].set_ylim((-0.1,1.1))
axes[1,i].set_xlim((5,100))
axes[2,i].set_xlim((5,100))
axes[2,i].set_xticks([20,50,80])
if i>0:
axes[1,i].set_yticklabels("")
axes[2,i].set_yticklabels("")
if c%2 != 0:
axes[2,i].set_xlabel('' if (i!=int(c/2)) else "Top-k\% PageRank")
### border color
for ax in axes.flatten():
ax.tick_params(color='grey', labelcolor='grey')
for spine in ax.spines.values():
spine.set_edgecolor('grey')
### Save fig
plt.subplots_adjust(hspace=0.05, wspace=0.1)
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print("{} saved!".format(fn))
plt.show()
plt.close()
return
def plot_vh_inequalities_empirical_summary(df_rank, x='mae', fn=None):
if x not in ['mae','me']:
raise Exception('invalid x-axis (horizontal ineq.)')
### only main data points
tmp = df_rank.groupby(['dataset','kind','metric']).mean().reset_index()
tmp.drop(columns=['rank', 'fmt'], inplace=True)
### main plot
tmp.sort_values(by=["dataset","metric"], inplace=True)
fg = sns.catplot(data=tmp,
x=x, y='gini',
height=2.0,aspect=0.9,
hue='metric', col='dataset')
[plt.setp(ax.texts, text="") for ax in fg.axes.flat]
fg.set_titles(row_template='{row_name}', col_template='{col_name}')
### labels and xticks
for ax in fg.axes.flatten():
ax.set_xlabel("")
ax.set_ylabel("")
# xticks
#ax.xaxis.set_major_locator(plt.MaxNLocator(3))
#ax.xaxis.set_minor_locator(plt.MaxNLocator(10))
# xticklabels
xtls = ax.get_xticklabels()
ax.set_xticklabels([round(float(xtl.get_text()), 2) for i, xtl in enumerate(xtls)], rotation=0)
#ax.axvline(x=0 if x=='me' else 0.5, ls='--', c='black', lw=0.5)
#ax.axhline(y=0.5, ls='--', c='black', lw=0.5)
### ylabel
ylabel = 'Gini'
if fg.axes.shape[0] % 2 != 0:
fg.axes[int(fg.axes.shape[0] / 2), 0].set_ylabel(ylabel)
else:
fg.axes[int(fg.axes.shape[0] / 2), 0].text(-50, 0.28, ylabel, {'ha': 'center', 'va': 'center'}, rotation=90)
### xlabel
xlabel = x.upper() #'MAE of fraction of minorities in top-k%'
if fg.axes.shape[1] % 2 != 0:
fg.axes[-1, int(fg.axes.shape[1] / 2)].set_xlabel(xlabel)
else:
fg.axes[-1, int(fg.axes.shape[1] / 2)].text(0, 0.15, xlabel, {'ha': 'center', 'va': 'center'}, rotation=0)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fg.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_vh_inequalities_per_dataset_and_metric(df_metadata, df_rank, df_summary, fn=None):
plt.close()
metrics = ['pagerank','wtf']
colors = sns.xkcd_palette(["medium green", "medium purple"])
### Main figure
nrows = 2
ncols = df_summary.dataset.nunique()
fig, axes = plt.subplots(nrows, ncols, figsize=(ncols * 2.5, 5), sharey=True)
### curves (per dataset and metric)
df_summary.sort_values("dataset", inplace=True)
for col, dataset in enumerate(df_summary.dataset.unique()):
### title (dataset)
axes[0,col].set_title(dataset)
### vertical inequality (gini)
row = 0
tmp = df_metadata.query("dataset.str.lower()[email protected]()").copy()
for i,metric in enumerate(metrics):
X = tmp[metric].astype(float).values
X_lorenz = utils.lorenz_curve(X)
gc = round(utils.gini(X), 2)
axes[row, col].plot(np.arange(X_lorenz.size) / (X_lorenz.size - 1), X_lorenz, label=metric, color=colors[i])
axes[row, col].text(s=r'$Gini='+str(gc)+'$', x=0, y=0.9 if metric == metrics[0] else 0.8, color=colors[i])
axes[row, 0].set_ylabel('Fraction of total wealth\nin Bottom-k%')
axes[row, col].set_xlabel('')
axes[row, col].plot([0,1],[0,1],linestyle='--',color='grey')
axes[row, col].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[row, col].xaxis.set_minor_locator(plt.MaxNLocator(10))
axes[row, col].set_xlim(0-0.05, 1+0.05)
### horizontal inequality (groups)
row = 1
tmp = df_rank.query("dataset.str.lower()[email protected]()").copy()
for i, metric in enumerate(metrics):
tmp_m = tmp.query("metric==@metric").copy()
if tmp_m.shape[0] == 0:
continue
tmp_m.loc[:, 'rank'] = tmp['rank'].apply(lambda x: x / 100)
tmp_m.sort_values("rank", inplace=True)
tmp_m.plot(x='rank', y='fmt', ax=axes[row, col], label=metric, legend=col==ncols-1, color=colors[i])
if col==ncols-1:
axes[row, col].legend(loc='center right')
fm = df_summary.query("dataset.str.lower()==@dataset.lower()").fm.unique()[0]
_ = axes[row, col].axhline(fm, c='grey', ls='--')
d = tmp_m.me.unique()[0]
_ = axes[row, col].text(s='$ME='+str(round(d, 3))+'$',
x=0, # tmp['rank'].values[1],
y=0.9 if metric == metrics[0] else 0.8, color=colors[i])
axes[row, 0].set_ylabel('Fraction of minorities\nin Top-k rank %')
axes[row, col].set_xlabel('')
axes[row, col].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[row, col].xaxis.set_minor_locator(plt.MaxNLocator(10))
axes[row, col].set_xlim(0 - 0.05, 1 + 0.05)
### xlabel
xlabels = ['Bottom-k% of nodes', 'Top-k rank %']
col = int(axes.shape[1] / 2)
for row, xlabel in enumerate(xlabels):
if ncols % 2 != 0:
axes[row, col].set_xlabel(xlabel)
else:
xt = axes[row, col].get_xticks()
yt = axes[row, col].get_yticks()
axes[row, col].text(min(xt) + 0.3,
min(yt) - 0.1,
xlabel, {'ha': 'center', 'va': 'center'}, rotation=0)
### space between subplots
plt.subplots_adjust(hspace=0.35, wspace=0.1)
### savefig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
################################################################################
# Ranking Fit
################################################################################
def plot_inequalities_fit_improved(df_best_fit, df_empirical, models, markers, valid_metrics=None, vtype='mae', fn=None):
### attribute for horizontal inequality
_, mini, mid, _ = setup_plot_HI_simplified(vtype)
label = vtype.upper()
### datasets (hue1) and metrics (columns)
datasets = df_empirical.dataset.unique().categories #sorted(df_best_fit.dataset.str.lower().unique())
metrics = sorted(df_best_fit.metric.unique())
metrics = metrics if valid_metrics is None else [m for m in metrics if m in valid_metrics]
### init plot
ncol = len(metrics)
nrow = 1
colors = cycle(Set1_9.mpl_colors)
x, y = vtype, 'gini'
xmin, xmax = -1,1 #df_best_fit[x].min(), df_best_fit[x].max()
ymin, ymax = 0,1 #df_best_fit[y].min(), df_best_fit[y].max()
fig,axes = plt.subplots(nrow, ncol, figsize=(2.2*ncol, 2.2), sharey=True, sharex=True)
### scatter plot
tmp_emp = df_empirical.groupby(['dataset','metric','kind']).mean().reset_index()
for h, dataset in enumerate(datasets):
color = next(colors)
for c, metric in enumerate(metrics):
### empirical
tmp = tmp_emp.query("metric == @metric & dataset.str.lower() == @dataset")
axes[c].scatter(x=tmp[x], y=tmp[y], color=color, marker='s', label=dataset)
### synthetic
tmp = df_best_fit.query("metric == @metric & dataset.str.lower() == @dataset")
axes[c].scatter(x=tmp[x], y=tmp[y], color=color, marker=markers[models.index(tmp.kind.iloc[0])], label=None)
### visuals
axes[c].set_xlim((xmin-0.03, xmax+0.03))
axes[c].set_ylim((ymin-0.03, ymax+0.03))
axes[c].set_ylabel(y.title() if c==0 else '')
axes[c].set_xlabel(label)
axes[c].set_title(metric.upper())
for i in np.arange(mini,1.0+0.1,0.1):
i = round(i,1)
axes[c].axhline(y=i, lw=0.5, ls='-', c='#FAF8F7', zorder=0)
axes[c].axvline(x=i, lw=0.5, ls='-', c='#FAF8F7', zorder=0)
axes[c].axhline(y=0.3, lw=0.5, ls='--', c='grey', zorder=0)
axes[c].axhline(y=0.6, lw=0.5, ls='--', c='grey', zorder=0)
if vtype == 'mae':
axes[c].axvline(x=0.5, lw=0.5, ls='--', c='grey', zorder=0)
else:
smooth = 0.05
axes[c].axvline(x=0.0+smooth, lw=0.5, ls='--', c='grey', zorder=0)
axes[c].axvline(x=0.0-smooth, lw=0.5, ls='--', c='grey', zorder=0)
### legend 1 (datasets)
legend1 = axes[-1].legend(bbox_to_anchor=(1.04,1), borderaxespad=0, title='Dataset')
### legend 2 (empirical, model)
labels = [m for m in models if m in df_best_fit.kind.unique()]
h = [plt.plot([],[], color="black", marker=markers[models.index(m)], ls="")[0] for m in labels]
axes[-1].legend(handles=h, labels=labels, bbox_to_anchor=(2.14,1), borderaxespad=0, title="Model", frameon=True)
plt.gca().add_artist(legend1)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
def plot_inequalities_fit(df_fit, df_empirical, models, markers, vtype='mae', fn=None):
vat, mini, mid, color = setup_plot_HI_simplified(vtype)
### data & same datasets (uppercase)
metrics = ['pagerank','wtf'] #, 'indegree','outdegree']
datasets = sorted([d.lower() for d in df_empirical.dataset.unique()])
### best model
# calculating gm (sum of absolute differences between model and empirical gini and mae)
data = df_fit.query("metric in @metrics & not gt.isnull()", engine='python').copy()
data.loc[:,'dataset'] = data.apply(lambda row: [d for d in datasets if d.lower()==row.dataset][0] ,axis=1)
data.loc[:,'gm'] = data.apply(lambda row:
abs(row['gt'] - _get_mean_val_from_df(df_empirical, 'gt', row)) +
abs(row[vat] - _get_mean_val_from_df(df_empirical, vat, row)) , axis=1)
data = data.groupby(['dataset','kind','metric']).mean().reset_index()
# searching for smallest gm
idx = data.groupby(['metric','dataset']).apply(lambda data:data.gm.abs().argmin())
data = data.loc[idx, :]
# getting empirical mean values
tmp = df_empirical.groupby(['metric','kind','dataset']).mean().reset_index()
tmp.loc[:, 'gm'] = None
cols = tmp.columns
# combining empirical and best model
data = data[cols].append(tmp[cols], ignore_index=True)
### colors
colors = cycle(Set1_9.mpl_colors)
me = 's'
mm = set()
### plot per metric / model / dataset
fig,axes = plt.subplots(1,len(metrics),figsize=(len(metrics)*3.7,3),sharex=True,sharey=True)
for dataset in datasets:
color = next(colors)
for c, metric in enumerate(metrics):
axes[c].set_title(metric.upper())
# empirical
tmp = data.query("dataset==@dataset & metric==@metric & kind=='empirical'")
axes[c].scatter(x=tmp[vat], y=tmp.gini, color=color, label=dataset, marker=me)
# model
for m,model in zip(*(markers,models)):
tmp = data.query("dataset==@dataset & metric==@metric & kind==@model")
if tmp.shape[0] > 0:
axes[c].scatter(x=tmp[vat], y=tmp.gini, color=color, marker=m, s=200, label=None)
mm.add(model)
# visuals
axes[c].set_xlabel("Horizontal Inequality\n({} fraction of minorities in top-k rank)".format(vtype.upper()))
axes[c].set_ylabel("")
axes[c].set_ylim((0-0.03,1+0.03))
axes[c].set_xlim((mini-0.03,1+0.03))
for i in np.arange(mini,1.0+0.1,0.1):
i = round(i,1)
axes[c].axhline(y=i, lw=0.5 if i!=0.5 else 1, ls='--', c='lightgrey' if i!=0.5 else 'black', zorder=0)
axes[c].axvline(x=i, lw=0.5 if i!=mid else 1, ls='--', c='lightgrey' if i!=mid else 'black', zorder=0)
### general visuals
axes[0].set_ylabel("Vertical Inequality\n(Gini of rank distribution)")
### legend 1 (datasets)
legend1 = axes[-1].legend(bbox_to_anchor=(1.04,1), borderaxespad=0, title='Dataset')
### legend 2 (empirical, model)
h = [plt.plot([],[], color="black", marker=markers[models.index(m)], ls="")[0] for m in mm]
axes[-1].legend(handles=h, labels=mm, bbox_to_anchor=(1.04,0 if len(metrics)==2 else -0.2),
loc='lower left',
title="Model", frameon=True, borderaxespad=0)
plt.gca().add_artist(legend1)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_vh_inequalities_fit(df_rank, x='mae', group=False, kind=['empirical','DPAH'], metrics='all', fn=None):
if x not in ['mae','me']:
raise Exception('Invalid x-axis (horizontal ineq.)')
datasets = df_rank.dataset.unique()
models = df_rank.kind.unique()
### only main data points
if metrics == 'all':
metrics = ['pagerank', 'wtf']
tmp = df_rank.query("kind in @kind & metric in @metrics").copy()
tmp = tmp.groupby(['dataset','kind','metric','epoch']).mean().reset_index()
tmp.drop(columns=['rank', 'fmt'], inplace=True)
tmp.loc[:,'dataset'] = tmp.loc[:,'dataset'].str.lower()
if group:
tmp = tmp.groupby(['dataset','kind','metric']).mean().reset_index()
### main plot
nrows = tmp.metric.nunique()
ncols = tmp.dataset.nunique()
fig, axes = plt.subplots(nrows, ncols, figsize=(ncols * 2., nrows * 2.), sharey=True, sharex=True)
### subplots
#colors = sns.color_palette() #"tab20")
colors = ['black'] + Set1_6.mpl_colors
for col, dataset in enumerate(datasets):
ax = axes[0, col] if nrows > 1 else axes[col]
ax.set_title(dataset)
for row, metric in enumerate(tmp.metric.unique()):
### y-label right
if nrows > 1:
if col == ncols-1:
ax = axes[row, col] if nrows > 1 else axes[col]
ax.text(s=metric,
x=0.9,# if nrows >1 else 0.34,
y=0.5+(len(metric)*0.018),# if nrows >1 else 0.46,
rotation=-90)
for hue, kind in enumerate(models):
data = tmp.query("dataset==@dataset & metric==@metric & kind==@kind").copy()
ax = axes[row, col] if nrows > 1 else axes[col]
ax.scatter(y=data.gini.values, x=data[x].values, label=kind, color=colors[hue],
marker='x' if kind!='empirical' else 'o',
zorder=1000 if kind == 'empirical' else 1)
### legend
ax = axes[0, -1] if nrows > 1 else axes[-1]
ax.legend(bbox_to_anchor=(1.18 if nrows>1 else 1.05,1), borderaxespad=0)
### baseline
for ax in axes.flatten():
ax.axhline(y=0.3, ls='--', color='darkgrey', lw=0.5)
ax.axhline(y=0.6, ls='--', color='darkgrey', lw=0.5)
ax.set_title(ax.get_title().title() if ax.get_title() != 'aps' else 'APS')
if x=='mae':
ax.axvline(x=0.5, ls='--', color='lightgrey', lw=0.5)
elif x=='me':
beta = 0.05
ax.axvline(x=0.0-beta, ls='--', color='lightgrey', lw=0.5)
ax.axvline(x=0.0+beta, ls='--', color='lightgrey', lw=0.5)
### ylabel left
ylabel = 'Inequality\n(Gini coef. of entire rank distribution)'
#ylabel = 'Individual Inequality\n(Gini coef. of entire rank distribution)'
if nrows % 2 != 0:
ax = axes[int(axes.shape[0]/2), 0] if nrows > 1 else axes[0]
ax.set_ylabel(ylabel)
else:
ax = axes[int(axes.shape[0] / 2), 0] if nrows > 1 else axes[0]
ax.text(-0.85 if not group else -0.85,
1.1,
ylabel, {'ha': 'center', 'va': 'center'}, rotation=90)
### xlabel
xlabel = 'Inequity\n({} error of fraction of minorities across all top-k\% rank)'.format('Mean' if x=='me' else 'Mean absolute')
#xlabel = 'Group Inequality\n({} error of fraction of minorities across all top-k\%'.format('Mean' if x=='me' else 'Mean absolute')
if ncols % 2 != 0:
ax = axes[-1, int(axes.shape[1]/2)] if nrows > 1 else axes[int(axes.shape[0]/2)]
ax.set_xlabel(xlabel)
else:
ax = axes[-1, int(axes.shape[1] / 2)] if nrows > 1 else axes[int(axes.shape[0] / 2)]
ax.text(-0.20,
-0.1 if not group else 0.05,
xlabel, {'ha': 'center', 'va': 'center'}, rotation=0)
### limits
#for ax in axes.flatten():
# smooth=0.03
# mi=0 if x=='mae' else -1
# ax.set_ylim(0.0-smooth,1.0+smooth)
# ax.set_xlim(mi-smooth,1.0+smooth)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fig.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
################################################################################
# Ranking Synthetic
################################################################################
def setup_plot_HI_simplified(vtype='mae'):
x01 = ['mae','aefmt']
x11 = ['me','efmt']
pos = vtype in x01
neg = vtype in x11
vat = vtype #'aefmt' if vtype == 'mae' else 'efmt' if vtype == 'me' else None
mini = 0 if pos else -1 if neg else None
mid = 0.5 if pos else 0.0 if neg else None
#color = "YlGnBu" if pos else "YlOrRd" if neg else None
color = "YlGnBu" if pos else "RdBu" if neg else None
return vat, mini, mid, color
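# Illustrative note on the helper above (values follow directly from the code):
#   setup_plot_HI_simplified('mae') -> ('mae', 0, 0.5, 'YlGnBu')   # absolute error in [0, 1]
#   setup_plot_HI_simplified('me')  -> ('me', -1, 0.0, 'RdBu')     # signed error in [-1, 1]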
def plot_inequalities_simplified_by_model(df, model, metric, vtype='mae', title=False, fn=None):
vat, mini, mid, color = setup_plot_HI_simplified(vtype)
### data
data = df.query("metric == @metric & kind == @model").copy()
### x,y axis
values = ['gini', vat]
nrows = len(values)
ncols = data.fm.nunique()
ind = 'hMM'
col = 'hmm'
### plot
fig,axes = plt.subplots(nrows,ncols,figsize=(ncols*2,nrows*2),sharey=True,sharex=True)
for c,fm in enumerate(sorted(data.fm.unique())):
tmp = data.query("fm==@fm")
r = 0
for ax, value in zip(*(axes,values)):
vmin = mini if value == vat else 0
label = vtype.upper() if value == vat else value.upper()
cmap = color if value == vat else "YlGnBu"
ax = sns.heatmap(tmp.pivot_table(index=ind,columns=col,values=value,aggfunc=np.mean),
cmap=cmap,
vmin=vmin,
vmax=1,
ax=axes[r,c],
cbar=c==ncols-1,
cbar_kws={'label': label, 'use_gridspec':False, 'anchor':(2.2,2.2)})
ax.set_title("fm = {}".format(fm) if r==0 else '')
ax.set_xlabel(col if r==nrows-1 and c==int(ncols/2.) else '')
ax.set_xticklabels(ax.get_xticklabels() if r==nrows-1 else [], rotation=0)
ax.set_ylabel(ind if c==0 else '')
ax.set_aspect('auto')
ytl = ax.get_yticklabels()
if c == 0 and len(ytl)>0:
ax.set_yticklabels(ytl, rotation=0)
if c==0:
y = ax.get_yticklabels()
try:
cbar = ax.collections[0].colorbar
cbar.set_label(label, ha='right', rotation=-90, va='center')
except:
pass
r += 1
if title:
_ = fig.suptitle(model, y=1.)
### space between subplots
plt.gca().invert_yaxis()
plt.subplots_adjust(hspace=0.1, wspace=0.12)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_inequalities_simplified(df, models, metric, vtype='mae', fm=None, sym=True, title=False, fn=None):
vat, mini, mid, color = setup_plot_HI_simplified(vtype)
### data
s = "hmm == hMM" if sym else "fm == @fm"
s = "metric == @metric & {}".format(s)
data = df.query(s).copy()
if sym:
data.rename(columns={'hmm':'h'}, inplace=True)
### x,y axis
values = ['gini', vat]
nrows = len(values)
ncols = len(models)
ind = 'fm' if sym else 'hMM'
col = 'h' if sym else 'hmm'
### plot
fig,axes = plt.subplots(nrows,ncols,figsize=(ncols*2,nrows*2),sharey=True,sharex=not sym)
for c,model in enumerate(models):
tmp = data.query("kind==@model")
r = 0
for ax, value in zip(*(axes,values)):
vmin = mini if value == vat else 0
label = vtype.upper() if value == vat else value.upper()
cmap = color if value == vat else "YlGnBu"
ax = sns.heatmap(tmp.pivot_table(index=ind,columns=col,values=value,aggfunc=np.mean),
cmap=cmap, vmin=vmin, vmax=1,
ax=axes[r,c],
cbar=c==ncols-1,
cbar_kws={'label': label, 'use_gridspec':False, 'anchor':(2.2,2.2)})
ax.set_title(model if r==0 else '')
ax.set_xlabel(col if r==nrows-1 and c==int(ncols/2.) else '')
ax.set_xticklabels(ax.get_xticklabels() if r==nrows-1 else [], rotation=0)
ax.set_ylabel(ind if c==0 else '')
ax.set_aspect('auto')
ytl = ax.get_yticklabels()
if c == 0 and len(ytl)>0:
ax.set_yticklabels(ytl, rotation=0)
if c==0:
y = ax.get_yticklabels()
try:
cbar = ax.collections[0].colorbar
cbar.set_label(label, ha='right', rotation=-90, va='center')
except:
pass
r += 1
if title:
_ = fig.suptitle("Symmetric Homophily" if sym else "hMM vs. hmm [fm={}]".format(fm), y=1.)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
plt.gca().invert_yaxis()
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_inequalities(df, models, markers, vtype='mae', mean=False, metric="pagerank", empirical=None, title=None, fn=None):
vat, mini, mid, color = setup_plot_HI_simplified(vtype)
title = 'Model' if title is None else title
### data
data = df.query("metric == @metric").copy()
if mean:
data = data.groupby(['kind','N','fm','d','hmm','hMM','ploM','plom'])[['gini','me','mae']].agg(['mean','std']).reset_index()
### color
colors = Set1_6.mpl_colors
colors = cycle(colors)
### plot
fig,ax = plt.subplots(1,1,figsize=(6,4))
zorder = len(models)
handles = []
zorder = len(models)
y = 'gini'
for model,marker in zip(*(models,markers)):
tmp = data[data[('kind','')]==model]
#ax.scatter(x=tmp[vtype], y=tmp[y], color=next(colors), label=model, marker=marker, zorder=zorder)
#zorder-=1
x = tmp[('me','mean')]
xe = tmp[('me','std')]
y = tmp[('gini','mean')]
ye = tmp[('gini','std')]
h, = ax.plot(x, y, 'o', color=next(colors), label=model, markersize=1, zorder=zorder)
handles.append(h)
ax.errorbar(x=x, y=y, xerr=xe, yerr=ye, fmt='none', alpha=0.5, ecolor='grey', zorder=zorder)
zorder -= 1
if empirical is not None:
legend1 = ax.legend(title=title, bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
else:
#ax.legend(title='Model',bbox_to_anchor=(0.5,1.0), loc="upper right", ncol=2)
ax.legend(title=title,
handles=handles,
ncol=len(models),
bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, frameon=False, markerscale=6)
### empirical
zorder = 1000
h = []
m = 's'
if empirical is not None:
datasets = sorted(empirical.dataset.unique().tolist())
for dataset in datasets:
tmp = empirical.query("dataset==@dataset & metric == @metric")
color = next(colors)
ax.scatter(x=tmp[vtype], y=tmp[y], color=color, label=dataset, marker=m, zorder=zorder)
h.append(plt.plot([],[], color=color, marker=m, ls="")[0])
zorder-=1
### second legend
ax.legend(handles=h, labels=datasets, title="Empirical", frameon=True,
bbox_to_anchor=(1.04,0), loc="lower left", borderaxespad=0)
plt.gca().add_artist(legend1)
### visuals
#for i in np.arange(mini,1.0+0.1,0.1):
# i = round(i,1)
# ax.axhline(y=i, lw=0.1, ls='--', c='lightgrey')
# ax.axvline(x=i, lw=0.1, ls='--', c='lightgrey')
ax.axhline(y=0.3, lw=0.5, ls='--', c='darkgrey')
ax.axhline(y=0.6, lw=0.5, ls='--', c='darkgrey')
if vtype in ['mae']:
ax.axvline(x=0.5, lw=0.5, ls='--', c='darkgrey')
else:
ax.axvline(x=0.05, lw=0.5, ls='--', c='darkgrey')
ax.axvline(x=-0.05, lw=0.5, ls='--', c='darkgrey')
ax.set_xlabel("")
ax.set_ylabel("")
ax.set_ylabel("Inequality\n(Gini coef. of entire rank distribution)")
ax.set_xlabel("Inequity\n(Mean error of fraction of minorities across all top-k\% rank)")
#ax.set_title(metric.upper())
ax.set_ylim((0-0.03,1+0.03))
ax.set_xlim((mini-0.03,1+0.03))
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
def plot_inequalities_symmetric(df, models, markers, mean=True, metric='pagerank', fn=None):
### data
h = [0.2,0.5,0.8]
fm = [0.1, 0.3, 0.5]
df_sym = df.query("rank == 5 & hmm == hMM & metric == @metric & hmm in @h & fm in @fm").copy()
df_sym.rename(columns={"hmm":"h"}, inplace=True)
### callback
def inequality_plot_sym(x, y, **kwargs):
ax = plt.gca()
data = kwargs.pop("data")
if mean:
data = data.groupby(['fm','h','kind']).mean().reset_index()
data.plot.scatter(x=x, y=y, ax=ax, grid=False, s=150, **kwargs)
### plot
fg = sns.FacetGrid(df_sym, col="fm", row="h",
hue="kind", hue_order=models, hue_kws=dict(marker=markers),
height=1 if df_sym.h.nunique()==11 else 2, aspect=1, margin_titles=True, dropna=False)
fg = fg.map_dataframe(inequality_plot_sym, "mae", "gini")
fg.add_legend(title='Model')
### visuals
for ax in fg.axes.flatten():
for i in np.arange(0.0,1.0+0.1,0.1):
i = round(i,1)
ax.axhline(y=i, lw=0.5 if i!=0.5 else 1, ls='--', c='lightgrey' if i!=0.5 else 'black')
ax.axvline(x=i, lw=0.5 if i!=0.5 else 1, ls='--', c='lightgrey' if i!=0.5 else 'black')
ax.set_xlabel("")
ax.set_ylabel("")
ax.set_ylim((0-0.03,1+0.03))
fg.axes[int(df_sym.fm.nunique()/2),0].set_ylabel("Vertical Inequality\n(Gini of rank distribution)")
fg.axes[-1,int(df_sym.h.nunique()/2)].set_xlabel("Horizontal Inequality\n(MAE fraction of minorities in top-k rank)")
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_inequalities_asymmetric(df, models, markers, mean=True, metric='pagerank', fn=None):
### data
hmm = [0.0, 0.5, 1.0]
hMM = [0.0, 0.2, 0.5, 0.8, 1.0]
fm = [0.1, 0.3, 0.5]
data = df.query("rank == 5 & hmm in @hmm & hMM in @hMM & fm in @fm & metric == @metric").copy()
### callback
def inequality_plot_asymm(x, y, **kwargs):
ax = plt.gca()
data = kwargs.pop("data")
c = kwargs.pop("color")
if data.hMM.unique() == 0.5:
c = "grey"
for i,model in enumerate(models):
tmp = data.query("kind==@model")
if mean:
tmp = tmp.groupby(['fm','hmm','hMM']).mean().reset_index()
if tmp.shape[0]>0:
tmp.plot.scatter(x=x, y=y, ax=ax, grid=False, c=c, marker=markers[i], s=150, **kwargs)
### plot
palette = BrBG_11 if data.hMM.nunique() == 11 else BrBG_5
fg = sns.FacetGrid(data, col="fm", row="hmm", hue="hMM",dropna=False,
height=2, aspect=1, margin_titles=True, palette=palette.mpl_colors)
fg = fg.map_dataframe(inequality_plot_asymm, "mae", "gini")
fg.add_legend()
### visuals
for ax in fg.axes.flatten():
for i in np.arange(0.0,1.0+0.1,0.1):
i = round(i,1)
ax.axhline(y=i, lw=0.5 if i!=0.5 else 1, ls='--', c='lightgrey' if i!=0.5 else 'black')
ax.axvline(x=i, lw=0.5 if i!=0.5 else 1, ls='--', c='lightgrey' if i!=0.5 else 'black')
ax.set_ylim((0-0.03,1+0.03))
ax.set_xlabel("")
ax.set_ylabel("")
fg.axes[1,0].set_ylabel("Vertical Inequality\n(Gini of rank distribution)")
fg.axes[-1,int(data.fm.nunique()/2.)].set_xlabel("Horizontal Inequality\n(MAE fraction of minorities in top-k rank)")
### second legend
h = [plt.plot([],[], color="black", marker=m, ls="")[0] for m,l in zip(*[markers,models])]
fg.axes[-1,int(data.fm.nunique()/2.)].legend(handles=h, labels=models, bbox_to_anchor=(-1.0,3.4,3,1),
loc="lower left", title="Model", frameon=False,
mode='expand',ncol=len(models), borderaxespad=0)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
def plot_synthetic_quadrant_homophily(df_rank, qtype='qae', model=None, metric='pagerank', fn=None):
'''
var qtype: quadrant type, qae (absolute error [0,1]) qe (error [-1,1])
'''
def facet_heatmap_quadrant_homo(data, color, **kwargs):
n = kwargs['vmax'] # quadrants
ROM = ['I','II','III','IV','V','VI','VII','VIII','IX']
ROM = ROM[:n]
ax = plt.gca()
tmp = data.pivot_table(index='hMM', columns='hmm', values=qtype, aggfunc=lambda x: x.mode().iat[0])
#print(tmp)
#cmap = sns.color_palette("Paired", 6 if qtype == 'qe' else 4)
#if qtype == 'qe':
# colors = [cmap[1],cmap[3],cmap[5]]
# individual ineq: high, low
#cmap = sns.color_palette("Paired", 6 if qtype == 'qe' else 4)
#if qtype == 'qe':
# colors = [cmap[1],cmap[3],cmap[5],cmap[4],cmap[2],cmap[0]]
# individual ineq: high, medium, low
if qtype == 'qe':
colors = Blues_3.mpl_colors + Reds_3.mpl_colors + Greens_3.mpl_colors
colors = [colors[5],colors[8],colors[2],
colors[4],colors[7],colors[1],
colors[3],colors[6],colors[0]]
colors = colors[:n]
ax = sns.heatmap(tmp, cmap=colors, fmt = '', **kwargs)
# modify colorbar:
colorbar = ax.collections[0].colorbar
colorbar.ax.set_ylabel("Disparity", rotation=270, labelpad=10) #Region
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(ROM[:n])
# change order
colorbar.ax.invert_yaxis()
from org.gesis.lib import paper
data = df_rank.query("kind==@model & metric==@metric & rank==100").copy()
important_cols = ['kind', 'metric', 'fm', 'hMM', 'hmm', 'gini', 'mae', 'me'] # global, using the whole ranking
data.drop(columns=[c for c in data.columns if c not in important_cols], inplace=True)
data.loc[:,'qe'] = data.apply(lambda row: paper.get_quadrant_error(row,herror='me',verror='gini'), axis=1)
data.loc[:,'qae'] = data.apply(lambda row: paper.get_quadrant_absolute_error(row,herror='mae',verror='gini'), axis=1)
htype = 'mae' if qtype == 'qae' else 'me'
vmin, vmax = data[qtype].min(), data[qtype].max()
col = 'fm'
fg = sns.FacetGrid(data=data, col=col, margin_titles=True, height=2.5, aspect=0.8)
cbar_ax = fg.fig.add_axes([.99, .3, .02, .4])
fg.map_dataframe(facet_heatmap_quadrant_homo, cbar_ax=cbar_ax, vmin=vmin, vmax=vmax)
nc = data[col].nunique()
nr = 1
hmms = sorted(data['hmm'].unique())
for k,ax in enumerate(fg.axes.flatten()):
r, c = int(k/nc), k%nc
ax.set_ylabel(r"$h_{MM}$" if c==0 and r==int(nr/2.) else '')
ax.set_xlabel(r"$h_{mm}$" if r==nr-1 and c==int(nc/2) else '')
if ax.get_title() != '':
ax.set_title(ax.get_title().replace("fm",r"$f_{m}$"))
ax.set_xticklabels([xtl.get_text() if i%2==0 else '' for i,xtl in enumerate(ax.get_xticklabels())], rotation=0)
if k==0:
ax.set_yticklabels([xtl.get_text() if i%2==0 else '' for i,xtl in enumerate(ax.get_yticklabels())], rotation=0)
### invert y axis
plt.gca().invert_yaxis()
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fg.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
def plot_synthetic_quadrants(df_rank, qtype='qae', model=None, metric='pagerank', all=False, fn=None):
'''
var qtype: quadrant type, qae (absolute error [0,1]) qe (error [-1,1])
'''
def facet_heatmap_quadrant(data, color, **kwargs):
n = kwargs['vmax'] # quadrants
ROM = ['I','II','III','IV','V','VI','VII','VIII','IX']
ax = plt.gca()
tmp = data.pivot_table(index='hMM', columns='rank', values=qtype, aggfunc=lambda x: x.mode().iat[0])
tmp_dir = data.pivot_table(index='hMM', columns='rank', values='dir', aggfunc=lambda x: x.mode().iat[0])
if qtype == 'qe':
### when error is signed (+ or -)
tmp_dir.replace(['+','-','='], '', inplace=True)
# individual ineq: high, low
#cmap = sns.color_palette("Paired", 6 if qtype == 'qe' else 4)
#if qtype == 'qe':
# colors = [cmap[1],cmap[3],cmap[5],cmap[4],cmap[2],cmap[0]]
# individual ineq: high, medium, low
if qtype == 'qe':
colors = Blues_3.mpl_colors + Reds_3.mpl_colors + Greens_3.mpl_colors
colors = [colors[5],colors[8],colors[2],
colors[4],colors[7],colors[1],
colors[3],colors[6],colors[0]]
ax = sns.heatmap(tmp, cmap=colors, fmt = '', **kwargs)
# modify colorbar:
colorbar = ax.collections[0].colorbar
colorbar.ax.set_ylabel("Disparity", rotation=270, labelpad=10) #Region
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(ROM[:n])
# change order
colorbar.ax.invert_yaxis()
hmm = [0.1, 0.5, 0.9]
hMM = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
fm = [0.1, 0.3, 0.5]
if all:
hmm = hMM
fm = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
data = df_rank.query("kind==@model & metric==@metric & hmm in @hmm & hMM in @hMM & fm in @fm").copy()
data.drop(columns=['dataset'], inplace=True)
vmin, vmax = data[qtype].min(), data[qtype].max()
col = 'fm'
row = 'hmm'
fg = sns.FacetGrid(data=data, col=col, row=row, margin_titles=True, height=2, aspect=1)
cbar_ax = fg.fig.add_axes([.99, .3, .02, .4])
fg.map_dataframe(facet_heatmap_quadrant, cbar_ax=cbar_ax, vmin=vmin, vmax=vmax)
nc = data[col].nunique()
nr = data[row].nunique()
ranks = sorted(data['rank'].unique())
for k,ax in enumerate(fg.axes.flatten()):
r, c = int(k/nc), k%nc
ax.set_ylabel(r"$h_{MM}$" if c==0 and r==int(nr/2.) else '')
ax.set_xlabel("Top-k\% rank" if r==nr-1 and c==int(nc/2) else '')
if r == nr-1:
ax.set_xticks([r+0.5 for r in np.arange(len(ranks))])
ax.set_xticklabels([r if r in [10,50,90] else '' for r in ranks], rotation=0)
### invert y axis
plt.gca().invert_yaxis()
### right-ylabel
#[plt.setp(ax.texts, text="") for ax in fg.axes.flat]
#fg.set_titles(row_template = row + ' = {row_name}', bbox=dict(boxstyle='square,pad=-0.3', fc="white", ec="none"))
for ax in fg.axes.flatten():
if ax.get_title() != '':
ax.set_title(ax.get_title().replace("fm",r"$f_{m}$"))
if ax.texts:
txt = ax.texts[0]
if txt.get_text() != '':
ax.text(txt.get_unitless_position()[0]+0.01,
txt.get_unitless_position()[1],
txt.get_text().replace("hmm",r"$h_{mm}$"),
transform=ax.transAxes,
rotation=270,
va='center')
ax.texts[0].remove()
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fg.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
def plot_synthetic_rankings(df_rank, model=None, metric='pagerank', y='fmt', sym=True, fn=None):
if y not in ['fmt','gt']:
raise Exception('Invalid x-axis (horizontal ineq.)')
col = 'fm'
row = 'hmm'
fm = [0.1, 0.3, 0.5]
data = df_rank.query("metric == @metric & fm in @fm").copy()
if model is not None:
data = data.query("kind == @model")
### Type of homophily: symmetric or not
if sym:
data = data.query("hmm == hMM").copy()
colors = BrBG_11.mpl_colors
colors[int(len(colors)/2)] = 'lightgrey'
fg = sns.catplot(data=data,
col=col,
hue='hMM',
x='rank', y=y,
kind='point',
sharey=True,
height=2.5, aspect=1,
legend=True,
legend_out=True,
palette=colors
)
else:
hm = [0.2, 0.5, 0.8]
hM = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
palette = BrBG_11 if len(hM)==11 else BrBG_5
colors = palette.mpl_colors
colors[int(len(colors)/2)] = 'lightgrey'
data = data.query("hmm in @hm and hMM in @hM").copy()
fg = sns.catplot(data=data,
col=col,
row=row,
hue='hMM',
margin_titles=True,
x='rank', y=y,
kind='point',
sharey=True,
height=2, aspect=1,
legend=True,
legend_out=True,
palette=colors)
### baseline: fm
ncol = data[col].nunique()
nrow = 1 if sym else data[row].nunique()
for i, ax in enumerate(fg.axes.flatten()):
# labels
ax.set_xlabel('')
ax.set_ylabel('')
# xticks
ax.set_xticklabels([int(float(xtl.get_text()))
if int(float(xtl.get_text())) in [10, 50, 90] else '' for xtl in ax.get_xticklabels()],
rotation=0)
# baseline
try:
r = int(i / ncol)
c = i - (r * ncol)
if y == 'fmt':
fm = float(fg.axes[0,c].get_title().replace("fm = ","").replace(" ",""))
else:
fm = 0
ax.axhline(fm, c='black', ls='--', lw=2.0, zorder=1000)
except:
pass
### labels
fg.axes[-1,int(ncol/2)].set_xlabel('Top-k rank %')
ylabel = 'Fraction of minorities' if y == 'fmt' else 'Gini'
fg.axes[int(fg.axes.shape[0]/2),0].set_ylabel('{} in Top-k rank %'.format(ylabel))
### legend
if sym:
fg._legend.set_title("h")
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fg.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
plt.show()
plt.close()
def plot_vh_inequalities_synthetic(df_rank, metric='pagerank', sym=True, fn=None):
### validation
VALID_METRICS = ['pagerank','wtf']
if metric not in VALID_METRICS:
raise ValueError('metric {} is not valid.'.format(metric))
### only main data points
tmp = df_rank.query("rank==5 & metric==@metric").copy()
tmp.drop(columns=['rank', 'fmt'], inplace=True)
if sym:
tmp = tmp.query("hmm == hMM").copy()
colors = BrBG_11.mpl_colors
else:
hm = [0.2,0.8]
hM = [0.0, 0.2, 0.5, 0.8, 1.0]
tmp = tmp.query("hmm in @hm and hMM in @hM").copy()
colors = BrBG_5.mpl_colors
colors[int(len(colors)/2)] = 'lightgrey'
tmp.sort_values(['hmm','hMM','fm'], inplace=True)
### main plot
nrows = 1 if sym else tmp.hmm.nunique()
ncols = tmp.fm.nunique()
fig, axes = plt.subplots(nrows, ncols, figsize=(ncols * 2.5, 5 if not sym else 2.5), sharey=True, sharex=True)
### subplots
for col, fm in enumerate(tmp.fm.unique()):
if sym:
axes[col].set_title("fm={}".format(fm))
for hue, hMM in enumerate(tmp.hMM.unique()):
data = tmp.query("fm==@fm & hmm==hMM & hMM==@hMM").copy()
axes[col].scatter(y=data.gini.values, x=data.mae.values, label=hMM, color=colors[hue], marker='x')
axes[0].legend(loc='lower left', title='homophily',
bbox_to_anchor=(-0.04, 1.12, ncols*1.075, 0.2), mode='expand',
ncol=tmp.hMM.nunique(), handletextpad=0.05, frameon=False)
# plt.legend(loc='center left', title='h', bbox_to_anchor=(1, 0.5))
else:
axes[0, col].set_title("fm={}".format(fm))
### ylabel (right)
for row, hmm in enumerate(tmp.hmm.unique()):
if col == ncols - 1:
s = 'hmm={}'.format(hmm)
axes[row, col].text(s=s,
y=0.67,
x=(df_rank.mae.max()-0.04) , rotation=-90)
### scatter plot
for hue, hMM in enumerate(tmp.hMM.unique()):
data = tmp.query("fm==@fm & hmm==@hmm & hMM==@hMM").copy()
axes[row, col].scatter(y=data.gini.values, x=data.mae.values, label=hMM, color=colors[hue], marker='x')
axes[0,1].legend(loc='lower left', title='hMM',
bbox_to_anchor=(-0.26, 1.12, 1.5, 0.2), mode='expand',
ncol=tmp.hMM.nunique(), handletextpad=0.05, frameon=False)
#axes[0, 2].legend(loc='lower left', title='hMM'
### ylabel (left)
ylabel = 'Gini coefficient'
row = int(nrows / 2)
ax = axes[0] if sym else axes[row,0]
if nrows % 2 != 0:
ax.set_ylabel(ylabel)
else:
ax.text(0.64 if sym else -0.25 , #100,
0.65 if sym else 0.8, #0.65,
ylabel, {'ha': 'center', 'va': 'center'}, rotation=90)
### xlabel
xlabel = 'MAE of fraction of minorities in top-k%'
col = int(ncols / 2)
ax = axes[-1,col] if not sym else axes[col]
if ncols % 2 != 0:
ax.set_xlabel(xlabel)
else:
ax.text(0.35,
-0.08 if not sym else -0.1,
xlabel, {'ha': 'center', 'va': 'center'}, rotation=0)
### space between subplots
plt.subplots_adjust(hspace=0.1, wspace=0.1)
### Save fig
if fn is not None:
fig.savefig(fn, bbox_inches='tight')
print('{} saved!'.format(fn))
################################################################################
# Special handlers
################################################################################
def _get_mean_val_from_df(df_emp, att, row):
s = "dataset=='{}' & metric=='{}' & rank=={}".format(row.dataset, row.metric, row['rank'])
return df_emp.query(s)[att].mean()
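# Illustrative note: for row.dataset='aps', row.metric='pagerank', row['rank']=100 the
# helper builds the query "dataset=='aps' & metric=='pagerank' & rank==100" and returns
# the mean of `att` over the matching empirical rows (example values are hypothetical).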
def feature_importance(data, model, metric, kfold, fn=None):
fig,axes = plt.subplots(2,2,figsize=(5,5))
df_summary = pd.DataFrame(columns=['kind','output','r2mean','r2std','feature','importance'])
for r,local in enumerate([False,True]):
for c,output in enumerate(['gini','error']):
if local:
df = data.query("kind==@model & metric==@metric").copy() #local
y = 'efmt' if output == 'error' else 'gt'
features = ['fm','hMM','hmm','rank','random', y]
else:
df = data.query("rank==5 & kind==@model & metric==@metric").copy() #global
y = 'me' if output == 'error' else 'gini'
features = ['fm','hMM','hmm','random', y]
df.loc[:,'random'] = np.random.random(size=df.shape[0])
df = df[features]
scaler = MinMaxScaler(feature_range=(0, 1))
Z = scaler.fit_transform(df)
X = Z[:,:-1]
y = Z[:,-1]
### model performance
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
r2s = cross_val_score(rf, X, y, cv=kfold)
preds = cross_val_predict(rf, X, y, cv=kfold)
axes[r,c].scatter(y, preds, edgecolors=(0, 0, 0))
axes[r,c].plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
axes[r,c].text(s='R2={:.2f}\n({:.4f})'.format(np.mean(r2s), np.std(r2s)),x=0,y=0.8,zorder=100)
axes[r,c].set_xlabel('Measured' if r==1 else '')
axes[r,c].set_ylabel('Predicted' if c==0 else '')
axes[r,c].set_title("{} {}".format('Local' if local else 'Global', output.title()))
### feature importance
cv = cross_validate(rf, X, y, cv=kfold, scoring = 'r2', return_estimator =True)
tmp = pd.DataFrame(columns = features[:-1])
for idx,estimator in enumerate(cv['estimator']):
tmp = tmp.append( | pd.Series(estimator.feature_importances_,index=features[:-1]) | pandas.Series |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
    Loads the CSV files with Twitter messages and categories and returns a merged dataframe
    Input:
       messages_filepath: CSV file with messages
       categories_filepath: CSV file with categories
    Output:
       - df: merged DataFrame
'''
messages = pd.read_csv(messages_filepath) #load csv file into pd
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import os
import glob
import pandas as pd
import csv
from collections import defaultdict
import pyrosetta
pyrosetta.init()
def emboss_needle_search(target_seq_path, template_seq_path):
for template_seq in template_seq_path:
target_seq_id = os.path.basename(target_seq_path).split('.')[0]
template_seq_id = os.path.basename(template_seq).split('.')[0]
print('needle -sid1 {0} -asequence {1} -sid2 {2} -bsequence {3} -gapopen 10.0 -gapextend 0.5 -aformat3 markx3 -outfile {0}_{2}.needle'.format(target_seq_id, target_seq_path, template_seq_id, template_seq))
os.system('needle -sid1 {0} -asequence {1} -sid2 {2} -bsequence {3} -gapopen 10.0 -gapextend 0.5 -aformat3 markx3 -outfile {0}_{2}.needle'.format(target_seq_id, target_seq_path, template_seq_id, template_seq))
def select_top_hits_from_emboss_and_rocs_pdb(emboss_align_file_path, rocs_align_file_path, target_seq_path):
emboss_result = | pd.DataFrame(columns = ('query', 'template', 'length', 'identity', 'similarity', 'gaps', 'score')) | pandas.DataFrame |
import re
import warnings
from datetime import datetime, timedelta
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from woodwork.logical_types import Double, Integer
from rayml.exceptions import (
MethodPropertyNotFoundError,
MissingComponentError,
ParameterNotUsedWarning,
)
from rayml.pipelines import ComponentGraph
from rayml.pipelines.components import (
DateTimeFeaturizer,
DropRowsTransformer,
ElasticNetClassifier,
Estimator,
Imputer,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
RandomForestClassifier,
SelectColumns,
StandardScaler,
TargetImputer,
Transformer,
Undersampler,
)
from rayml.problem_types import is_classification
from rayml.utils import infer_feature_types
class DummyTransformer(Transformer):
name = "Dummy Transformer"
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
class TransformerA(DummyTransformer):
"""copy class"""
class TransformerB(DummyTransformer):
"""copy class"""
class TransformerC(DummyTransformer):
"""copy class"""
class DummyEstimator(Estimator):
name = "Dummy Estimator"
model_family = None
supported_problem_types = None
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
class EstimatorA(DummyEstimator):
"""copy class"""
class EstimatorB(DummyEstimator):
"""copy class"""
class EstimatorC(DummyEstimator):
"""copy class"""
@pytest.fixture
def dummy_components():
return TransformerA, TransformerB, TransformerC, EstimatorA, EstimatorB, EstimatorC
def test_init(example_graph):
comp_graph = ComponentGraph()
assert len(comp_graph.component_dict) == 0
graph = example_graph
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_str_components():
graph = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "OneHot_ElasticNet.x", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_instantiated():
graph = {
"Imputer": [
Imputer(numeric_impute_strategy="constant", numeric_fill_value=0),
"X",
"y",
]
}
component_graph = ComponentGraph(graph)
component_graph.instantiate(
{"Imputer": {"numeric_fill_value": 10, "categorical_fill_value": "Fill"}}
)
cg_imputer = component_graph.get_component("Imputer")
assert graph["Imputer"][0] == cg_imputer
assert cg_imputer.parameters["numeric_fill_value"] == 0
assert cg_imputer.parameters["categorical_fill_value"] is None
def test_invalid_init():
invalid_graph = {"Imputer": [Imputer, "X", "y"], "OHE": OneHotEncoder}
with pytest.raises(
ValueError, match="All component information should be passed in as a list"
):
ComponentGraph(invalid_graph)
graph = {
"Imputer": [
None,
"X",
"y",
]
}
with pytest.raises(
ValueError, match="may only contain str or ComponentBase subclasses"
):
ComponentGraph(graph)
graph = {
"Fake": ["Fake Component", "X", "y"],
"Estimator": [ElasticNetClassifier, "Fake.x", "y"],
}
with pytest.raises(MissingComponentError):
ComponentGraph(graph)
def test_init_bad_graphs():
graph_with_cycle = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "Estimator.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
with pytest.raises(ValueError, match="given graph contains a cycle"):
ComponentGraph(graph_with_cycle)
graph_with_more_than_one_final_component = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
with pytest.raises(ValueError, match="graph has more than one final"):
ComponentGraph(graph_with_more_than_one_final_component)
graph_with_unconnected_imputer = {
"Imputer": ["Imputer", "X", "y"],
"DateTime": ["DateTime Featurizer", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"DateTime.x",
"y",
],
}
with pytest.raises(ValueError, match="The given graph is not completely connected"):
ComponentGraph(graph_with_unconnected_imputer)
def test_order_x_and_y():
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
assert component_graph.compute_order == ["Imputer", "OHE", "Random Forest"]
def test_list_raises_error():
component_list = ["Imputer", "One Hot Encoder", RandomForestClassifier]
with pytest.raises(
ValueError,
match="component_dict must be a dictionary which specifies the components and edges between components",
):
ComponentGraph(component_list)
def test_instantiate_with_parameters(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert not isinstance(component_graph.get_component("Imputer"), Imputer)
assert not isinstance(
component_graph.get_component("Elastic Net"), ElasticNetClassifier
)
parameters = {
"OneHot_RandomForest": {"top_n": 3},
"OneHot_ElasticNet": {"top_n": 5},
"Elastic Net": {"max_iter": 100},
}
component_graph.instantiate(parameters)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
assert isinstance(component_graph.get_component("Imputer"), Imputer)
assert isinstance(
component_graph.get_component("Random Forest"), RandomForestClassifier
)
assert isinstance(
component_graph.get_component("Logistic Regression Classifier"),
LogisticRegressionClassifier,
)
assert component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 3
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 5
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 100
@pytest.mark.parametrize("parameters", [None, {}])
def test_instantiate_without_parameters(parameters, example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
if parameters is not None:
component_graph.instantiate(parameters)
else:
component_graph.instantiate()
assert (
component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 10
)
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 10
assert component_graph.get_component(
"OneHot_RandomForest"
) is not component_graph.get_component("OneHot_ElasticNet")
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
def test_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
with pytest.raises(ValueError, match="Cannot reinstantiate a component graph"):
component_graph.instantiate({"OneHot": {"top_n": 7}})
def test_bad_instantiate_can_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Error received when instantiating component"):
component_graph.instantiate(
parameters={"Elastic Net": {"max_iter": 100, "fake_param": None}}
)
component_graph.instantiate({"Elastic Net": {"max_iter": 22}})
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 22
def test_get_component(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder
assert (
component_graph.get_component("Logistic Regression Classifier")
== LogisticRegressionClassifier
)
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_component("Fake Component")
component_graph.instantiate(
{
"OneHot_RandomForest": {"top_n": 3},
"Random Forest": {"max_depth": 4, "n_estimators": 50},
}
)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder()
assert component_graph.get_component("OneHot_RandomForest") == OneHotEncoder(
top_n=3
)
assert component_graph.get_component("Random Forest") == RandomForestClassifier(
n_estimators=50, max_depth=4
)
def test_get_estimators(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Cannot get estimators until"):
component_graph.get_estimators()
component_graph.instantiate()
assert component_graph.get_estimators() == [
RandomForestClassifier(),
ElasticNetClassifier(),
LogisticRegressionClassifier(),
]
component_graph = ComponentGraph({"Imputer": ["Imputer", "X", "y"]})
component_graph.instantiate()
assert component_graph.get_estimators() == []
def test_parents(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
component_graph.instantiate()
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
def test_get_last_component(example_graph):
component_graph = ComponentGraph()
with pytest.raises(
ValueError, match="Cannot get last component from edgeless graph"
):
component_graph.get_last_component()
component_graph = ComponentGraph(example_graph)
assert component_graph.get_last_component() == LogisticRegressionClassifier
component_graph.instantiate()
assert component_graph.get_last_component() == LogisticRegressionClassifier()
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
assert component_graph.get_last_component() == Imputer
component_graph = ComponentGraph(
{"Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"]}
)
assert component_graph.get_last_component() == OneHotEncoder
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_fit_component_graph(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
mock_fit_transform.return_value = pd.DataFrame(X)
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 3
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.TargetImputer.fit_transform")
@patch("rayml.pipelines.components.OneHotEncoder.fit_transform")
def test_fit_correct_inputs(
mock_ohe_fit_transform, mock_imputer_fit_transform, X_y_binary
):
X, y = X_y_binary
X = | pd.DataFrame(X) | pandas.DataFrame |
#=======================================================================================================================
#
# ALLSorts v2 - The STAR aligner counts creator thingy!
# Note: Only for hg19
#
# Author: <NAME>
# License: MIT
#
# Input: user --help for all parameters
# Output: Counts formatted for ALLSorts
#
#=======================================================================================================================
''' --------------------------------------------------------------------------------------------------------------------
Imports
---------------------------------------------------------------------------------------------------------------------'''
import pandas as pd
import numpy as np
import glob
import sys, argparse
from pathlib import Path
''' --------------------------------------------------------------------------------------------------------------------
Functions
---------------------------------------------------------------------------------------------------------------------'''
def root() -> Path:
"""Returns project root folder."""
return Path(__file__).parent.parent
def user_input():
cli = argparse.ArgumentParser(description="ALLSorts Counts CLI")
cli.add_argument('-directory', '-d',
required=True,
help=("""Path to the STAR output with quantMode Gene parameter enabled.
I.e. *_ReadsPerGene.out.tab exist within this directory."""))
cli.add_argument('-reference', '-r',
required=False,
help=("""List of genes and their aliases. Generally speaking, leave as the default."""))
cli.add_argument('-strand', '-s',
required=True,
help=("""Please indicate whether the alignments are unstranded/reverse/forward strand. i.e. -strand no, (no, reverse, forward)"""))
cli.add_argument('-output', '-o',
required=True,
help=("""Please indicate the output path (/path/to/output/counts.txt)."""))
user_input = cli.parse_args()
return user_input
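# Illustrative invocation (script name assumed, not taken from the original source):
#   python create_counts.py -d /path/to/star_output/ -s reverse -o /path/to/counts.txt
# -reference may be omitted, in which case resources/genes_filtered.gtf (see below) is used.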
def load_references(gtf):
if not gtf:
gtf = str(root())+"/resources/genes_filtered.gtf"
gene_lookup = pd.read_csv(gtf,
header=None,
index_col=0,
sep="\t").to_dict()[1]
return gene_lookup
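# The reference file is read as a two-column, tab-separated table: column 0 becomes the
# dictionary key and column 1 the alias. Hypothetical rows for illustration only:
#   ENSG00000139618    BRCA2
#   ENSG00000141510    TP53
# load_references() would turn these into {"ENSG00000139618": "BRCA2", ...} for harmoniser().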
def harmoniser(genes, gene_info):
update_genes = []
for gene in list(genes):
try:
update_genes.append(gene_info[gene])
        except KeyError:
            update_genes.append("dropitlikeitshot")  # placeholder for genes missing from the reference
return update_genes
def collate_counts(directory, strand):
first = True
progress = []
if directory[-1] != "/":
directory += "/"
for file in glob.glob(directory+"*ReadsPerGene.out.tab"):
name = file.split("/")[-1].split("_ReadsPerGene")[0]
tab_open = pd.read_csv(file, sep="\t", index_col=0, header=None)
tab_open.columns = ["no", "forward", "reverse"]
tab_open.drop(["N_unmapped", "N_multimapping", "N_noFeature", "N_ambiguous"], inplace=True)
if first:
first = False
counts = pd.DataFrame(index=tab_open.index)
try:
            temp = pd.DataFrame(tab_open[strand])
except:
print("Strand information incorrect. Choose one of [no/forward/reverse]")
sys.exit(0)
temp.columns = [name]
progress.append(temp)
counts = | pd.concat(progress, join="inner", axis=1) | pandas.concat |
import subprocess, os
import matplotlib.pyplot as plt
import pandas as pd
def get_performance(cmd):
command = subprocess.run(cmd.split(), stdout=subprocess.PIPE)
return command.stdout.decode('utf-8').split()[-1]
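# Sketch of the assumption behind get_performance(): the game-of-life binary is expected to
# print its timing as the final whitespace-separated token of stdout (hypothetical output
# "total time: 1.234"), so the helper returns the string "1.234"; callers cast it if they
# need a number.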
cmd = "./model/{} ../sample_inputs/{} {} {} {} {} {}"
def search_sync():
file_name = "glife_kernel_1"
input_name = "make-a_71_81"
method_names = ["single_thread", "multi_thread", "gpu"]
display = 0
cores= [1, 16, 0]
gens = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
result = {}
result['gens'] = gens
for method_name, core in zip(method_names, cores):
print(method_name+'...', end="", flush=True)
width, height = 500, 500
result[method_name] = []
for gen in gens:
cmd_exc = cmd.format(file_name, input_name, display, core, gen, width, height)
result[method_name].append(get_performance(cmd_exc))
print('done')
df = pd.DataFrame(result)
df.to_csv("gen.csv")
def search_size():
file_name = "glife_kernel_1"
input_name = "make-a_71_81"
method_names = ["single_thread", "multi_thread", "gpu"]
display = 0
cores= [1, 16, 0]
gen = 100
widths = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
heights = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
result = {}
result['size'] = widths
for method_name, core in zip(method_names, cores):
print(method_name+'...', end="", flush=True)
result[method_name] = []
for width, height in zip(widths, heights):
cmd_exc = cmd.format(file_name, input_name, display, core, gen, width, height)
result[method_name].append(get_performance(cmd_exc))
print('done')
df = | pd.DataFrame(result) | pandas.DataFrame |
import csv
import numpy as np
import pandas as pd
df1 = pd.read_table('./train1_robert_result.txt',header=None)
df2 = pd.read_table('./train1_nezha_result.txt',header=None)
df3 = | pd.read_table('./train1_skep_result.txt',header=None) | pandas.read_table |
import collections
import json
import re
from collections import defaultdict
from io import StringIO
import numpy as np
import pandas as pd
import plotly.offline as opy
from clustergrammer import Network
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from loguru import logger
from linker.common import load_obj
from linker.constants import *
from linker.metadata import get_gene_names, get_compound_metadata, clean_label
from linker.models import Analysis, AnalysisData, Share, AnalysisHistory
from linker.reactome import ensembl_to_uniprot, uniprot_to_reaction, compound_to_reaction, \
reaction_to_pathway, reaction_to_uniprot, reaction_to_compound, uniprot_to_ensembl
from linker.reactome import get_reaction_df
from linker.views.pipelines import GraphOmicsInference
Relation = collections.namedtuple('Relation', 'keys values mapping_list')
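# Shape of a Relation, with illustrative values only (the real dict keys come from the *_PK
# constants imported from linker.constants): `keys`/`values` hold the unique source and
# target ids, and `mapping_list` holds one dict per edge, e.g.
#   Relation(keys=['ENSG1'], values=['P1', 'P2'],
#            mapping_list=[{'gene_pk': 'ENSG1', 'protein_pk': 'P1'},
#                          {'gene_pk': 'ENSG1', 'protein_pk': 'P2'}])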
def reactome_mapping(observed_gene_df, observed_protein_df, observed_compound_df,
compound_database_str, species_list, metabolic_pathway_only):
### all the ids that we have from the user ###
observed_gene_ids = get_ids_from_dataframe(observed_gene_df)
observed_protein_ids = get_ids_from_dataframe(observed_protein_df)
# try to convert all kegg ids to chebi ids, if possible
logger.info('Converting kegg ids -> chebi ids')
observed_compound_ids = get_ids_from_dataframe(observed_compound_df)
KEGG_2_CHEBI = load_obj(settings.EXTERNAL_KEGG_TO_CHEBI)
for cid in observed_compound_ids:
if cid not in KEGG_2_CHEBI:
logger.warning('Not found: %s' % cid)
KEGG_2_CHEBI[cid] = cid
if observed_compound_df is not None:
if compound_database_str == COMPOUND_DATABASE_CHEBI:
observed_compound_df.iloc[:, 0] = observed_compound_df.iloc[:, 0].map(
KEGG_2_CHEBI) # assume 1st column is id
observed_compound_ids = get_ids_from_dataframe(observed_compound_df)
### map genes -> proteins ###
logger.info('Mapping genes -> proteins')
gene_2_proteins_mapping, _ = ensembl_to_uniprot(observed_gene_ids, species_list)
gene_2_proteins = make_relations(gene_2_proteins_mapping, GENE_PK, PROTEIN_PK, value_key=None)
### maps proteins -> reactions ###
logger.info('Mapping proteins -> reactions')
protein_ids_from_genes = gene_2_proteins.values
known_protein_ids = list(set(observed_protein_ids + protein_ids_from_genes))
protein_2_reactions_mapping, _ = uniprot_to_reaction(known_protein_ids, species_list)
protein_2_reactions = make_relations(protein_2_reactions_mapping, PROTEIN_PK, REACTION_PK,
value_key='reaction_id')
### maps compounds -> reactions ###
logger.info('Mapping compounds -> reactions')
compound_2_reactions_mapping, _ = compound_to_reaction(observed_compound_ids, species_list)
compound_2_reactions = make_relations(compound_2_reactions_mapping, COMPOUND_PK, REACTION_PK,
value_key='reaction_id')
### maps reactions -> metabolite pathways ###
logger.info('Mapping reactions -> metabolite pathways')
reaction_ids_from_proteins = protein_2_reactions.values
reaction_ids_from_compounds = compound_2_reactions.values
reaction_ids = list(set(reaction_ids_from_proteins + reaction_ids_from_compounds))
reaction_2_pathways_mapping, reaction_2_pathways_id_to_names = reaction_to_pathway(reaction_ids,
species_list,
metabolic_pathway_only)
reaction_2_pathways = make_relations(reaction_2_pathways_mapping, REACTION_PK, PATHWAY_PK,
value_key='pathway_id')
### maps reactions -> proteins ###
logger.info('Mapping reactions -> proteins')
mapping, _ = reaction_to_uniprot(reaction_ids, species_list)
reaction_2_proteins = make_relations(mapping, REACTION_PK, PROTEIN_PK, value_key=None)
protein_2_reactions = merge_relation(protein_2_reactions, reverse_relation(reaction_2_proteins))
all_protein_ids = protein_2_reactions.keys
### maps reactions -> compounds ###
logger.info('Mapping reactions -> compounds')
if compound_database_str == COMPOUND_DATABASE_KEGG:
use_kegg = True
else:
use_kegg = False
reaction_2_compounds_mapping, reaction_to_compound_id_to_names = reaction_to_compound(reaction_ids, species_list,
use_kegg)
reaction_2_compounds = make_relations(reaction_2_compounds_mapping, REACTION_PK, COMPOUND_PK, value_key=None)
compound_2_reactions = merge_relation(compound_2_reactions, reverse_relation(reaction_2_compounds))
all_compound_ids = compound_2_reactions.keys
### map proteins -> genes ###
logger.info('Mapping proteins -> genes')
mapping, _ = uniprot_to_ensembl(all_protein_ids, species_list)
protein_2_genes = make_relations(mapping, PROTEIN_PK, GENE_PK, value_key=None)
gene_2_proteins = merge_relation(gene_2_proteins, reverse_relation(protein_2_genes))
all_gene_ids = gene_2_proteins.keys
### add links ###
# map NA to NA
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, [NA], [NA])
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, [NA], [NA])
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, [NA], [NA])
reaction_2_pathways = add_links(reaction_2_pathways, REACTION_PK, PATHWAY_PK, [NA], [NA])
# map genes that have no proteins to NA
gene_pk_list = [x for x in all_gene_ids if x not in gene_2_proteins.keys]
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, gene_pk_list, [NA])
# map proteins that have no genes to NA
protein_pk_list = [x for x in all_protein_ids if x not in gene_2_proteins.values]
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, [NA], protein_pk_list)
# map proteins that have no reactions to NA
protein_pk_list = [x for x in all_protein_ids if x not in protein_2_reactions.keys]
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, protein_pk_list, [NA])
# map reactions that have no proteins to NA
reaction_pk_list = [x for x in reaction_ids if x not in protein_2_reactions.values]
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, [NA], reaction_pk_list)
# map compounds that have no reactions to NA
compound_pk_list = [x for x in all_compound_ids if x not in compound_2_reactions.keys]
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, compound_pk_list, [NA])
# map reactions that have no compounds to NA
reaction_pk_list = [x for x in reaction_ids if x not in compound_2_reactions.values]
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, [NA], reaction_pk_list)
# map reactions that have no pathways to NA
reaction_pk_list = [x for x in reaction_ids if x not in reaction_2_pathways.keys]
reaction_2_pathways = add_links(reaction_2_pathways, REACTION_PK, PATHWAY_PK, reaction_pk_list, [NA])
GTF_DICT = load_obj(settings.EXTERNAL_GENE_NAMES)
metadata_map = get_gene_names(all_gene_ids, GTF_DICT)
genes_json = pk_to_json(GENE_PK, 'gene_id', all_gene_ids, metadata_map, observed_gene_df,
observed_ids=observed_gene_ids)
gene_2_proteins_json = json.dumps(gene_2_proteins.mapping_list)
# metadata_map = get_uniprot_metadata_online(uniprot_ids)
proteins_json = pk_to_json('protein_pk', 'protein_id', all_protein_ids, metadata_map, observed_protein_df,
observed_ids=observed_protein_ids)
protein_2_reactions_json = json.dumps(protein_2_reactions.mapping_list)
# TODO: this feels like a very bad way to implement this
# We need to deal with uploaded peak data from PiMP, which contains a lot of duplicate identifications per peak
KEGG_ID_2_DISPLAY_NAMES = load_obj(settings.EXTERNAL_COMPOUND_NAMES)
metadata_map = get_compound_metadata(all_compound_ids, KEGG_ID_2_DISPLAY_NAMES, reaction_to_compound_id_to_names)
try:
mapping = get_mapping(observed_compound_df)
except KeyError:
mapping = None
except AttributeError:
mapping = None
compounds_json = pk_to_json('compound_pk', 'compound_id', all_compound_ids, metadata_map, observed_compound_df,
observed_ids=observed_compound_ids, mapping=mapping)
if mapping:
compound_2_reactions = expand_relation(compound_2_reactions, mapping, 'compound_pk')
compound_2_reactions_json = json.dumps(compound_2_reactions.mapping_list)
metadata_map = {}
for name in reaction_2_pathways_id_to_names:
tok = reaction_2_pathways_id_to_names[name]['name']
filtered = clean_label(tok)
species = reaction_2_pathways_id_to_names[name]['species']
metadata_map[name] = {'display_name': filtered, 'species': species}
reaction_count_df = None
pathway_count_df = None
pathway_ids = reaction_2_pathways.values
reactions_json = pk_to_json('reaction_pk', 'reaction_id', reaction_ids, metadata_map, reaction_count_df,
has_species=True)
pathways_json = pk_to_json('pathway_pk', 'pathway_id', pathway_ids, metadata_map, pathway_count_df,
has_species=True)
reaction_2_pathways_json = json.dumps(reaction_2_pathways.mapping_list)
results = {
GENOMICS: genes_json,
PROTEOMICS: proteins_json,
METABOLOMICS: compounds_json,
REACTIONS: reactions_json,
PATHWAYS: pathways_json,
GENES_TO_PROTEINS: gene_2_proteins_json,
PROTEINS_TO_REACTIONS: protein_2_reactions_json,
COMPOUNDS_TO_REACTIONS: compound_2_reactions_json,
REACTIONS_TO_PATHWAYS: reaction_2_pathways_json,
}
return results
def get_mapping(observed_compound_df):
mapping = defaultdict(list)
for idx, row in observed_compound_df.iterrows():
identifier = row[IDENTIFIER_COL]
peak_id = row[PIMP_PEAK_ID_COL]
mapping[identifier].append('%s_%s' % (identifier, peak_id))
return dict(mapping)
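# Example of the returned mapping (hypothetical peak data): two rows that identify compound
# 'C00031' at PiMP peak ids 12 and 57 yield {'C00031': ['C00031_12', 'C00031_57']}, i.e. one
# '<identifier>_<peak id>' entry per duplicate identification of the same compound.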
def save_analysis(analysis_name, analysis_desc,
genes_str, proteins_str, compounds_str, compound_database_str,
results, species_list, current_user, metabolic_pathway_only,
publication, publication_link):
metadata = {
'genes_str': genes_str,
'proteins_str': proteins_str,
'compounds_str': compounds_str,
'compound_database_str': compound_database_str,
'species_list': species_list,
'metabolic_pathway_only': metabolic_pathway_only
}
analysis = Analysis.objects.create(name=analysis_name,
description=analysis_desc,
metadata=metadata,
publication=publication,
publication_link=publication_link)
share = Share(user=current_user, analysis=analysis, read_only=False, owner=True)
share.save()
logger.info('Saved analysis %d (%s)' % (analysis.pk, species_list))
datatype_json = {
GENOMICS: (results[GENOMICS], 'genes_json', results['group_gene_df']),
PROTEOMICS: (results[PROTEOMICS], 'proteins_json', results['group_protein_df']),
METABOLOMICS: (results[METABOLOMICS], 'compounds_json', results['group_compound_df']),
REACTIONS: (results[REACTIONS], 'reactions_json', None),
PATHWAYS: (results[PATHWAYS], 'pathways_json', None),
GENES_TO_PROTEINS: (results[GENES_TO_PROTEINS], 'gene_proteins_json', None),
PROTEINS_TO_REACTIONS: (results[PROTEINS_TO_REACTIONS], 'protein_reactions_json', None),
COMPOUNDS_TO_REACTIONS: (results[COMPOUNDS_TO_REACTIONS], 'compound_reactions_json', None),
REACTIONS_TO_PATHWAYS: (results[REACTIONS_TO_PATHWAYS], 'reaction_pathways_json', None),
}
data = {}
for data_type, data_value in datatype_json.items():
# data_value is a tuple defined in the datatype_json dictionary above
json_str, ui_label, group_info = data_value
data[ui_label] = json_str
json_data = json.loads(json_str)
json_design = json.loads(group_info.to_json()) if group_info is not None else None
# key: comparison_name, value: a list of comparison results (p-values and FCs), if any
comparison_data = defaultdict(list)
# if it's a measurement data
if data_type in PKS:
# check the first row in json_data to see if there are any comparison results (p-values and FCs)
comparison_names = []
first_row = json_data[0]
for col_name, col_value in first_row.items():
if col_name.startswith(
PADJ_COL_PREFIX): # assume if we have the p-value column, there's also the FC column
comparison_name = col_name.replace(PADJ_COL_PREFIX, '', 1)
comparison_names.append(comparison_name)
# collect all measurement and comparison data
pk_col = PKS[data_type]
measurement_data = []
for row in json_data:
# separate the measurement data and the comparison data
new_measurement_row = {}
new_comparison_rows = defaultdict(
dict) # key: comparison_name, value: a comparison row (a dict of key: value pair)
for col_name, col_value in row.items():
# insert id columns into both comparison and measurement rows
if col_name == pk_col:
new_measurement_row[col_name] = col_value
for comparison_name in comparison_names:
new_comparison_rows[comparison_name].update({col_name: col_value})
# insert p-value column into comparison row
elif col_name.startswith(PADJ_COL_PREFIX):
comparison_name = col_name.replace(PADJ_COL_PREFIX, '', 1)
new_comparison_rows[comparison_name].update({'padj': col_value})
# insert FC column into comparison row
elif col_name.startswith(FC_COL_PREFIX):
comparison_name = col_name.replace(FC_COL_PREFIX, '', 1)
new_comparison_rows[comparison_name].update({'log2FoldChange': col_value})
                        # insert everything else into measurement rows
else:
new_measurement_row[col_name] = col_value
measurement_data.append(new_measurement_row)
for comparison_name in new_comparison_rows:
new_comparison_row = new_comparison_rows[comparison_name]
comparison_data[comparison_name].append(new_comparison_row)
else: # if it's other linking data, just store it directly
measurement_data = json_data
# create a new analysis data and save it
analysis_data = AnalysisData(analysis=analysis,
json_data=measurement_data,
json_design=json_design,
data_type=data_type)
# make clustergrammer if we have data
if data_type in [GENOMICS, PROTEOMICS, METABOLOMICS]:
cluster_json = get_clusters(analysis_data, data_type)
analysis_data.metadata = {
'clustergrammer': cluster_json
}
analysis_data.save()
logger.info('Saved analysis data %d for analysis %d' % (analysis_data.pk, analysis.pk))
# save each comparison separately into an AnalysisHistory
for comparison_name in comparison_data:
comparisons = comparison_data[comparison_name]
result_df = pd.DataFrame(comparisons)
pk_col = [col for col in result_df.columns if col in PKS.values()][0]
result_df.set_index(pk_col, inplace=True)
tokens = comparison_name.split('_vs_')
case = tokens[0]
control = tokens[1]
display_name = 'Loaded: %s_vs_%s' % (case, control)
inference_data = get_inference_data(data_type, case, control, result_df)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_LOADED)
# if settings.DEBUG:
# save_json_string(v[0], 'static/data/debugging/' + v[1] + '.json')
return analysis
def get_clusters(analysis_data, data_type):
axis = 1
X_std, data_df, design_df = get_standardized_df(analysis_data, axis, pk_cols=IDS)
if data_type == GENOMICS:
json_data = to_clustergrammer(X_std, design_df, run_enrichr=None, enrichrgram=True)
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
json_data = to_clustergrammer(X_std, design_df)
return json_data
def get_standardized_df(analysis_data, axis, pk_cols=PKS):
data_type = analysis_data.data_type
data_df, design_df = get_dataframes(analysis_data, pk_cols)
# standardise data differently for genomics vs proteomics/metabolomics
X_std = None
if data_type == GENOMICS:
inference = GraphOmicsInference(data_df, design_df, data_type, min_value=MIN_REPLACE_GENOMICS)
X_std = inference.standardize_df(inference.data_df, axis=axis)
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
inference = GraphOmicsInference(data_df, design_df, data_type, min_value=MIN_REPLACE_PROTEOMICS_METABOLOMICS)
X_std = inference.standardize_df(inference.data_df, log=True, axis=axis)
return X_std, data_df, design_df
def to_clustergrammer(data_df, design_df, run_enrichr=None, enrichrgram=None):
json_data = None
if not data_df.empty:
net = Network()
data_df = data_df[~data_df.index.duplicated(keep='first')] # remove rows with duplicate indices
net.load_df(data_df)
cats = {}
for k, v in design_df.groupby('group').groups.items():
cats[k] = v.values.tolist()
net.add_cats('col', [
{
'title': 'Group',
'cats': cats
}
])
# net.filter_sum('row', threshold=20)
# net.normalize(axis='col', norm_type='zscore')
# net.filter_N_top('row', 1000, rank_type='var')
# net.filter_threshold('row', threshold=3.0, num_occur=4)
# net.swap_nan_for_zero()
# net.downsample(ds_type='kmeans', axis='col', num_samples=10)
# net.random_sample(random_state=100, num_samples=10, axis='col')
net.cluster(dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=run_enrichr, enrichrgram=enrichrgram)
json_data = net.export_net_json()
return json_data
def get_last_data(analysis, data_type):
analysis_data = AnalysisData.objects.filter(analysis=analysis, data_type=data_type).order_by('-timestamp')[0]
return analysis_data
def get_context(analysis, current_user):
show_selection_group = True if not current_user.is_anonymous else False
view_names = {
TABLE_IDS[GENOMICS]: get_reverse_url('get_ensembl_gene_info', analysis),
TABLE_IDS[PROTEOMICS]: get_reverse_url('get_uniprot_protein_info', analysis),
TABLE_IDS[METABOLOMICS]: get_reverse_url('get_kegg_metabolite_info', analysis),
TABLE_IDS[REACTIONS]: get_reverse_url('get_reactome_reaction_info', analysis),
TABLE_IDS[PATHWAYS]: get_reverse_url('get_reactome_pathway_info', analysis),
'get_firdi_data': get_reverse_url('get_firdi_data', analysis),
'get_heatmap_data': get_reverse_url('get_heatmap_data', analysis),
'get_short_info': get_reverse_url('get_short_info', None),
'save_group': get_reverse_url('save_group', analysis),
'load_group': get_reverse_url('load_group', analysis),
'list_groups': get_reverse_url('list_groups', analysis),
'get_boxplot': get_reverse_url('get_boxplot', analysis),
'get_gene_ontology': get_reverse_url('get_gene_ontology', analysis),
}
context = {
'analysis_id': analysis.pk,
'analysis_name': analysis.name,
'analysis_description': analysis.description,
'analysis_species': analysis.get_species_str(),
'publication': analysis.publication,
'publication_link': analysis.publication_link,
'view_names': json.dumps(view_names),
'show_gene_data': show_data_table(analysis, GENOMICS),
'show_protein_data': show_data_table(analysis, PROTEOMICS),
'show_compound_data': show_data_table(analysis, METABOLOMICS),
'read_only': analysis.get_read_only_status(current_user),
'show_selection_group': show_selection_group
}
return context
def show_data_table(analysis, data_type):
analysis_data = get_last_analysis_data(analysis, data_type)
data_df, design_df = get_dataframes(analysis_data, IDS)
return np.any(data_df['obs'] == True) # show table if there's any observation
def get_reverse_url(viewname, analysis):
if analysis is not None:
return reverse(viewname, kwargs={'analysis_id': analysis.id})
else:
return reverse(viewname)
# TODO: no longer used, can remove?
def get_count_df(gene_2_proteins_mapping, protein_2_reactions_mapping, compound_2_reactions_mapping,
reaction_2_pathways_mapping, species_list):
count_df, pathway_compound_counts, pathway_protein_counts = get_reaction_df(
gene_2_proteins_mapping,
protein_2_reactions_mapping,
compound_2_reactions_mapping,
reaction_2_pathways_mapping,
species_list)
reaction_count_df = count_df.rename({
'reaction_id': 'reaction_pk',
'observed_protein_count': 'R_E',
'observed_compound_count': 'R_C'
}, axis='columns')
reaction_count_df = reaction_count_df.drop([
'reaction_name',
'protein_coverage',
'compound_coverage',
'all_coverage',
'protein',
'all_protein_count',
'compound',
'all_compound_count',
'pathway_ids',
'pathway_names'
], axis=1)
pathway_pks = set(list(pathway_compound_counts.keys()) + list(pathway_protein_counts.keys()))
data = []
for pathway_pk in pathway_pks:
try:
p_e = pathway_protein_counts[pathway_pk]
except KeyError:
p_e = 0
try:
p_c = pathway_compound_counts[pathway_pk]
except KeyError:
p_c = 0
data.append((pathway_pk, p_e, p_c))
pathway_count_df = pd.DataFrame(data, columns=['pathway_pk', 'P_E', 'P_C'])
return reaction_count_df, pathway_count_df
def save_json_string(data, outfile):
with open(outfile, 'w') as f:
f.write(data)
logger.debug('Saving %s' % outfile)
def csv_to_dataframe(csv_str):
# extract group, if any
filtered_str = ''
group_str = None
for line in csv_str.splitlines(): # go through all lines and remove the line containing the grouping info
if re.match(GROUP_COL, line, re.I):
group_str = line
else:
filtered_str += line + '\n'
# extract id values
data = StringIO(filtered_str)
try:
data_df = pd.read_csv(data)
data_df.columns = data_df.columns.str.replace('.',
'_') # replace period with underscore to prevent alasql breaking
data_df.columns = data_df.columns.str.replace('-',
'_') # replace dash with underscore to prevent alasql breaking
data_df.columns = data_df.columns.str.replace('#', '') # remove funny characters
rename = {data_df.columns.values[0]: IDENTIFIER_COL}
for i in range(len(data_df.columns.values[1:])): # sql doesn't like column names starting with a number
col_name = data_df.columns.values[i]
if col_name[0].isdigit():
new_col_name = '_' + col_name # append an underscore in front of the column name
rename[col_name] = new_col_name
data_df = data_df.rename(columns=rename)
data_df.iloc[:, 0] = data_df.iloc[:, 0].astype(str) # assume id is in the first column and is a string
except pd.errors.EmptyDataError:
data_df = None
# create grouping dataframe
group_df = None
if data_df is not None:
sample_data = data_df.columns.values
if group_str is not None:
group_data = group_str.split(',')
else:
num_samples = len(sample_data)
group_data = [DEFAULT_GROUP_NAME for x in
range(num_samples)] # assigns a default group if nothing specified
# skip non-measurement columns
filtered_sample_data = []
filtered_group_data = []
for i in range(len(sample_data)):
sample_name = sample_data[i]
if sample_name == IDENTIFIER_COL or \
sample_name == PIMP_PEAK_ID_COL or \
sample_name.startswith(PADJ_COL_PREFIX) or \
sample_name.startswith(FC_COL_PREFIX):
continue
filtered_sample_data.append(sample_data[i])
filtered_group_data.append(group_data[i])
# convert to dataframe
if len(filtered_group_data) > 0:
group_df = pd.DataFrame(list(zip(filtered_sample_data, filtered_group_data)),
columns=[SAMPLE_COL, GROUP_COL])
return data_df, group_df
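# Minimal sketch of the expected input (hypothetical values, assuming GROUP_COL matches a
# line starting with "group"): the first column holds the identifiers and an optional group
# line assigns each sample column to an experimental group:
#   identifier,sample_A,sample_B
#   group,control,case
#   ENSG00000139618,10.2,12.5
# csv_to_dataframe() then returns the measurement dataframe plus a (sample, group) design
# dataframe built from that group line.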
def get_ids_from_dataframe(df):
if df is None:
return []
else:
return df.iloc[:, 0].values.tolist() # id is always the first column
def merge_relation(r1, r2):
unique_keys = list(set(r1.keys + r2.keys))
unique_values = list(set(r1.values + r2.values))
mapping_list = r1.mapping_list + r2.mapping_list
mapping_list = list(map(dict, set(map(lambda x: frozenset(x.items()), mapping_list)))) # removes duplicates, if any
return Relation(keys=list(unique_keys), values=list(unique_values),
mapping_list=mapping_list)
def reverse_relation(rel):
return Relation(keys=rel.values, values=rel.keys, mapping_list=rel.mapping_list)
def expand_relation(rel, mapping, pk_col):
expanded_keys = substitute(rel.keys, mapping)
expanded_values = substitute(rel.values, mapping)
expanded_mapping_list = []
for row in rel.mapping_list:
expanded = expand_each(row, mapping, pk_col)
if len(expanded) == 0:
expanded = [row]
expanded_mapping_list.extend(expanded)
return Relation(keys=expanded_keys, values=expanded_values, mapping_list=expanded_mapping_list)
def substitute(my_list, mapping):
new_list = []
for x in my_list:
if x in mapping:
new_list.extend(mapping[x])
else:
new_list.append(x)
return new_list
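# Example (derived directly from the code above):
#   substitute(['C00031', 'C00002'], {'C00031': ['C00031_12', 'C00031_57']})
# returns ['C00031_12', 'C00031_57', 'C00002']: ids with peak-level entries are expanded,
# everything else passes through unchanged.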
def expand_each(row, mapping, pk_col):
results = []
pk = row[pk_col]
try:
replacements = mapping[pk]
for rep in replacements:
new_row = without_keys(row, [pk_col])
new_row[pk_col] = rep
results.append(new_row)
except KeyError:
pass
return results
# https://stackoverflow.com/questions/31433989/return-copy-of-dictionary-excluding-specified-keys
def without_keys(d, keys):
return {x: d[x] for x in d if x not in keys}
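# Example (derived directly from the code above):
#   without_keys({'compound_pk': 'C1', 'reaction_pk': 'R1'}, ['compound_pk'])
# returns {'reaction_pk': 'R1'}; expand_each() uses this to rebuild a mapping row around a
# substituted primary key.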
def pk_to_json(pk_label, display_label, data, metadata_map, observed_df, has_species=False,
observed_ids=None, mapping=None):
if observed_df is not None:
if PIMP_PEAK_ID_COL in observed_df.columns: # if peak id is present, rename the identifier column to include it
observed_df[IDENTIFIER_COL] = observed_df[IDENTIFIER_COL] + '_' + observed_df[PIMP_PEAK_ID_COL].astype(str)
if mapping is not None:
data = expand_data(data, mapping)
observed_df = observed_df.set_index(IDENTIFIER_COL) # set identifier as index
        observed_df = observed_df[~observed_df.index.duplicated(keep='first')]  # remove rows with duplicate indices
observed_df = observed_df.fillna(value=0) # replace all NaNs with 0s
output = []
for item in sorted(data):
if item == NA:
continue # handled below after this loop
if '_' in item:
tokens = item.split('_')
assert len(tokens) == 2
item = tokens[0]
peak_id = tokens[1]
else:
peak_id = None
# add observed status and the primary key label to row data
row = {}
if observed_ids is not None:
if item in observed_ids:
row['obs'] = True
else:
row['obs'] = False
else:
row['obs'] = None
if peak_id:
key = '%s_%s' % (item, peak_id)
row[pk_label] = key
else:
row[pk_label] = item
# add display label to row_data
species = None
if len(metadata_map) > 0 and item in metadata_map and metadata_map[item] is not None:
if peak_id:
label = '%s (%s)' % (metadata_map[item]['display_name'].capitalize(), peak_id)
else:
label = metadata_map[item]['display_name'].capitalize()
# get the species if it's there too
if has_species and 'species' in metadata_map[item]:
species = metadata_map[item]['species']
else:
label = item # otherwise use the item id as the label
row[display_label] = label
# add the remaining data columns to row
if observed_df is not None:
try:
if peak_id:
observed_values = observed_df.loc[key].to_dict()
else:
observed_values = observed_df.loc[item].to_dict()
except KeyError: # missing data
observed_values = {}
for col in observed_df.columns:
observed_values.update({col: None})
observed_values.pop(PIMP_PEAK_ID_COL, None) # remove pimp peakid column
# convert numpy bool to python bool, else json serialisation will break
for k, v in observed_values.items():
if type(v) == np.bool_:
observed_values[k] = bool(v)
row.update(observed_values)
if has_species:
row['species'] = species
if row not in output:
output.append(row)
# add dummy entry
row = {'obs': NA, pk_label: NA, display_label: NA}
if has_species:
row['species'] = NA
if observed_df is not None: # also add the remaining columns
for col in observed_df.columns:
if col == PIMP_PEAK_ID_COL:
continue
row.update({col: 0})
if row not in output:
output.append(row)
output_json = json.dumps(output)
return output_json
def expand_data(data, mapping):
new_data = []
for x in data:
if x in mapping:
new_data.extend(mapping[x])
else:
new_data.append(x)
data = new_data
return data
def make_relations(mapping, source_pk, target_pk, value_key=None):
id_values = []
mapping_list = []
for key in mapping:
value_list = mapping[key]
# value_list can be either a list of strings or dictionaries
# check if the first element is a dict, else assume it's a string
assert len(value_list) > 0
is_string = True
first = value_list[0]
if isinstance(first, dict):
is_string = False
# process each element in value_list
for value in value_list:
if is_string: # value_list is a list of string
actual_value = value
else: # value_list is a list of dicts
assert value_key is not None, 'value_key is missing'
actual_value = value[value_key]
id_values.append(actual_value)
row = {source_pk: key, target_pk: actual_value}
mapping_list.append(row)
unique_keys = set(mapping.keys())
unique_values = set(id_values)
return Relation(keys=list(unique_keys), values=list(unique_values),
mapping_list=mapping_list)
def add_dummy(relation, source_ids, target_ids, source_pk_label, target_pk_label):
to_add = [x for x in source_ids if x not in relation.keys]
relation = add_links(relation, source_pk_label, target_pk_label, to_add, [NA])
# to_add = [x for x in target_ids if x not in relation.values]
# relation = add_links(relation, source_pk_label, target_pk_label, [NA], to_add)
# relation = add_links(relation, source_pk_label, target_pk_label, [NA], [NA])
return relation
def add_links(relation, source_pk_label, target_pk_label, source_pk_list, target_pk_list):
rel_mapping_list = list(relation.mapping_list)
rel_keys = relation.keys
rel_values = relation.values
for s1 in source_pk_list:
if s1 not in rel_keys: rel_keys.append(s1)
for s2 in target_pk_list:
rel_mapping_list.append({source_pk_label: s1, target_pk_label: s2})
if s2 not in rel_keys: rel_values.append(s2)
return Relation(keys=rel_keys, values=rel_values, mapping_list=rel_mapping_list)
def change_column_order(df, col_name, index):
cols = df.columns.tolist()
cols.remove(col_name)
cols.insert(index, col_name)
return df[cols]
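# Example (illustrative): for a frame with columns ['a', 'b', 'c'],
# change_column_order(df, 'c', 0) returns the same data with columns ordered ['c', 'a', 'b'].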
# https://stackoverflow.com/questions/19798112/convert-pandas-dataframe-to-a-nested-dict
def recur_dictify(frame):
if len(frame.columns) == 1:
if frame.values.size == 1: return frame.values[0][0]
return frame.values.squeeze()
grouped = frame.groupby(frame.columns[0])
d = {k: recur_dictify(g.iloc[:, 1:]) for k, g in grouped}
return d
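# Example (illustrative): a three-column frame with rows ('CN', 'sub-01', 0.1) and
# ('CN', 'sub-02', 0.2) becomes {'CN': {'sub-01': 0.1, 'sub-02': 0.2}}: the leftmost columns
# become nested dictionary keys and the final column supplies the leaf values.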
def get_last_analysis_data(analysis, data_type):
analysis_data = [x for x in analysis.analysisdata_set.all().order_by('-timestamp') if x.data_type == data_type][0]
return analysis_data
def get_dataframes(analysis_data, pk_cols):
pk_col = pk_cols[analysis_data.data_type]
data_df = | pd.DataFrame(analysis_data.json_data) | pandas.DataFrame |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv(path)
#Code starts here
# data['Rating'].hist()
x = data['Rating'] <= 5
data = data[x]
data.hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())
missing_data = pd.concat([total_null, percent_null], axis=1 , keys=['Total','Percent'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = pd.concat([total_null_1, percent_null_1], axis=1 , keys=['Total','Percent'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
g = sns.catplot(x="Category",y="Rating",data=data, kind="box", height = 10)
g.set_titles("Rating vs Category [BoxPlot]")
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
# data['Installs'].value_counts()
data['Installs'] = data['Installs'].str.replace('+', '', regex=False)
data['Installs'] = data['Installs'].str.replace(',', '', regex=False)
# data['Installs'] = [x.strip(',') for x in data['Installs']]
# data['Installs'] = [x.strip('+') for x in data['Installs']]
data['Installs'] = pd.to_numeric(data['Installs'], downcast='integer')
le = LabelEncoder()
data['Installs'] = le.fit_transform(data['Installs'])
g = sns.regplot(x="Installs", y="Rating" , data=data)
# g.set_titles('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Price'].value_counts()
data['Price'] = data['Price'].str.replace('$', '', regex=False)
data['Price'] = pd.to_numeric(data['Price'])
data['Price'] = | pd.to_numeric(data['Price'], downcast='float') | pandas.to_numeric |
"""
Pull data from CA open data related to medical surge facilities
and hospital data
"""
import pandas as pd
from processing_utils import default_parameters
"""
The catalog file seems to throw up an error
because the dataset IDs disappear and appear at
different times. Let's stick with the download URL for now.
import intake
import intake_dcat
import os
catalog = intake.open_catalog("/app/catalog.yml")
"""
HOSPITAL_DATA_URL = (
"https://data.chhs.ca.gov/dataset/"
"2df3e19e-9ee4-42a6-a087-9761f82033f6/resource/"
"47af979d-8685-4981-bced-96a6b79d3ed5/download/covid19hospitalbycounty.csv"
)
SURGE_CAPACITY_URL = (
"https://data.ca.gov/dataset/"
"cbbfb307-ac91-47ec-95c0-f05684e06065/resource/"
"ef6675e7-cd3a-4762-ba75-2ef78d6dc334/download/bed_surge.csv"
)
S3_FILE_PATH = default_parameters.S3_FILE_PATH
def clean_surge_data(df):
keep = ["county", "date", "type_of_facility", "available_beds", "occupied_beds"]
df = (df[df.status=="Ready"]
.assign(
date = pd.to_datetime(df.date),
available_beds = df.beds_ready_to_accept_patients.astype("Int64"),
occupied_beds = df.patients_in_beds.astype("Int64"),
)[keep]
)
# Get county total available / occupied beds
group_cols = ["county", "date"]
for col in ["available_beds", "occupied_beds"]:
new_col = f"county_{col}"
df[new_col] = df.groupby(group_cols)[col].transform("sum")
return df
def clean_hospital_data(df):
df = (df.assign(
date = pd.to_datetime(df.todays_date),
).rename(
columns = {
"hospitalized_covid_confirmed_patients": "hospitalized_confirmed",
"hospitalized_suspected_covid_patients": "hospitalized_suspected",
"hospitalized_covid_patients": "hospitalized_covid",
"icu_covid_confirmed_patients": "icu_confirmed",
"icu_suspected_covid_patients": "icu_suspected",
"icu_available_beds": "icu_available",
}).drop(columns = ["todays_date"])
)
integrify_me = ["hospitalized_confirmed", "hospitalized_suspected",
"hospitalized_covid", "all_hospital_beds",
"icu_confirmed", "icu_suspected", "icu_available"]
df[integrify_me] = df[integrify_me].astype("Int64")
df = df.assign(
hospitalized_covid = df.hospitalized_confirmed + df.hospitalized_suspected,
icu_covid = df.icu_confirmed + df.icu_suspected,
all_icu_beds = df.icu_confirmed + df.icu_suspected + df.icu_available,
)
col_order = [
"county", "date",
"hospitalized_confirmed", "hospitalized_suspected",
"hospitalized_covid", "all_hospital_beds",
"icu_confirmed", "icu_suspected", "icu_covid", "icu_available", "all_icu_beds"
]
# There are some dates that are NaT...drop these
# Note: there are obs that have NaT but only have all_hospital_beds filled....
# If there is no data, let's exclude for now, instead of interpolating and assuming.
df = (df[df.date.notna()]
.sort_values(["county", "date"])
.reindex(columns = col_order)
.reset_index(drop=True)
)
return df
def grab_county_fips(df):
crosswalk = pd.read_csv('/app/data/msa_county_pop_crosswalk.csv', dtype = {"county_fips":"str"})
keep = ["county", "county_fips"]
crosswalk = (crosswalk[crosswalk.state == "California"][keep]
.assign(county = crosswalk.county.str.replace(" County", ""))
)
df = pd.merge(df, crosswalk, on = "county", how = "left", validate = "m:1")
# Reorder county_fips to be right after county
cols_to_order = ["county", "county_fips", "date"]
new_col_order = cols_to_order + (df.columns.drop(cols_to_order).tolist())
df = df[new_col_order]
return df
def update_ca_surge_hospital_data(**kwargs):
# Grab hospital capacity data
#hospital_df = catalog.ca_open_data.hospital_capacity.read()
hospital_df = | pd.read_csv(HOSPITAL_DATA_URL) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
# %%
pd.set_option('display.max_rows', 999)
pd.set_option('precision', 3)
pd.describe_option('precision')
| pd.get_option('expand_frame_repr') | pandas.get_option |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""distance_from_median_pis.py
This script investigates the L1 distance between each of the images to the
median vectorial representation of a persistence image within a diagnostic
category.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import matplotlib.pyplot as plt
from textwrap import wrap
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.graph_objects as go
import plotly.figure_factory as ff
import gtda
from gtda.images import ErosionFiltration
from gtda.homology import VietorisRipsPersistence
from gtda.diagrams import (
PersistenceLandscape,
PersistenceImage,
Scaler,
BettiCurve,
PairwiseDistance,
)
from gtda.homology import CubicalPersistence
import json
import dotenv
import os
from scipy.spatial import distance
DOTENV_KEY2VAL = dotenv.dotenv_values()
N_JOBS = -1
def format_patient(patient, diagnoses):
patient = patient + "-" + list(diagnoses[patient].keys())[0] + "-MNI.npy"
return patient.replace("-ses", "")
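# Mechanical example with hypothetical ids: format_patient('sub-01', {'sub-01': {'ses-M00': 'CN'}})
# builds 'sub-01-ses-M00-MNI.npy' and then strips '-ses', returning 'sub-01-M00-MNI.npy'.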
def get_earliest_available_diagnosis(path_to_diags):
"""Gets diagnosis at first available timepoint"""
cn_patients = []
mci_patients = []
ad_patients = []
with open(path_to_diags) as f:
diagnoses = json.load(f)
for patient in list(diagnoses.keys()):
if diagnoses[patient][list(diagnoses[patient].keys())[0]] == "CN":
cn_patients.append(format_patient(patient, diagnoses))
elif diagnoses[patient][list(diagnoses[patient].keys())[0]] == "MCI":
mci_patients.append(format_patient(patient, diagnoses))
elif diagnoses[patient][list(diagnoses[patient].keys())[0]] == "AD":
ad_patients.append(format_patient(patient, diagnoses))
return cn_patients, mci_patients, ad_patients
def make_dir(directory):
"""Makes directory and handles errors"""
try:
os.mkdir(directory)
except OSError:
print("Creation of the directory %s failed" % directory)
else:
print("Successfully created the directory %s " % directory)
def cubical_persistence(
images, title, plot_diagrams=False, betti_curves=False, scaled=False
):
homology_dimensions = (0, 1, 2)
cp = CubicalPersistence(
homology_dimensions=homology_dimensions,
coeff=2,
periodic_dimensions=None,
infinity_values=None,
reduced_homology=True,
n_jobs=N_JOBS,
)
diagrams_cubical_persistence = cp.fit_transform(images)
    if scaled:
        sc = Scaler(metric="bottleneck")
        diagrams_cubical_persistence = sc.fit_transform(
            diagrams_cubical_persistence
        )
if plot_diagrams:
fig = cp.plot(diagrams_cubical_persistence)
fig.update_layout(title=title)
fig.show()
if betti_curves:
BC = BettiCurve()
X_betti_curves = BC.fit_transform(diagrams_cubical_persistence)
fig = BC.plot(X_betti_curves)
fig.update_layout(title=title)
fig.show()
if title is not None:
print(f"Computed CP for {title}")
return diagrams_cubical_persistence
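# Hedged usage sketch (not part of the original analysis): illustrates the expected
# input/output shapes of `cubical_persistence` on synthetic voxel data. The array
# shape below is an illustrative assumption, not the study's real MRI dimensions.
def _toy_cubical_persistence_example():
    rng = np.random.default_rng(0)
    toy_images = rng.random((2, 10, 10, 10))  # (n_samples, x, y, z) intensities
    diagrams = cubical_persistence(toy_images, title=None)
    # Each diagram entry is a (birth, death, homology_dimension) triple.
    return diagrams.shape  # (n_samples, n_features, 3)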
def compute_median_pi(patient_list, image_dir):
file_names = []
images = []
for patient in patient_list:
try:
images.append(np.load(image_dir + patient))
file_names.append(patient)
except FileNotFoundError:
print(f"{patient} not found, skipping.")
images = np.stack(images, axis=0)
pds = cubical_persistence(
images, None, plot_diagrams=False, betti_curves=False, scaled=False
)
pi = PersistenceImage(
sigma=0.001, n_bins=100, weight_function=None, n_jobs=N_JOBS
)
pis = pi.fit_transform(pds)
return pis, np.median(pis, axis=0), file_names
def compute_pi_distance(
pis, average_pi, p, dest_dir, file_names, patient_category
):
diffs = []
for pl in range(pis.shape[0]):
# Loop through each patient
patient_dist_from_avg = []
for h_dim in range(pis.shape[1]):
# Loop through each dimension
patient_dist_from_avg.append(
distance.minkowski(
pis[pl, h_dim, :, :].flatten(),
average_pi[h_dim, :, :].flatten(),
p,
)
)
diffs.append(patient_dist_from_avg)
diffs = np.array(diffs)
# with open(dest_dir + ".npy", "wb") as f:
# np.save(f, diffs)
diffs = | pd.DataFrame(diffs, columns=["H_0", "H_1", "H_2"]) | pandas.DataFrame |
import os
import time
import pytest
import pandas as pd
import numpy as np
import ray
from ray.data.dataset_pipeline import DatasetPipeline
from ray.tests.conftest import * # noqa
def test_pipeline_actors(shutdown_only):
ray.init(num_cpus=2, num_gpus=1)
pipe = ray.data.range(3) \
.repeat(10) \
.map(lambda x: x + 1) \
.map(lambda x: x + 1, compute="actors", num_gpus=1)
assert sorted(pipe.take(999)) == sorted([2, 3, 4] * 10)
def test_incremental_take(shutdown_only):
ray.init(num_cpus=2)
# Can read incrementally even if future results are delayed.
def block_on_ones(x: int) -> int:
if x == 1:
time.sleep(999999)
return x
pipe = ray.data.range(2).pipeline(parallelism=1)
pipe = pipe.map(block_on_ones)
assert pipe.take(1) == [0]
def test_basic_pipeline(ray_start_regular_shared):
ds = ray.data.range(10)
pipe = ds.pipeline(parallelism=1)
assert str(pipe) == "DatasetPipeline(length=10, num_stages=1)"
for _ in range(2):
assert pipe.count() == 10
pipe = ds.pipeline(parallelism=1).map(lambda x: x).map(lambda x: x)
assert str(pipe) == "DatasetPipeline(length=10, num_stages=3)"
assert pipe.take() == list(range(10))
pipe = ds.pipeline(parallelism=999)
assert str(pipe) == "DatasetPipeline(length=1, num_stages=1)"
assert pipe.count() == 10
pipe = ds.repeat(10)
assert str(pipe) == "DatasetPipeline(length=10, num_stages=1)"
for _ in range(2):
assert pipe.count() == 100
assert pipe.sum() == 450
def test_from_iterable(ray_start_regular_shared):
pipe = DatasetPipeline.from_iterable(
[lambda: ray.data.range(3), lambda: ray.data.range(2)])
assert pipe.take() == [0, 1, 2, 0, 1]
def test_repeat_forever(ray_start_regular_shared):
ds = ray.data.range(10)
pipe = ds.repeat()
assert str(pipe) == "DatasetPipeline(length=None, num_stages=1)"
for i, v in enumerate(pipe.iter_rows()):
assert v == i % 10, (v, i, i % 10)
if i > 1000:
break
def test_repartition(ray_start_regular_shared):
pipe = ray.data.range(10).repeat(10)
assert pipe.repartition(1).sum() == 450
assert pipe.repartition(10).sum() == 450
assert pipe.repartition(100).sum() == 450
def test_iter_batches(ray_start_regular_shared):
pipe = ray.data.range(10).pipeline(parallelism=2)
batches = list(pipe.iter_batches())
assert len(batches) == 10
assert all(len(e) == 1 for e in batches)
def test_iter_datasets(ray_start_regular_shared):
pipe = ray.data.range(10).pipeline(parallelism=2)
ds = list(pipe.iter_datasets())
assert len(ds) == 5
pipe = ray.data.range(10).pipeline(parallelism=5)
ds = list(pipe.iter_datasets())
assert len(ds) == 2
def test_foreach_dataset(ray_start_regular_shared):
pipe = ray.data.range(5).pipeline(parallelism=2)
pipe = pipe.foreach_dataset(lambda ds: ds.map(lambda x: x * 2))
assert pipe.take() == [0, 2, 4, 6, 8]
def test_schema(ray_start_regular_shared):
pipe = ray.data.range(5).pipeline(parallelism=2)
assert pipe.schema() == int
def test_split(ray_start_regular_shared):
pipe = ray.data.range(3) \
.map(lambda x: x + 1) \
.repeat(10)
@ray.remote
def consume(shard, i):
total = 0
for row in shard.iter_rows():
total += 1
assert row == i + 1, row
assert total == 10, total
shards = pipe.split(3)
refs = [consume.remote(s, i) for i, s in enumerate(shards)]
ray.get(refs)
def test_split_at_indices(ray_start_regular_shared):
indices = [2, 5]
n = 8
pipe = ray.data.range(n) \
.map(lambda x: x + 1) \
.repeat(2)
@ray.remote
def consume(shard, i):
total = 0
out = []
for row in shard.iter_rows():
total += 1
out.append(row)
if i == 0:
assert total == 2 * indices[i]
elif i < len(indices):
assert total == 2 * (indices[i] - indices[i - 1])
else:
assert total == 2 * (n - indices[i - 1])
return out
shards = pipe.split_at_indices(indices)
refs = [consume.remote(s, i) for i, s in enumerate(shards)]
outs = ray.get(refs)
np.testing.assert_equal(
np.array(outs, dtype=np.object),
np.array(
[[1, 2, 1, 2], [3, 4, 5, 3, 4, 5], [6, 7, 8, 6, 7, 8]],
dtype=np.object))
def test_parquet_write(ray_start_regular_shared, tmp_path):
df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
df = | pd.concat([df1, df2]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# (file-types:notebooks)=
# # Jupyter Notebook files
#
# You can create content with Jupyter notebooks.
# For example, the content for the current page is contained in {download}`this notebook file <./notebooks.ipynb>`.
#
# ```{margin}
# If you'd like to write in plain-text files, but still keep a notebook structure, you can write
# Jupyter notebooks with MyST Markdown, which are then automatically converted to notebooks.
# See [](./myst-notebooks.md) for more details.
# ```
#
# Jupyter Book supports all Markdown that is supported by Jupyter Notebook.
# This is mostly a flavour of Markdown called [CommonMark Markdown](https://commonmark.org/) with minor modifications.
# For more information about writing Jupyter-flavoured Markdown in Jupyter Book, see [](./markdown.md).
#
# ## Code blocks and image outputs
#
# Jupyter Book will also embed your code blocks and output in your book.
# For example, here's some sample Matplotlib code:
# In[1]:
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
# In[2]:
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
data = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
Line2D([0], [0], color=cmap(.5), lw=4),
Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot']);
# Note that the image above is captured and displayed in your site.
# In[3]:
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
data = [np.logspace(0, 1, 100) + .1*np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
Line2D([0], [0], color=cmap(.5), lw=4),
Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot'])
ax.set(title="Smoother linez")
# ```{margin} **You can also pop out content to the side!**
# For more information on how to do this,
# check out the {ref}`layout/sidebar` section.
# ```
# ## Removing content before publishing
#
# You can also remove some content before publishing your book to the web.
# For reference, {download}`you can download the notebook content for this page <notebooks.ipynb>`.
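# A hedged note on mechanics: in the underlying .ipynb this removal is controlled by
# cell tags stored in each cell's metadata (they are not visible in this script export).
# A cell tagged `remove-cell` is dropped entirely from the built page, while
# `remove-input` keeps the rendered output but hides the code. The cell JSON looks
# roughly like the sketch below; check the Jupyter Book docs for the tags supported
# by your version.
#
#     {
#       "metadata": {
#         "tags": ["remove-input"]
#       }
#     }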
# In[4]:
thisvariable = "none of this should show up in the textbook"
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
# You can **remove only the code** so that images and other output still show up.
# In[5]:
thisvariable = "this plot *will* show up in the textbook."
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
# Which works well if you'd like to quickly display cell output without cluttering your content with code.
# This works for any cell output, like a Pandas DataFrame.
# In[6]:
import pandas as pd
| pd.DataFrame([['hi', 'there'], ['this', 'is'], ['a', 'DataFrame']], columns=['Word A', 'Word B']) | pandas.DataFrame |
import logging
import pickle
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
import streamlit as st
from sklearn.feature_selection import chi2
from utils import seed_everything, INDEX2LEVEL, LEVEL2INDEX
from preprocess import word_tokenize, clean_dialogue
seed_everything(seed=914)
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
st.set_page_config(page_title='eSalesHub Dashboard', layout='wide')
@st.cache
def get_esaleshub_data():
df = | pd.read_csv('./data/esaleshub.csv') | pandas.read_csv |
from __future__ import print_function
import os
import pandas as pd
from ..base import BASE
##################################################################### 1 Enter Data
# input
class read_table(BASE):
def fit(self):
# step1: check inputs
# step2: assign inputs to parameters if necessary (param = @token)
self.paramFROMinput()
# step3: check the dimension of input data frame
# step4: import module and make APIs
try:
df = | pd.read_table(**self.parameters) | pandas.read_table |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
'''
run this file from root folder:
python3 datasets/process_data.py datasets/messages.csv datasets/categories.csv datasets/DisasterResponse.db
'''
def load_data(messages_filepath, categories_filepath):
"""
PARAMETER:
messages_filepath - filepath for messages
categories_filepath - filepath for categories
RETURN:
df - merged messages and categories DataFrame
"""
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
"""
Spatial based Segregation Metrics
"""
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>> and <NAME> <<EMAIL>>"
import numpy as np
import pandas as pd
import geopandas as gpd
import warnings
import pysal.lib
from pysal.lib.weights import Queen, Kernel, lag_spatial
from pysal.lib.weights.util import fill_diagonal
from numpy import inf
from sklearn.metrics.pairwise import manhattan_distances, euclidean_distances, haversine_distances
from scipy.ndimage.interpolation import shift
from scipy.sparse.csgraph import floyd_warshall
from scipy.sparse import csr_matrix
from pysal.explore.segregation.aspatial.aspatial_indexes import _dissim, MinMax
from pysal.explore.segregation.aspatial.multigroup_aspatial_indexes import MultiInformationTheory, MultiDivergence
from pysal.explore.segregation.network import calc_access
from pysal.lib.weights.util import attach_islands
from pysal.explore.segregation.util.util import _dep_message, DeprecationHelper
# Including old and new api in __all__ so users can use both
__all__ = [
'Spatial_Prox_Prof',
'SpatialProxProf',
'Spatial_Dissim',
'SpatialDissim',
'Boundary_Spatial_Dissim',
'BoundarySpatialDissim',
'Perimeter_Area_Ratio_Spatial_Dissim',
'PerimeterAreaRatioSpatialDissim',
'SpatialMinMax',
'Distance_Decay_Isolation',
'DistanceDecayIsolation',
'Distance_Decay_Exposure',
'DistanceDecayExposure',
'Spatial_Proximity',
'SpatialProximity',
'Absolute_Clustering',
'AbsoluteClustering',
'Relative_Clustering',
'RelativeClustering',
'Delta',
'Absolute_Concentration',
'AbsoluteConcentration',
'Relative_Concentration',
'RelativeConcentration',
'Absolute_Centralization',
'AbsoluteCentralization',
'Relative_Centralization',
'RelativeCentralization',
'SpatialInformationTheory',
'SpatialDivergence',
'compute_segregation_profile'
]
# The Deprecation calls of the classes are located in the end of this script #
# suppress numpy divide by zero warnings because it occurs a lot during the
# calculation of many indices
np.seterr(divide='ignore', invalid='ignore')
def _build_local_environment(data, groups, w):
"""Convert observations into spatially-weighted sums.
Parameters
----------
data : DataFrame
dataframe with local observations
w : pysal.lib.weights object
weights matrix defining the local environment
Returns
-------
DataFrame
Spatialized data
"""
new_data = []
w = fill_diagonal(w)
for y in data[groups]:
new_data.append(lag_spatial(w, data[y]))
new_data = pd.DataFrame(dict(zip(groups, new_data)))
return new_data
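# Hedged illustration (not part of the public segregation API): on a small regular
# lattice each unit's "local environment" is the spatially lagged sum of its own
# population plus its neighbours', because the weights diagonal is filled before
# lagging. The lattice weights and toy counts below are assumptions for illustration.
def _toy_local_environment_example():
    w = pysal.lib.weights.lat2W(3, 3)  # 3x3 rook-contiguity lattice
    toy = pd.DataFrame({"groupA": list(range(9)), "groupB": [1] * 9})
    return _build_local_environment(toy, ["groupA", "groupB"], w)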
def _return_length_weighted_w(data):
"""
Returns a PySAL weights object that the weights represent the length of the common boundary of two areal units that share border.
Author: <NAME> <<EMAIL>>.
Thank you, Levi!
Parameters
----------
data : a geopandas DataFrame with a 'geometry' column.
Notes
-----
Currently it's not making any projection.
"""
w = pysal.lib.weights.Rook.from_dataframe(
data, ids=data.index.tolist(), geom_col=data._geometry_column_name)
    if len(w.islands) > 0:
        warnings.warn('There are some islands in the GeoDataFrame.')
        w_aux = pysal.lib.weights.KNN.from_dataframe(
            data,
            ids=data.index.tolist(),
            geom_col=data._geometry_column_name,
            k=1)
        w = attach_islands(w, w_aux)
adjlist = w.to_adjlist()
islands = pd.DataFrame.from_records([{
'focal': island,
'neighbor': island,
'weight': 0
} for island in w.islands])
merged = adjlist.merge(data.geometry.to_frame('geometry'), left_on='focal',
right_index=True, how='left')\
.merge(data.geometry.to_frame('geometry'), left_on='neighbor',
               right_index=True, how='left', suffixes=("_focal", "_neighbor"))
# Transforming from pandas to geopandas
merged = gpd.GeoDataFrame(merged, geometry='geometry_focal')
merged['geometry_neighbor'] = gpd.GeoSeries(merged.geometry_neighbor)
# Getting the shared boundaries
merged['shared_boundary'] = merged.geometry_focal.intersection(
merged.set_geometry('geometry_neighbor'))
# Putting it back to a matrix
merged['weight'] = merged.set_geometry('shared_boundary').length
merged_with_islands = | pd.concat((merged, islands)) | pandas.concat |
#!/usr/bin/env python
# Python Script for Kaggle Competition
# BNP Paribas Cardif claim management
# Doesn't work!
# Import Library & Modules
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
import numpy as np # linear algebraic manipulation
import pandas as pd # data processing, .csv files I/O
# When checking unique, numpy handles this error so far.
import warnings
warnings.filterwarnings('ignore', 'numpy equal will not check object identity in the future')
warnings.filterwarnings('ignore', 'numpy not_equal will not check object identity in the future')
# read csv file, seperated by , na values exists
data = pd.read_csv('../../dataset/train_splitted.csv', sep=',', na_values='.')
test = | pd.read_csv('../../dataset/test_splitted.csv') | pandas.read_csv |
#%% Loading irish data
import pandas as pd
data1 = pd.read_fwf('bible.txt', header=None)
data2 = pd.read_fwf('blogs.txt', header=None)
data3 = pd.read_fwf('legal.txt', header=None)
data4 = pd.read_fwf('news.txt', header=None)
data5 = pd.read_fwf('wiki.txt', header=None)
# stack the lines from all five corpora into one Series (element-wise '+' would pair
# unrelated lines by index and produce NaN beyond the shortest file)
data = pd.concat([data1[0], data2[0], data3[0], data4[0], data5[0]], ignore_index=True)
#%% Clean up data -> output to sentence
stripped = str.maketrans('','', '©º�³¬±¼!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~0123456789')
final = []
for i in data:
if str(i) != 'nan':
final.append(' '.join([w.translate(stripped) for w in str(i).split()]))
final = | pd.Series(final, copy=False) | pandas.Series |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy
#Code starts here
data = | pd.read_csv(path) | pandas.read_csv |
from typing import Union, Optional, List, Dict, Tuple, Any
import pandas as pd
import numpy as np
from .common.validators import validate_integer
from .macro import Inflation
from .common.helpers import Float, Frame, Date, Index
from .settings import default_ticker, PeriodLength, _MONTHS_PER_YEAR
from .api.data_queries import QueryData
from .api.namespaces import get_assets_namespaces
class Asset:
"""
A financial asset, that could be used in a list of assets or in portfolio.
Parameters
----------
symbol: str, default "SPY.US"
Symbol is an asset ticker with namespace after dot. The default value is "SPY.US" (SPDR S&P 500 ETF Trust).
Examples
--------
>>> asset = ok.Asset()
>>> asset
symbol SPY.US
name SPDR S&P 500 ETF Trust
country USA
exchange NYSE ARCA
currency USD
type ETF
first date 1993-02
last date 2021-03
period length 28.1
dtype: object
An Asset object could be easy created whithout specifying a symbol Asset() using the default symbol.
"""
def __init__(self, symbol: str = default_ticker):
if symbol is None or len(str(symbol).strip()) == 0:
raise ValueError("Symbol can not be empty")
self._symbol = str(symbol).strip()
self._check_namespace()
self._get_symbol_data(symbol)
self.ror: pd.Series = QueryData.get_ror(symbol)
self.first_date: pd.Timestamp = self.ror.index[0].to_timestamp()
self.last_date: pd.Timestamp = self.ror.index[-1].to_timestamp()
self.period_length: float = round(
(self.last_date - self.first_date) / np.timedelta64(365, "D"), ndigits=1
)
def __repr__(self):
dic = {
"symbol": self.symbol,
"name": self.name,
"country": self.country,
"exchange": self.exchange,
"currency": self.currency,
"type": self.type,
"first date": self.first_date.strftime("%Y-%m"),
"last date": self.last_date.strftime("%Y-%m"),
"period length": "{:.2f}".format(self.period_length),
}
return repr(pd.Series(dic))
def _check_namespace(self):
namespace = self._symbol.split(".", 1)[-1]
allowed_namespaces = get_assets_namespaces()
if namespace not in allowed_namespaces:
raise ValueError(
f"{namespace} is not in allowed assets namespaces: {allowed_namespaces}"
)
@property
def symbol(self) -> str:
"""
Return a symbol of the asset.
Returns
-------
str
"""
return self._symbol
def _get_symbol_data(self, symbol) -> None:
x = QueryData.get_symbol_info(symbol)
self.ticker: str = x["code"]
self.name: str = x["name"]
self.country: str = x["country"]
self.exchange: str = x["exchange"]
self.currency: str = x["currency"]
self.type: str = x["type"]
self.inflation: str = f"{self.currency}.INFL"
@property
def price(self) -> Optional[float]:
"""
Return live price of an asset.
Live price is delayed (15-20 minutes).
For certain namespaces (FX, INDX, PIF etc.) live price is not supported.
Returns
-------
float, None
Live price of the asset. Returns None if not defined.
"""
return QueryData.get_live_price(self.symbol)
@property
def dividends(self) -> pd.Series:
"""
Return dividends time series historical daily data.
Returns
-------
Series
Time series of dividends historical data (daily).
Examples
--------
>>> x = ok.Asset('VNQ.US')
>>> x.dividends
Date
2004-12-22 1.2700
2005-03-24 0.6140
2005-06-27 0.6440
2005-09-26 0.6760
...
2020-06-25 0.7590
2020-09-25 0.5900
2020-12-24 1.3380
2021-03-25 0.5264
Freq: D, Name: VNQ.US, Length: 66, dtype: float64
"""
div = QueryData.get_dividends(self.symbol)
if div.empty:
# Zero time series for assets where dividend yield is not defined.
index = pd.date_range(
start=self.first_date, end=self.last_date, freq="MS", closed=None
)
period = index.to_period("D")
div = pd.Series(data=0, index=period)
div.rename(self.symbol, inplace=True)
return div
@property
def nav_ts(self) -> Optional[pd.Series]:
"""
Return NAV time series (monthly) for mutual funds.
"""
if self.exchange == "PIF":
return QueryData.get_nav(self.symbol)
return np.nan
class AssetList:
"""
The list of financial assets implementation.
"""
def __init__(
self,
symbols: Optional[List[str]] = None,
*,
first_date: Optional[str] = None,
last_date: Optional[str] = None,
ccy: str = "USD",
inflation: bool = True,
):
self.__symbols = symbols
self.__tickers: List[str] = [x.split(".", 1)[0] for x in self.symbols]
self.__currency: Asset = Asset(symbol=f"{ccy}.FX")
self.__make_asset_list(self.symbols)
if inflation:
self.inflation: str = f"{ccy}.INFL"
self._inflation_instance: Inflation = Inflation(
self.inflation, self.first_date, self.last_date
)
self.inflation_ts: pd.Series = self._inflation_instance.values_ts
self.inflation_first_date: pd.Timestamp = self._inflation_instance.first_date
self.inflation_last_date: pd.Timestamp = self._inflation_instance.last_date
self.first_date = max(self.first_date, self.inflation_first_date)
self.last_date: pd.Timestamp = min(self.last_date, self.inflation_last_date)
# Add inflation to the date range dict
self.assets_first_dates.update({self.inflation: self.inflation_first_date})
self.assets_last_dates.update({self.inflation: self.inflation_last_date})
if first_date:
self.first_date = max(self.first_date, pd.to_datetime(first_date))
self.ror = self.ror[self.first_date :]
if last_date:
self.last_date = min(self.last_date, pd.to_datetime(last_date))
self.ror: pd.DataFrame = self.ror[self.first_date: self.last_date]
self.period_length: float = round(
(self.last_date - self.first_date) / np.timedelta64(365, "D"), ndigits=1
)
self.pl = PeriodLength(
self.ror.shape[0] // _MONTHS_PER_YEAR, self.ror.shape[0] % _MONTHS_PER_YEAR
)
self._pl_txt = f"{self.pl.years} years, {self.pl.months} months"
self._dividend_yield: pd.DataFrame = pd.DataFrame(dtype=float)
self._dividends_ts: pd.DataFrame = pd.DataFrame(dtype=float)
def __repr__(self):
dic = {
"symbols": self.symbols,
"currency": self.currency.ticker,
"first date": self.first_date.strftime("%Y-%m"),
"last_date": self.last_date.strftime("%Y-%m"),
"period length": self._pl_txt,
"inflation": self.inflation if hasattr(self, "inflation") else "None",
}
return repr(pd.Series(dic))
def __len__(self):
return len(self.symbols)
def __make_asset_list(self, ls: list) -> None:
"""
Make an asset list from a list of symbols.
"""
first_dates: Dict[str, pd.Timestamp] = {}
last_dates: Dict[str, pd.Timestamp] = {}
names: Dict[str, str] = {}
currencies: Dict[str, str] = {}
df = pd.DataFrame()
for i, x in enumerate(ls):
asset = Asset(x)
if i == 0: # required to use pd.concat below (df should not be empty).
if asset.currency == self.currency.name:
df = asset.ror
else:
df = self._set_currency(
returns=asset.ror, asset_currency=asset.currency
)
else:
if asset.currency == self.currency.name:
new = asset.ror
else:
new = self._set_currency(
returns=asset.ror, asset_currency=asset.currency
)
df = pd.concat([df, new], axis=1, join="inner", copy="false")
currencies.update({asset.symbol: asset.currency})
names.update({asset.symbol: asset.name})
first_dates.update({asset.symbol: asset.first_date})
last_dates.update({asset.symbol: asset.last_date})
# Add currency to the date range dict
first_dates.update({self.currency.name: self.currency.first_date})
last_dates.update({self.currency.name: self.currency.last_date})
first_dates_sorted = sorted(first_dates.items(), key=lambda y: y[1])
last_dates_sorted = sorted(last_dates.items(), key=lambda y: y[1])
self.first_date: pd.Timestamp = first_dates_sorted[-1][1]
self.last_date: pd.Timestamp = last_dates_sorted[0][1]
self.newest_asset: str = first_dates_sorted[-1][0]
self.eldest_asset: str = first_dates_sorted[0][0]
self.names = names
currencies.update({"asset list": self.currency.currency})
self.currencies: Dict[str, str] = currencies
self.assets_first_dates: Dict[str, pd.Timestamp] = dict(first_dates_sorted)
self.assets_last_dates: Dict[str, pd.Timestamp] = dict(last_dates_sorted)
if isinstance(
df, pd.Series
): # required to convert Series to DataFrame for single asset list
df = df.to_frame()
self.ror = df
def _set_currency(self, returns: pd.Series, asset_currency: str) -> pd.Series:
"""
Set return to a certain currency.
"""
currency = Asset(symbol=f"{asset_currency}{self.currency.name}.FX")
asset_mult = returns + 1.0
currency_mult = currency.ror + 1.0
# join dataframes to have the same Time Series Index
df = pd.concat([asset_mult, currency_mult], axis=1, join="inner", copy="false")
currency_mult = df.iloc[:, -1]
asset_mult = df.iloc[:, 0]
x = asset_mult * currency_mult - 1.0
x.rename(returns.name, inplace=True)
return x
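    # Worked illustration (hypothetical numbers): a +1.0% asset return combined with a
    # +2.0% move of the asset currency against the list currency compounds to
    # 1.010 * 1.020 - 1 = 0.0302, i.e. +3.02% expressed in the list currency.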
def _add_inflation(self) -> pd.DataFrame:
"""
Add inflation column to returns DataFrame.
"""
if hasattr(self, "inflation"):
return pd.concat(
[self.ror, self.inflation_ts], axis=1, join="inner", copy="false"
)
else:
return self.ror
def _remove_inflation(self, time_frame: int) -> pd.DataFrame:
"""
Remove inflation column from rolling returns if exists.
"""
if hasattr(self, "inflation"):
return self.get_rolling_cumulative_return(window=time_frame).drop(
columns=[self.inflation]
)
else:
return self.get_rolling_cumulative_return(window=time_frame)
@property
def symbols(self) -> List[str]:
"""
Return a list of financial symbols used to set the AssetList.
Symbols are similar to tickers but have a namespace information:
* SPY.US is a symbol
* SPY is a ticker
Returns
-------
list of str
List of symbols included in the Asset List.
"""
symbols = [default_ticker] if not self.__symbols else self.__symbols
if not isinstance(symbols, list):
raise ValueError("Symbols must be a list of string values.")
return symbols
@property
def tickers(self) -> List[str]:
"""
Return a list of tickers (symbols without a namespace) used to set the AssetList.
tickers are similar to symbols but do not have namespace information:
* SPY is a ticker
* SPY.US is a symbol
Returns
-------
list of str
List of tickers included in the Asset List.
"""
return self.__tickers
@property
def currency(self) -> Asset:
"""
Return the base currency of the Asset List.
Such properties as rate of return and risk are adjusted to the base currency.
Returns
-------
okama.Asset
Base currency of the Asset List in form of okama.Asset class.
"""
return self.__currency
@property
def wealth_indexes(self) -> pd.DataFrame:
"""
Calculate wealth index time series for the assets and accumulated inflation.
Wealth index (Cumulative Wealth Index) is a time series that presents the value of each asset over
historical time period. Accumulated inflation time series is added if `inflation=True` in the AssetList.
Wealth index is obtained from the accumulated return multiplicated by the initial investments.
That is: 1000 * (Acc_Return + 1)
Initial investments are taken as 1000 units of the AssetList base currency.
Returns
-------
DataFrame
Time series of wealth index values for each asset and accumulated inflation.
"""
df = self._add_inflation()
return Frame.get_wealth_indexes(df)
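    # Worked illustration (hypothetical returns): monthly returns of +1.0% and -0.5%
    # give a wealth index of 1000 * 1.010 * 0.995 = 1004.95 after the second month.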
@property
def risk_monthly(self) -> pd.Series:
"""
Calculate monthly risks (standard deviation) for each asset.
Monthly risk of the asset is a standard deviation of the rate of return time series.
Standard deviation (sigma σ) is normalized by N-1.
Returns
-------
Series
Monthly risk (standard deviation) values for each asset in form of Series.
See Also
--------
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> al = ok.AssetList(['GC.COMM', 'SHV.US'], ccy='USD', last_date='2021-01')
>>> al.risk_monthly
GC.COMM 0.050864
SHV.US 0.001419
dtype: float64
"""
return self.ror.std()
@property
def risk_annual(self) -> pd.Series:
"""
Calculate annualized risks (standard deviation) for each asset.
Returns
-------
Series
Annualized risk (standard deviation) values for each asset in form of Series.
"""
risk = self.ror.std()
mean_return = self.ror.mean()
return Float.annualize_risk(risk, mean_return)
@property
def semideviation_monthly(self) -> pd.Series:
"""
Calculate semideviation monthly values for each asset.
Returns
-------
Series
Monthly semideviation values for each asset in form of Series.
"""
return Frame.get_semideviation(self.ror)
@property
def semideviation_annual(self) -> pd.Series:
"""
Return semideviation annualized values for each asset.
Returns
-------
Series
Annualized semideviation values for each asset in form of Series.
"""
return Frame.get_semideviation(self.ror) * 12 ** 0.5
def get_var_historic(self, time_frame: int = 12, level: int = 5) -> pd.Series:
"""
Calculate historic Value at Risk (VaR) for the assets.
The VaR calculates the potential loss of an investment with a given time frame and confidence level.
Loss is a positive number (expressed in cumulative return).
If VaR is negative there are gains at this confidence level.
Parameters
----------
time_frame : int, default 12
Time period size in months
level : int, default 5
Confidence level in percents to calculate the VaR. Default value is 5%.
Returns
-------
Series
VaR values for each asset in form of Series.
Examples
--------
>>> x = ok.AssetList(['SPY.US', 'AGG.US'])
>>> x.get_var_historic(time_frame=60, level=1)
SPY.US 0.2101
AGG.US -0.0867
Name: VaR, dtype: float64
"""
df = self._remove_inflation(time_frame)
return Frame.get_var_historic(df, level)
def get_cvar_historic(self, time_frame: int = 12, level: int = 5) -> pd.Series:
"""
Calculate historic Conditional Value at Risk (CVAR, expected shortfall) for the assets.
CVaR is the average loss over a specified time period of unlikely scenarios beyond the confidence level.
Loss is a positive number (expressed in cumulative return).
If CVaR is negative there are gains at this confidence level.
Parameters
----------
time_frame : int, default 12
Time period size in months
level : int, default 5
Confidence level in percents to calculate the VaR. Default value is 5%.
Returns
-------
Series
CVaR values for each asset in form of Series.
Examples
--------
>>> x = ok.AssetList(['SPY.US', 'AGG.US'])
>>> x.get_cvar_historic(time_frame=60, level=1)
SPY.US 0.2574
AGG.US -0.0766
        dtype: float64
"""
df = self._remove_inflation(time_frame)
return Frame.get_cvar_historic(df, level)
@property
def drawdowns(self) -> pd.DataFrame:
"""
Calculate drawdowns time series for the assets.
The drawdown is the percent decline from a previous peak in wealth index.
Returns
-------
DataFrame
Time series of drawdowns.
"""
return Frame.get_drawdowns(self.ror)
def get_cagr(self, period: Optional[int] = None, real: bool = False) -> pd.Series:
"""
Calculate assets Compound Annual Growth Rate (CAGR) for a given trailing period.
Compound annual growth rate (CAGR) is the rate of return that would be required for an investment to grow from
its initial to its final value, assuming all incomes were reinvested.
Inflation adjusted annualized returns (real CAGR) are shown with `real=True` option.
Annual inflation value is calculated for the same period if inflation=True in the AssetList.
CAGR is not defined for periods less than 1 year.
Parameters
----------
period: int, optional
CAGR trailing period in years. None for full time CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
AssetList should be initiated with Inflation=True for real CAGR.
Returns
-------
Series
CAGR values for each asset and annualized inflation (optional).
Examples
--------
>>> x = ok.AssetList()
>>> x.get_cagr(period=5)
SPY.US 0.1510
USD.INFL 0.0195
dtype: float64
To get inflation adjusted return (real annualized return) add `real=True` option:
>>> x = ok.AssetList(['EURUSD.FX', 'CNYUSD.FX'], inflation=True)
>>> x.get_cagr(period=5, real=True)
EURUSD.FX 0.000439
CNYUSD.FX -0.017922
dtype: float64
"""
# TODO: add "real" attribute
df = self._add_inflation()
dt0 = self.last_date
if period is None:
dt = self.first_date
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cagr = Frame.get_cagr(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise Exception(
"Real CAGR is not defined. Set inflation=True in AssetList to calculate it."
)
mean_inflation = Frame.get_cagr(self.inflation_ts[dt:])
cagr = (1. + cagr) / (1. + mean_inflation) - 1.
cagr.drop(self.inflation, inplace=True)
return cagr
def _validate_period(self, period: Any) -> None:
"""
Check if conditions are met:
* period should be an integer
* period should be positive
* period should not exceed history period length
Parameters
----------
period : Any
Returns
-------
None
No exceptions raised if validation passes.
"""
validate_integer("period", period, min_value=0, inclusive=False)
if period > self.pl.years:
raise ValueError(
f"'period' ({period}) is beyond historical data range ({self.period_length})."
)
def _make_real_return_time_series(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Calculate real monthly return time series.
Rate of return monthly data is adjusted for inflation.
"""
if not hasattr(self, "inflation"):
raise Exception(
"Real return is not defined. Set inflation=True in AssetList to calculate it."
)
df = (1. + df).divide(1. + self.inflation_ts, axis=0) - 1.
df.drop(columns=[self.inflation], inplace=True)
return df
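    # Worked illustration (hypothetical numbers): a +5.0% nominal return with +2.0%
    # inflation over the same period gives a real return of 1.05 / 1.02 - 1 = 0.0294...,
    # i.e. about +2.94%.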
def get_rolling_cagr(self, window: int = 12, real: bool = False) -> pd.DataFrame:
"""
Calculate rolling CAGR (Compound Annual Growth Rate) for each asset.
Parameters
----------
window : int, default 12
Size of the moving window in months. Window size should be at least 12 months for CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
AssetList should be initiated with Inflation=True for real CAGR.
Returns
-------
DataFrame
Time series of rolling CAGR.
Examples
--------
Get inflation adjusted rolling return (real annualized return) win 5 years window:
>>> x = ok.AssetList(['DXET.XETR', 'DBXN.XETR'], ccy='EUR', inflation=True)
>>> x.get_rolling_cagr(window=5*12, real=True)
DXET.XETR DBXN.XETR
2013-09 0.012148 0.034538
2013-10 0.058834 0.034235
2013-11 0.072305 0.027890
2013-12 0.056456 0.022916
... ...
2020-12 0.038441 0.020781
2021-01 0.045849 0.012216
2021-02 0.062271 0.006188
2021-03 0.074446 0.006124
"""
df = self._add_inflation()
if real:
df = self._make_real_return_time_series(df)
return Frame.get_rolling_fn(df, window=window, fn=Frame.get_cagr)
def get_cumulative_return(self, period: Union[str, int, None] = None, real: bool = False) -> pd.Series:
"""
Calculate cumulative return over a given trailing period for each asset.
The cumulative return is the total change in the asset price during the investment period.
Inflation adjusted cumulative returns (real cumulative returns) are shown with `real=True` option.
Annual inflation data is calculated for the same period if `inflation=True` in the AssetList.
Parameters
----------
period: str, int or None, default None
Trailing period in years. Period should be more then 0.
None - full time cumulative return.
'YTD' - (Year To Date) period of time beginning the first day of the calendar year up to the last month.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
AssetList should be initiated with `Inflation=True` for real cumulative return.
Returns
-------
Series
Cumulative return values for each asset and cumulative inflation (optional).
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX'], ccy='RUB')
>>> x.get_cumulative_return(period='YTD')
MCFTR.INDX 0.1483
RUB.INFL 0.0485
dtype: float64
"""
df = self._add_inflation()
dt0 = self.last_date
if period is None:
dt = self.first_date
elif str(period).lower() == "ytd":
year = dt0.year
dt = str(year)
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cr = Frame.get_cumulative_return(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise Exception(
"Real cumulative return is not defined (no inflation information is available)."
"Set inflation=True in AssetList to calculate it."
)
cumulative_inflation = Frame.get_cumulative_return(self.inflation_ts[dt:])
cr = (1. + cr) / (1. + cumulative_inflation) - 1.
cr.drop(self.inflation, inplace=True)
return cr
def get_rolling_cumulative_return(self, window: int = 12, real: bool = False) -> pd.DataFrame:
"""
Calculate rolling cumulative return for each asset.
The cumulative return is the total change in the asset price.
Parameters
----------
window : int, default 12
Size of the moving window in months.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
AssetList should be initiated with `Inflation=True` for real cumulative return.
Returns
-------
DataFrame
Time series of rolling cumulative return.
"""
df = self._add_inflation()
if real:
df = self._make_real_return_time_series(df)
return Frame.get_rolling_fn(
df, window=window, fn=Frame.get_cumulative_return, window_below_year=True
)
@property
def annual_return_ts(self) -> pd.DataFrame:
"""
Calculate annual rate of return time series for each asset.
Rate of return is calculated for each calendar year.
Returns
-------
DataFrame
Calendar annual rate of return time series.
"""
return Frame.get_annual_return_ts_from_monthly(self.ror)
def describe(
self, years: Tuple[int, ...] = (1, 5, 10), tickers: bool = True
) -> pd.DataFrame:
"""
Generate descriptive statistics for a list of assets.
Statistics includes:
- YTD (Year To date) compound return
- CAGR for a given list of periods
- Dividend yield - yield for last 12 months (LTM)
Risk metrics (full period):
- risk (standard deviation)
- CVAR
- max drawdowns (and dates of the drawdowns)
Statistics also shows for each asset:
- inception date - first date available for each asset
- last asset date - available for each asset date
- Common last data date - common for the asset list data (may be set by last_date manually)
Parameters
----------
years : tuple of (int,), default (1, 5, 10)
List of periods for CAGR.
tickers : bool, default True
Defines whether show tickers (True) or assets names in the header.
Returns
-------
DataFrame
Table of descriptive statistics for a list of assets.
See Also
--------
get_cumulative_return : Calculate cumulative return.
get_cagr : Calculate assets Compound Annual Growth Rate (CAGR).
dividend_yield : Calculate dividend yield (LTM).
risk_annual : Return annualized risks (standard deviation).
get_cvar : Calculate historic Conditional Value at Risk (CVAR, expected shortfall).
drawdowns : Calculate drawdowns.
"""
description = pd.DataFrame()
dt0 = self.last_date
df = self._add_inflation()
# YTD return
ytd_return = self.get_cumulative_return(period="YTD")
row = ytd_return.to_dict()
row.update(period="YTD", property="Compound return")
description = description.append(row, ignore_index=True)
# CAGR for a list of periods
if self.pl.years >= 1:
for i in years:
dt = Date.subtract_years(dt0, i)
if dt >= self.first_date:
row = self.get_cagr(period=i).to_dict()
else:
row = {x: None for x in df.columns}
row.update(period=f"{i} years", property="CAGR")
description = description.append(row, ignore_index=True)
# CAGR for full period
row = self.get_cagr(period=None).to_dict()
row.update(period=self._pl_txt, property="CAGR")
description = description.append(row, ignore_index=True)
# Dividend Yield
row = self.dividend_yield.iloc[-1].to_dict()
row.update(period="LTM", property="Dividend yield")
description = description.append(row, ignore_index=True)
# risk for full period
row = self.risk_annual.to_dict()
row.update(period=self._pl_txt, property="Risk")
description = description.append(row, ignore_index=True)
# CVAR
if self.pl.years >= 1:
row = self.get_cvar_historic().to_dict()
row.update(period=self._pl_txt, property="CVAR")
description = description.append(row, ignore_index=True)
# max drawdowns
row = self.drawdowns.min().to_dict()
row.update(period=self._pl_txt, property="Max drawdowns")
description = description.append(row, ignore_index=True)
# max drawdowns dates
row = self.drawdowns.idxmin().to_dict()
row.update(period=self._pl_txt, property="Max drawdowns dates")
description = description.append(row, ignore_index=True)
# inception dates
row = {}
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
value = self.assets_first_dates[ti].strftime("%Y-%m")
row.update({ti: value})
row.update(period=None, property="Inception date")
if hasattr(self, "inflation"):
row.update({self.inflation: self.inflation_first_date.strftime("%Y-%m")})
description = description.append(row, ignore_index=True)
# last asset date
row = {}
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
value = self.assets_last_dates[ti].strftime("%Y-%m")
row.update({ti: value})
row.update(period=None, property="Last asset date")
if hasattr(self, "inflation"):
row.update({self.inflation: self.inflation_last_date.strftime("%Y-%m")})
description = description.append(row, ignore_index=True)
# last data date
row = {x: self.last_date.strftime("%Y-%m") for x in df.columns}
row.update(period=None, property="Common last data date")
description = description.append(row, ignore_index=True)
# rename columns
if hasattr(self, "inflation"):
description.rename(columns={self.inflation: "inflation"}, inplace=True)
description = Frame.change_columns_order(
description, ["inflation"], position="last"
)
description = Frame.change_columns_order(
description, ["property", "period"], position="first"
)
if not tickers:
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
description.rename(columns={ti: self.names[ti]}, inplace=True)
return description
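    # Hedged usage sketch: ok.AssetList(['SPY.US', 'AGG.US']).describe(years=(1, 5))
    # returns one row per statistic (YTD return, CAGRs, dividend yield, risk, CVAR,
    # drawdowns and their dates, inception/last dates) with one column per asset plus
    # an 'inflation' column when inflation=True.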
@property
def mean_return(self) -> pd.Series:
"""
Calculate annualized mean return (arithmetic mean) for the assets.
Mean return calculated for the full history period. Arithmetic mean for the inflation is also shown
if there is an `inflation=True` option in AssetList.
Returns
-------
Series
Mean return value for each asset.
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX', 'RGBITR.INDX'], ccy='RUB', inflation=True)
>>> x.mean_return
MCFTR.INDX 0.209090
RGBITR.INDX 0.100133
RUB.INFL 0.081363
dtype: float64
"""
df = self._add_inflation()
mean = df.mean()
return Float.annualize_return(mean)
@property
def real_mean_return(self) -> pd.Series:
"""
Calculate annualized real mean return (arithmetic mean) for the assets.
Real rate of return is adjusted for inflation. Real return is defined if
there is an `inflation=True` option in AssetList.
Returns
-------
Series
Mean real return value for each asset.
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX', 'RGBITR.INDX'], ccy='RUB', inflation=True)
>>> x.real_mean_return
MCFTR.INDX 0.118116
RGBITR.INDX 0.017357
dtype: float64
"""
if not hasattr(self, "inflation"):
raise Exception(
"Real Return is not defined. Set inflation=True to calculate."
)
df = pd.concat(
[self.ror, self.inflation_ts], axis=1, join="inner", copy="false"
)
infl_mean = Float.annualize_return(self.inflation_ts.values.mean())
ror_mean = Float.annualize_return(df.loc[:, self.symbols].mean())
return (1. + ror_mean) / (1. + infl_mean) - 1.
def _get_asset_dividends(self, tick: str, remove_forecast: bool = True) -> pd.Series:
"""
Get dividend time series for a single symbol.
"""
first_period = pd.Period(self.first_date, freq="M")
first_day = first_period.to_timestamp(how="Start")
last_period = pd.Period(self.last_date, freq="M")
last_day = last_period.to_timestamp(how="End")
s = Asset(tick).dividends[
first_day:last_day
] # limit divs by first_day and last_day
if remove_forecast:
s = s[: pd.Period.now(freq="D")]
# Create time series with zeros to pad the empty spaces in dividends time series
index = pd.date_range(start=first_day, end=last_day, freq="D")
period = index.to_period("D")
pad_s = pd.Series(data=0, index=period)
return s.add(pad_s, fill_value=0)
def _get_dividends(self, remove_forecast=True) -> pd.DataFrame:
"""
Get dividend time series for all assets.
If `remove_forecast=True` all forecasted (future) data is removed from the time series.
"""
if self._dividends_ts.empty:
dic = {}
for tick in self.symbols:
s = self._get_asset_dividends(tick, remove_forecast=remove_forecast)
dic.update({tick: s})
self._dividends_ts = pd.DataFrame(dic)
return self._dividends_ts
@property
def dividend_yield(self) -> pd.DataFrame:
"""
Calculate last twelve months (LTM) dividend yield time series (monthly) for each asset.
All yields are calculated in the original asset currency (not adjusting to AssetList base currency).
Forecasted (future) dividends are removed.
Zero value time series are created for assets without dividends.
Returns
-------
DataFrame
Time series of LTM dividend yield for each asset.
Examples
--------
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='1984-01', last_date='1994-12')
>>> x.dividend_yield
T.US XOM.US
1984-01 0.000000 0.000000
1984-02 0.000000 0.002597
1984-03 0.002038 0.002589
1984-04 0.001961 0.002346
... ...
1994-09 0.018165 0.012522
1994-10 0.018651 0.011451
1994-11 0.018876 0.012050
1994-12 0.019344 0.011975
[132 rows x 2 columns]
"""
if self._dividend_yield.empty:
frame = {}
df = self._get_dividends(remove_forecast=True)
for tick in self.symbols:
# Get dividends time series
div = df[tick]
# Get close (not adjusted) values time series.
# If the last_date month is current month live price of assets is used.
if div.sum() != 0:
div_monthly = div.resample("M").sum()
price = QueryData.get_close(tick, period="M").loc[
self.first_date : self.last_date
]
else:
# skipping prices if no dividends
div_yield = div.asfreq(freq="M")
frame.update({tick: div_yield})
continue
if price.index[-1] == pd.Period(pd.Timestamp.today(), freq="M"):
price.loc[
f"{pd.Timestamp.today().year}-{pd.Timestamp.today().month}"
] = Asset(tick).price
# Get dividend yield time series
div_yield = pd.Series(dtype=float)
div_monthly.index = div_monthly.index.to_timestamp()
for date in price.index.to_timestamp(how="End"):
ltm_div = div_monthly[:date].last("12M").sum()
last_price = price.loc[:date].iloc[-1]
value = ltm_div / last_price
div_yield.at[date] = value
div_yield.index = div_yield.index.to_period("M")
# Currency adjusted yield
# if self.currencies[tick] != self.currency.name:
# div_yield = self._set_currency(returns=div_yield, asset_currency=self.currencies[tick])
frame.update({tick: div_yield})
self._dividend_yield = pd.DataFrame(frame)
return self._dividend_yield
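    # Worked illustration (hypothetical numbers): 2.40 of dividends paid over the
    # trailing twelve months against a last close of 100.0 gives an LTM yield of
    # 2.40 / 100.0 = 0.024 (2.4%), computed in the asset's own currency.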
@property
def dividends_annual(self) -> pd.DataFrame:
"""
Return calendar year dividends sum time series for each asset.
Returns
-------
DataFrame
Annual dividends time series for each asset.
"""
return self._get_dividends().resample("Y").sum()
@property
def dividend_growing_years(self) -> pd.DataFrame:
"""
Return the number of years when the annual dividend was growing for each asset.
Returns
-------
DataFrame
Dividend growth length periods time series for each asset.
Examples
--------
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='1984-01', last_date='1994-12')
>>> x.dividend_growing_years
T.US XOM.US
1985 1 1
1986 2 2
1987 3 3
1988 0 4
1989 1 5
1990 2 6
1991 3 7
1992 4 8
1993 5 9
1994 6 10
"""
div_growth = self.dividends_annual.pct_change()[1:]
df = pd.DataFrame()
for name in div_growth:
s = div_growth[name]
s1 = s.where(s > 0).notnull().astype(int)
s1_1 = s.where(s > 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
df = pd.concat([df, s2], axis=1, copy="false")
return df
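    # Worked illustration (hypothetical growth rates): for annual dividend growth of
    # [+10%, +20%, -10%, +30%] the intermediate series above evaluate to
    #   s1   = [1, 1, 0, 1]   (dividend grew this year or not)
    #   s1_1 = [0, 0, 1, 1]   (group id, incremented at every non-growth year)
    #   s2   = [1, 2, 0, 1]   (consecutive growing years, reset after the decline)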
@property
def dividend_paying_years(self) -> pd.DataFrame:
"""
Return the number of years of consecutive dividend payments for each asset.
Returns
-------
DataFrame
Dividend payment period length time series for each asset.
Examples
--------
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='1984-01', last_date='1994-12')
>>> x.dividend_paying_years
T.US XOM.US
1984 1 1
1985 2 2
1986 3 3
1987 4 4
1988 5 5
1989 6 6
1990 7 7
1991 8 8
1992 9 9
1993 10 10
1994 11 11
"""
div_annual = self.dividends_annual
        df = pd.DataFrame()
for name in div_annual:
s = div_annual[name]
s1 = s.where(s != 0).notnull().astype(int)
s1_1 = s.where(s != 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
df = | pd.concat([df, s2], axis=1, copy="false") | pandas.concat |
from Bio import SeqIO, GenBank
from Bio.Graphics import GenomeDiagram
from Bio.SeqFeature import FeatureLocation
import seaborn as sns
import argparse, os, sys, math, random
from ete3 import Tree, TreeStyle, NodeStyle
import matplotlib.colors as colors
from reportlab.lib import colors as rcolors
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import combinations
def match_seqs(fastafile,outdir,prefix):
o = open(os.path.abspath(prefix)+".txt",'w')
os.system("phmmer --tblout {} {} {}".format(os.path.abspath(prefix)+".phmmer.hits",os.path.abspath(fastafile),os.path.join(outdir,"all_groups.faa")))
hit_scores = {}
for line in open(os.path.abspath(prefix)+".phmmer.hits",'r'):
if line.startswith("#"):
continue
else:
vals = line.rstrip().split()
if vals[2] in hit_scores:
hit_scores[vals[2]].append((vals[0],float(vals[5])))
else:
hit_scores[vals[2]] = [(vals[0],float(vals[5]))]
hit_gene_lengths = {}
for seq in SeqIO.parse(open(os.path.join(outdir,"all_groups.faa"),'r'),'fasta'):
hit_gene_lengths[seq.id] = len(seq.seq)
norm_scores = {}
for query in hit_scores:
norm_scores[query] = [(h[0],h[1]/float(hit_gene_lengths[h[0]])) for h in hit_scores[query]]
for s in norm_scores.keys():
print(s)
o.write(s+"\n")
for i in sorted(norm_scores[s], reverse=True, key=lambda k: k[1]):
            if i[1] >= 1.0:
                score = "Good match!"
            elif i[1] > 0.5:
                score = "Possible match..."
            else:
                score = "Probably not a very good match..."
o.write(" ".join([str(x) for x in ["\t", i[0],round(i[1],3), score]])+"\n")
print("\t", i[0],round(i[1],3), score)
return
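# Interpretation sketch for the thresholds above (numbers are hypothetical): scores are
# phmmer bit scores divided by the length of the matched all_groups.faa sequence, so
# 180 bits against a 150-residue protein scores 1.2 ("Good match!"), while 60 bits
# against the same protein scores 0.4 ("Probably not a very good match...").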
def add_group_to_tree(group,treefile,outdir,to_compress=False):
if to_compress:
compress = to_compress.split(",")
else:
compress = []
for line in open(os.path.join(outdir,"homolog_matrix.txt"),'r'):
if line.startswith("\t"):
header = line.rstrip().split("\t")[1:]
if not line.startswith(group):
continue
else:
vals = [int(x) for x in line.rstrip().split("\t")[1:]]
groupdata = dict(zip(header,vals))
ts = TreeStyle()
tree = Tree(os.path.abspath(treefile))
pal = sns.cubehelix_palette(rot=-.4, n_colors=13)
for node in tree.iter_descendants("preorder"):
this_node = []
nstyle = NodeStyle()
nstyle["shape"] = "circle"
if node.is_leaf():
try:
if groupdata[node.name] > 0:
nstyle["fgcolor"] = colors.rgb2hex(pal[12])
else:
nstyle["fgcolor"] = colors.rgb2hex(pal[0])
except KeyError:
nstyle["fgcolor"] = colors.rgb2hex(pal[0])
else:
species = {}
for x in node.iter_descendants("preorder"):
if x.is_leaf():
this_node.append(x.name)
s = x.name.split("_")[1]
if s in species:
species[s] += 1
else:
species[s] = 1
for c in compress:
try:
if float(species[c])/float(len(this_node)) > 0.95:
nstyle["draw_descendants"] = False
node.name = "{} clade".format(c)
except KeyError:
pass
count = 0
for t in this_node:
try:
if groupdata[t] > 0:
count +=1
except KeyError:
pass
v = int(round(float(count)/float(len(this_node))*12))
nstyle["fgcolor"] = colors.rgb2hex(pal[v])
nstyle["size"] = 3*math.sqrt(len(this_node))
node.set_style(nstyle)
return tree
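# Color-mapping sketch: internal nodes are shaded by the fraction of descendant leaves
# that carry the group, mapped onto the 13-color cubehelix palette via
# round(fraction * 12) -- e.g. 0.5 presence -> palette index 6, 1.0 -> index 12.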
def subset_matrix(strains,outdir,groups=None):
infile = open(os.path.join(outdir,"homolog_matrix.txt"),'r')
header_line = infile.readline().rstrip().split("\t")
indices = [header_line.index(s) for s in strains]
lines = []
all_groups = []
for line in infile:
vals = line.rstrip().split("\t")
# convert input data into boolean presence/absence
if groups:
if vals[0] in groups:
lines.append([int(bool(int(vals[i]))) for i in indices])
all_groups.append(vals[0])
else:
lines.append([int(bool(int(vals[i]))) for i in indices])
all_groups.append(vals[0])
a = np.stack(lines)
return a,all_groups
def dump_matrix(cc,strains,outfile):
o = open(outfile,'w')
o.write("\t{}\n".format("\t".join(strains)))
for i in range(0,len(strains)):
o.write("{}\t{}\n".format(strains[i],"\t".join([str(x) for x in cc[i]])))
o.close()
return
def find_unique_genes(a,strains,groups):
unique = {}
missing = {}
common = []
for s in strains:
unique[s] = []
missing[s] = []
for i in range(0,a.shape[0]):
nz = np.nonzero(a[i])
if len(nz[0]) == 1:
unique[strains[nz[0][0]]].append(groups[i])
elif len(nz[0]) == len(strains)-1:
missing[strains[np.nonzero(a[i] < 1)[0][0]]].append(groups[i])
elif len(nz[0]) == len(strains):
common.append(groups[i])
else:
pass
for s in strains:
print(s)
print("\t", len(unique[s]), "unique")
print("\t", len(missing[s]), "missing")
print(len(common), "common to all strains.")
return {"unique":unique,"missing":missing,"common":common}
def find_unique_loci(strain,outdir,uniq_info):
locusfile = open(os.path.join(outdir,"locustag_matrix.txt"),'r')
header_line = locusfile.readline().rstrip().split("\t")
i = header_line.index(strain)
unique_loci = []
for line in locusfile:
vals = line.rstrip().split("\t")
if vals[0] in uniq_info["unique"][strain]:
for x in vals[i].split(";"):
unique_loci.append(x)
return unique_loci
def plot_unique_genome_diagram(gbk, unique_loci):
parser = GenBank.FeatureParser()
fhandle = open(gbk, 'r')
genbank_entry = parser.parse(fhandle)
fhandle.close()
gdd = GenomeDiagram.Diagram(gbk)
gd_track_for_features = gdd.new_track(1, name="CDS",scale_smalltick_interval=100000)
gdfs = gd_track_for_features.new_set()
for feature in genbank_entry.features:
if feature.type == 'CDS':
feature.strand = 1
if feature.qualifiers['locus_tag'][0] in unique_loci:
gdfs.add_feature(feature, color=rcolors.HexColor("#93341F"))
else:
gdfs.add_feature(feature, color=rcolors.HexColor("#058F45"))
gdd.draw(format='circular', orientation='landscape',tracklines=0, pagesize='A5', fragments=5, circular=1)
return gdd
def synteny_check(gbk,outdir,map_to,strains,outfile,use_protein_id=False):
seqs = {}
for seq in SeqIO.parse(open(gbk,'r'),'genbank'):
seqs[seq.id] = []
for feat in seq.features:
if feat.type == "CDS":
if use_protein_id:
try:
seqs[seq.id].append(feat.qualifiers["protein_id"][0].split(".")[0])
except KeyError:
pass
else:
try:
seqs[seq.id].append(feat.qualifiers["locus_tag"][0].split(".")[0])
except KeyError:
pass
groupdict = {}
locustags = []
for seq in seqs:
for s in seqs[seq]:
locustags.append(s)
header = open(os.path.join(outdir,"locustag_matrix.txt"),'r').readline().rstrip().split("\t")
i = header.index(map_to)
indices = [header.index(x) for x in strains]
for line in open(os.path.join(outdir,"locustag_matrix.txt"),'r'):
vals = [v.split(".")[0] for v in line.rstrip().split("\t")[i].split(";")]
for s in locustags:
if s in vals:
groupdict[s] = line.rstrip().split("\t")[0]
groups = [g for g in groupdict.values()]
datadict = {}
for line in open(os.path.join(outdir,"locustag_matrix.txt"),'r'):
vals = line.rstrip().split("\t")
if vals[0] in groups:
datadict[vals[0]] = []
for i in indices:
if len(vals[i].split(";")) > 1:
datadict[vals[0]].append("Multiple")
else:
datadict[vals[0]].append(vals[i])
group_annotations = {}
for line in open(os.path.join(outdir,"group_descriptions.txt"),'r'):
vals = line.rstrip().split("\t")
counts = {}
for x in set(vals[1:]):
counts[x] = vals.count(x)
group_annotations[vals[0]] = sorted(counts.items(), reverse=True, key = lambda x: x[1])[0][0]
o = open(outfile,'w')
for seq in seqs:
o.write(">{}\n".format(seq))
o.write("{}\tgroup\t{}\tannotation\n".format(map_to,"\t".join(strains)))
for s in seqs[seq]:
try:
o.write("{}\t{}\t{}\t{}\n".format(s,groupdict[s],"\t".join(datadict[groupdict[s]]),group_annotations[groupdict[s]]))
except KeyError:
pass
return
def _parse_genbank(g,genomedb):
if g[2] == "ensembl" or g[2] == "NCBI":
FIELD = "protein_id"
elif g[2] == "prokka_in_house" or g[2] == "img":
FIELD = "locus_tag"
for seq in SeqIO.parse(open(os.path.join(os.path.abspath(genomedb),"gbk",g[0]+".gbk"),'r'),"genbank"):
for feat in seq.features:
if feat.type == "CDS":
try:
if feat.qualifiers[FIELD][0] == g[1]:
return seq, (int(feat.location.start), int(feat.location.end))
except KeyError:
pass
print(g)
print("locus not found. try again.")
return
def _make_tracks(seq, span, coords, g, GD, count, locus_tags, labels):
if g[2] == "ensembl" or g[2] == "NCBI":
FIELD = "protein_id"
elif g[2] == "prokka_in_house" or g[2] == "img":
FIELD = "locus_tag"
track = GD.new_track(count, height=1, name="CDS",\
scale_ticks=False,scale_largeticks=False,scale_largetick_labels=False, scale_largetick_interval=10000,\
scale_smallticks=False, scale_smalltick_labels=False, scale_smalltick_interval=1000,\
greytrack=False, hide=False, scale=False
)
feature_set = track.new_set()
count = 0
for feat in seq.features:
if feat.type == "CDS":
if int(feat.location.start) > (coords[0]-(span/2)) and int(feat.location.end) < (coords[1]+(span/2)):
newloc = FeatureLocation(int(feat.location.start-(coords[0]-(span/2))),int(feat.location.end-(coords[0]-(span/2))),strand=feat.strand)
feat.location = newloc
if g[2] == "prokka_in_house" or g[2] == "NCBI":
feature_set.add_feature(feat, sigil="BIGARROW", arrowshaft_height=1, arrowhead_length=.4,color="#D3D3D3", \
label=labels,name=feat.qualifiers['product'][0],label_strand=1,label_size = 8,label_position="middle", label_angle=20, \
border=rcolors.black)
try:
feature_set.add_feature(feat, sigil="BIGARROW", arrowshaft_height=1, arrowhead_length=.4,color="#D3D3D3", \
label=labels,name=feat.qualifiers[FIELD][0],label_strand=-1,label_size = 8,label_position="middle", label_angle=90, \
border=rcolors.black)
locus_tags[g[0].split(".")[0]].append(feat.qualifiers[FIELD][0].split(".")[0])
except KeyError:
pass
return
def plot_genomic_regions(locustagfile,genomedb,pypdir,span=50000,hl_groups=[],labels=False):
strains = []
for line in open(os.path.abspath(locustagfile),'r'):
vals = line.rstrip().split("\t")
strains.append([vals[0],vals[1]])
for line in open(os.path.join(os.path.abspath(genomedb),"genome_metadata.txt"),'r'):
vals = line.rstrip().split("\t")
for i in range(0,len(strains)):
if strains[i][0] == vals[2]:
strains[i].append(vals[6].split("-")[0])
GD = GenomeDiagram.Diagram('gbk',"temp.pdf")
count = 1
locus_tags = {}
for g in reversed(strains):
if g[0] not in locus_tags:
locus_tags[g[0]] = []
contigseq, coords = _parse_genbank(g,genomedb)
_make_tracks(contigseq, span, coords, g, GD, count, locus_tags, labels)
count += 1
groups = _find_homologs(GD, locus_tags,os.path.join(pypdir,"locustag_matrix.txt"),hl_groups,set([x[0] for x in strains]))
_change_colors(GD, groups)
return GD
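# --- Usage sketch (file, directory and group names below are assumptions for illustration) ---
# plot_genomic_regions() returns an undrawn GenomeDiagram.Diagram, so rendering is left to the caller:
#
#   GD = plot_genomic_regions("locus_tags.txt", "genomedb", "pyparanoid_output",
#                             span=50000, hl_groups=["group_00042"], labels=True)
#   GD.draw(format="linear", orientation="landscape", pagesize="A3", fragments=1)
#   GD.write("genomic_regions.pdf", "PDF")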
def _find_homologs(GD, locus_tags, locus_mat,hl_groups,strains):
groups = {}
count = 0
    header = open(locus_mat,'r').readline().rstrip().split("\t")
    indices = [header.index(s) for s in strains]
for line in open(locus_mat,'r'):
vals = line.rstrip().split("\t")
        # locus tags of the selected strains on this line, with version suffixes stripped
        tags = {v.split(".")[0] for i in indices for v in vals[i].split(";")}
found = []
for strain in locus_tags:
for t in locus_tags[strain]:
if t in tags:
found.append((strain,t))
if vals[0] in hl_groups:
for f in found:
groups[f[1]] = "HIGHLIGHT"
elif len(found) > 1:
for f in found:
groups[f[1]] = count
count += 1
else:
pass
return groups
def _change_colors(GD, groups):
cl = [rcolors.HexColor(c) for c in sns.cubehelix_palette(len(set(groups.values())),dark=0.1,light=0.9,rot=2.5).as_hex()]
random.shuffle(cl)
for t in GD.get_tracks():
for s in t.get_sets():
for feat in s.get_features():
if feat.name.split(".")[0] in groups:
if groups[feat.name.split(".")[0]] == "HIGHLIGHT":
feat.color = rcolors.HexColor(u"#4df92a")
else:
feat.color = cl[groups[feat.name.split(".")[0]]]
return
def get_group_dna_seqs(group, genomedb,pypdir,strains=False):
# default behavior is to get dna seqs for all strains in the genomedb/gbk folder
    locus_mat = os.path.join(pypdir,"locustag_matrix.txt")
if strains:
header = open(locus_mat,'r').readline().rstrip().split("\t")
strain_names = [line.rstrip() for line in open(os.path.abspath(strains),'r')]
indices = [header.index(k) for k in strain_names]
for line in open(locus_mat,'r'):
if line.startswith(group):
tagline = line.rstrip().split("\t")
tags = [tagline[i] for i in indices]
continue
else:
strain_names = open(locus_mat,'r').readline().rstrip().split("\t")
for line in open(locus_mat,'r'):
if line.startswith(group):
tags = line.rstrip().split("\t")
continue
genbank_files = [f.split(".")[0] for f in os.listdir(os.path.join(os.path.abspath(genomedb),"gbk"))]
o = open("{}.fna".format(group),'w')
for strain in strain_names:
group_tags = tags[strain_names.index(strain)].split(";")
if strain not in genbank_files:
print("File for", strain, "not found...")
else:
for seq in SeqIO.parse(open(os.path.join(os.path.abspath(genomedb),"gbk",strain+".gbk"),'r'),"genbank"):
for feat in seq.features:
if feat.type == "CDS":
try:
if feat.qualifiers["locus_tag"][0] in group_tags:
if feat.strand == 1:
DNAseq = seq.seq[int(feat.location.start):int(feat.location.end)]
elif feat.strand == -1:
DNAseq = seq.seq[int(feat.location.start):int(feat.location.end)].reverse_complement()
o.write(">{}\n{}\n".format(strain,DNAseq))
except KeyError:
pass
try:
if feat.qualifiers["protein_id"][0] in [x.split(".")[0] for x in group_tags] or feat.qualifiers["protein_id"][0] in group_tags:
if feat.strand == 1:
DNAseq = seq.seq[int(feat.location.start):int(feat.location.end)]
elif feat.strand == -1:
DNAseq = seq.seq[int(feat.location.start):int(feat.location.end)].reverse_complement()
o.write(">{}\n{}\n".format(strain,DNAseq))
except KeyError:
pass
o.close()
return
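# --- Usage sketch (the group name and paths are assumptions) ---
# Writes group_00123.fna in the working directory, containing one DNA sequence per strain
# that has a GenBank file under <genomedb>/gbk:
#
#   get_group_dna_seqs("group_00123", "genomedb", "pyparanoid_output")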
def plot_multigene_presence(groupfile,pypdir,tree_loc,outfile=None,add_labels=True):
strains = Tree(os.path.abspath(tree_loc)).get_leaf_names()
## parses leaf names from tree file into a correctly ordered array
groups = [line.rstrip().split("\t")[0] for line in open(os.path.abspath(groupfile),'r')]
labels = [line.rstrip().split("\t")[1] for line in open(os.path.abspath(groupfile),'r')]
dat = {}
header = open(os.path.join(pypdir,"homolog_matrix.txt"),'r').readline().rstrip().split("\t")
indices = [header.index(s) for s in strains]
for line in open(os.path.join(pypdir,"homolog_matrix.txt"),'r'):
vals = line.rstrip().split("\t")
if vals[0] in groups:
dat[vals[0]] = pd.Series(dict(zip(strains,[int(vals[i]) for i in indices])))
df = | pd.DataFrame(dat) | pandas.DataFrame |
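# --- Illustration only (not part of plot_multigene_presence): a minimal, self-contained sketch of
# one way a strain-by-group matrix like `df` could be rendered as a presence/absence heatmap.
# The binarisation threshold and styling below are assumptions.
def _sketch_presence_heatmap(df, labels, outfile=None):
    import matplotlib.pyplot as plt
    import seaborn as sns

    presence = (df > 0).astype(int)   # collapse copy numbers to present/absent
    presence.columns = labels         # human-readable labels, one per group column
    fig, ax = plt.subplots(figsize=(0.4 * len(labels) + 2, 0.2 * len(presence) + 2))
    sns.heatmap(presence, cmap="Greys", cbar=False, linewidths=0.5, ax=ax)
    if outfile:
        fig.savefig(outfile, bbox_inches="tight")
    return fig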
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "production/"
def santraller(tarih=__dt.datetime.now().strftime("%Y-%m-%d")):
"""
İlgili tarihte EPİAŞ sistemine kayıtlı santrallerin bilgilerini vermektedir.
Parametre
----------
tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Santral Bilgileri(Id, Adı, EIC Kodu, Kısa Adı)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "power-plant?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def santral_veris_cekis_birimleri(tarih=__dt.datetime.today().strftime("%Y-%m-%d"), santral_id=""):
"""
İlgili tarih ve santral ID için santralin altında tanımlanmış uzlaştırmaya
esas veriş-çekiş birim (UEVÇB) bilgilerini vermektedir.
Parametreler
------------
tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
santral_id : metin yada tam sayı formatında santral id (Varsayılan: "")
Geri Dönüş Değeri
-----------------
İlgili UEVÇB Bilgileri(Id, Adı, EIC Kodu)
"""
if __dogrulama.__tarih_id_dogrulama(tarih, santral_id):
try:
particular_url = __first_part_url + "uevcb?period=" + tarih + "&powerPlantId=" + str(santral_id)
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["uevcbList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_santraller_veris_cekis_birimleri(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih için EPİAŞ sistemine kayıtlı tüm santrallerin altında tanımlanmış uzlaştırmaya
esas veriş-çekiş birim (UEVÇB) bilgilerini vermektedir.
Parametreler
------------
tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
EPİAŞ Sistemine Kayıtlı Santraller ve UEVÇB Bilgileri("Santral Id, "Santral Adı", "Santral EIC Kodu",
"Santral Kısa Adı", "UEVÇB Id", "UEVÇB Adı", "UEVÇB EIC Kodu")
"""
if __dogrulama.__tarih_dogrulama(tarih):
list_santral = santraller()[["Id", "Adı", "EIC Kodu", "Kısa Adı"]].to_dict("records")
santral_len = len(list_santral)
list_date_santral_id = list(zip([tarih] * santral_len, list_santral))
list_date_santral_id = list(map(list, list_date_santral_id))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__santral_veris_cekis_birimleri, list_date_santral_id, chunksize=1)
return __pd.concat(list_df_unit).reset_index(drop=True)
def gercek_zamanli_uretim_yapan_santraller():
"""
İsteğin yapıldığı tarihte gerçek zamanlı üretim yapan UEVÇB bazında santral bilgilerini vermektedir.
Parametre
----------
Geri Dönüş Değeri
-----------------
Santral Bilgileri(Id, Adı, EIC Kodu, Kısa Adı)
"""
try:
particular_url = __first_part_url + "real-time-generation-power-plant-list"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def kurulu_guc(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden aylar için EPİAŞ sistemine kayıtlı santrallerin toplam kurulu güç bilgisini
vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Kurulu Güç Bilgisi (Tarih, Kurulu Güç)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son:
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__kurulu_guc, date_list)
return | __pd.concat(df_list, sort=False) | pandas.concat |
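# --- Usage sketch for the production (uretim) functions above. The module path and the plant id are
# assumptions for illustration, and the calls require access to the EPİAŞ Transparency Platform API. ---
#
#   from seffaflik.elektrik import uretim
#   plants   = uretim.santraller("2021-01-01")                          # registered power plants
#   units    = uretim.santral_veris_cekis_birimleri("2021-01-01", 123)  # UEVÇBs of plant id 123 (hypothetical)
#   capacity = uretim.kurulu_guc("2020-01-01", "2020-12-31")            # monthly installed capacity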
import logging
import itertools
import sys
import pandas as pd
import numpy as np
from capture.generate import calcs
from capture.models import chemical
from capture.generate.wolframsampler import WolframSampler
from capture.generate.qrandom import get_unique_chemical_names, build_reagent_vectors
import capture.devconfig as config
modlog = logging.getLogger('capture.generate.statespace')
def default_statedataframe(rxndict, expoverview, vollimits, rdict, experiment):
"""Generate a state set from the volume constraints of the experimental system ensuring that the limits are met.
Return the full df of volumes as well as the idealized conc df
:param rxndict:
:param expoverview:
:param vollimits:
:param rdict:
:param experiment:
:param volspacing:
:return:
"""
portionnum = 0
# TODO these two vars actually dont get used, just overwritten
prdf = pd.DataFrame()
prmmoldf = pd.DataFrame()
fullreagentnamelist = []
fullvollist = []
for portion in expoverview:
reagentnamelist = []
reagentvols = []
for reagent in portion:
# generate the list of possible volumes for each reagent
# and the associated mmol calculated values (for parsing later)
# Take the maximum volume limit and generate a list of all possible volumes from 0 to the max
reagentnamelist.append('Reagent%s (ul)' % reagent)
reagentvols.append(list(range(0, vollimits[portionnum][1]+1, config.volspacing)))
fullreagentnamelist.append('Reagent%s (ul)' % reagent)
# generate permutation of all of the volumes
testdf = pd.DataFrame(list(itertools.product(*reagentvols)))
        testdf = testdf.astype(int)  # astype returns a copy, so reassign to keep the grid as integers
# organize dataframe with the sums of the generated numbers
sumdf = testdf.sum(axis=1)
sumname = 'portion%s_volsum' % portionnum
reagentnamelist.append(sumname)
rdf = pd.concat([testdf, sumdf], axis=1, ignore_index=True)
rdf.columns = reagentnamelist
# Select only those which meet the volume critera specified by the portion of the experiment
finalrdf = rdf.loc[(rdf[sumname] >= int(vollimits[portionnum][0])) & (rdf[sumname] <= int(vollimits[portionnum][1]))]
finalrdf = finalrdf.drop(labels=sumname, axis=1)
fullvollist.append(finalrdf.values.tolist())
portionnum += 1
# permute all combinations of the portions that meet the requirements set by the user
fullpermlist = list(itertools.product(*fullvollist))
# combine the list of list for each rxn into a single list for import into pandas
finalfulllist = [list(itertools.chain.from_iterable(multivol)) for multivol in fullpermlist]
prdf = pd.DataFrame(finalfulllist)
prdf = prdf.drop_duplicates()
prdf.columns = fullreagentnamelist
    prdf = prdf.astype(float)  # astype returns a copy, so reassign to take effect
finalmmoldf = pd.DataFrame()
for reagentname in fullreagentnamelist:
if "Reagent" in reagentname:
reagentnum = reagentname.split('t')[1].split(' ')[0]
mmoldf = calcs.mmolextension(prdf[reagentname], rdict, experiment, reagentnum)
finalmmoldf = | pd.concat([finalmmoldf, mmoldf], axis=1) | pandas.concat |
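# --- Minimal sketch of the volume-grid idea used above (illustrative values only). ---
# Enumerate candidate reagent volumes on a fixed spacing, then keep only the combinations whose
# total falls inside the allowed window, mirroring the itertools.product / sum-filter pattern
# in default_statedataframe.
def _sketch_volume_grid(n_reagents=2, max_vol=500, spacing=100, limits=(400, 500)):
    import itertools
    import pandas as pd

    per_reagent = [range(0, max_vol + 1, spacing)] * n_reagents
    grid = pd.DataFrame(list(itertools.product(*per_reagent)),
                        columns=["Reagent%s (ul)" % i for i in range(n_reagents)])
    total = grid.sum(axis=1)
    return grid[(total >= limits[0]) & (total <= limits[1])].reset_index(drop=True)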
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import re
import os
def get_plot_data(path, span=100):
df = pd.DataFrame()
with open(path + 'test.txt') as file:
data = | pd.read_csv(file, index_col=None) | pandas.read_csv |
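# --- Continuation sketch only (the rest of get_plot_data is not reproduced here). ---
# A plausible use of the `span` argument is rolling-mean smoothing of the logged values before
# plotting; the column name 'r' (episode reward) is an assumption, not the real log schema.
#
#   data['r_smooth'] = data['r'].rolling(span, min_periods=1).mean()
#   df = pd.concat([df, data], ignore_index=True)
#   return df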
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import pandas as pd
import matplotlib.dates as mdates
from matplotlib import pyplot
import matplotlib.ticker as ticker
import datetime
import calendar
import os
import time
import numpy as np
def get_ndvi_profiles_from_csv(csv_file):
ndvi_profile = pd.read_csv(csv_file)
return ndvi_profile
def get_current_list_of_months(first_year_month, number_of_year_months):
textstrs_tuples = [
("201701", "2017\nJAN"),
("201702", "2017\nFEB"),
("201703", "2017\nMAR"),
("201704", "2017\nAPR"),
("201705", "2017\nMAY"),
("201706", "2017\nJUN"),
("201707", "2017\nJUL"),
("201708", "2017\nAUG"),
("201709", "2017\nSEP"),
("201710", "2017\nOCT"),
("201711", "2017\nNOV"),
("201712", "2017\nDEC"),
("201801", "2018\nJAN"),
("201802", "2018\nFEB"),
("201803", "2018\nMAR"),
("201804", "2018\nAPR"),
("201805", "2018\nMAY"),
("201806", "2018\nJUN"),
("201807", "2018\nJUL"),
("201808", "2018\nAUG"),
("201809", "2018\nSEP"),
("201810", "2018\nOCT"),
("201811", "2018\nNOV"),
("201812", "2018\nDEC"),
("201901", "2019\nJAN"),
("201902", "2019\nFEB"),
("201903", "2019\nMAR"),
("201904", "2019\nAPR"),
("201905", "2019\nMAY"),
("201906", "2019\nJUN"),
("201907", "2019\nJUL"),
("201908", "2019\nAUG"),
("201909", "2019\nSEP"),
("201910", "2019\nOCT"),
("201911", "2019\nNOV"),
("201912", "2019\nDEC"),
("202001", "2020\nJAN"),
("202002", "2020\nFEB"),
("202003", "2020\nMAR"),
("202004", "2020\nAPR"),
("202005", "2020\nMAY"),
("202006", "2020\nJUN"),
("202007", "2020\nJUL"),
("202008", "2020\nAUG"),
("202009", "2020\nSEP"),
("202010", "2020\nOCT"),
("202011", "2020\nNOV"),
("202012", "2020\nDEC"),
("202101", "2021\nJAN"),
("202102", "2021\nFEB"),
("202103", "2021\nMAR"),
("202104", "2021\nAPR"),
("202105", "2021\nMAY"),
("202106", "2021\nJUN"),
("202107", "2021\nJUL"),
("202108", "2021\nAUG"),
("202109", "2021\nSEP"),
("202110", "2021\nOCT"),
("202111", "2021\nNOV"),
("202112", "2021\nDEC"),
]
    # find the index of the first occurrence of first_year_month in textstrs_tuples
    # and return the second elements of the following number_of_year_months tuples
i = 0
first_year_month_index = i
for textstrs_tuple in textstrs_tuples:
if first_year_month == textstrs_tuple[0]:
first_year_month_index = i
i+=1
current_textstrs = []
for i in range(first_year_month_index, first_year_month_index + number_of_year_months):
current_textstrs.append(textstrs_tuples[i][1])
return current_textstrs
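# --- Alternative sketch (not used above): build the same "YYYY\nMON" labels programmatically ---
# instead of the hard-coded lookup table, assuming year-months are "YYYYMM" strings as in
# get_current_list_of_months().
def _sketch_month_labels(first_year_month, number_of_year_months):
    import pandas as pd

    start = pd.Period(year=int(first_year_month[:4]), month=int(first_year_month[4:6]), freq="M")
    months = pd.period_range(start=start, periods=number_of_year_months, freq="M")
    return [m.to_timestamp().strftime("%Y\n%b").upper() for m in months]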
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
def display_ndvi_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
ndvi_profile = ndvi_profile[ndvi_profile['S2 NDVI']!='None']
ndvi_profile['S2 NDVI'] = ndvi_profile['S2 NDVI'].apply(pd.to_numeric)
ndvi_profile['ndvi_std'] = ndvi_profile['ndvi_std'].apply(pd.to_numeric)
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
# the dataframe, insted of dtype float64
# if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
# not ndvi_profile['ndvi_std'].dtypes == "float64":
# return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
# start_x = 0.045
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the widht of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
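# --- Usage sketch (paths and parcel metadata below are illustrative assumptions). ---
# Assumes the per-parcel NDVI CSV produced earlier exists under <out_tif_folder_base>/ndvi/.
#
#   profile = display_ndvi_profiles(parcel_id=12345, crop="maize",
#                                   plot_title="NDVI time series",
#                                   out_tif_folder_base="/data/cbm/output",
#                                   logfile="/data/cbm/graph.log",
#                                   add_error_bars=True)
#   # the figure is written to /data/cbm/output/ndvi_graphs/parcel_id_12345_NDVI.jpg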
def display_ndwi_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDWI profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndwi_folder = out_tif_folder_base + "/ndwi"
ndwi_csv_file = ndwi_folder + "/" + chip_folder + "_ndwi.csv"
output_graph_folder = out_tif_folder_base + "/ndwi_graphs"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndwi_profile = pd.read_csv(ndwi_csv_file)
ndwi_profile['acq_date'] = pd.to_datetime(ndwi_profile.acq_date)
ndwi_profile = ndwi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndwi_mean' to more meaningful name
ndwi_profile = ndwi_profile.rename(columns={'ndwi_mean': 'S2 NDWI'})
ndwi_profile = ndwi_profile.rename(columns={'acq_date': 'date'})
# check if there are real ndwi values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64
if not ndwi_profile['S2 NDWI'].dtypes == "float64" or \
not ndwi_profile['ndwi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndwi_profile.empty:
if add_error_bars:
ndwi_profile.plot(kind='line', marker='+', x='date',y='S2 NDWI', yerr='ndwi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndwi_profile.plot(kind='line', marker='+', x='date',y='S2 NDWI', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDWI')
parcelNumber = ndwi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([-1,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndwi_profile['date']).date().month
min_year = min(ndwi_profile['date']).date().year
max_month = max(ndwi_profile['date']).date().month
max_year = max(ndwi_profile['date']).date().year
number_of_months = diff_month(max(ndwi_profile['date']).date(), min(ndwi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
# start_x = 0.045
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the widht of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_ndwi.jpg')
pyplot.close(fig)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\traph_utils.display_ndwi_profiles:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndwi_profile
def display_ndvi_profiles_with_mean_profile_of_the_crop(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
    # NOTE: hard-coded local path; consider passing it in as a parameter, as done in
    # display_ndvi_profiles_with_mean_profile_of_the_crop_with_std() below.
    mean_profile_folder = "c:/Users/Csaba/ownCloud/GTCAP/cbm_qa/be_fl/notebooks/output_csv_selected_v02"
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
mean_ndvi_csv_file = mean_profile_folder + "/" + crop #+ ".csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs_with_mean"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
mean_ndvi_csv_file_exists = False
if os.path.isfile(mean_ndvi_csv_file):
mean_ndvi_csv_file_exists = True
mean_ndvi_profile = pd.read_csv(mean_ndvi_csv_file)
mean_ndvi_profile['acq_date'] = pd.to_datetime(mean_ndvi_profile.acq_date)
mean_ndvi_profile = mean_ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI mean'})
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'acq_date': 'date'})
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64
if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
not ndvi_profile['ndvi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = 'blue', ax=ax0)
if mean_ndvi_csv_file_exists:
mean_ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI mean', color = 'red', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the widht of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles_with_mean_profile_of_the_crop:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
def display_ndvi_profiles_with_mean_profile_of_the_crop_with_std(parcel_id, crop, plot_title, out_tif_folder_base,
logfile, mean_profile_folder,
add_error_bars = False,
mean_color = 'green', current_color = 'magenta'):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
mean_ndvi_csv_file = mean_profile_folder + "/" + crop + ".csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs_with_mean"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
mean_ndvi_csv_file_exists = False
if os.path.isfile(mean_ndvi_csv_file):
mean_ndvi_csv_file_exists = True
mean_ndvi_profile = pd.read_csv(mean_ndvi_csv_file)
mean_ndvi_profile['acq_date'] = pd.to_datetime(mean_ndvi_profile.acq_date)
mean_ndvi_profile = mean_ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI mean'})
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'acq_date': 'date'})
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64
if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
not ndvi_profile['ndvi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = current_color, ax=ax0,
capsize=4, ecolor='magenta', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = current_color, ax=ax0)
if mean_ndvi_csv_file_exists:
mean_ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI mean', color = mean_color, ax=ax0)
pyplot.fill_between(mean_ndvi_profile['date'],
mean_ndvi_profile['S2 NDVI mean']-mean_ndvi_profile['ndvi_stdev'],
mean_ndvi_profile['S2 NDVI mean']+mean_ndvi_profile['ndvi_stdev'],
alpha=0.2, color = mean_color)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the widht of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles_with_mean_profile_of_the_crop_with_std:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
def display_s1_bs_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars, polarisation, orbit_orientation):
"""
this function plots the backscatter profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
s1_bs_folder = out_tif_folder_base + "/s1_bs"
s1_bs_csv_file = s1_bs_folder + "/" + chip_folder + "_s1bs_" + polarisation + "_" + orbit_orientation + ".csv"
output_graph_folder = out_tif_folder_base + "/s1_bs_graphs_" + polarisation + "_" + orbit_orientation
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
s1_bs_profile = pd.read_csv(s1_bs_csv_file)
s1_bs_profile['acq_date'] = | pd.to_datetime(s1_bs_profile.acq_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
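# A small illustration of zip_frames (the tiny frames below are made up for the
# example; they are not fixtures used by the tests that follow):
#
#   a = DataFrame({'A': [1, 2], 'B': [3, 4]})
#   b = a * 10
#   zip_frames([a, b], axis=1)
#   # -> columns come out interleaved per label of the first frame: A, A, B, B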
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# ------------------------------------------------------------------------------
# Copyright IBM Corp. 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import sys
import urllib
import time
import xml.etree.ElementTree as ET
import types
import requests
from requests.exceptions import HTTPError
import pandas as pd
import numpy as np
from ibm_botocore.client import Config
import ibm_boto3
from datetime import datetime
import pyarrow
import os
import tempfile
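# A minimal usage sketch (illustrative only -- the credential values, the client
# variable name and the cos:// URLs below are placeholders, not defaults shipped
# with this module):
#
#   sqlClient = SQLQuery(api_key, instance_crn, target_cos_url='cos://us-geo/mybucket/prefix/')
#   sqlClient.logon()
#   jobId = sqlClient.submit_sql("SELECT * FROM cos://us-geo/mybucket/data.csv STORED AS CSV")
#   if sqlClient.wait_for_job(jobId) == 'completed':
#       df = sqlClient.get_result(jobId)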
class SQLQuery():
def __init__(self, api_key, instance_crn, target_cos_url=None, client_info=''):
self.endpoint_alias_mapping = {
"us-geo": "s3-api.us-geo.objectstorage.softlayer.net",
"us": "s3-api.us-geo.objectstorage.softlayer.net",
"dal-us-geo": "s3-api.dal-us-geo.objectstorage.softlayer.net",
"wdc-us-geo": "s3-api.wdc-us-geo.objectstorage.softlayer.net",
"sjc-us-geo": "s3-api.sjc-us-geo.objectstorage.softlayer.net",
"eu-geo": "s3.eu-geo.objectstorage.softlayer.net",
"eu": "s3.eu-geo.objectstorage.softlayer.net",
"ams-eu-geo": "s3.ams-eu-geo.objectstorage.softlayer.net",
"fra-eu-geo": "s3.fra-eu-geo.objectstorage.softlayer.net",
"mil-eu-geo": "s3.mil-eu-geo.objectstorage.softlayer.net",
"us-south": "s3.us-south.objectstorage.softlayer.net",
"us-east": "s3.us-east.objectstorage.softlayer.net",
"jp-tok": "s3.jp-tok.objectstorage.softlayer.net",
"ap-geo": "s3.ap-geo.objectstorage.softlayer.net",
"ap": "s3.ap-geo.objectstorage.softlayer.net",
"tok-ap-geo": "s3.tok-ap-geo.objectstorage.softlayer.net",
"seo-ap-geo": "s3.seo-ap-geo.objectstorage.softlayer.net",
"hkg-ap-geo": "s3.hkg-ap-geo.objectstorage.softlayer.net",
"eu-de": "s3.eu-de.objectstorage.softlayer.net",
"eu-gb": "s3.eu-gb.objectstorage.softlayer.net",
"ams03": "s3.ams03.objectstorage.softlayer.net",
"che01": "s3.che01.objectstorage.softlayer.net",
"mel01": "s3.mel01.objectstorage.softlayer.net",
"tor01": "s3.tor01.objectstorage.softlayer.net",
"mon01": "s3.mon01.objectstorage.softlayer.net",
"osl01": "s3.osl01.objectstorage.softlayer.net",
"sao01": "s3.sao01.objectstorage.softlayer.net",
"seo01": "s3.seo01.objectstorage.softlayer.net"
}
self.api_key = api_key
self.instance_crn = instance_crn
self.target_cos = target_cos_url
self.export_cos_url = target_cos_url
if client_info == '':
self.user_agent = 'IBM Cloud SQL Query Python SDK'
else:
self.user_agent = client_info
self.request_headers = {'Content-Type': 'application/json'}
self.request_headers.update({'Accept': 'application/json'})
self.request_headers.update({'User-Agent': self.user_agent})
self.request_headers_xml_content = {'Content-Type': 'application/x-www-form-urlencoded'}
self.request_headers_xml_content.update({'Accept': 'application/json'})
self.request_headers_xml_content.update({'User-Agent': self.user_agent})
self.logged_on = False
def logon(self):
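        # Exchange the instance API key for an IAM bearer token and store it in
        # the request headers reused by all subsequent REST calls.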
if sys.version_info >= (3, 0):
data = urllib.parse.urlencode({'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': self.api_key})
else:
data = urllib.urlencode({'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': self.api_key})
response = requests.post(
'https://iam.bluemix.net/identity/token',
headers=self.request_headers_xml_content,
data=data)
if response.status_code == 200:
# print("Authentication successful")
bearer_response = response.json()
self.bearer_token = 'Bearer ' + bearer_response['access_token']
self.request_headers = {'Content-Type': 'application/json'}
self.request_headers.update({'Accept':'application/json'})
self.request_headers.update({'User-Agent': self.user_agent})
self.request_headers.update({'authorization': self.bearer_token})
self.logged_on = True
else:
print("Authentication failed with http code {}".format(response.status_code))
def submit_sql(self, sql_text, pagesize=None):
if not self.logged_on:
print("You are not logged on to IBM Cloud")
return
sqlData = {'statement': sql_text}
# If a valid pagesize is specified we need to append the proper PARTITIONED EVERY <num> ROWS clause
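        # e.g. "SELECT ... FROM ... INTO cos://us-geo/mybucket/ PARTITIONED EVERY 1000 ROWS"
        # (illustrative values; the INTO target comes from target_cos_url when it is set)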
if pagesize or pagesize==0:
if type(pagesize) == int and pagesize>0:
if self.target_cos:
sqlData["statement"] += " INTO {}".format(self.target_cos)
elif " INTO " not in sql_text.upper():
raise SyntaxError("Neither resultset_target parameter nor \"INTO\" clause specified.")
elif " PARTITIONED " in sql_text.upper():
raise SyntaxError("Must not use PARTITIONED clause when specifying pagesize parameter.")
sqlData["statement"] += " PARTITIONED EVERY {} ROWS".format(pagesize)
else:
raise ValueError('pagesize parameter ({}) is not valid.'.format(pagesize))
elif self.target_cos:
sqlData.update({'resultset_target': self.target_cos})
try:
response = requests.post(
"https://api.sql-query.cloud.ibm.com/v2/sql_jobs?instance_crn={}".format(self.instance_crn),
headers=self.request_headers,
json=sqlData)
resp = response.json()
return resp['job_id']
except KeyError as e:
raise SyntaxError("SQL submission failed: {}".format(response.json()['errors'][0]['message']))
except HTTPError as e:
raise SyntaxError("SQL submission failed: {}".format(response.json()['errors'][0]['message']))
def wait_for_job(self, jobId):
if not self.logged_on:
print("You are not logged on to IBM Cloud")
return "Not logged on"
while True:
response = requests.get(
"https://api.sql-query.cloud.ibm.com/v2/sql_jobs/{}?instance_crn={}".format(jobId, self.instance_crn),
headers=self.request_headers,
)
if response.status_code == 200 or response.status_code == 201:
status_response = response.json()
jobStatus = status_response['status']
if jobStatus == 'completed':
# print("Job {} has completed successfully".format(jobId))
resultset_location = status_response['resultset_location']
break
if jobStatus == 'failed':
print("Job {} has failed".format(jobId))
break
else:
print("Job status check failed with http code {}".format(response.status_code))
break
time.sleep(2)
return jobStatus
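    # NOTE: the stub below makes the botocore StreamingBody returned by get_object()
    # look iterable to pandas readers; get_result() attaches it to the body object
    # via types.MethodType. It is not meant to be called on SQLQuery itself.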
def __iter__(self):
return 0
def get_result(self, jobId, pagenumber=None):
if not self.logged_on:
print("You are not logged on to IBM Cloud")
return
job_details = self.get_job(jobId)
job_status = job_details.get('status')
if job_status == 'running':
            raise ValueError('SQL job with jobId {} still running. Come back later.'.format(jobId))
elif job_status != 'completed':
            raise ValueError('SQL job with jobId {} did not finish successfully. No result available.'.format(jobId))
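        # resultset_location has the form <scheme>://<endpoint>/<bucket>/<prefix>;
        # split it apart so the result objects can be listed and downloaded below.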
result_cos_url = job_details['resultset_location']
provided_cos_endpoint = result_cos_url.split("/")[2]
result_cos_endpoint = self.endpoint_alias_mapping.get(provided_cos_endpoint, provided_cos_endpoint)
result_cos_bucket = result_cos_url.split("/")[3]
result_cos_prefix = result_cos_url[result_cos_url.replace('/', 'X', 3).find('/')+1:]
result_location = "https://{}/{}?prefix={}".format(result_cos_endpoint, result_cos_bucket, result_cos_prefix)
result_format = job_details['resultset_format']
if result_format not in ["csv", "parquet", "json"]:
raise ValueError("Result object format {} currently not supported by get_result().".format(result_format))
response = requests.get(
result_location,
headers=self.request_headers,
)
if response.status_code == 200 or response.status_code == 201:
ns = {'s3': 'http://s3.amazonaws.com/doc/2006-03-01/'}
responseBodyXMLroot = ET.fromstring(response.text)
bucket_objects = []
# Find result objects with data
for contents in responseBodyXMLroot.findall('s3:Contents', ns):
key = contents.find('s3:Key', ns)
if int(contents.find('s3:Size', ns).text) > 0:
bucket_objects.append(key.text)
#print("Job result for {} stored at: {}".format(jobId, result_object))
else:
raise ValueError("Result object listing for job {} at {} failed with http code {}".format(jobId, result_location,
response.status_code))
cos_client = ibm_boto3.client(service_name='s3',
ibm_api_key_id=self.api_key,
ibm_auth_endpoint="https://iam.ng.bluemix.net/oidc/token",
config=Config(signature_version='oauth'),
endpoint_url='https://' + result_cos_endpoint)
# When pagenumber is specified we only retrieve that page. Otherwise we concatenate all pages to one DF:
if pagenumber or pagenumber==0:
if " PARTITIONED EVERY " not in job_details['statement'].upper():
raise ValueError("pagenumber ({}) specified, but the job was not submitted with pagination option.".format(pagenumber))
if type(pagenumber) == int and 0 < pagenumber <= len(bucket_objects):
if result_format == "csv":
body = cos_client.get_object(Bucket=result_cos_bucket, Key=bucket_objects[pagenumber-1])['Body']
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType(self.__iter__, body)
result_df = pd.read_csv(body)
elif result_format == "parquet":
tmpfile = tempfile.NamedTemporaryFile()
tempfilename = tmpfile.name
tmpfile.close()
cos_client.download_file(Bucket=result_cos_bucket, Key=bucket_objects[pagenumber-1], Filename=tempfilename)
result_df = | pd.read_parquet(tempfilename) | pandas.read_parquet |
#!/usr/bin/env python
# Author: <NAME> (jsh) [<EMAIL>]
import argparse
import logging
import pathlib
import sys
import pandas as pd
import gamma_lib as gl
import model_lib as ml
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
_PACKAGEDIR = pathlib.Path(__file__).parent
TESTDIR = _PACKAGEDIR / 'testdata'
def parse_args():
"""Read in the arguments for the sgrna library construction code."""
logging.info('Parsing command line.')
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--genbank', type=str,
help='file: base genome for organism in genbank format',
default=str(TESTDIR / 'bsu.NC_000964.merged.gb'))
parser.add_argument(
'--targetfile', type=str,
help='file: ???.targets.all.tsv produced by traeki/sgrna_design',
default=str(TESTDIR / 'test.gb.targets.all.tsv'))
parser.add_argument(
'--controls', type=str,
help='file: list of just control guides',
default=str(TESTDIR / 'test.controls'))
parser.add_argument(
'--locifile', type=str,
help='file: list of applicable locus_tags',
default=str(TESTDIR / 'test.loci'))
parser.add_argument(
'--configdir', type=str, help='file: name of directory containing config.tsv',
default=str(TESTDIR))
parser.add_argument(
'--gammafile', type=str,
help='file: file to which to write annotated gamma measurements',
default=None)
parser.add_argument(
'--growth', type=int,
help='int: number of generations grown (in other words, g*t)',
default=10)
args = parser.parse_args()
# TODO(jsh): Add check that either all or none of these are specified
if args.gammafile is None:
args.gammafile = pathlib.Path(args.configdir) / 'gammas.tsv'
return args
def flatgamma(stacked_replicates, controls):
data = stacked_replicates
data['y_pred'] = ml.predict_mismatch_scores(data)
data.y_pred = data.y_pred.where(~data.variant.isin(controls), 0.0)
anno = data.drop(['rep', 'gamma', 'start_mask'], axis='columns')
anno = anno.drop_duplicates()
gamma = data[['variant', 'rep', 'gamma']].set_index(['variant', 'rep'])
gamma = gamma.unstack(level='rep').mean(axis='columns')
data = | pd.DataFrame(anno) | pandas.DataFrame |
import pandas as pd
def to_pandas_Xy(dataset):
"""
Extracts `data` and `target` from a scikit-learn dataset and returns them as a pandas DataFrame
and Series.
"""
X = pd.DataFrame(dataset.data, columns=dataset.feature_names)
y = | pd.Series(dataset.target, name="target") | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# Loading the json with the grade data:
# In[1]:
import json
with open('grades.json', 'rb') as f:
data = json.load(f)
# Extracting the relevant information out of the json for one course:
# In[2]:
build_dict = lambda course: {
'id': course['content']['achievementDto']['cpCourseLibDto']['id'],
'course_name_de': course['content']['achievementDto']['cpCourseLibDto']['courseTitle']['value'],
'course_name_en': course['content']['achievementDto']['cpCourseLibDto']['courseTitle']['translations']['translation'][1]['value'],
'course_number': course['content']['achievementDto']['cpCourseLibDto']['courseNumber']['courseNumber'],
'ects': course['content']['achievementDto']['cpCourseLibDto']['ectsCredits'],
'semester_code': course['content']['achievementDto']['semesterLibDto']['key'],
'semester_name_de': course['content']['achievementDto']['semesterLibDto']['semesterDesignation']
['value'],
'semester_name_en': course['content']['achievementDto']['semesterLibDto']['semesterDesignation']
['translations']['translation'][1]['value'],
'semester_start_date': course['content']['achievementDto']['semesterLibDto']['startOfAcademicSemester']['value'],
'semester_end_date': course['content']['achievementDto']['semesterLibDto']['endOfAcademicSemester']['value'],
'grade_date': course['content']['achievementDto']['achievementDate']['value'],
'grade_name_de': course['content']['achievementDto']['gradeDto']['name']['value'],
'grade_name_en': course['content']['achievementDto']['gradeDto']['name']['translations']['translation'][1]['value'],
'grade': course['content']['achievementDto']['gradeDto']['value'],
}
# Creating a list of dicts, each dict containing the info for one course.
# In[3]:
dicts = [build_dict(course) for course in data['resource']]
# For each course, parse the grades out of its html file, and add to its dict:
# In[4]:
from bs4 import BeautifulSoup
possible_grades = ['1.0', '1.3', '1.4', '1.7', '2.0', '2.3', '2.4', '2.7', '3.0', '3.3', '3.4', '3.7', '4.0', '4.3', '4.7', '5.0']
standard_possible_grades = ['1.0', '1.3', '1.7', '2.0', '2.3', '2.7', '3.0', '3.3', '3.7', '4.0', '4.3', '4.7', '5.0']
all_possible_grades = possible_grades + ['did_not_show_up']
for d in reversed(dicts): # iterating in reverse order so we can remove elements while iterating.
# University regulation: written exams from first semester are weighted half the points.
d['grade_weight'] = d['ects']
if ('Discrete Structures' in d['course_name_en']
or 'Introduction to Informatics' in d['course_name_en']
or 'Computer Organization' in d['course_name_en']
):
        d['grade_weight'] >>= 1  # divide by 2 but keep it an int (the weights here are all known to be divisible by 2)
# read the html file to a string
try:
with open('stats/{}.html'.format(d['id']), 'rb') as f:
html_doc = f.read()
soup = BeautifulSoup(html_doc, 'html.parser')
# the data can be found in the titles of div objects with the class "kandcountbox"
divs = soup.find_all('div', 'kandcountbox')
titles = [div['title'] for div in divs]
# A list of tuples (<grade>, <number of students>) e.g. ('1.0', 3)
nums = [(ts[-1].split()[0], int(ts[-2].split()[0])) for t in titles if (ts := t.split(','))]
d.update((grade, 0) for grade in all_possible_grades) # All courses get all grades, also if 0 students that grade.
for i, t in enumerate(titles):
if 'Nicht erschienen' in t: # Students who did not show up
d['did_not_show_up'] = nums[i][1]
elif '5.0' in t: # add up fails and cheats together.
d['5.0'] += nums[i][1]
# We already counted all the 5.0s and added them, so don't add again.
d.update((tup for tup in nums if tup[0] != '5.0'))
except FileNotFoundError:
print("No statistics file for ", d['course_name_en'])
dicts.remove(d)
# Create a pandas dataframe with the data:
# In[5]:
import pandas as pd
df = pd.DataFrame(dicts)
df['did_show_up'] = df[possible_grades].sum(axis=1)
df['numeric_grade'] = | pd.to_numeric(df['grade']) | pandas.to_numeric |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from pytz import timezone, utc
from scipy import stats
from time import gmtime, strftime, mktime
def data_sampler_renamer_parser(path='weather-data.txt'):
# Take columns that are useful, rename them, parse the timestamp string
data = pd.read_csv(path, delimiter=r"\s+")
data_useful = data[
['YR--MODAHRMN', 'DIR', 'SPD', 'CLG', 'SKC', 'VSB', 'MW', 'AW', 'AW.1', 'TEMP', 'DEWP', 'SLP', 'ALT', 'MAX',
'MIN', 'PCP01', 'PCP06', 'PCP24', 'PCPXX', 'SD']]
data_useful.rename(
columns={'YR--MODAHRMN': 'timestamp', 'DIR': 'wind_direction', 'SPD': 'wind_speed', 'CLG': 'cloud_ceiling',
'SKC': 'sky_cover', 'VSB': 'visibility_miles', 'MW': 'manual_weather', 'AW': 'auto_weather',
'AW.1': 'auto_weather1', 'TEMP': 'temprature', 'DEWP': 'dew_point', 'SLP': 'sea_level',
'ALT': 'altimeter', 'MAX': 'max_temp', 'MIN': 'min_temp', 'PCP01': '1hour_precip',
'PCP06': '6hour_precip', 'PCP24': '24hour_precip', 'PCPXX': '3hour_precip', 'SD': 'snow_depth'},
inplace=True)
data_useful.timestamp = data_useful.timestamp.astype(str)
data_useful['year'] = data_useful.timestamp.str[0:4]
data_useful['month'] = data_useful.timestamp.str[4:6]
data_useful['day'] = data_useful.timestamp.str[6:8]
data_useful['hour'] = data_useful.timestamp.str[8:10]
data_useful['minutes'] = data_useful.timestamp.str[10:12]
data_useful.minutes = data_useful.minutes.astype(int)
data_useful.year = data_useful.year.astype(int)
data_useful.month = data_useful.month.astype(int)
data_useful.day = data_useful.day.astype(int)
data_useful.hour = data_useful.hour.astype(int)
return data_useful
def days_fixer(dataframe):
    # Unify times so there is an observation at every full hour, and fix all the dates/times accordingly
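    # e.g. 10:12 is floored to 10:00 and 10:47 is rolled forward to 11:00; the
    # hour/day/month/year rollovers this can cause are corrected further down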
df = dataframe
df.loc[(df['minutes'].values < 31) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'hour'] = df[(df.minutes != 0) & (
df.minutes > 30)].hour + 1
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['hour'].values == 24), 'day'] = df[df.hour == 24].day + 1
df.loc[(df['hour'].values == 24), 'hour'] = 0
df.loc[(df['day'].values == 32), 'month'] = df[df.day == 32].month + 1
df.loc[(df['day'].values == 32), 'day'] = 1
df.loc[(df['day'].values == 29) & (df['month'].values == 2), ['month', 'day']] = 3, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 4), ['month', 'day']] = 5, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 6), ['month', 'day']] = 7, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 9), ['month', 'day']] = 10, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 11), ['month', 'day']] = 12, 1
df.loc[(df['day'].values == 1) & (df['month'].values == 13), ['month', 'day', 'year']] = 1, 1, 2016
df.hour = df.hour.map("{:02}".format)
df['datetime'] = pd.to_datetime(
df.year.astype(str) + ' ' + df.month.astype(str) + ' ' + df.day.astype(str) + ' ' + df.hour.astype(str),
format='%Y %m %d %H')
return df
def grouper(dataframe):
    # Take a subset of columns and group them by timestamp, then take the mean/mode of the values depending on datatype
sub_df = dataframe[
['wind_direction', 'wind_speed', 'cloud_ceiling', 'sky_cover', 'visibility_miles', 'temprature', 'dew_point',
'sea_level', 'altimeter', '1hour_precip', 'datetime']]
sub_df = sub_df.convert_objects(convert_numeric=True)
f = {'wind_direction': ['mean'], 'wind_speed': ['mean'], 'cloud_ceiling': ['mean'], 'visibility_miles': ['mean'],
'temprature': ['mean'], 'dew_point': ['mean'], 'sea_level': ['mean'], 'altimeter': ['mean'],
'1hour_precip': ['mean']}
grouped = sub_df.groupby('datetime').agg(f)
grouped.columns = grouped.columns.droplevel(-1)
grouped2 = sub_df[['sky_cover', 'datetime']]
grouped2.loc[(grouped2['sky_cover'].values == '***'), 'sky_cover'] = np.nan
grouped3 = grouped2.groupby(['datetime']).agg(lambda x: stats.mode(x)[0][0])
grouped3.loc[(grouped3['sky_cover'].values == 0), 'sky_cover'] = np.nan
data_full = grouped.merge(grouped3, how='left', on=None, left_on=None, right_on=None, left_index=True,
right_index=True)
data_full.reset_index(inplace=True)
data_full['1hour_precip'].fillna(0, inplace=True)
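    # Bucket hourly precipitation into categories: 'no' (== 0), 'low' (<= 0.049) and 'high' (> 0.049)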
data_full.loc[data_full[data_full['1hour_precip'] > 0.049].index, 'precip'] = 'high'
data_full.loc[data_full[data_full['1hour_precip'] <= 0.049].index, 'precip'] = 'low'
data_full.loc[data_full[data_full['1hour_precip'] == 0].index, 'precip'] = 'no'
data_full['precip_shift'] = data_full.precip.shift(-1)
data_full = pd.get_dummies(data_full, prefix=None, columns=['precip_shift'], sparse=False, drop_first=False)
data_full = data_full.fillna(method='bfill', axis=0, inplace=False, limit=None, downcast=None)
return data_full
def convert_gmt_to_easttime(string_date):
"""
:param string_date: GMT date
:return: Date converted to eastern time
"""
# Converts the string to datetime object
string_date = str(string_date)
try:
gtm = timezone('GMT')
eastern_tz = timezone('US/Eastern')
date_obj = datetime.strptime(string_date, '%Y-%m-%d %H:%M:%S')
date_obj = date_obj.replace(tzinfo=gtm)
date_eastern = date_obj.astimezone(eastern_tz)
date_str = date_eastern.strftime('%Y-%m-%d %H:%M:%S')
return date_str
except IndexError:
return ''
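# Example: convert_gmt_to_easttime('2015-07-01 12:00:00') returns '2015-07-01 08:00:00'
# (US/Eastern is UTC-4 during daylight saving time and UTC-5 otherwise)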
def add_easterntime_column(dataframe):
"""
:param dataframe: Weather dataframe
    :return: dataframe with eastern time column
"""
dataframe['est_datetime'] = dataframe['datetime'].apply(convert_gmt_to_easttime)
dataframe['est_datetime'] = pd.to_datetime(dataframe['est_datetime'])
return dataframe
# Interpolation helpers
# Five functions:
#   toTimestamp(d)  - convert a date to a numeric timestamp
#   toStringDate(d) - convert a numeric timestamp back to a date
#   repeatLast()    - fill categorical values forward with the last known value
#   toMinute()      - expand the hourly timeline to minute resolution
#   inter()         - interpolate the weather dataset to minute resolution
def toTimestamp(d):
return mktime(d.timetuple())
def repeatLast(left,right, values):
right= pd.concat((pd.DataFrame(right),pd.DataFrame(values)),axis=1)
right.columns=['first','second']
left.columns=['first']
inter = left.merge(right, how='left', on='first')
return inter.fillna(method='ffill')
def toMinute(datatime):
date_aux = datatime[0]
minute_dates = []
minute_dates_str=[]
while (date_aux <= datatime[len(datatime)-1]):
minute_dates.append(toTimestamp(date_aux))
minute_dates_str.append(date_aux)
date_aux +=timedelta(minutes=1) # days, seconds, then other fields.
return minute_dates, minute_dates_str
def inter(weather):
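    # Build a minute-resolution timeline, interpolate the numeric weather columns
    # onto it with np.interp, forward-fill the categorical precip columns with
    # repeatLast(), and concatenate everything into a single DataFrame.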
datatime = pd.to_datetime(weather['datetime'])
minute_dates, minute_dates_str=toMinute(weather['datetime'])
datatime = datatime.apply(toTimestamp)
wind = np.interp(minute_dates, datatime, weather['wind_speed'])
dew = np.interp(minute_dates, datatime, weather['dew_point'])
visibility= np.interp(minute_dates, datatime, weather['visibility_miles'])
wind_dir= np.interp(minute_dates, datatime, weather['wind_direction'])
sea_level= np.interp(minute_dates, datatime, weather['sea_level'])
altimeter = np.interp(minute_dates, datatime, weather['altimeter'])
temprature = np.interp(minute_dates, datatime, weather['temprature'])
precip=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip'])
precip_shift_high=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_high'])
precip_shift_low=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_low'])
precip_shift_no=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_no'])
interDf = pd.concat((pd.DataFrame(minute_dates_str), | pd.DataFrame(wind) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
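# For example, with groupers Categorical(['a', 'b']) and [1, 2], the result is
# reindexed onto the full 4-row MultiIndex ('a', 1), ('a', 2), ('b', 1), ('b', 2),
# with any missing combinations filled with `fill_value`.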
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = | Series(1, index=idx) | pandas.Series |
# *- coding: utf-8 -*
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# from model.ESPNet_v2.SegmentationModel import EESPNet_Seg
# from model.CGNet import CGNet
# from model.ContextNet import ContextNet
# from model.DABNet import DABNet
# from model.EDANet import EDANet
# from model.ENet import ENet
# from model.ERFNet import ERFNet
# from model.ESNet import ESNet
# from model.ESPNet import ESPNet
# from model.FastSCNN import FastSCNN
# from model.FPENet import FPENet
# from model.FSSNet import FSSNet
# from model.LEDNet import LEDNet
# from model.LinkNet import LinkNet
# from model.SegNet import SegNet
# from model.SQNet import SQNet
# from model.UNet import UNet
pd.set_option('display.width', 1000)
| pd.set_option('display.max_rows', 500) | pandas.set_option |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
        # the desired behavior is to iterate until everything would be nan on
        # the next iteration, so make sure the last element of the iterator was
        # 'l' in this case, since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing is defined and the initial values
        # should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
        # case and flags provided to str.replace with a compiled regex will
        # have no effect and will raise an error
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
        # check that a single Series' name is preserved
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
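# (With expand=False, exercised above, a single group still yields a Series;
# expand=True wraps the same result in a one-column DataFrame.)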
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
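# The result therefore carries a MultiIndex of (subject index, match number),
# so subjects containing several e-mail addresses contribute several rows.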
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0, "second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
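# Selecting match 0 from the extractall result should reproduce extract(),
# which only reports the first match per subject (same check is repeated
# below for the named and single-group patterns).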
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3 (fullwidth digit three)
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u('fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA),
('one', 'of', 'these', 'things', 'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = | DataFrame([['a', 'b'], ['c', 'd']]) | pandas.DataFrame |
import math
import os
import shutil
from copy import deepcopy
from shutil import copyfile
import numpy as np
import pandas
import tifffile
import yaml
from pathlib import Path
from speedrun import BaseExperiment, locate
from speedrun.yaml_utils import recursive_update
from .cellpose_training.start_training import start_cellpose_training
from .gui_widgets.main_gui import StartingGUI
from .io.images import read_uint8_img, write_image_to_file, write_ome_tiff
from .io.hdf5 import readHDF5, writeHDF5
from .preprocessing.utils import apply_preprocessing_to_image
from .qupath import update_qupath_proj as qupath_utils
from .qupath.save_labels import export_labels_from_qupath
from .io.various import yaml2dict, get_path_components, open_path
from .qupath.update_qupath_proj import add_image_to_project
class BaseAnnotationExperiment(BaseExperiment):
def __init__(self, experiment_directory):
self._main_window = None
assert isinstance(experiment_directory, str)
super(BaseAnnotationExperiment, self).__init__(experiment_directory)
# Simulate sys.argv, so that configuration is loaded from the experiment directory:
self._simulated_sys_argv = ["script.py", experiment_directory]
# Check if this is a new project or if we should load the previous config:
config_path = os.path.join(experiment_directory, "Configurations/main_config.yml")
load_prev_experiment = os.path.exists(config_path)
if load_prev_experiment:
old_config_path = os.path.join(experiment_directory, "Configurations/main_config_BAK.yml")
copyfile(config_path, old_config_path)
self._simulated_sys_argv += ["--inherit", old_config_path]
# Load config and setup:
self.auto_setup(update_git_revision=False)
# Set default values:
if not load_prev_experiment:
self.set("max_nb_extra_channels", 2)
self.set("extra_channels_names", ["Extra ch. 1", "Extra ch. 2"])
self.set("labeling_tool", "QuPath")
self.set_default_training_args()
self.set_default_preprocessing_config()
# Initialize or load dataframes:
self._rois_df = None
self._input_images_df = None
self._init_rois()
self._init_input_images_df()
self.dump_configuration()
def run(self):
self.show_start_page()
def show_start_page(self):
self.main_window.show()
self.dump_configuration()
@property
def main_window(self):
if self._main_window is None:
# self._main_window = widgets.Container(widgets=[StartWindow(self)])
self._main_window = StartingGUI(self)
# self._main_window.max_width = 30
self._main_window.show(run=True)
return self._main_window
# --------------------------------------------
# ROIs:
# --------------------------------------------
def update_rois_image(self, image_id, new_napari_rois):
if isinstance(new_napari_rois, list):
new_napari_rois = np.array(new_napari_rois)
# Get IDs of previous ROIs:
prev_roi_ids = self._get_roi_ids_by_image_id(image_id)
current_max_roi_id = self._napari_rois.shape[0]
prev_napari_rois = self._napari_rois[prev_roi_ids]
# Check if no new napari rois were passed:
if new_napari_rois.size == 0:
# Delete any previous ROIs:
self._delete_training_images(prev_roi_ids)
self._delete_roi_ids(prev_roi_ids)
else:
assert new_napari_rois.ndim == 3
assert new_napari_rois.shape[1] == 4 and new_napari_rois.shape[2] == 2, "ROI array does not have the correct shape"
# Check what is there:
check_rois = np.array([[np.allclose(new_roi, old_roi) for old_roi in prev_napari_rois]
for new_roi in new_napari_rois])
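# check_rois[i, j] is True when new ROI i coincides with previous ROI j:
# rows with no match are ROIs to add, columns with no match are stale ROIs
# that get deleted below.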
# Add new ROIs:
rois_not_already_in_project = ~ np.any(check_rois, axis=1)
self._napari_rois = np.concatenate([self._napari_rois, new_napari_rois[rois_not_already_in_project]])
for i in range(current_max_roi_id, current_max_roi_id + rois_not_already_in_project.sum()):
self._rois_df.loc[i] = [i, image_id]
self._create_training_images([i])
# Remove ROIs that are not present anymore:
old_rois_to_be_deleted = ~ np.any(check_rois, axis=0)
old_rois_to_be_deleted = list(np.array(prev_roi_ids)[old_rois_to_be_deleted])
self._delete_training_images(old_rois_to_be_deleted)
self._delete_roi_ids(old_rois_to_be_deleted)
# Update saved files:
self.dump_rois()
def get_list_rois_per_image(self):
"""
Return a list of tuples, such that:
output_list[index_input_image] = (path_main_image, nb_rois)
"""
out_list = []
for id_image in range(self.nb_input_images):
selected_rows = self._input_images_df.loc[self._input_images_df["image_id"] == id_image]
assert len(selected_rows) == 1
nb_rois = len(self._get_roi_ids_by_image_id(id_image))
out_list.append((selected_rows["main_path"].item(), nb_rois))
return out_list
def get_napari_roi_by_image_id(self, image_id):
rois_ids = self._get_roi_ids_by_image_id(image_id)
# Check if there are ROIs at all:
if len(rois_ids):
return [roi for roi in self._napari_rois[rois_ids]]
else:
return None
def get_image_id_from_roi_id(self, roi_id):
df = self._rois_df
image_id = df.loc[df["roi_id"] == roi_id, "image_id"].tolist()
assert len(image_id) == 1
return image_id[0]
def _get_roi_ids_by_image_id(self, image_id):
df = self._rois_df
rois_ids = df.loc[df["image_id"] == image_id, "roi_id"].tolist()
return rois_ids
def _delete_roi_ids(self, roi_ids):
# TODO: Currently, ROIs are actually not deleted from the hdf5 file,
# but only from the dataframe (to avoid reordering)
# When done, uncomment assert to check consistency csv/hdf5
df = self._rois_df
self._rois_df = df[~df['roi_id'].isin(roi_ids)]
# TODO Delete also files!
def _init_rois(self):
if self._rois_df is None:
rois_csv_path = os.path.join(self.experiment_directory, "ROIs/rois.csv")
rois_hdf5_path = os.path.join(self.experiment_directory, "ROIs/rois.hdf5")
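# rois.csv keeps the (roi_id, image_id) bookkeeping; rois.hdf5 stores the
# matching napari rectangles as an (N, 4, 2) array of corner coordinates.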
if os.path.exists(rois_csv_path):
self._rois_df = pandas.read_csv(rois_csv_path)
assert os.path.exists(rois_hdf5_path), "ROIs hdf5 file not found!"
self._napari_rois = readHDF5(rois_hdf5_path, "data")
rois_shape = self._napari_rois.shape
assert rois_shape[1] == 4 and rois_shape[2] == 2
# assert len(self._rois_df) == rois_shape[0], "ROIs csv and hdf5 files do not match!"
else:
# Create empty a dataframe and array:
self._rois_df = pandas.DataFrame(columns=["roi_id", "image_id"])
self._napari_rois = np.empty((0, 4, 2), dtype="float64")
def dump_rois(self):
# Get paths:
proj_dir = self.experiment_directory
rois_dir_path = os.path.join(proj_dir, "ROIs")
roi_csv_path = os.path.join(rois_dir_path, "rois.csv")
rois_hdf5_path = os.path.join(rois_dir_path, "rois.hdf5")
# Write data to file:
writeHDF5(self._napari_rois, rois_hdf5_path, "data")
self._rois_df.to_csv(roi_csv_path, index=False)
# Dump general configuration:
self.dump_configuration()
# --------------------------------------------
# Input images:
# --------------------------------------------
def set_extra_channels_names(self, channels_names):
# TODO: deprecate
if not isinstance(channels_names, list):
assert isinstance(channels_names, str)
channels_names = [channels_names]
assert len(channels_names) <= self.get("max_nb_extra_channels")
new_names = self.get("extra_channels_names")
for i, ch_name in enumerate(channels_names):
new_names[i] = ch_name
self.set("extra_channels_names", new_names)
def get_input_image_id_from_path(self, main_image_path):
df = self._input_images_df
# If image is in proj dir, then get relative path:
if os.path.isabs(main_image_path):
is_in_exp_dir, main_image_path = self.is_path_in_exp_dir(main_image_path)
image_id = df.loc[df["main_path"] == main_image_path, "image_id"].tolist()
assert len(image_id) == 1
return image_id[0]
def is_path_in_exp_dir(self, path):
if path is not None:
path = path if isinstance(path, Path) else Path(path)
is_in_exp_dir = Path(self.experiment_directory) in path.parents
path = os.path.relpath(path, self.experiment_directory) if is_in_exp_dir else path
return is_in_exp_dir, str(path)
else:
return False, None
def get_image_paths(self, image_id):
"""
Return a dictionary with the paths for each channel. The key of the dictionary is the channel name.
"""
if isinstance(image_id, str):
image_id = self.get_input_image_id_from_path(image_id)
assert image_id < self.nb_input_images, "Image ID not present in project"
image_data = self._input_images_df.loc[self._input_images_df["image_id"] == image_id]
ch_names = ["Main channel", "DAPI"] + self.get("extra_channels_names")
out_dict = {}
for i in range(2 + self.get("max_nb_extra_channels")):
path = image_data.iloc[0, i + 1]
if isinstance(path, str):
# If image is in the proj dir, then construct the absolute path:
if not os.path.isabs(path):
path = os.path.join(self.experiment_directory, path)
out_dict[ch_names[i]] = path
return out_dict
def add_input_image(self,
main_image_path,
main_image_filter=None,
dapi_path=None,
dapi_filter=None,
extra_ch_1_path=None,
extra_ch_1_filter=None,
extra_ch_2_path=None,
extra_ch_2_filter=None,
id_input_image_to_rewrite=None,
**extra_channels_kwargs
):
"""
# TODO: add option to remove input image? In that case, I need to update self.nb_input_images
"""
# TODO: generalize to multiple extra channels
assert len(extra_channels_kwargs) == 0, "Extra channels are not supported yet"
# Validate main image path:
assert os.path.isfile(main_image_path), "'{}' is not a file!".format(main_image_path)
# Convert to relative, if in proj_directory:
_, main_image_path = self.is_path_in_exp_dir(main_image_path)
def validate_ch_paths(ch_path, name_filter):
ch_path = None if ch_path == "" else ch_path
name_filter = None if name_filter == "" else name_filter
if ch_path is not None:
assert os.path.isfile(ch_path), "'{}' is not a file!".format(ch_path)
# Convert to relative, if in proj_directory:
_, ch_path = self.is_path_in_exp_dir(ch_path)
else:
print("WARNING: filename filters outdated. No support for relative paths in proj dir")
if name_filter is not None:
assert isinstance(main_image_filter, str) and main_image_filter != "", "Please insert a proper filter string for main image"
assert isinstance(name_filter, str) and name_filter != "", "Wrong format for filter '{}'".format(name_filter)
ch_path = main_image_path.replace(main_image_filter, name_filter)
assert os.path.isfile(ch_path), "'{}' is not a file!".format(ch_path)
return ch_path
# Validate DAPI image:
dapi_image_path = validate_ch_paths(dapi_path, dapi_filter)
# If present, then set up the training to use it (cellpose can still train fine if some of the images do
# not have DAPI channel):
if dapi_image_path is not None:
self.use_dapi_channel_for_segmentation = True
# Validate extra channels:
extra_ch_1_path = validate_ch_paths(extra_ch_1_path, extra_ch_1_filter)
extra_ch_2_path = validate_ch_paths(extra_ch_2_path, extra_ch_2_filter)
# Add new image:
image_info = [main_image_path, dapi_image_path, extra_ch_1_path, extra_ch_2_path]
nb_input_images = self.nb_input_images
# Check if main image has already been added:
matching_images = self._input_images_df.index[self._input_images_df["main_path"] == main_image_path].tolist()
assert len(matching_images) <= 1
if len(matching_images) == 1:
print("The added image was already present in the project. Updating paths.")
id_input_image_to_rewrite = matching_images[0]
if id_input_image_to_rewrite is not None:
assert id_input_image_to_rewrite < nb_input_images
added_image_id = nb_input_images if id_input_image_to_rewrite is None else id_input_image_to_rewrite
self._input_images_df.loc[added_image_id] = [added_image_id] + image_info
self.dump_input_images_info()
# Refresh all the ROIs, if there were any:
self._create_training_images(self._get_roi_ids_by_image_id(added_image_id))
return added_image_id
def dump_input_images_info(self):
# Write data to file:
proj_dir = self.experiment_directory
rois_dir_path = os.path.join(proj_dir, "ROIs")
input_images_csv_path = os.path.join(rois_dir_path, "input_images.csv")
self._input_images_df.to_csv(input_images_csv_path, index=False)
# Dump general configuration:
self.dump_configuration()
@property
def nb_input_images(self):
assert self._input_images_df is not None
nb_input_images = self._input_images_df["image_id"].max()
return 0 if math.isnan(nb_input_images) else nb_input_images + 1
def _init_input_images_df(self):
if self._input_images_df is None:
input_images_csv_path = os.path.join(self.experiment_directory, "ROIs/input_images.csv")
columns_names = ["image_id",
"main_path",
"DAPI_path"]
columns_names += ["extra_ch_{}_path".format(i) for i in range(self.get("max_nb_extra_channels"))]
if os.path.exists(input_images_csv_path):
self._input_images_df = | pandas.read_csv(input_images_csv_path, index_col=None) | pandas.read_csv |
# -*- coding:utf-8 -*-
import math
import phate
import anndata
import shutil
import warnings
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import cdist
from scipy.stats import wilcoxon, pearsonr
from scipy.spatial import distance_matrix
from sklearn.decomposition import PCA
# from python_codes.train.train import train
from python_codes.train.clustering import clustering
from python_codes.train.pseudotime import pseudotime
from python_codes.util.util import load_breast_cancer_data, preprocessing_data, save_features
from python_codes.util.exchangeable_loom import write_exchangeable_loom
warnings.filterwarnings("ignore")
from python_codes.util.util import *
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial','Roboto']
rcParams['savefig.dpi'] = 300
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, inset_locator
title_sz = 16
####################################
#----------Get Annotations---------#
####################################
def get_adata_from_embeddings(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
feature_fp = os.path.join(output_dir, "features.tsv")
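# features.tsv is assumed to hold the per-spot embeddings written during training;
# it is loaded as an AnnData matrix (rows = spots, columns = feature dimensions).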
adata = sc.read_csv(feature_fp, delimiter="\t", first_column_names=None)
return adata
def get_clusters(args, sample_name, method="leiden", dataset="breast_cancer"):
original_spatial = args.spatial
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
args.spatial = original_spatial
cluster_color_dict = get_cluster_colors(args, sample_name)
unique_cluster_dict = {cluster:cluster_color_dict[cluster]["abbr"] for cluster in cluster_color_dict.keys()}
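# replace the numeric cluster ids by the abbreviations defined in the per-sample color table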
uniq_pred = np.unique(pred_clusters)
for cid, cluster in enumerate(uniq_pred):
pred_clusters[pred_clusters == cluster] = unique_cluster_dict[int(cluster)]
return pred_clusters
def get_cluster_colors_and_labels_original():
ann_dict = {
0: "Cancer 1",
1: "Immune:B/plasma",
2: "Adipose",
3: "Immune:APC/B/T cells",
4: "Cancer:Immune rich",
5: "Cancer 2",
6: "Cancer Connective"
}
color_dict = {
0: "#771122",
1: "#AA4488",
2: "#05C1BA",
3: "#F7E54A",
4: "#D55802",
5: "#137777",
6: "#124477"
}
return ann_dict, color_dict
def get_cluster_colors(args, sample_name):
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
clusters = df["Cluster ID"].values.astype(int)
annotations = df["Annotations"].values.astype(str)
colors = df["Color"].values.astype(str)
abbrs = df["Abbr"].values.astype(str)
cur_dict = {}
for cid, cluster in enumerate(clusters):
cur_dict[cluster] = {
"annotation" : annotations[cid],
"color" : colors[cid],
"abbr" : abbrs[cid]
}
return cur_dict
def get_top_n_cluster_specific_genes(args, sample_name, method, dataset="breast_cancer", top_n=3):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t")
df = df.loc[:top_n-1, df.columns.str.endswith("_n")]
cluster_specific_genes_dict = {}
for cluster_abbr in df.columns:
cluster_specific_genes_dict[cluster_abbr.strip("_n")] = df[cluster_abbr].values.astype(str)
return cluster_specific_genes_dict
def save_cluster_specific_genes(args, adata, sample_name, method, dataset="breast_cancer", qval=0.05):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
abbrs = np.array(np.unique(df["Abbr"].values.astype(str)))
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t", header=0)
for cid, cluster_name in enumerate(abbrs):
sub_df = df.loc[df.loc[:, f"{cluster_name}_p"] <= qval, f"{cluster_name}_n"]
genes = np.array(np.unique(sub_df.values.flatten().astype(str)))
output_fp = f'{output_dir}/cluster_specific_marker_genes/{cluster_name}.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
all_genes = np.array(list(adata.var_names))
output_fp = f'{output_dir}/cluster_specific_marker_genes/background_genes.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, all_genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
def get_GO_term_dict(args):
base_dir = f"{args.dataset_dir}/Visium/Breast_Cancer/analysis"
genes_with_go_ids_fp = f'{base_dir}/genes_with_go_ids.csv'
go_id_to_genes_dict_pkl_fp = f"{base_dir}/go_id_to_genes_dict.pkl"
if os.path.exists(go_id_to_genes_dict_pkl_fp):
with open(go_id_to_genes_dict_pkl_fp, 'rb') as f:
go_terms_dict = pickle.load(f)
return go_terms_dict
else:
df = pd.read_csv(genes_with_go_ids_fp).values.astype(str)
go_terms = np.array(np.unique(df[:, 1]))
go_terms_dict = {go_id : df[df[:, 1] == go_id, 0] for go_id in go_terms}
with open(go_id_to_genes_dict_pkl_fp, 'wb') as f:
pickle.dump(go_terms_dict, f, -1)
print(f"Saved at {go_id_to_genes_dict_pkl_fp}")
return go_terms_dict
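# For each GO term below, spots whose mean expression over the term's genes exceeds mean + 1 SD
# are collected (when more than five such spots exist); their pairwise distances are compared
# against those of an equally sized random spot set with a one-sided Wilcoxon test, and terms
# with p < 0.05 are kept together with the indices of their high-expression spots.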
def get_GO_terms_with_spatial_coherent_expr(args, adata, sample_name, go_term_dict, dataset="breast_cancer"):
coords = adata.obsm["spatial"]
index = np.arange(coords.shape[0])
genes = np.array(adata.var_names)
GO_high_expressed = {}
GO_high_expressed_pvals = {}
n_go_terms = len(go_term_dict)
for gid, (go_id, go_genes) in enumerate(go_term_dict.items()):
if (gid + 1) % 500 == 0:
print(f"Processed {gid + 1}/{n_go_terms}: {100. * (gid + 1)/n_go_terms}% GO terms")
expr = adata.X[:, np.isin(genes, go_genes)].mean(axis=1)
avg_expr = expr.mean()
std_expr = expr.std()
outlier_val = avg_expr + std_expr
ind = np.array(np.where(expr > outlier_val)).flatten()
if ind.size > 5:
sub_coords = coords[ind, :]
sub_dists = distance.cdist(sub_coords, sub_coords, 'euclidean')
rand_index = np.random.choice(index, size=ind.size)
random_coord = coords[rand_index, :]
rand_dists = distance.cdist(random_coord, random_coord, 'euclidean')
pval = wilcoxon(sub_dists.flatten(), rand_dists.flatten(), alternative='greater')
if pval.pvalue < .05:
GO_high_expressed[go_id] = ind
GO_high_expressed_pvals[go_id] = pval.pvalue
else:
pass
print(f"Found {len(GO_high_expressed)} highly expressed GO terms")
args.spatial = True
go_terms_w_pv = np.array([[go_id, str(GO_high_expressed_pvals[go_id])] for go_id in sorted(GO_high_expressed_pvals.keys(), key= lambda key:GO_high_expressed_pvals[key], reverse=True)]).astype(str)
df = pd.DataFrame(go_terms_w_pv, columns=["GO_ID", "P-Val"])
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
df.to_csv(high_expr_GO_out_fp, sep="\t", index=False)
print(f"Saved at {high_expr_GO_out_fp}")
high_expr_GO_out_pkl_fp = f"{output_dir}/highly_expr_go_w_spots_indices.pkl"
with open(high_expr_GO_out_pkl_fp, 'wb') as handle:
pickle.dump(GO_high_expressed, handle, -1)
print(f"Saved at {high_expr_GO_out_pkl_fp}")
def get_ovlp_GO_definitions(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
df = pd.read_csv(high_expr_GO_out_fp, sep="\t", header= 0)
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/analysis/genes_with_go_ids_and_def.csv'
go_id_def = pd.read_csv(fp).values.astype(str)
go_dict = {go_id: go_id_def[gid, 1] for gid, go_id in enumerate(go_id_def[:, 0])}
go_terms = df.loc[:, "GO_ID"].values.astype(str)
go_def = np.array([go_dict[go_id] for go_id in go_terms]).astype(str)
df["GO_DEF"] = go_def
df = df.sort_values(by="P-Val", ascending=True)
high_expr_GO_out_def_fp = f"{output_dir}/highly_expr_go_w_def.tsv"
df.to_csv(high_expr_GO_out_def_fp, sep="\t", index=False)
print(f"Saved at {high_expr_GO_out_def_fp}")
def get_clusters_annnotations(sample_name):
if sample_name[0] == "G":
clusters = ['APC,B,T-1', 'APC,B,T-2', 'Inva-Conn', 'Invasive-2', 'Invasive-1', 'Imm-Reg-1', 'Imm-Reg-2'
, 'Tu.Imm.Itfc-1', 'Tu.Imm.Itfc-1', 'Tu.Imm.Itfc-1']
return clusters
else:
return []
def find_ovlpd_go_terms_with_cluster_specific_go_pathways(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
high_expr_GO_out_fp = f"{output_dir}/highly_expr_go.tsv"
high_expr_go_df = pd.read_csv(high_expr_GO_out_fp, sep="\t", header=0)
high_expr_go_terms = high_expr_go_df["GO_ID"].values.astype(str)
cluster_dir = f'{output_dir}/cluster_specific_marker_genes'
clusters = get_clusters_annnotations(sample_name)
for cid, cluster in enumerate(clusters):
cluster_go_term_fp = f"{cluster_dir}/{cluster}_topGO_terms.tsv"
df = pd.read_csv(cluster_go_term_fp, sep="\t", header=0)
go_ids = df["GO.ID"].values.astype(str)
ovlp_go_ids, x_ind, y_ind = np.intersect1d(high_expr_go_terms, go_ids, return_indices=True)
cluster_ovlp_go_terms_fp = f"{cluster_dir}/{cluster}_topGO_terms_w_high_expr_patterns.tsv"
sub_df = df.iloc[y_ind, :]
sub_df.to_csv(cluster_ovlp_go_terms_fp, sep="\t", index=False)
print(f"Saved at {cluster_ovlp_go_terms_fp}")
def cell_cell_communication_preprocessing_data(args, adata):
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
sc.pp.normalize_per_cell(adata, key_n_counts='n_counts_all', min_counts=0) # normalize with total UMI count per cell
sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
genes = np.array(adata.var_names)
cells = np.array(adata.obs_names)
return adata, genes, cells
def save_adata_to_preprocessing_dir(args, adata_pp, sample, cells):
pp_dir = f'{args.dataset_dir}/Visium/Breast_Cancer/preprocessed/{sample}'
mkdir(pp_dir)
cluster_annotations = get_clusters(args, sample)
concat_annotations = np.transpose(np.vstack([cells, cluster_annotations]))
annotation_fp = f'{pp_dir}/cluster_anno.tsv'
df = pd.DataFrame(data=concat_annotations, columns=["Cell", "Annotation"])
df.to_csv(annotation_fp, sep="\t", index=False)
print(f"{sample} annotation saved at {annotation_fp}")
adata_fp = f'{pp_dir}/anndata_pp.h5ad'
mkdir(os.path.dirname(adata_fp))
adata_pp.write(adata_fp)
print(f"{sample} adata saved at {adata_fp}")
####################################
#-------------Plotting-------------#
####################################
def plt_setting():
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 30
plt.rc('font', size=MEDIUM_SIZE, weight="bold") # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def figure(nrow, ncol, rsz=3., csz=3., wspace=.4, hspace=.5):
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol * csz, nrow * rsz))
plt_setting()
plt.subplots_adjust(wspace=wspace, hspace=hspace)
return fig, axs
def plot_hne_and_annotation(args, adata, sample_name, nrow = 1, scale = 0.045, ncol=4, rsz=2.5, csz=2.8, wspace=.4, hspace=.5, annotation=True):
fig, axs = figure(nrow, ncol, rsz=rsz, csz=csz, wspace=wspace, hspace=hspace)
if nrow == 1:
for ax in axs:
ax.axis('off')
ax = axs[0]
if annotation:
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-pat/img/{sample_name[0]}1_annotated.png'
else:
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/ST-imgs/{sample_name[0]}/{sample_name}/HE.jpg'
img = plt.imread(fp)
ax.imshow(img)
# ax.set_title("H & E", fontsize=title_sz)
x, y = adata.obsm["spatial"][:, 0]*scale, adata.obsm["spatial"][:, 1]*scale
if not annotation:
xlim = [np.min(x), np.max(x) * 1.05]
ylim = [np.min(y) * .75, np.max(y) * 1.1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
else:
xlim, ylim = None, None
ax.invert_yaxis()
return fig, axs, x, y, img, xlim, ylim
def plot_clustering(args, adata, sample_name, method="leiden", dataset="breast_cancer", cm = plt.get_cmap("Paired"), scale = .62, scatter_sz=1.3, nrow = 1, annotation=True):
original_spatial = args.spatial
fig, axs, x, y, img, xlim, ylim = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=3, rsz=2.6, csz=3.2, wspace=.3, hspace=.4, annotation=annotation)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(int)
uniq_pred = np.unique(pred_clusters)
n_cluster = len(uniq_pred)
if not annotation:
ax.imshow(img)
for cid, cluster in enumerate(uniq_pred):
color = cm((cid * (n_cluster / (n_cluster - 1.0))) / n_cluster)
ind = pred_clusters == cluster
ax.scatter(x[ind], y[ind], s=scatter_sz, color=color, label=cluster)
title = args.arch if not spatial else "%s + SP" % args.arch
ax.set_title(title, fontsize=title_sz, pad=-30)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
box = ax.get_position()
height_ratio = 1.0
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height * height_ratio])
lgnd = ax.legend(loc='center left', fontsize=8, bbox_to_anchor=(1, 0.5), scatterpoints=1, handletextpad=0.1,
borderaxespad=.1)
for handle in lgnd.legendHandles:
handle._sizes = [8]
fig_fp = f"{output_dir}/{method}.pdf"
plt.savefig(fig_fp, dpi=300)
plt.close('all')
args.spatial = original_spatial
def plot_pseudotime(args, adata, sample_name, dataset="breast_cancer", cm = plt.get_cmap("gist_rainbow"), scale = 0.62, scatter_sz=1.3, nrow = 1):
original_spatial = args.spatial
fig, axs, x, y, img, _, _ = plot_hne_and_annotation(args, adata, sample_name, scale=scale, nrow=nrow, ncol=3)
spatials = [False, True]
for sid, spatial in enumerate(spatials):
ax = axs[sid + 1]
ax.imshow(img)
args.spatial = spatial
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
fp = f"{output_dir}/pseudotime.tsv"
pseudotimes = pd.read_csv(fp, header=None)
#!/usr/bin/local/python3
"""
NOTE: When including text files as command line arguments, their names must not begin with a hyphen or they will be ignored.
"""
from data_manager import add_text_sample, DATA_FOLDER, get_all_samples, DATAFRAME_DEST, abs_path, clear_all_samples
from txt_learn import arr_for_string
import pandas as pd
import numpy as np
from collections import Counter
import pickle
#std_length informs the size of the strings to be inserted into the database
std_length=250
#Generating Random Samples
import string
import random
potential_chars=string.printable
def add_random_text_samples(num=1,length=std_length):
"""
Create random text samples of the given length and enter them into the database managed by data_manager.py.
"""
count = 0
for _ in range(num):
new_sample = ''.join(random.choice(potential_chars) for _ in range(length))
add_text_sample('_',0,new_sample)
count += 1
return num
def add_english_text_samples(file_names, length=std_length, quiet=False):
"""
Add English text samples from the provided list of file names, splitting each file into chunks of `length` characters.
"""
count = 0
for file_name in file_names:
with open(abs_path(file_name)) as txt_file:
full = txt_file.read()
for ind, samp in enumerate(full[i:i+length] for i in range(0, len(full), length)):
name = '%s_%d' % (file_name,ind)
add_text_sample(name,1,samp)
count += 1
return count
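# Hypothetical usage sketch (the file name is illustrative): populate the database with random
# negative samples and English positive samples, then build the pickled dataframe below.
# add_random_text_samples(num=100)
# add_english_text_samples(['sample_book.txt'])
# generate_dataframe()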
def generate_dataframe(destination_file=DATAFRAME_DEST):
"""
Generate dataframe with data in database, pickle it, and save it in destination_file.
"""
e_key, v_key = 'english', 'values'
def series_for_string(some_str, english):
arr = arr_for_string(some_str, True)
return pd.Series([english, arr], index=[e_key, v_key])
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
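# Minimal client-side sketch of how these routes are exercised (hypothetical: the host and port
# depend on how the Flask app is launched elsewhere, e.g. app.run(port=5000)):
#
# import requests, json
# base = 'http://localhost:5000'
# requests.get(f'{base}/data/Reset')
# requests.post(f'{base}/data/ServerRequest', data=json.dumps({'fileName': 'HeartC'}))
# scores = requests.get(f'{base}/data/sendResults').json()['ValidResults']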
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
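# create_global_function builds the objective used by the Bayesian optimizer in executeModel:
# a joblib-cached function that fits an XGBClassifier with the proposed hyper-parameters and
# returns the mean cross-validated accuracy on the current XData/yData.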
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
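# featFun cross-validates the given classifier on whatever columns it is handed and returns the
# mean score; estimatorFeatureSelection uses it with one feature at a time via joblib.Parallel.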
location = './cachedir'
memory = Memory(location, verbose=0)
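# joblib.Memory persists estimatorFeatureSelection results in ./cachedir, so re-running it on an
# identical Data/clf pair skips the expensive permutation-importance and RFE computations.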
# computing the per-feature importance results for the current feature space
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.append(0.95)
elif (RFEImp[f] == 2):
RankingFS.append(0.85)
elif (RFEImp[f] == 3):
RankingFS.append(0.75)
elif (RFEImp[f] == 4):
RankingFS.append(0.65)
elif (RFEImp[f] == 5):
RankingFS.append(0.55)
elif (RFEImp[f] == 6):
RankingFS.append(0.45)
elif (RFEImp[f] == 7):
RankingFS.append(0.35)
elif (RFEImp[f] == 8):
RankingFS.append(0.25)
elif (RFEImp[f] == 9):
RankingFS.append(0.15)
else:
RankingFS.append(0.05)
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initilization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
perm_imp_eli5PD.append({0:0})
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
return resultsFS
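# resultsFS is serialized in a fixed order: univariate ANOVA F scores, normalized impurity
# importances, permutation importances, per-feature CV accuracies, and the RFE-CV ranking;
# executeModel stores it in featureImportanceData for the /data/sendFeatImp route.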
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
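# Transformation evaluates, for every numeric feature, each of the 12 candidate transformations
# in listofTransformations across the five user-selected row subsets (quadrant1-quadrant5) and
# collects the per-transformation statistics from NewComputationTransf in packCorrTransformed.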
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
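# NewComputationTransf computes, for each subset, the absolute feature-feature correlations of
# the inspected feature, its correlation with the one-hot-encoded target, a variance inflation
# factor, and (when feasible) mutual information with the class labels.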
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
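    # The five blocks below repeat the same computation once per quadrant; when a quadrant
    # holds no samples, empty placeholders are returned instead.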
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
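        # VIF of feature i is 1 / (1 - R_i^2), where R_i^2 comes from regressing feature i on
        # the remaining features; add_constant() above supplies the intercept column that
        # statsmodels expects for this calculation.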
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = pd.Series()
if ((len(targetRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
if (flagInf == False):
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = pd.Series()
if ((len(targetRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
if (flagInf == False):
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
VIF4 = VIF4.loc[[feature]]
else:
            VIF4 = pd.Series()
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
        # set self.* attributes here to access them in the test methods; setUp() runs before every test
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
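        # Returns True only when every listed column is identical in both DataFrames;
        # on the first mismatch it prints the differing columns and returns False.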
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
        # the function should identify this invalid input and return None after printing
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data));
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data));
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position')
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_colMean_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = 6.5
self.assertEqual(expected_result, result)
def test_metrics_colMean_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = np.nan
#self.assertEqual(expected_result, result)
np.testing.assert_equal(expected_result, result)
def test_metrics_colSD_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 3.1622776601683795
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position', 3)
expected_result = 2.29128784747792
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colMax_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 10
self.assertEqual(expected_result, result)
def test_metrics_colMax_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 9
self.assertEqual(expected_result, result)
def test_metrics_colMax_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_timeAboveSpeed_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, True)
expected_result = 1.002994011976048
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData(data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, False)
expected_result = 0.1675
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
        df = pandas.DataFrame(data=d)
import json
import pandas as pd
from vvc.utils import json_utils
def to_df(json_file):
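    # Build per-frame summaries from a VVC result JSON file: object counts per tag and the
    # timestamps recorded for each frame.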
count_summary = {}
time_summary = {}
with open(json_file) as json_data:
data = json.load(json_data)
for frame_id, objects in data['frames'].items():
# Extract counts
if frame_id not in count_summary:
count_summary[frame_id] = {}
for obj in objects['objects']:
tag = obj['tag']
if tag not in count_summary[frame_id]:
count_summary[frame_id][tag] = 0
count_summary[frame_id][tag] += 1
# Extract running time
if frame_id not in time_summary:
time_summary[frame_id] = {}
for key, value in objects['timestamps'].items():
time_summary[frame_id][key] = value
df = pd.DataFrame.from_dict(count_summary, orient='index')
df = df.fillna(0)
    df = df.set_index(pd.to_numeric(df.index))
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 15:33:50 2019
@author: luc
"""
#%% Import Libraries
import numpy as np
import pandas as pd
import itertools
from stimuli_dictionary import cued_stim, free_stim, cued_stim_prac, free_stim_prac
def randomize(ID, Age, Gender, Handedness):
'''
Create a randomized and counterbalanced stimulus list for the current participant
Parameters
----------
ID : INT
The subject ID. Based on the subject ID the correct counterbalancing is determined
Returns
-------
design : Pandas DataFame
The dataframe containing the complete stimulus list (including practice trials)
keys: Dictionary
the response keys for the free phase
'''
#%% Variables
# experiment variables
nBlocks = 6
Phases = ['prac_cued', 'prac_free', 'cued', 'free']
nstim = 60 # sample 60 stim from each target_type
# sample from main stimulus set without replacement
# randomize word targets to avoid relationship reward - stimulus
for idx, name in enumerate(['lism','lila','nosm','nola']):
cued_stim[name] = np.random.choice(cued_stim[name], size = nstim, replace = False)
wide_cued = pd.DataFrame(cued_stim); wide_free = pd.DataFrame(free_stim)
    wide_cued_prac = pd.DataFrame(cued_stim_prac); wide_free_prac = pd.DataFrame(free_stim_prac)
"""
Behaiviour_Recognizer Toolbox
© <NAME>
@author: <NAME>
This script makes predictions for any desired validation set. In k-fold validation the data
is divided into k portions. For each validation set the network was trained on k-1 portions
and the corresponding weights were saved. Here we use the one remaining portion (the
validation data for kth_Validation) to predict the behaviour class of its videos.
It reads the test data (features extracted for the test data by the convolutional network)
from the "Feature" folder, produces predictions for them, evaluates the validation accuracy
(accuracy on unseen data) and reports it. It also provides the activations before and after
the last layer (before and after the activation function of the last layer) for investigating
the network's performance in more detail. Finally, it organizes all the information and saves
it in an Excel file at the destination path.
"""
from parameters import Param
from Part2.Model import RNN_model
import numpy as np
import pandas as pd
from tqdm import tqdm
from numpy import genfromtxt
from keras import backend as K
from sklearn.metrics import accuracy_score
#Define which validation set you would like to predict
kth_Validation=3
# The name of the weight for the kth_Validation
weight_path = Param.Destination_path + 'weights_' + str(kth_Validation)+'.hdf5'
#loading the model
model = RNN_model()
# loading the trained weights
model.load_weights(weight_path)
data=pd.read_excel(Param.Excel_Path)
Indexes = np.load(Param.Destination_path + 'Validation_Indexes.npy')
#preparing test data
Video_N = []
Video_names = data['Video']
for i in Indexes[kth_Validation-1]:
video_name = Video_names[i]
Video_N.append(video_name)
X = []
y = []
for i in Indexes[kth_Validation-1]:
File_name = Video_names[i] + '.csv'
Video = Video_names[i]
X0 = genfromtxt(Param.Features_Path + '\\' +File_name, delimiter=',')
X.append(X0)
Class = list(data[data['Video']==Video]['Class'])[0]
y.append(Class)
X = np.array(X)
#Prediction
predict = []
actual = []
After_soft = []
Before_soft = []
for i in tqdm(range(X.shape[0])):
# Reading the input (feature) and converting it in the desire input_shape for lstm
prediction_movie = X[i]
prediction_movie = prediction_movie.reshape(1,prediction_movie.shape[0],prediction_movie.shape[1])
# predicting tags for each array
prediction = model.predict_classes(prediction_movie)
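    # As the variable names suggest, layer 3 gives the activations after the final softmax
    # and layer 2 the activations just before it (see the module docstring above).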
get_3rd_layer_output = K.function([model.layers[0].input],[model.layers[3].output])
After_softmax = get_3rd_layer_output([prediction_movie])[0]
get_2rd_layer_output = K.function([model.layers[0].input],[model.layers[2].output])
Before_softmax = get_2rd_layer_output([prediction_movie])[0]
# appending the model prediction in predict list to assign the tag to the video
predict.append(prediction[0])
After_soft.append(After_softmax)
Before_soft.append(Before_softmax)
# After softmax
Aft1 = []
for i in range(len(After_soft)):
Aft1.append(After_soft[i][0][0])
Aft2 = []
for i in range(len(After_soft)):
Aft2.append(After_soft[i][0][1])
# Before softmax
Bef1 = []
for i in range(len(Before_soft)):
Bef1.append(Before_soft[i][0][0])
Bef2 = []
for i in range(len(Before_soft)):
Bef2.append(Before_soft[i][0][1])
Pre_class = [x+1 for x in predict]
#Organizing the validation information to save in excel file
Validation = pd.DataFrame(Indexes[kth_Validation-1], columns = ['Index'])
# -*- coding: utf-8 -*-
"""
Created on Sun June 16 15:30:27 2019
@author: <NAME>
"""
#IMPORTING NECESSARY LIBRARIES
import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import webbrowser
from scipy.stats import norm
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import statistics as stats
from PIL import Image #for storing in tiff
from io import BytesIO #for storing in tiff
#READING CSV FILE "STUDENTDATA" HAVING SAME LOCATION AS OF THIS PROGRAM
'''KEEP CSV AND THE PYTHON FILE IN THE SAME DIRECTORY'''
df=pd.read_csv("finalstudent.csv")
df_future=pd.read_csv("Book1.csv")
#removing extra columns
df.drop(['employment offer', 'mode of placement',
'Please provide an estimate of the yearly salary that you have been offered.',
'Is the employment opportunity off-campus?','If you are not planning for employment immediately, for which examitiore you preparing? (At most you can select three preferences)',
'Which of the following training programs have been conducted by the institute?'],1,inplace = True)
#CONVERSION FUNCTION CONVERTS CATEGORICAL VARIABLES INTO NUMERIC CODES
def conversion(original,new_name):
ler=LabelEncoder()
df[new_name]=ler.fit_transform(df[original])
return(df[new_name])
#CALLING OUT CONVERSION FUNCTION
conversion("Gender","Gender_converted")
conversion("Sections","Sections_converted")
conversion("GATE qualified","qualifiers_converted")
#BACKING UP DATA
backup=df.copy()
#CREATING DATAFRAMES OF INDIVUAL SECTIONS
def sections(name,cole_name,sect):
name=df[(df[cole_name]==sect)]
return(name)
#CREATING THE DUMMY VARIABLES
def get_dummy(original,coln_name):
df=pd.get_dummies(original[coln_name])
return(df)
'''
I did not use OneHotEncoder because it replaces all the column names with 0, 1, 2, ...,
which would be difficult to rename manually.
In such a scenario dummy variables are much preferred over one-hot encoding.
'''
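# Note: pd.get_dummies() returns a DataFrame indexed like the input with one column per
# category value, which can be concatenated and renamed directly (see the rename calls
# below), while OneHotEncoder yields a bare numeric array without an index or headers.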
#CONCATINATING DUMMY VARIABLES TO MAIN DATAFRAME
def to_concat_df(final,initial):
df = pd.concat([final, initial], axis=1)
return(df)
#FINDING MEAN AND STANDARD DEVIATION
def avg(df,col,texte):
#print('\n')
print(texte)
print('mean :',np.nanmean(df[col]))
print('standard deviation:',np.nanstd(df[col]))
print('\n')
#CREATING DUMMY VARIABLES AND CONCATINATING THEM TO THE ORIGINAL DATASET
df=to_concat_df(df,get_dummy(df,"Sections_converted"))
#RENAMING THE DUMMY COLUMNS AND REQUIRED COLUMNS
df.rename(columns={0:"GEN",1:"OBC",2:"PH",3:"SC",4:"ST"},inplace=True)
df.rename(columns={'GATE Marks out of 100':'GATE Marks'},inplace=True)
#SAME PROCEDURE FOR GENDERS TOO
df=to_concat_df(df,get_dummy(df,"Gender_converted"))
df.rename(columns={1:"MALE",0:"FEMALE"},inplace=True)
df.rename(columns={'CGPA':"B.Tech CGPA"},inplace=True)
#SAME PROCEDURE FOR GATE QUALIFIERS TOO
df=to_concat_df(df,get_dummy(df,"qualifiers_converted"))
df.rename(columns={0:"Not qualified",1:"Not Appeared",2:"Qualified"},inplace=True)
#removing unwanted columns
df.drop(["Sections",'GATE qualified','Gender_converted', 'Sections_converted', 'qualifiers_converted'],1,inplace = True)
#GETTING GEN_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.GEN == 1)), 'GEN_MALE'] = 1
df.loc[((df.MALE != 1) | (df.GEN != 1)), 'GEN_MALE'] = 0
#GETTING GEN_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.GEN == 1)), 'GEN_FEMALE'] = 1
df.loc[((df.FEMALE != 1) | (df.GEN != 1)), 'GEN_FEMALE'] = 0
#-----------------------------------------------------------------
#GETTING OBC_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.OBC == 1)), 'OBC_MALE'] = 1
df.loc[((df.MALE != 1) | (df.OBC != 1)), 'OBC_MALE'] = 0
#GETTING OBC_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.OBC == 1)), 'OBC_FEMALE'] = 1
df.loc[((df.FEMALE != 1) | (df.OBC != 1)), 'OBC_FEMALE'] = 0
#-----------------------------------------------------------------
#GETTING SC_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.SC == 1)), 'SC_MALE'] = 1
df.loc[((df.MALE != 1) | (df.SC != 1)), 'SC_MALE'] = 0
#GETTING SC_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.SC == 1)), 'SC_FEMALE'] = 1
df.loc[((df.FEMALE != 1) | (df.SC != 1)), 'SC_FEMALE'] = 0
#-----------------------------------------------------------------
#GETTING ST_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.ST == 1)), 'ST_MALE'] = 1
df.loc[((df.MALE != 1) | (df.ST != 1)), 'ST_MALE'] = 0
#GETTING ST_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.ST == 1)), 'ST_FEMALE'] = 1
df.loc[((df.FEMALE != 1) | (df.ST != 1)), 'ST_FEMALE'] = 0
#----------------------------------------------------------------
#GETTING male qualified DATACOLUMN
df.loc[((df.MALE == 1) & (df.Qualified == 1)), 'MALE_QUALIFIED'] = 1
df.loc[((df.MALE != 1) | (df.Qualified != 1)), 'MALE_QUALIFIED'] = 0
#GETTING female qualified DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.Qualified == 1)), 'FEMALE_QUALIFIED'] = 1
df.loc[((df.FEMALE != 1) | (df.Qualified != 1)), 'FEMALE_QUALIFIED'] = 0
#-----------------------------------------------------------------
#GETTING GEN_MALE qualified DATACOLUMN
df.loc[((df.GEN_MALE == 1) & (df.Qualified == 1)), 'GEN_MALE_QUALIFIED'] = 1
df.loc[((df.GEN_MALE != 1) | (df.Qualified != 1)), 'GEN_MALE_QUALIFIED'] = 0
#GETTING GEN_FEMALE qualified DATACOLUMN
df.loc[((df.GEN_FEMALE == 1) & (df.Qualified == 1)), 'GEN_FEMALE_QUALIFIED'] = 1
df.loc[((df.GEN_FEMALE != 1) | (df.Qualified != 1)), 'GEN_FEMALE_QUALIFIED'] = 0
#-----------------------------------------------------------------
#GETTING OBC_MALE QUALIFIEED DATACOLUMN
df.loc[((df.OBC_MALE == 1) & (df.Qualified == 1)), 'OBC_MALE_QUALIFIED'] = 1
df.loc[((df.OBC_MALE != 1) | (df.Qualified != 1)), 'OBC_MALE_QUALIFIED'] = 0
#GETTING OBC_FEMALE QUALIFIED DATACOLUMN
df.loc[((df.OBC_FEMALE == 1) & (df.Qualified == 1)), 'OBC_FEMALE_QUALIFIED'] = 1
df.loc[((df.OBC_FEMALE != 1) | (df.Qualified != 1)), 'OBC_FEMALE_QUALIFIED'] = 0
#-----------------------------------------------------------------
#GETTING SC_MALE QUALIFIED DATACOLUMN
df.loc[((df.SC_MALE == 1) & (df.Qualified == 1)), 'SC_MALE_QUALIFIED'] = 1
df.loc[((df.SC_MALE != 1) | (df.Qualified != 1)), 'SC_MALE_QUALIFIED'] = 0
#GETTING SC_FEMALE QUALIFIED DATACOLUMN
df.loc[((df.SC_FEMALE == 1) & (df.Qualified == 1)), 'SC_FEMALE_QUALIFIED'] = 1
df.loc[((df.SC_FEMALE != 1) | (df.Qualified != 1)), 'SC_FEMALE_QUALIFIED'] = 0
#-----------------------------------------------------------------
#GETTING ST_MALE QUALIFIED DATACOLUMN
df.loc[((df.ST_MALE == 1) & (df.Qualified == 1)), 'ST_MALE_QUALIFIED'] = 1
df.loc[((df.ST_MALE != 1) | (df.Qualified != 1)), 'ST_MALE_QUALIFIED'] = 0
#GETTING ST_FEMALE QUALIFIED DATACOLUMN
df.loc[((df.ST_FEMALE == 1) & (df.Qualified == 1)), 'ST_FEMALE_QUALIFIED'] = 1
df.loc[((df.ST_FEMALE != 1) | (df.Qualified != 1)), 'ST_FEMALE_QUALIFIED'] = 0
#-------------------------------------------------------------------------------------------------------------------------
#GETTING CFTI DATACOLUMN
df['CFTI'] = [1 if Institute in(['IIIT Guwahati','NIT Uttarakhand',
'NIT Sikkim','NIT Agartala',
'NIT Arunachal Pradesh','NIT Srinagar','NIT Meghalaya','NIT Manipur',
'NIT Mizoram','IIIT Manipur','NIT Nagaland']) else 0 for Institute in df['Institute']]
df['NON-CFTI'] = [0 if Institute in(['IIIT Guwahati','NIT Uttarakhand',
'NIT Sikkim','NIT Agartala',
'NIT Arunachal Pradesh','NIT Srinagar','NIT Meghalaya','NIT Manipur',
'NIT Mizoram','IIIT Manipur','NIT Nagaland']) else 1 for Institute in df['Institute']]
#-------------------------------------------------------------------------------------------------------------------------------
#GETTING CFTI_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.CFTI == 1)), 'CFTI_MALE'] = 1
df.loc[((df.MALE != 1) | (df.CFTI != 1)), 'CFTI_MALE'] = 0
#GETTING CFTI_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.CFTI == 1)), 'CFTI_FEMALE'] = 1
df.loc[((df.FEMALE != 1) | (df.CFTI != 1)), 'CFTI_FEMALE'] = 0
#-------------------------------------------------------------------------------------
#GETTING NONCFTI_MALE DATACOLUMN
df.loc[((df.MALE == 1) & (df.CFTI == 0)), 'NONCFTI_MALE'] = 1
df.loc[((df.MALE == 0) & (df.FEMALE==1)), 'NONCFTI_MALE'] = 0
#GETTING NONCFTI_FEMALE DATACOLUMN
df.loc[((df.FEMALE == 1) & (df.CFTI == 0)), 'NONCFTI_FEMALE'] = 1
df.loc[((df.FEMALE != 1) & (df.MALE != 0)), 'NONCFTI_FEMALE'] = 0
#---------------------------------------------------------------------------
#HERE CFTI NONQUALIFIED + NON CFTI NON QUALIFIED BOTH ARE GIVEN 1 BUT IF WE WANT ACCURATE RESULTS WE SHOULD ONLY CONSIDER DF_CFTI THEN SEE WHAT HAPPENS!
#GETTING CFTI qualified DATACOLUMN
df.loc[((df.CFTI == 1) & (df.Qualified == 1)), 'CFTI_QUALIFIED'] = 1
df.loc[((df.CFTI != 1) | (df.Qualified != 1)), 'CFTI_QUALIFIED'] = 0
#df.loc[((df.NONCFTI==1) & (df.Qualified == 1)), 'NONCFTI_QUALIFIED'] = 1
#df.loc[((df.NONCFTI != 1) | (df.CFTI!=0)), 'NONCFTI_QUALIFIED'] = 0
#GETTING CFTI_MALE qualified DATACOLUMN
df.loc[((df.CFTI_MALE == 1) & (df.Qualified == 1)), 'CFTI_MALE_QUALIFIED'] = 1
df.loc[((df.CFTI_MALE != 1) | (df.Qualified != 1)), 'CFTI_MALE_QUALIFIED'] = 0
#GETTING CFTI_FEMALE qualified DATACOLUMN
df.loc[((df.CFTI_FEMALE == 1) & (df.Qualified == 1)), 'CFTI_FEMALE_QUALIFIED'] = 1
df.loc[((df.CFTI_FEMALE != 1) | (df.Qualified != 1)), 'CFTI_FEMALE_QUALIFIED'] = 0
df.to_csv('file1.csv')
'''
#GETTING NONCFTI_MALE qualified DATACOLUMN
df.loc[((df.NONCFTI_MALE == 1) & (df.Qualified == 1)), 'NONCFTI_MALE_QUALIFIED'] = 1
df.loc[((df.NONCFTI_MALE != 1) | (df.Qualified != 1)), 'NONCFTI_MALE_QUALIFIED'] = 0
#GETTING NONCFTI_FEMALE qualified DATACOLUMN
df.loc[((df.CFTI_FEMALE == 1) & (df.Qualified == 1)), 'NONCFTI_FEMALE_QUALIFIED'] = 1
df.loc[((df.CFTI_FEMALE != 1) | (df.Qualified != 1)), 'NONCFTI_FEMALE_QUALIFIED'] = 0
'''
#GETTING NECESSARY DATAFRAMES FROM SELECTION CRITERIA FOR PLOTTING AND SEPARATE RECORDS
#CAN ALSO BE DONE USING SECTIONS FUNCTION
df_cfti=df[(df['CFTI']==1)]
df_cfti_male=df[(df['CFTI_MALE']==1)]
df_cfti_female=df[(df['CFTI_FEMALE']==1)]
df_noncfti=df[(df['CFTI']==0)]
df_noncfti_male=df_noncfti[(df_noncfti['MALE']==1)]
df_noncfti_female=df_noncfti[(df_noncfti['FEMALE']==1)]
df_gen=df[(df['GEN']==1)]
df_gen_male=df[(df['GEN_MALE']==1)]
df_gen_female=df[(df['GEN_FEMALE']==1)]
df_obc=df[(df['OBC']==1)]
df_obc_male=df[(df['OBC_MALE']==1)]
df_obc_female=df[(df['OBC_FEMALE']==1)]
df_sc=df[(df['SC']==1)]
df_sc_male=df[(df['SC_MALE']==1)]
df_sc_female=df[(df['SC_FEMALE']==1)]
df_st=df[(df['ST']==1)]
df_st_male=df[(df['ST_MALE']==1)]
df_st_female=df[(df['ST_FEMALE']==1)]
df_qualified=df[(df['Qualified']==1)]
df_qualified_male=df[(df['MALE_QUALIFIED']==1)]
df_qualified_female=df[(df['FEMALE_QUALIFIED']==1)]
df_notappeared=df[(df['Not Appeared']==1)]
df_unqualified=df[(df['Not qualified']==1)]
df_unqualified_male=df_unqualified[df_unqualified['MALE']==1]
df_unqualified_female=df_unqualified[df_unqualified['FEMALE']==1]
df_qualified_cfti=df[(df['CFTI_QUALIFIED']==1)]
df_qualified_cfti_male=df[(df['CFTI_MALE_QUALIFIED']==1)]
df_qualified_cfti_female=df[(df['CFTI_FEMALE_QUALIFIED']==1)]
df_qualified_noncfti=df[(df['CFTI']==0)& (df['Qualified']==1)]
df_qualified_noncfti_male=df_noncfti_male[(df_noncfti_male['MALE_QUALIFIED']==1)]
df_qualified_noncfti_female=df_noncfti_female[(df_noncfti_female['FEMALE_QUALIFIED']==1)]
#MULTIPLE ENTRIES OF FUTURE ASPIRATIONS WERE SEPARATED IN THE EXCEL ITSELF, THEN
#ALL THE 3 COLUMNS WERE CLUBBED IN 1 TO MAKE ONE COLUMN SO THAT THE FREQUENCY CAN BE CALCULATED
def piechart_combo(df_name):
list1=df_name['future examination 1'].tolist()
list2=df_name['future examination 2'].tolist()
list3=df_name['future examination 3'].tolist()
finalist=list1+list2+list3
#print(finalist)
return(finalist)
#CLUBBING FUTURE ASPIRATIONS OF ALL THE STUDENTS
Data1 = piechart_combo(df)
Data1=[i for i in Data1 if str(i)!='nan']
df_data1=pd.DataFrame(Data1,
columns=['Combined'])
#CLUBBING FUTURE ASPIRATIONS OF UNQUALIFIED STUDENTS
Data2 = piechart_combo(df_unqualified)
Data2=[i for i in Data2 if str(i)!='nan']
df_data2=pd.DataFrame(Data2,columns=['not qualified'])
"""Jリーグ各節の試合情報を読み込み、CSVとして取得、保存
"""
import os
from datetime import datetime, time, timedelta
from typing import List, Set, Dict, Any
import re
from glob import glob
import argparse
import pandas as pd
from bs4 import BeautifulSoup
import requests
PREFERENCE = {}
PREFERENCE['debug'] = False
DATE_FORMAT = '%Y%m%d'
SEASON=2022
CSVFILE_FORMAT = '../docs/csv/{}_allmatch_result-J{}.csv'
TIMESTAMP_FILE = '../csv/csv_timestamp.csv'
# URL of the per-matchday match information published by the J.League
SOURCE_URL_FORMAT = 'https://www.jleague.jp/match/section/j{}/{}/'
# URL of the standings information published by the J.League
STANDING_URL_FORMAT = 'https://www.jleague.jp/standings/j{}/'
def read_teams(category: int):
"""各カテゴリのチームリストを返す
"""
_url = STANDING_URL_FORMAT.format(category)
print(f'access {_url}...')
soup = BeautifulSoup(requests.get(_url).text, 'lxml')
return read_teams_from_web(soup, category)
def read_teams_from_web(soup: BeautifulSoup, category: int) -> List[str]:
"""Jリーグの順位情報からチームリストを読み込んで返す
"""
standings = soup.find('table', class_=f'J{category}table')
if not standings:
print(f'Can\'t find J{category} teams...')
return []
td_teams = standings.find_all('td', class_='tdTeam')
return [list(_td.stripped_strings)[1] for _td in td_teams]
def read_match(category: int, sec: int) -> pd.DataFrame:
"""指定されたカテゴリの指定された1つの節をデータをWebから読み込む
"""
_url = SOURCE_URL_FORMAT.format(category, sec)
print(f'access {_url}...')
soup = BeautifulSoup(requests.get(_url).text, 'lxml')
return read_match_from_web(soup)
def read_match_from_web(soup: BeautifulSoup) -> List[Dict[str, Any]]:
"""Jリーグの各節の試合情報リストから内容を読み込んで返す
"""
result_list = []
match_sections = soup.find_all('section', class_='matchlistWrap')
_index = 1
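    # Page structure as parsed below: each 'matchlistWrap' section is one matchday, its
    # 'timeStamp' div carries the match date, and every table row with a 'stadium' cell
    # describes a single fixture.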
for _section in match_sections:
match_div = _section.find('div', class_='timeStamp')
if match_div:
match_date = match_div.find('h4').text.strip()
match_date = datetime.strptime(match_date[:match_date.index('(')], '%Y年%m月%d日')
else:
match_date = None
section_no = _section.find('div', class_='leagAccTit').find('h5').text.strip()
section_no = re.search('第(.+)節', section_no)[1]
#print((match_date, section_no))
for _tr in _section.find_all('tr'):
match_dict = {}
match_dict['match_date'] = match_date
match_dict['section_no'] = int(section_no)
match_dict['match_index_in_section'] = _index
stadium_td = _tr.find('td', class_='stadium')
if not stadium_td:
continue
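            # The kickoff time and stadium name sit in the same 'stadium' cell, so both are
            # pulled out of its raw HTML with regular expressions.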
match_dict['start_time'] = re.search(r'([^\>]+)\<br', str(stadium_td))[1]
match_dict['stadium'] = re.search(r'([^\>]+)\<\/a', str(stadium_td))[1]
match_dict['home_team'] = _tr.find('td', class_='clubName rightside').text.strip()
match_dict['home_goal'] = _tr.find('td', class_='point rightside').text.strip()
match_dict['away_goal'] = _tr.find('td', class_='point leftside').text.strip()
match_dict['away_team'] = _tr.find('td', class_='clubName leftside').text.strip()
# str_match_date = (match_date.strftime("%Y/%m/%d") if match_date else '未定')
if PREFERENCE['debug']:
print(match_dict)
result_list.append(match_dict)
_index += 1
return result_list
def read_all_matches(category: int) -> pd.DataFrame:
"""指定されたカテゴリの全て試合をWeb経由で読み込む
"""
return read_matches_range(category)
def read_matches_range(category: int, _range: List[int]=None) -> pd.DataFrame:
"""指定されたカテゴリの指定された節リストのデータをWebから読み込む
"""
_matches = pd.DataFrame()
if not _range:
teams_count = len(read_teams(category))
if teams_count % 2 > 0:
_range = range(1, teams_count * 2 + 1)
else:
_range = range(1, (teams_count - 1) * 2 + 1)
for _i in _range:
result_list = read_match(category, _i)
_matches = pd.concat([_matches, pd.DataFrame(result_list)])
    # Common mistake: forgetting to assign the result of sort_values()/reset_index() back to a variable
_matches = _matches.sort_values(['section_no', 'match_index_in_section']).reset_index(drop=True)
return _matches
def get_undecided_section(all_matches: pd.DataFrame) -> Set[str]:
"""開催日未定の節を返す
"""
return set(all_matches[all_matches['match_date'].isnull()]['section_no'])
def get_match_dates_of_section(all_matches: pd.DataFrame) -> Dict[str, Set[pd.Timestamp]]:
"""各節の開催日リストを返す
開催日未定の試合は無視
"""
return all_matches.dropna(subset=['match_date']).groupby('section_no').apply(make_kickoff_time)
def make_kickoff_time(_subset: pd.DataFrame):
"""与えられた試合データから、キックオフ時間を作成し、その2時間後 (試合終了時間想定) のセットを返す
与えられる試合データは同一節のものと想定
試合開始時間未定の場合は 00:00 キックオフと考える
同一時間を複数返さないようにするためのセット化を実施
"""
start_time = _subset['start_time'].str.replace('未定', '00:00')
result = pd.to_datetime(_subset['match_date'].dt.strftime('%Y/%m/%d ') + start_time) + timedelta(hours=2)
return set(result)
def get_sections_to_update(all_matches: pd.DataFrame,
_start: pd.Timestamp, _end: pd.Timestamp) -> Set[str]:
"""startからendまでの対象期間に、試合が終了した節のセットを返す
"""
target_sec = set()
for (_sec, _dates) in get_match_dates_of_section(all_matches).items():
for _date in _dates:
# print(f'compare "{_sec}" for match on {_date}' + f' between {_start} - {_end}')
if _start <= _date <= _end:
print(f'add "{_sec}" for match on {_date}' + f' between {_start} - {_end}')
target_sec.add(_sec)
target_sec = list(target_sec)
target_sec.sort()
return target_sec
def get_latest_allmatches_filename(category: int) -> str:
"""指定されたカテゴリの最新のCSVファイル名を返す
⇒ CSVファイルは常に同一名称に変更 (最新ファイルは毎回上書き)
"""
return CSVFILE_FORMAT.format(SEASON, category)
def read_latest_allmatches_csv(category: int) -> pd.DataFrame:
"""指定されたカテゴリの最新のCSVファイルを読み込んでDataFrameで返す
該当ファイルが一つもない場合はエラー
"""
return read_allmatches_csv(get_latest_allmatches_filename(category))
def read_allmatches_csv(matches_file: str) -> pd.DataFrame:
"""read_jleague_matches.py が書き出した結果のCSVファイルを読み込んでDataFrame構造を再現
matches_file: 読み込むファイル名
"""
print('match file "' + matches_file + '" reading.')
all_matches = pd.read_csv(matches_file, index_col=0, dtype=str, na_values='')
if 'index' in all_matches.columns:
all_matches = all_matches.drop(columns=['index'])
    all_matches['match_date'] = pd.to_datetime(all_matches['match_date'])
import pandas as pd
import numpy as np
from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from surprise import Reader, Dataset, SVD, evaluate
from imdbToId import converter
# Configuring database
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="udolf", # your username
password="<PASSWORD>", # your password
db="recommendme") # name of the data base
gen_md = pd.read_csv('data/gen_md.csv')
# Main recommendation part for the
class recommendMe():
def __init__(self):
pass
'''
    This will return movies shown initially to a guest who is not logged in or hasn't rated a single
    movie
'''
def build_chart(genre, percentile=0.85):
movieDb = gen_md[gen_md['genre'] == genre]
vote_counts = movieDb[movieDb['vote_count'].notnull()]['vote_count'].astype('int')
vote_averages = movieDb[movieDb['vote_average'].notnull()]['vote_average'].astype('int')
C = vote_averages.mean()
m = vote_counts.quantile(percentile)
qualified = movieDb[(movieDb['vote_count'] >= m) & (movieDb['vote_count'].notnull()) & (movieDb['vote_average'].notnull())][['title','vote_count','vote_average','popularity','imdb_id']]
qualified['vote_count'] = qualified['vote_count'].astype('int')
qualified['vote_average'] = qualified['vote_average'].astype('int')
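        # IMDB-style weighted rating: wr = v/(v+m)*R + m/(v+m)*C, with v the vote count,
        # R the movie's average vote, m the vote-count cutoff (85th percentile by default)
        # and C the mean vote over the genre subset.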
qualified['wr'] = qualified.apply(lambda x: (x['vote_count']/(x['vote_count']+m) * x['vote_average']) + (m/(m+x['vote_count']) * C), axis=1)
qualified = qualified.sort_values('wr', ascending=False).head(250)
return qualified.head(7)
'''
This will return movies that are top of their genre but not rated by the user
'''
def build_chartP(genre,userId, percentile=0.85):
cur = db.cursor()
result = cur.execute('SELECT * FROM ratings WHERE userId = %s',[userId])
imdbIdsRatedAlready = []
if(result > 0):
data = cur.fetchall()
for singleR in data:
imdbIdsRatedAlready.append(singleR[3])
cur.close()
print(imdbIdsRatedAlready)
movieDb = gen_md[gen_md['genre'] == genre]
vote_counts = movieDb[movieDb['vote_count'].notnull()]['vote_count'].astype('int')
vote_averages = movieDb[movieDb['vote_average'].notnull()]['vote_average'].astype('int')
C = vote_averages.mean()
m = vote_counts.quantile(percentile)
qualified = movieDb[(movieDb['vote_count'] >= m) & (movieDb['vote_count'].notnull()) & (movieDb['vote_average'].notnull())][['title','vote_count','vote_average','popularity','imdb_id']]
qualified['vote_count'] = qualified['vote_count'].astype('int')
qualified['vote_average'] = qualified['vote_average'].astype('int')
qualified['wr'] = qualified.apply(lambda x: (x['vote_count']/(x['vote_count']+m) * x['vote_average']) + (m/(m+x['vote_count']) * C), axis=1)
qualified = qualified.sort_values('wr', ascending=False).head(250)
qualified = qualified[~qualified.imdb_id.isin(imdbIdsRatedAlready)]
return qualified.head(8)
    ''' This function takes the user id plus the 5 most recently added movies and ratings from
    the database and adds them to the ratings dataset that will be used for training the model
'''
def svdRecommender(userList,movieIdList,ratingList):
# Adding the data form the user
mat = []
for i in range(len(ratingList)):
temp = []
temp.append(userList[i])
temp.append(movieIdList[i])
temp.append(ratingList[i])
mat.append(temp)
ratings_small = pd.read_csv('data/ratings_small.csv')
newData = pd.DataFrame(mat,columns = ['userId','movieId','rating'])
ratings_small = ratings_small.append(newData,ignore_index = True)
ratings_small.to_csv('ratings_small.csv',index = False)
# Getting the recommended movies after the training
movies = recommendMe.recommender(userList[0])
return movies
    ''' This function takes the user id and performs SVD decomposition on the rating data;
    after training, the trained model is used to predict a rating for every movie for this
    user, and movies the user has already rated are removed from the recommendations
'''
def recommender(user):
cur = db.cursor()
# Getting the movies already rated by the user
result = cur.execute('SELECT * FROM ratings WHERE userId = %s',[user])
imdbIdsRatedAlready = []
if(result > 0):
data = cur.fetchall()
for singleR in data:
imdbIdsRatedAlready.append(singleR[3])
cur.close()
print(imdbIdsRatedAlready)
        ratings = pd.read_csv('data/ratings_small.csv')
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import seaborn as sns
import gc
import calendar
import pickle
import os
from sklearn.preprocessing import StandardScaler
from os.path import join
from sklearn.metrics import confusion_matrix
from IPython.display import clear_output, Image, display, HTML
from datetime import datetime
from IPython.display import display
plt.style.use('fivethirtyeight')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.options.display.float_format = '{:.4f}'.format
def missing_data(data):
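    # For every column, report the absolute number and the percentage of missing values.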
total = data.isnull().sum()
percent = (data.isnull().sum()/data.isnull().count()*100)
    tt = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])