| prompt | completion | api |
|---|---|---|
| string, lengths 19–1.03M | string, lengths 4–2.12k | string, lengths 8–90 |
from scipy.signal import butter, lfilter, resample, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc  # scipy.misc.imread/imresize are used below (removed in SciPy >= 1.2)
import pandas as pd
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def get_name(self):
return 'img-spec-{}'.format(self.size)
def drop_zeros(self, df):
return df[(df.T != 0).any()]
def apply(self, data):
data = pd.DataFrame(data.T)
data = self.drop_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmap=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.append(im)
return channels
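# Hedged usage sketch (illustrative, not part of the original snippet): SpectrogramImage.apply
# expects a (channels x samples) array and returns one image array per channel. The 16x4000
# shape below is an assumption; note that scipy.misc.imread/imresize and plt.cm.spectral are
# deprecated/removed in recent SciPy/matplotlib, so the apply() call is left commented out.
_spec = SpectrogramImage(size=(224, 224, 3))
print(_spec.get_name())  # -> 'img-spec-(224, 224, 3)'
# _channel_images = _spec.apply(np.random.randn(16, 4000))  # list of one 224x224x3 image per channel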
class UnitScale:
"""
Scale across the last axis.
"""
def get_name(self):
return 'unit-scale'
def apply(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def get_name(self):
return 'unit-scale-feat'
def apply(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the last axis.
"""
def get_name(self):
return "fft"
def apply(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
    """
    Apply ICA (experimental).
    """
    def __init__(self, n_components=None):
        self.n_components = n_components
    def get_name(self):
        if self.n_components is not None:
            return "ICA%d" % (self.n_components)
        else:
            return 'ICA'
    def apply(self, data):
        # apply ICA across channels
        ica = FastICA(n_components=self.n_components)
        data = ica.fit_transform(data)
        return data
class Resample:
"""
Resample time-series data.
"""
def __init__(self, sample_rate):
self.f = sample_rate
def get_name(self):
return "resample%d" % self.f
def apply(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def get_name(self):
return "mag"
def apply(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def get_name(self):
return 'lpf%d' % self.f
def apply(self, data):
nyq = self.f / 2.0
cutoff = min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# apply filter over each channel
for j in range(len(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
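# Hedged usage sketch (illustrative): run the FIR low-pass filter over synthetic 2-channel data.
# The cutoff of 180 and the data shape are made up; also note that firwin's 'nyq' keyword used
# above is deprecated in newer SciPy releases (fs=2*nyq is the replacement).
_lpf = LPF(180.0)
print(_lpf.get_name())                       # -> 'lpf180'
_filtered = _lpf.apply(np.random.randn(2, 1000))
print(_filtered.shape)                       # -> (2, 1000)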
class Mean:
"""
extract channel means
"""
def get_name(self):
return 'mean'
def apply(self, data):
axis = data.ndim - 1
return data.mean(axis=axis)
class Abs:
"""
    Take the absolute value of the data.
"""
def get_name(self):
return 'abs'
def apply(self, data):
return np.abs(data)
class Stats:
"""
Subtract the mean, then take (min, max, standard_deviation) for each channel.
"""
def get_name(self):
return "stats"
def apply(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(len(data)):
ch_data = data[i]
ch_data = data[i] - np.mean(ch_data)
outi = out[i]
outi[0] = np.std(ch_data)
outi[1] = np.min(ch_data)
outi[2] = np.max(ch_data)
return out
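# Hedged usage sketch (illustrative): Stats maps an (n_channels x n_samples) array to an
# (n_channels x 3) array of (std, min, max) computed on mean-centred channel data; the shape
# below is an assumption.
_stats_out = Stats().apply(np.random.randn(4, 256))
print(_stats_out.shape)  # -> (4, 3)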
class Interp:
"""
    Replace non-positive values with 0.1 * (minimum positive value) so that a log can be taken later.
NOTE: try different methods later
"""
def get_name(self):
return "interp"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return data
class Log10:
"""
Apply Log10
"""
def get_name(self):
return "log10"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log10(data)
class Slice:
"""
Take a slice of the data on the last axis.
e.g. Slice(1, 48) works like a normal python slice, that is 1-47 will be taken
"""
def __init__(self, start, end):
self.start = start
self.end = end
def get_name(self):
return "slice%d-%d" % (self.start, self.end)
def apply(self, data):
s = [slice(None), ] * data.ndim
s[-1] = slice(self.start, self.end)
        return data[tuple(s)]
class CorrelationMatrix:
"""
Calculate correlation coefficients matrix across all EEG channels.
"""
def get_name(self):
return 'corr-mat'
def apply(self, data):
return upper_right_triangle(np.corrcoef(data))
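# Hedged helper sketch (assumption): upper_right_triangle() is used here and below, but its body
# is cut off at the end of this snippet; it presumably flattens the upper triangle of a square
# matrix into a 1-D feature vector, roughly like the illustrative version below (renamed to avoid
# clashing with the original definition).
def _upper_right_triangle_sketch(matrix):
    indices = np.triu_indices_from(matrix)
    return np.asarray(matrix[indices])
# e.g. a 16x16 correlation matrix yields 16*17/2 = 136 features:
print(_upper_right_triangle_sketch(np.corrcoef(np.random.randn(16, 100))).shape)  # -> (136,)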
# Fix everything below here
class Eigenvalues:
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def get_name(self):
return 'eigenvalues'
def apply(self, data):
w, v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
class FreqCorrelation:
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
then calculate correlation co-efficients on the FFT output, followed by calculating
eigenvalues on the correlation co-efficients matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, start, end, scale_option, with_fft=False, with_corr=True, with_eigen=True):
self.start = start
self.end = end
self.scale_option = scale_option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'freq-correlation-%d-%d-%s-%s%s' % (self.start, self.end, 'withfft' if self.with_fft else 'nofft',
self.scale_option, selection_str)
def apply(self, data):
data1 = FFT().apply(data)
data1 = Slice(self.start, self.end).apply(data1)
data1 = Magnitude().apply(data1)
data1 = Log10().apply(data1)
data2 = data1
if self.scale_option == 'usf':
data2 = UnitScaleFeat().apply(data2)
elif self.scale_option == 'us':
data2 = UnitScale().apply(data2)
data2 = CorrelationMatrix().apply(data2)
if self.with_eigen:
w = Eigenvalues().apply(data2)
out = []
if self.with_corr:
data2 = upper_right_triangle(data2)
out.append(data2)
if self.with_eigen:
out.append(w)
if self.with_fft:
data1 = data1.ravel()
out.append(data1)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
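# Hedged worked example (illustrative): for an assumed 16-channel input and Slice(1, 48)
# (47 retained FFT bins), FreqCorrelation(1, 48, 'us', with_fft=True) would emit
# 16*17/2 = 136 correlation features (if the helper keeps the diagonal, as sketched above),
# 16 eigenvalues and 16*47 = 752 log-FFT magnitudes, i.e. 904 features in total.
_n_ch, _n_bins = 16, 48 - 1
print(_n_ch * (_n_ch + 1) // 2 + _n_ch + _n_ch * _n_bins)  # -> 904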
class TimeCorrelation:
"""
Correlation in the time domain. First downsample the data, then calculate correlation co-efficients
followed by calculating eigenvalues on the correlation co-efficients matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, max_hz, scale_option, with_corr=True, with_eigen=True):
self.max_hz = max_hz
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'time-correlation-r%d-%s%s' % (self.max_hz, self.scale_option, selection_str)
def apply(self, data):
# so that correlation matrix calculation doesn't crash
for ch in data:
            if np.all(ch == 0.0):
ch[-1] += 0.00001
data1 = data
if data1.shape[1] > self.max_hz:
data1 = Resample(self.max_hz).apply(data1)
if self.scale_option == 'usf':
data1 = UnitScaleFeat().apply(data1)
elif self.scale_option == 'us':
data1 = UnitScale().apply(data1)
data1 = CorrelationMatrix().apply(data1)
if self.with_eigen:
w = Eigenvalues().apply(data1)
out = []
if self.with_corr:
data1 = upper_right_triangle(data1)
out.append(data1)
if self.with_eigen:
out.append(w)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
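# Hedged mini-example (illustrative): the small epsilon added above keeps np.corrcoef from
# returning NaN when a channel is all zeros, since a zero-variance row has an undefined
# correlation (the first print also raises a RuntimeWarning).
_zc = np.zeros((2, 5))
_zc[1] = np.arange(5)
print(np.corrcoef(_zc)[0, 1])   # nan
_zc[0, -1] += 0.00001
print(np.corrcoef(_zc)[0, 1])   # finite now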
class TimeFreqCorrelation:
"""
Combines time and frequency correlation, taking both correlation coefficients and eigenvalues.
"""
def __init__(self, start, end, max_hz, scale_option):
self.start = start
self.end = end
self.max_hz = max_hz
self.scale_option = scale_option
assert scale_option in ('us', 'usf', 'none')
def get_name(self):
return 'time-freq-correlation-%d-%d-r%d-%s' % (self.start, self.end, self.max_hz, self.scale_option)
def apply(self, data):
data1 = TimeCorrelation(self.max_hz, self.scale_option).apply(data)
data2 = FreqCorrelation(self.start, self.end,
self.scale_option).apply(data)
assert data1.ndim == data2.ndim
return np.concatenate((data1, data2), axis=data1.ndim - 1)
class FFTWithTimeFreqCorrelation:
"""
Combines FFT with time and frequency correlation, taking both correlation coefficients and eigenvalues.
"""
def __init__(self, start, end, max_hz, scale_option):
self.start = start
self.end = end
self.max_hz = max_hz
self.scale_option = scale_option
def get_name(self):
return 'fft-with-time-freq-corr-%d-%d-r%d-%s' % (self.start, self.end, self.max_hz, self.scale_option)
def apply(self, data):
data1 = TimeCorrelation(self.max_hz, self.scale_option).apply(data)
data2 = FreqCorrelation(self.start, self.end,
self.scale_option, with_fft=True).apply(data)
assert data1.ndim == data2.ndim
return np.concatenate((data1, data2), axis=data1.ndim - 1)
def upper_right_triangle(matrix):
indices = | np.triu_indices_from(matrix) | pandas.triu_indices_from |
import numpy as np
import pandas as pd
import os
def to_categorical(data, dtype=None):
val_to_cat = {}
cat = []
index = 0
for val in data:
if dtype == 'ic':
if val not in ['1', '2', '3', '4ER+', '4ER-', '5', '6', '7', '8', '9', '10']:
val = '1'
if val in ['4ER+','4ER-']:
val='4'
if val not in val_to_cat:
val_to_cat[val] = index
cat.append(index)
index += 1
else:
cat.append(val_to_cat[val])
return np.array(cat)
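# Hedged usage sketch (labels are illustrative): codes are assigned in order of first appearance;
# with dtype='ic', unknown iC10 labels collapse to '1' and '4ER+'/'4ER-' merge into '4'.
print(to_categorical(['1', '4ER+', '4ER-', '2', 'weird'], dtype='ic'))  # -> [0 1 1 2 0]
print(to_categorical(['LumA', 'LumB', 'LumA']))                         # -> [0 1 0]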
def get_data(data):
d = {}
clin_fold = data[["METABRIC_ID"]]
rna = data[[col for col in data if col.startswith('GE')]]
rna = normalizeRNA(rna)
cna = data[[col for col in data if col.startswith('CNA')]]
d['ic'] = list(data['iC10'].values)
d['pam50'] = list(data['Pam50Subtype'].values)
d['er'] = list(data['ER_Expr'].values)
d['pr'] = list(data['PR_Expr'].values)
d['her2'] = list(data['Her2_Expr'].values)
d['drnp'] = list(data['DR'].values)
d['rnanp'] = rna.astype(np.float32).values
d['cnanp'] = ((cna.astype(np.float32).values + 2.0) / 4.0)
d['icnp'] = to_categorical(d['ic'], dtype='ic')
d['pam50np'] = to_categorical(d['pam50'])
d['ernp'] = to_categorical(d['er'])
d['prnp'] = to_categorical(d['pr'])
d['her2np'] = to_categorical(d['her2'])
d['drnp'] = to_categorical(d['drnp'])
"""
preprocessing for clinical data to match current pipeline
"""
## Clinical Data Quick Descriptions
# clin["Age_At_Diagnosis"] # Truly numeric
# clin["Breast_Tumour_Laterality"] # Categorical "L, R" (3 unique)
# clin["NPI"] # Truly numeric
# clin["Inferred_Menopausal_State"] # Categorical "Pre, Post" (3 unique)
# clin["Lymph_Nodes_Positive"] # Ordinal ints 0-24
# clin["Grade"] # Ordinal string (come on) 1-3 + "?"
# clin["Size"] # Truly Numeric
# clin["Histological_Type"] # Categorical strings (9 unique)
# clin["Cellularity"] # Categorical strings (4 unique)
# clin["Breast_Surgery"] # Categorical strings (3 Unique)
# clin["CT"] # Categorical strings (9 unique)
# clin["HT"] # Categorical strings (9 Unique)
# clin["RT"] # Categorical strings (9 Unique)
    ## Clinical Data Transformations
    # On the basis of the above we will keep some columns as numeric and turn the others into one-hot encodings
    # (I am not comfortable binning the continuous numeric columns without some basis for their bins)
    # Or, since we don't have that much data anyway, just one-hot everything and use BCE loss to train
    # We have to take the entire dataset and transform it into one-hots / bins
complete_data = r"MB.csv"
# complete_data = pd.read_csv(complete_data).set_index("METABRIC_ID")
complete_data = pd.read_csv(complete_data, index_col=None, header=0)
# Either we keep numerics as
clin_numeric = complete_data[["METABRIC_ID","Age_At_Diagnosis", "NPI", "Size"]]
# Numerical binned to arbitrary ranges then one-hot dummies
metabric_id = complete_data[["METABRIC_ID"]]
    aad = pd.get_dummies(pd.cut(complete_data["Age_At_Diagnosis"],10, labels=[1,2,3,4,5,6,7,8,9,10]),prefix="aad", dummy_na = True)
npi = pd.get_dummies(pd.cut(complete_data["NPI"],6, labels=[1,2,3,4,5,6]),prefix="npi", dummy_na = True)
size = pd.get_dummies(complete_data["Size"], prefix = "size", dummy_na = True)
# Categorical and ordinals to one-hot dummies
btl = | pd.get_dummies(complete_data["Breast_Tumour_Laterality"], prefix = "btl", dummy_na = True) | pandas.get_dummies |
import sys
import pandas as pd
import numpy as np
import click
sys.path.append('.')
from src.data.preprocess_input import read_tsyg_data, prepare_dataset
def gen_init_states(base, parameters, mu, sigma, num=100, sign=None):
init = base.loc[np.repeat(base.index.values, num)].reset_index(drop=True)
for i, p in enumerate(parameters):
if sign is not None:
s = sign[i]
else:
s = ''
init[p] = np.random.normal(mu[i], sigma[i], size=init.shape[0])
if p == 'PDYN':
s = 'pos'
if s == 'pos':
init[p] = init[p].apply(lambda x: abs(round(x*100)/100)+0.1)
elif s == 'neg':
init[p] = init[p].apply(lambda x: -abs(round(x*100)/100)+0.1)
else:
init[p] = init[p].apply(lambda x: round(x*100)/100)
return init
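# Hedged usage sketch (illustrative values): replicate a one-row base state ten times and perturb
# PDYN and B0z with draws from N(mu, sigma); column names mirror the TA15-style inputs used below.
_base = pd.DataFrame(columns=['PDYN', 'B0y', 'B0z'], data=[[2.0, 1.0, 8.0]])
_ens = gen_init_states(_base, ['PDYN', 'B0z'], mu=[2.0, -5.0], sigma=[0.5, 1.0], num=10, sign=['pos', 'neg'])
print(_ens.shape)  # -> (10, 3)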
def generate_perturbed_input(data, column, mu, sigma, amount, sign):
    '''Generates a Tsyganenko input file.
    Generates an input file usable by the TA15 model.
    It varies the value in the given column(s) by picking randomly from a Gaussian distribution.
    The other values remain fixed.
    Input:
        column: name(s) of the variable(s) to be varied
        mu: mean value of the Gaussian distribution
        sigma: standard deviation of the Gaussian distribution
        amount: total number of inputs - size of the ensemble
        sign: if True, set all random values to the sign of mu
    Output:
        file named TA15_output, usable by the TA15 model.
    '''
allowed_columns = ['PDYN', 'B0y', 'B0z', 'XIND', 'VGSEX', 'VGSEY', 'VGSEZ']
for c in column:
assert c in allowed_columns, "Error, unknown variable {}. Known variables: {}".format(c, allowed_columns)
mu = [mu]*len(column)
sigma = [sigma]*len(column)
if len(column) > 1:
deny = ['', 'yes', 'y', 'Y', 'YES', 'Yes', 'YEs', 'yes']
        for i in range(1, len(column)):
            ans = input('> Keep same mu and sigma for the parameter {}? (Current value mu: {}, sigma: {}) [yes]'.format(column[i], mu[i], sigma[i]))
            if ans not in deny:
                try:
                    nmu = input('Please add the desired value of mu for {}: '.format(column[i]))
                    nmu = float(nmu)
                    mu[i] = nmu
                    nsg = input('Please add the desired value of sigma for {}: '.format(column[i]))
                    nsg = float(nsg)
                    sigma[i] = nsg
except ValueError:
print("Error, that is not a number.")
base = pd.DataFrame(columns=allowed_columns, data=[[2.0, 1, 8, 0, -400.0, 0.0, 0.0]])
base.index.name = 'ID'
if sign:
sign = []
for m in mu:
if m < 0:
sign.append('neg')
else:
sign.append('pos')
else:
sign=None
init = gen_init_states(base, column, mu, sigma, amount, sign)
return init
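# Hedged usage sketch (illustrative): build a 50-member ensemble that varies only PDYN around
# mu=2.0 (nPa); the first argument is unused by this version of the function, so None is passed.
_ens_pdyn = generate_perturbed_input(None, ['PDYN'], mu=2.0, sigma=0.3, amount=50, sign=True)
print(_ens_pdyn.shape)              # -> (50, 7)
print(_ens_pdyn['PDYN'].min() > 0)  # True: dynamic pressure is forced positive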
def generate_perturbed_input(data, variable, mu, sigma, amount, sign):
# Perturbe a single variable
for var in variable:
assert var in list(data.columns), "Error, unknown variable {}. Known variables: {}".format(var, list(data.columns))
mu = [mu]*len(variable)
sigma = [sigma]*len(variable)
signs = [None]*len(variable)
if sign:
for i, m in enumerate(mu):
if m < 0:
signs[i] = 'neg'
else:
signs[i] = 'pos'
init = data.loc[np.repeat(data.index.values, amount)].reset_index(drop=True)
for i, var in enumerate(variable):
generate_single_variable(init, var, mu[i], sigma[i], signs[i])
return prepare_dataset(init)
def generate_single_variable(base, variable, mu, sigma, sign=None):
if sign is not None:
s = sign
else:
s = ''
if variable == 'PDYN':
s = 'pos'
base[variable] = gaussian_dist(base[variable].to_numpy()[0], mu, sigma, size=base.shape[0])
if s == 'pos':
base[variable] = base[variable].apply(lambda x: abs(round(x, 2))+0.1)
elif s == 'neg':
base[variable] = base[variable].apply(lambda x: -abs(round(x, 2))+0.1)
else:
base[variable] = base[variable].apply(lambda x: round(x, 2))
return base
@click.command()
@click.argument('variable', type=str, nargs=-1)
@click.argument('mu', type=float, default=0)
@click.argument('sigma', type=float, default=0.05)
@click.argument('amount', type=int, default=50)
@click.argument('name', type=str)
@click.option('--sign', type=bool, default=False)
def main(variable, mu, sigma, amount, sign, name):
data = read_tsyg_data()
rnge = pd.date_range('2004-05-08 09:05:00', '2004-05-08 13:05:00', freq='20min')
for i in range(12):
timestamp = str(rnge[i])
print(timestamp)
values = extract_date_data(data, timestamp)
#states = generate_perturbed_input(data, variable, mu, sigma, amount, sign)
states = generate_perturbed_input(values, variable, mu, sigma, amount, sign)
states = states.drop(columns=['Np'])
print(states.head())
if i == 9:
states.Bz += 1.9
states.to_csv('model/input/input{}{}.csv'.format(name, (i+1)))
def gaussian_dist(base, mu, sigma, maxv=-363.0, minv=-583.0, size=50, seed=4255):
np.random.seed(seed)
rand = np.random.normal(mu, sigma, size=size)
values = np.round(rand*base, 2)
    # clip values to [minv, maxv]
for i, val in enumerate(values):
if val > maxv:
values[i] = maxv
if val < minv:
values[i] = minv
return values
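# Hedged mini-example (illustrative): gaussian_dist scales N(mu, sigma) draws by the base value
# and clips the result to [minv, maxv]; the default limits look tuned for VGSEX around -400 km/s
# (an assumption). generate_single_variable wraps it for a single DataFrame column.
print(gaussian_dist(-400.0, mu=1.0, sigma=0.05, size=5))
print(generate_single_variable(pd.DataFrame({'VGSEX': [-400.0]}), 'VGSEX', mu=1.0, sigma=0.05, sign='neg'))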
def extract_date_data(data, timestamp):
r = | pd.Timestamp(timestamp) | pandas.Timestamp |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
import pandas as pd
import numpy as np
import tushare as ts
class DateEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o,np.ndarray):
return o.tolist()
return json.JSONEncoder.default(self,o)
def today_all(request):
data = ts.get_today_all()
data = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
| tm.assert_almost_equal(mixed, exp) | pandas.util.testing.assert_almost_equal |
import pandas as pd
media = | pd.read_excel('./../data/CCLE/CCLE_Summary.xlsx', sheet_name='Media') | pandas.read_excel |
import numpy as np
import pandas as pd
from scipy import stats
stats.norm(10.,2.).rvs()
x = np.ones(10)
x *= 2.4
df = | pd.DataFrame([1,2,3]) | pandas.DataFrame |
# Import the pandas-PACKAGE
import matplotlib.pyplot as plt
import pandas as pd
# gca stands for 'get current axis'
ax = plt.gca()
# A 3D paraboloid can be described with the equation:
# (x**2 / a**2) + (y**2 / a**2) = z
# If the coefficient 'a' is set to 1
# then the radius at each cut will be equal to √z (square-root of z).
# Your task is to create a dictionary that stores the mapping
# from the pair of coordinates (x, y) to the z-coordinate
# The list for 'x' and the list for 'y' are given:
range_x = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]
range_y = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]
circ_paraboloid = dict()
for x in range_x:
for y in range_y:
# Calculate the value for z
# Equation: (x**2/a**2) + (y**2/a**2) = z
# Coefficient (a) = 1
# => ( x**2 / (1**2) ) + ( y**2 / (1**2) ) = z
p = x**2 / (1**2)
q = y**2 / (1**2)
z = ( p ) + ( q )
# Create a new key for the dictionary
key = (x,y)
# print("key (x,y): ", key)
# Create a new key-value pair
circ_paraboloid[key] = z
# print (circ_paraboloid.items())
# print('circ_paraboloid: ', circ_paraboloid)
# # Prints something like
# ====================================
# [
# ((0.0, 0.0), 0.0),
# ((0.0, 0.2), 0.04000000000000001),
# ....
# ]
# ====================================
# print('circ_paraboloid[(1.8, 1.4)]: ', circ_paraboloid[(1.8, 1.4)])
my_paraboloid = dict()
my_keys = []
my_keys_index = []
my_values = []
for key, value in circ_paraboloid.items():
my_keys.append(key)
my_values.append(value)
# my_paraboloid[key.index()] = value
print(len(my_keys))
print(len(my_values))
for key in my_keys:
my_keys_index.append(my_keys.index(key) + 1 )
# print(my_keys_index)
# my_paraboloid = zip(my_keys_index, my_values)
my_paraboloid= {}
# print(my_paraboloid)
# Create a dataframe CLASS object
df = | pd.DataFrame(my_paraboloid) | pandas.DataFrame |
import json
import operator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.core.indexes import base
from scipy import stats
from sklearn.metrics import auc, roc_auc_score, roc_curve
from tqdm.auto import tqdm
from data_prep import gini_weight, normalise_matrix, prepare_matrix
class NoResultsError(Exception):
pass
def convert_df_to_string(df):
"""
    Takes a dataframe and converts it to a list and then to a string separated
by ';'
"""
if len(df.columns) > 1:
df = df.T
df = df.rename({df.columns[0]: 0}, axis=1)
else:
df = df.round(decimals=3)
df_list = df[0].values.tolist()
df_str = ";".join(map(str, df_list))
return df_str
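# Hedged mini-example (illustrative): a single-column frame becomes a ';'-joined string of its
# rounded values; a multi-column frame is transposed first, so its first row is used instead.
print(convert_df_to_string(pd.DataFrame([0.12345, 0.678])))  # -> '0.123;0.678'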
def output_data(results):
res_best = results[0]
print(res_best)
def find_rank_file(elms, correct_res, metric, reverse=True):
ranks = []
elms[metric].sort(key=operator.itemgetter("result"), reverse=reverse)
for cr in correct_res:
for i, e in enumerate(elms[metric]):
if e["elm"] == cr:
ranks.append(i)
return ranks
def find_rank_name(elms, pssm, metric, reverse=True):
ranks = []
elms[metric].sort(key=operator.itemgetter("result"), reverse=reverse)
for i, e in enumerate(elms[metric]):
if e["elm"] == pssm:
ranks.append(i)
return ranks
class MultiComparison:
def __init__(
self,
data_file_1,
data_file_2 = "",
file_name = "",
use_index = True,
correct_results_file = "",
):
self.multi_metrics = {
"pearsons": {"match": [], "mismatch": []},
"kendalls": {"match": [], "mismatch": []},
"spearmans": {"match": [], "mismatch": []},
"dots": {"match": [], "mismatch": []},
"ssds": {"match": [], "mismatch": []},
"kls": {"match": [], "mismatch": []},
}
self.best_match = {}
self.all_ranks = {}
self.all_results = {}
self.pearson_rank = []
self.kendall_rank = []
self.spearman_rank = []
self.ssd_rank = []
self.dot_rank = []
self.kl_rank = []
if use_index:
data_1_names = []
for pssm in data_file_1:
# for the dms dataset
data_1_names.append(pssm)
# for the PropPD dataset
# data_1_names.append(data_file_1[pssm]["motif"])
else:
data_1_names = [file_name]
col_names = [
"ELM",
"Quality",
"Comparison Results",
"Motif 1",
"Motif 2",
"Consensus",
"Comparison Windows",
"Gini 1",
"Gini 2",
"Similarity",
]
similarity_met_col_names = [
"Pearsons",
"Kendalls",
"Spearmans",
"SSD",
"Dot Product",
"KL",
]
self.best_df = pd.DataFrame(columns=col_names, index=data_1_names)
self.similarity_df = pd.DataFrame(
columns=similarity_met_col_names, index=data_1_names
)
if data_file_2 != "":
data_2_names = []
for pssm in data_file_2:
# for dms
data_2_names.append(pssm)
# # for ELM
# data_2_names.append(data_file_2[pssm]["motif"])
self.all_df = pd.DataFrame(columns=data_1_names, index=data_2_names)
if correct_results_file != "":
with open(correct_results_file) as crf:
self.correct_results = json.load(crf)
self.match = []
self.mismatch = []
def del_nan_values(self):
for pssm in self.all_results:
for key, value in list(self.all_results[pssm].items()):
check_float = isinstance(value, float)
if (check_float == False):
del self.all_results[pssm][key]
def create_ranks(self,correct_results_file):
ranked = {}
for pssm in self.all_results:
ranked[pssm] = rank_dict(self.all_results[pssm])
correct_res_rank = {}
if correct_results_file == "":
for pssm_correct in ranked:
if pssm_correct not in ranked[pssm_correct]:
correct_res_rank[pssm_correct] = 0
else:
correct_res_rank[pssm_correct] = ranked[pssm_correct][pssm_correct]
else:
for pssm_correct in ranked:
print(pssm_correct)
if pssm_correct not in self.correct_results:
correct_result = 0
else:
best_rank = {}
for result in self.correct_results[pssm_correct]:
if result in ranked[pssm_correct]:
best_rank[result] = ranked[pssm_correct][result]
best_rank_sorted = sorted(best_rank.items(), key=operator.itemgetter(1))
print(best_rank_sorted)
if len(best_rank_sorted) == 0:
correct_result = 0
else:
correct_result = best_rank_sorted[0][1]
correct_res_rank[pssm_correct] = correct_result
# for comp_pssm in ranked[pssm_correct]:
# if comp_pssm == correct_results_file[pssm_correct][0]:
# correct_res_rank[pssm_correct] = ranked[pssm_correct][comp_pssm]
# else:
# correct_res_rank[pssm_correct] = 0
with open("ranked.json", "w") as ranked_file:
json.dump(ranked, ranked_file)
print("correct res rank")
print(correct_res_rank)
return ranked,correct_res_rank
def add_rank_column(self, correct_res_rank):
"""
Creates a dataframe with all the correct results ranks and adds it
to the output best_result dataframe
"""
# Turn Index to a Column
self.best_df = self.best_df.reset_index()
self.best_df = self.best_df.rename(columns={"index": "DMS"})
# Add the ranks column
rank_df = pd.DataFrame([correct_res_rank])
rank_df = rank_df.T
rank_df = rank_df.rename(columns={0: "Rank"})
rank_df = rank_df.reset_index()
rank_df = rank_df.rename(columns={"index": "DMS"})
print("rank df")
print(rank_df)
self.best_df = self.best_df.merge(rank_df, on="DMS")
# self.best_df = self.best_df.join(rank_df)
# self.best_df["Correct Result Rank"] = rank_df['Rank'].values
return self.best_df
def create_top_5_file(self):
"""
Creates a json file and returns a dictionary with the top 5 results for each PSSM comparison
"""
# create a file with the top 5 results for each pssm
top_5 = {}
ranked = {}
for k, v in self.all_results.items():
ranked[k] = rank_dict(v)
res = sorted(v.items(), key=operator.itemgetter(1), reverse=True)
top_5[k] = res[:5]
with open("top_5.json", "w") as top_5_file:
json.dump(top_5, top_5_file)
return top_5
def create_file(self, correct_results_file):
"""
Final function to be called after each comparison.
Creates all the output files of the program.
"""
        # drop any nan results (they cannot be ranked)
for pssm in self.all_results:
for key, value in list(self.all_results[pssm].items()):
if pd.isna(value):
del self.all_results[pssm][key]
print("self.all_results")
print(self.all_results)
print("self.match")
print(self.match)
print("self.correct_rank")
print(self.correct_rank)
# create a file with the top 5 results for each pssm
top_5 = self.create_top_5_file()
# check the rank of the correct result
ranked,correct_res_rank = self.create_ranks(correct_results_file)
self.best_df = self.best_df.sort_values(
by=["Comparison Results"], ascending=False
)
# Add the ranks column
self.best_df = self.add_rank_column(correct_res_rank)
with open("all_results.json", "w") as all_results_file:
json.dump(self.all_results, all_results_file)
# TODO: use filenames
self.best_df.to_csv("dms_vs_elm.csv")
if hasattr(self, "all_df"):
self.all_df.to_csv("elm-all.csv")
# if hasattr(self, "similarity_df"):
# self.similarity_df.to_csv("dms_similarity.csv")
def plot_match(self):
match_df = | pd.DataFrame(self.match, columns=["Match"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
def create_empty_df(columns, dtypes, index=None):
df = pd.DataFrame(index=index)
for c,d in zip(columns, dtypes):
df[c] = | pd.Series(dtype=d) | pandas.Series |
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from argparse import ArgumentParser
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# PyTorch, PyTorch.Text, and Lightning-PyTorch methods
import torch
from torch import nn, optim, Tensor
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchtext.vocab import Vocab
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# TQDM for progress tracking
from tqdm import tqdm
# Custom methods
from classes.datasets import CONCISE_PREDICTOR_SET
from models.CPM import CPM_deep
# Train CPM_deep
def train_CPM_deep(TRAINING_SET,
VAL_SET,
TESTING_SET,
TUNE_IDX,
REPEAT,
FOLD,
OUTPUT_DIR,
BATCH_SIZE,
LEARNING_RATE,
LAYERS,
NEURONS,
DROPOUT,
ES_PATIENCE,
EPOCHS,
CLASS_WEIGHTS,
OUTPUT_ACTIVATION):
"""
Args:
TRAINING_SET (pd.DataFrame)
VAL_SET (pd.DataFrame)
TESTING_SET (pd.DataFrame)
TUNE_IDX (str)
REPEAT (int)
FOLD (int)
OUTPUT_DIR (str): directory to save model outputs
BATCH_SIZE (int): size of minibatches during training
LEARNING_RATE (float): Learning rate for ADAM optimizer
LAYERS (int): number of hidden layers in feed forward neural network
NEURONS (list of length layers): the number of neurons in each layer
        DROPOUT (float): the proportion of each dense layer dropped out during training
ES_PATIENCE (int): patience during early stopping
EPOCHS (int): maximum epochs during training
CLASS_WEIGHTS (boolean): identifies whether loss should be weighted against class frequency
OUTPUT_ACTIVATION (string): 'softmax' for DeepMN or 'sigmoid' for DeepOR
"""
# Create a directory within current repeat/fold combination to store outputs of current tuning configuration
tune_model_dir = os.path.join(OUTPUT_DIR,'tune_'+TUNE_IDX)
os.makedirs(tune_model_dir,exist_ok = True)
# Create PyTorch Dataset objects
train_Dataset = CONCISE_PREDICTOR_SET(TRAINING_SET,OUTPUT_ACTIVATION)
val_Dataset = CONCISE_PREDICTOR_SET(VAL_SET,OUTPUT_ACTIVATION)
test_Dataset = CONCISE_PREDICTOR_SET(TESTING_SET,OUTPUT_ACTIVATION)
# Create PyTorch DataLoader objects
curr_train_DL = DataLoader(train_Dataset,
batch_size=int(BATCH_SIZE),
shuffle=True)
curr_val_DL = DataLoader(val_Dataset,
batch_size=len(val_Dataset),
shuffle=False)
curr_test_DL = DataLoader(test_Dataset,
batch_size=len(test_Dataset),
shuffle=False)
# Initialize current model class based on hyperparameter selections
model = CPM_deep(train_Dataset.X.shape[1],
LAYERS,
NEURONS,
DROPOUT,
OUTPUT_ACTIVATION,
LEARNING_RATE,
CLASS_WEIGHTS,
train_Dataset.y)
early_stop_callback = EarlyStopping(
monitor='val_AUROC',
patience=ES_PATIENCE,
mode='max'
)
checkpoint_callback = ModelCheckpoint(
monitor='val_AUROC',
dirpath=tune_model_dir,
filename='{epoch:02d}-{val_AUROC:.2f}',
save_top_k=1,
mode='max'
)
csv_logger = pl.loggers.CSVLogger(save_dir=OUTPUT_DIR,name='tune_'+TUNE_IDX)
trainer = pl.Trainer(logger = csv_logger,
max_epochs = EPOCHS,
enable_progress_bar = False,
enable_model_summary = False,
callbacks=[early_stop_callback,checkpoint_callback])
trainer.fit(model,curr_train_DL,curr_val_DL)
best_model = CPM_deep.load_from_checkpoint(checkpoint_callback.best_model_path)
best_model.eval()
# Save validation set probabilities
for i, (x,y) in enumerate(curr_val_DL):
yhat = best_model(x)
val_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_val_probs = F.softmax(yhat.detach()).cpu().numpy()
curr_val_preds = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_val_preds['TrueLabel'] = val_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_val_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_val_probs = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)'])
curr_val_labels = pd.DataFrame(val_true_y,columns=['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7'])
curr_val_preds = pd.concat([curr_val_probs,curr_val_labels],axis = 1)
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
curr_val_preds.insert(loc=0, column='GUPI', value=VAL_SET.GUPI.values)
curr_val_preds['TUNE_IDX'] = TUNE_IDX
curr_val_preds.to_csv(os.path.join(tune_model_dir,'val_predictions.csv'),index=False)
best_model.eval()
# Save testing set probabilities
for i, (x,y) in enumerate(curr_test_DL):
yhat = best_model(x)
test_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_test_probs = F.softmax(yhat.detach()).cpu().numpy()
curr_test_preds = pd.DataFrame(curr_test_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_test_preds['TrueLabel'] = test_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_test_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_test_probs = pd.DataFrame(curr_test_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)'])
curr_test_labels = pd.DataFrame(test_true_y,columns=['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7'])
curr_test_preds = pd.concat([curr_test_probs,curr_test_labels],axis = 1)
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
curr_test_preds.insert(loc=0, column='GUPI', value=TESTING_SET.GUPI.values)
curr_test_preds['TUNE_IDX'] = TUNE_IDX
curr_test_preds.to_csv(os.path.join(tune_model_dir,'test_predictions.csv'),index=False)
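# Hedged call sketch (illustrative values only; needs the CONCISE_PREDICTOR_SET splits and a
# configured PyTorch/Lightning environment, so it is not executed here):
# train_CPM_deep(train_df, val_df, test_df,
#                TUNE_IDX='0001', REPEAT=1, FOLD=1, OUTPUT_DIR='model_outputs/repeat01/fold1',
#                BATCH_SIZE=32, LEARNING_RATE=1e-3, LAYERS=2, NEURONS=[128, 64], DROPOUT=0.2,
#                ES_PATIENCE=10, EPOCHS=100, CLASS_WEIGHTS=True, OUTPUT_ACTIVATION='softmax')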
# Functions to collect validation metrics from files in parallel
def collect_val_metrics(
csv_file_list,
n_cores,
progress_bar=True,
progress_bar_desc=''):
# Establish sizes of files for each core
sizes = [len(csv_file_list) // n_cores for _ in range(n_cores)]
sizes[:(len(csv_file_list) - sum(sizes))] = [val+1 for val in sizes[:(len(csv_file_list) - sum(sizes))]]
end_indices = np.cumsum(sizes)
start_indices = np.insert(end_indices[:-1],0,0)
# Build arguments for metric sub-functions
arg_iterable = [(
csv_file_list[start_indices[idx]:end_indices[idx]],
progress_bar,
progress_bar_desc)
for idx in range(len(start_indices))]
# Run metric sub-function in parallel
with multiprocessing.Pool(n_cores) as pool:
result = pool.starmap(_val_metric_par, arg_iterable)
return pd.concat(result, ignore_index=True).sort_values(by=['fold','TUNE_IDX']).reset_index(drop=True)
def _val_metric_par(sub_csv_file_list,
progress_bar=True,
progress_bar_desc=''):
if progress_bar:
iterator = tqdm(range(len(sub_csv_file_list)), desc=progress_bar_desc)
else:
iterator = range(len(sub_csv_file_list))
return pd.concat([val_metric_extraction(sub_csv_file_list[sub_file_idx]) for sub_file_idx in iterator], ignore_index=True)
def val_metric_extraction(chosen_file):
curr_metric_df = pd.read_csv(chosen_file)
curr_metric_df = curr_metric_df.groupby(['epoch','step'],as_index=False)[curr_metric_df.columns[~curr_metric_df.columns.isin(['epoch', 'step'])]].max()
curr_metric_df['TUNE_IDX'] = re.search('/tune_(.*)/version_', chosen_file).group(1)
curr_metric_df['repeat'] = int(re.search('/repeat(.*)/fold', chosen_file).group(1))
curr_metric_df['fold'] = int(re.search('/fold(.*)/tune', chosen_file).group(1))
return(curr_metric_df)
### SURFACE FUNCTION
def generate_resamples(
pred_df,
output_activation,
num_resamples,
n_cores,
progress_bar=True,
progress_bar_desc=''):
# Establish number of resamples per each core
sizes = [num_resamples // n_cores for _ in range(n_cores)]
sizes[:(num_resamples - sum(sizes))] = [val+1 for val in sizes[:(num_resamples - sum(sizes))]]
# Build arguments for metric sub-functions
arg_iterable = [(pred_df,
output_activation,
s,
progress_bar,
progress_bar_desc)
for s in sizes]
# Run metric sub-function in parallel
with multiprocessing.Pool(n_cores) as pool:
result = pool.starmap(_bs_rs_generator, arg_iterable)
# Add a core index to each resample dataframe
for i in range(len(result)):
curr_df = result[i]
curr_df['core_idx'] = i
result[i] = curr_df
result = pd.concat(result)
# From unique combinations of core_idx and core_sub_idx, assign resample IDs
rs_combos = result[['core_sub_idx','core_idx']].drop_duplicates(ignore_index = True)
rs_combos = rs_combos.sort_values(by=['core_idx','core_sub_idx']).reset_index(drop=True)
rs_combos['rs_idx'] = [i for i in range(rs_combos.shape[0])]
# Merge resample IDs
result = pd.merge(result,rs_combos,how='left',on=['core_idx','core_sub_idx'])
return result
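# Hedged mini-example (illustrative): each resample produced above is essentially a stratified
# bootstrap of GUPIs with duplicates dropped, as in _bs_rs_generator below; a single draw:
_toy = pd.DataFrame({'GUPI': ['P%02d' % i for i in range(10)],
                     'TrueLabel': [0, 0, 0, 1, 1, 1, 2, 2, 2, 2]})
_one_rs = resample(_toy, replace=True, stratify=_toy.TrueLabel).drop_duplicates(ignore_index=True)
print(_one_rs.shape[0] <= 10)  # True: duplicates are dropped, so at most the original patient count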
## SUB-SURFACE and ACTIVE FUNCTION
def _bs_rs_generator(
pred_df,
output_activation,
size,
progress_bar,
progress_bar_desc):
# Create random generator instance
rs = np.random.RandomState()
if output_activation == 'softmax':
curr_outcomes = pred_df[['GUPI','TrueLabel']].drop_duplicates(ignore_index = True)
elif output_activation == 'sigmoid':
label_col = [col for col in pred_df if col.startswith('GOSE>')]
label_col.insert(0,'GUPI')
curr_outcomes = pred_df[label_col].drop_duplicates(ignore_index = True)
curr_outcomes['TrueLabel'] = curr_outcomes[[col for col in curr_outcomes if col.startswith('GOSE>')]].sum(axis = 1)
curr_outcomes = curr_outcomes.drop(columns = [col for col in curr_outcomes if col.startswith('GOSE>')])
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
if progress_bar:
iterator = tqdm(range(size), desc=progress_bar_desc)
else:
iterator = range(size)
bs_subcore_resamples = []
for i in iterator:
curr_bs_rs = resample(curr_outcomes,replace=True, random_state = rs, stratify = curr_outcomes.TrueLabel).drop_duplicates(ignore_index = True)
curr_bs_rs['core_sub_idx'] = i
bs_subcore_resamples.append(curr_bs_rs)
return pd.concat(bs_subcore_resamples)
# Function to bootstrap AUROC on optimal validation configuration
def bootstrap_opt_val_metric(
opt_preds_df,
output_activation,
bs_resamples,
n_cores,
progress_bar=True,
progress_bar_desc=''):
# Establish number of resamples for each core
sizes = [len(bs_resamples.rs_idx.unique()) // n_cores for _ in range(n_cores)]
sizes[:(len(bs_resamples.rs_idx.unique()) - sum(sizes))] = [val+1 for val in sizes[:(len(bs_resamples.rs_idx.unique()) - sum(sizes))]]
end_indices = np.cumsum(sizes)
start_indices = np.insert(end_indices[:-1],0,0)
# Build arguments for metric sub-functions
arg_iterable = [(
opt_preds_df,
output_activation,
bs_resamples,
bs_resamples.rs_idx.unique()[start_indices[idx]:end_indices[idx]],
progress_bar,
progress_bar_desc)
for idx in range(len(start_indices))]
# Run metric sub-function in parallel
with multiprocessing.Pool(n_cores) as pool:
result = pool.starmap(_opt_val_bs_par, arg_iterable)
return pd.concat(result)
### SUB-SURFACE AND ACTIVE FUNCTIONS:
def _opt_val_bs_par(
opt_preds_df,
output_activation,
bs_resamples,
curr_resamples,
progress_bar,
progress_bar_desc):
# Initiate dataframe to store AUROCs from current core
curr_rs_test_results = pd.DataFrame(columns=['TUNE_IDX','RESAMPLE_IDX','opt_AUROC'])
if progress_bar:
iterator = tqdm(curr_resamples, desc=progress_bar_desc)
else:
iterator = curr_resamples
# Iterate through assigned resampling indices
for curr_rs_idx in iterator:
# Extract current bootstrapping resamples
curr_bs_rs = bs_resamples[bs_resamples.rs_idx == curr_rs_idx]
# Extract current optimal configuration in-sample predictions
curr_rs_preds = opt_preds_df[opt_preds_df.GUPI.isin(curr_bs_rs.GUPI)]
if output_activation == 'softmax':
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE')]
curr_config_auroc = roc_auc_score(curr_rs_preds.TrueLabel.values, curr_rs_preds[prob_cols].values, multi_class='ovo')
elif output_activation == 'sigmoid':
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE')]
label_cols = [col for col in curr_rs_preds if col.startswith('GOSE>')]
curr_config_auroc = roc_auc_score(curr_rs_preds[label_cols].values, curr_rs_preds[prob_cols].values, multi_class='macro')
curr_rs_test_results = curr_rs_test_results.append(pd.DataFrame({'TUNE_IDX':opt_preds_df.TUNE_IDX.unique()[0],
'RESAMPLE_IDX':curr_rs_idx,
'opt_AUROC':curr_config_auroc},index=[0]),ignore_index=True)
return curr_rs_test_results
# Calculate trial configuration performance on bootstrap resamples in parallel
def bootstrap_val_metric(
trial_configurations,
output_activation,
bs_resamples,
val_pred_file_info_df,
repeat,
n_cores,
split_by_tune_idx,
progress_bar=True,
progress_bar_desc=''):
if split_by_tune_idx:
# Establish sizes of files for each core
sizes = [len(trial_configurations) // n_cores for _ in range(n_cores)]
sizes[:(len(trial_configurations) - sum(sizes))] = [val+1 for val in sizes[:(len(trial_configurations) - sum(sizes))]]
end_indices = np.cumsum(sizes)
start_indices = np.insert(end_indices[:-1],0,0)
# Build arguments for metric sub-functions
arg_iterable = [(
trial_configurations[start_indices[idx]:end_indices[idx]],
output_activation,
bs_resamples,
val_pred_file_info_df,
repeat,
progress_bar,
progress_bar_desc)
for idx in range(len(start_indices))]
else:
# Establish sizes of resamples for each core
sizes = [len(bs_resamples.rs_idx.unique()) // n_cores for _ in range(n_cores)]
sizes[:(len(bs_resamples.rs_idx.unique()) - sum(sizes))] = [val+1 for val in sizes[:(len(bs_resamples.rs_idx.unique()) - sum(sizes))]]
end_indices = np.cumsum(sizes)
start_indices = np.insert(end_indices[:-1],0,0)
# Build arguments for metric sub-functions
arg_iterable = [(
trial_configurations,
output_activation,
bs_resamples[bs_resamples.rs_idx.isin([i for i in range(start_indices[idx],end_indices[idx])])],
val_pred_file_info_df,
repeat,
progress_bar,
progress_bar_desc)
for idx in range(len(start_indices))]
# Run metric sub-function in parallel
with multiprocessing.Pool(n_cores) as pool:
result = pool.starmap(_val_bs_par, arg_iterable)
return pd.concat(result, ignore_index=True).sort_values(by=['TUNE_IDX','RESAMPLE_IDX']).reset_index(drop=True)
### SUB-SURFACE AND ACTIVE FUNCTIONS:
def _val_bs_par(
curr_trial_configs,
output_activation,
bs_resamples,
val_pred_file_info_df,
repeat,
progress_bar,
progress_bar_desc):
# Initiate dataframe to store AUROCs from current core
curr_rs_test_results = pd.DataFrame(columns=['TUNE_IDX','RESAMPLE_IDX','trial_AUROC'])
if progress_bar:
iterator = tqdm(curr_trial_configs.TUNE_IDX.values, desc=progress_bar_desc)
else:
iterator = curr_trial_configs.TUNE_IDX.values
# Iterate through trial configurations
for curr_trial_tune_idx in iterator:
# Find available validation prediction files for current under-trial configuration
curr_trial_candidate_dirs = val_pred_file_info_df.file[(val_pred_file_info_df.TUNE_IDX == curr_trial_tune_idx) & (val_pred_file_info_df.repeat <= repeat)].values
curr_trial_candidate_dirs = [curr_cand_dir for curr_cand_dir in curr_trial_candidate_dirs if os.path.isfile(curr_cand_dir)]
# Load and concatenate validation predictions for current trial tuning index
curr_trial_val_preds_df = pd.concat([pd.read_csv(curr_dir) for curr_dir in curr_trial_candidate_dirs])
curr_trial_val_preds_df['TUNE_IDX'] = curr_trial_tune_idx
# Iterate through bootstrapping resamples and calculate AUROC for current trial configuration tuning index
for curr_rs_idx in bs_resamples.rs_idx.unique():
# Extract current bootstrapping resamples
curr_bs_rs = bs_resamples[bs_resamples.rs_idx == curr_rs_idx]
# Extract current trial configuration in-sample predictions
curr_rs_preds = curr_trial_val_preds_df[curr_trial_val_preds_df.GUPI.isin(curr_bs_rs.GUPI)]
if output_activation == 'softmax':
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE')]
curr_config_auroc = roc_auc_score(curr_rs_preds[curr_rs_preds.TUNE_IDX == curr_trial_tune_idx].TrueLabel.values, curr_rs_preds[curr_rs_preds.TUNE_IDX == curr_trial_tune_idx][prob_cols].values, multi_class='ovo')
elif output_activation == 'sigmoid':
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE')]
label_cols = [col for col in curr_rs_preds if col.startswith('GOSE>')]
curr_config_auroc = roc_auc_score(curr_rs_preds[curr_rs_preds.TUNE_IDX == curr_trial_tune_idx][label_cols].values, curr_rs_preds[curr_rs_preds.TUNE_IDX == curr_trial_tune_idx][prob_cols].values, multi_class='macro')
curr_rs_test_results = curr_rs_test_results.append(pd.DataFrame({'TUNE_IDX':curr_trial_tune_idx,
'RESAMPLE_IDX':curr_rs_idx,
'trial_AUROC':curr_config_auroc},index=[0]),ignore_index=True)
return curr_rs_test_results
# Functions for Bootstrap Bias Corrected with Dropping CV (BBCD-CV) doi: 10.1007/s10994-018-5714-4
def interrepeat_dropout(
REPEAT,
MODEL_DIR,
validation_perf,
tuning_grid,
grouping_vars,
num_resamples,
num_cores,
save_perf_metrics,
progress_bars
):
# Find all validation prediction files within the current model directory
val_pred_files = []
for path in Path(MODEL_DIR).rglob('*val_predictions.csv'):
val_pred_files.append(str(path.resolve()))
val_pred_file_info_df = pd.DataFrame({'file':val_pred_files,
'TUNE_IDX':[re.search('tune_(.*)/', curr_file).group(1) for curr_file in val_pred_files],
'VERSION':[re.search('IMPACT_model_outputs/v(.*)/repeat', curr_file).group(1) for curr_file in val_pred_files],
'repeat':[int(re.search('/repeat(.*)/fold', curr_file).group(1)) for curr_file in val_pred_files],
'fold':[int(re.search('/fold(.*)/tune_', curr_file).group(1)) for curr_file in val_pred_files]
}).sort_values(by=['repeat','fold','TUNE_IDX','VERSION']).reset_index(drop=True)
# Filter only validation prediction files available to current repeat and viable tuning indices
val_pred_file_info_df = val_pred_file_info_df[val_pred_file_info_df.repeat <= REPEAT]
val_pred_file_info_df = val_pred_file_info_df[val_pred_file_info_df.TUNE_IDX.isin(tuning_grid.TUNE_IDX)]
# Filter only viable tuning indices
validation_perf = validation_perf[validation_perf.TUNE_IDX.isin(tuning_grid.TUNE_IDX)]
max_validation_perf = validation_perf.groupby(['TUNE_IDX','repeat','fold'],as_index=False)['val_AUROC'].max()
max_validation_perf = pd.merge(max_validation_perf, tuning_grid, how="left", on=['TUNE_IDX'])
if save_perf_metrics:
max_validation_perf.to_csv(os.path.join(MODEL_DIR,'repeat'+str(REPEAT).zfill(2),'validation_performance.csv'),index=False)
# Group by tuning index and average validation AUROC
across_cv_perf = max_validation_perf.groupby(max_validation_perf.columns[~max_validation_perf.columns.isin(['repeat','fold','val_AUROC'])].values.tolist(),as_index=False)['val_AUROC'].mean()
opt_tune_idx = across_cv_perf[across_cv_perf.groupby(grouping_vars)['val_AUROC'].transform(max) == across_cv_perf['val_AUROC']].reset_index(drop=True)
# Non-ideal tuning configurations under trial
trial_configs = across_cv_perf[~across_cv_perf.TUNE_IDX.isin(opt_tune_idx.TUNE_IDX)]
# Initialize empty list to store droppped configurations
dropped_idx = []
# Iterate through each unique combination of the grouping variables
for combo_idx in range(opt_tune_idx.shape[0]):
# Acquire current output activation from optimal configuration dataframe
curr_output = opt_tune_idx.OUTPUT_ACTIVATION.values[combo_idx]
# Acquire current optimal configuration tuning index
curr_opt_tune_idx = opt_tune_idx.TUNE_IDX.values[combo_idx]
# Set current progress bar label
pb_label = ', '.join([name+': '+str(opt_tune_idx[name].values[combo_idx]) for name in grouping_vars])
# Find available validation prediction files for current optimal tuning index
curr_opt_candidate_dirs = val_pred_file_info_df.file[(val_pred_file_info_df.TUNE_IDX == curr_opt_tune_idx) & (val_pred_file_info_df.repeat <= REPEAT)].values
curr_opt_candidate_dirs = [curr_cand_dir for curr_cand_dir in curr_opt_candidate_dirs if os.path.isfile(curr_cand_dir)]
# Compile current optimal configuration validation predictions
curr_opt_val_preds_df = pd.concat([pd.read_csv(curr_cand_dir) for curr_cand_dir in curr_opt_candidate_dirs])
# Filter out under-trial configurations that match the current grouping combination
curr_trial_configs = pd.merge(trial_configs,opt_tune_idx[opt_tune_idx.TUNE_IDX == curr_opt_tune_idx][grouping_vars],how='inner',on=grouping_vars).reset_index(drop=True)
# Generate bootstrapping resamples
bs_resamples = generate_resamples(curr_opt_val_preds_df,
curr_output,
num_resamples,
num_cores,
progress_bar=progress_bars,
progress_bar_desc='Generating resamples '+pb_label)
# Calculate optimal configuration performance on generated resamples
opt_bs_AUC = bootstrap_opt_val_metric(curr_opt_val_preds_df,
curr_output,
bs_resamples,
num_cores,
progress_bar=progress_bars,
progress_bar_desc='Bootstrapping optimal config AUC '+pb_label)
bootstrapped_AUC = bootstrap_val_metric(curr_trial_configs,
curr_output,
bs_resamples,
val_pred_file_info_df,
REPEAT,
num_cores,
len(curr_trial_configs) >= num_cores,
progress_bars,
'Bootstrapping AUC '+pb_label)
bootstrapped_AUC = pd.merge(bootstrapped_AUC,opt_bs_AUC[['RESAMPLE_IDX','opt_AUROC']],on='RESAMPLE_IDX',how='left')
bootstrapped_AUC['trial_win'] = bootstrapped_AUC['trial_AUROC'] >= bootstrapped_AUC['opt_AUROC']
bootstrap_pvals = bootstrapped_AUC.groupby(['TUNE_IDX'],as_index=False)['trial_win'].agg(['sum','size']).reset_index()
bootstrap_pvals['pval'] = bootstrap_pvals['sum']/bootstrap_pvals['size']
dropped_idx.append(bootstrap_pvals[bootstrap_pvals['pval'] < 0.01].TUNE_IDX.values)
        print('Tuning indices dropped from '+pb_label+':')
print(bootstrap_pvals[bootstrap_pvals['pval'] < 0.01].TUNE_IDX.values)
# Concatenate arrays of dropped indices
dropped_tune_idx = np.concatenate(dropped_idx)
viable_tuning_grid = tuning_grid[~tuning_grid.TUNE_IDX.isin(dropped_tune_idx)]
return (dropped_tune_idx,viable_tuning_grid)
def calc_orc(pred_file_info, progress_bar = True, progress_bar_desc = ''):
if progress_bar:
iterator = tqdm(range(pred_file_info.shape[0]),desc=progress_bar_desc)
else:
iterator = range(pred_file_info.shape[0])
compiled_orc = []
for idx in iterator:
curr_file = pred_file_info.file[idx]
curr_output = pred_file_info.OUTPUT_ACTIVATION[idx]
curr_preds = pd.read_csv(curr_file)
prob_cols = [col for col in curr_preds if col.startswith('Pr(GOSE')]
if curr_output == 'softmax':
aucs = []
for ix, (a, b) in enumerate(itertools.combinations(np.sort(curr_preds.TrueLabel.unique()), 2)):
filt_preds = curr_preds[curr_preds.TrueLabel.isin([a,b])].reset_index(drop=True)
filt_preds['ConditProb'] = filt_preds[prob_cols[b]]/(filt_preds[prob_cols[a]] + filt_preds[prob_cols[b]])
filt_preds['ConditProb'] = np.nan_to_num(filt_preds['ConditProb'],nan=.5,posinf=1,neginf=0)
filt_preds['ConditLabel'] = (filt_preds.TrueLabel == b).astype(int)
aucs.append(roc_auc_score(filt_preds['ConditLabel'],filt_preds['ConditProb']))
curr_orc = np.mean(aucs)
elif curr_output == 'sigmoid':
label_cols = [col for col in curr_preds if col.startswith('GOSE>')]
curr_train_probs = curr_preds[prob_cols].values
train_probs = np.empty([curr_train_probs.shape[0], curr_train_probs.shape[1]+1])
train_probs[:,0] = 1 - curr_train_probs[:,0]
train_probs[:,-1] = curr_train_probs[:,-1]
for col_idx in range(1,(curr_train_probs.shape[1])):
train_probs[:,col_idx] = curr_train_probs[:,col_idx-1] - curr_train_probs[:,col_idx]
train_labels = curr_preds[label_cols].values.sum(1).astype(int)
aucs = []
for ix, (a, b) in enumerate(itertools.combinations(np.sort(np.unique(train_labels)), 2)):
a_mask = train_labels == a
b_mask = train_labels == b
ab_mask = np.logical_or(a_mask,b_mask)
condit_probs = train_probs[ab_mask,b]/(train_probs[ab_mask,a]+train_probs[ab_mask,b])
condit_probs = np.nan_to_num(condit_probs,nan=.5,posinf=1,neginf=0)
condit_labels = b_mask[ab_mask].astype(int)
aucs.append(roc_auc_score(condit_labels,condit_probs))
curr_orc = np.mean(aucs)
curr_info_row = pred_file_info[pred_file_info.file == curr_file].reset_index(drop=True)
curr_info_row['val_ORC'] = curr_orc
compiled_orc.append(curr_info_row)
return pd.concat(compiled_orc,ignore_index = True)
def bs_dropout_auroc(bs_combos, val_file_info, bs_rs_GUPIs, curr_output, progress_bar = True, progress_bar_desc = ''):
compiled_auroc = []
curr_tis = bs_combos.TUNE_IDX.unique()
if progress_bar:
iterator = tqdm(curr_tis,desc=progress_bar_desc)
else:
iterator = curr_tis
    for ti in iterator:
ti_preds = pd.concat([pd.read_csv(curr_file) for curr_file in val_file_info.file[val_file_info.TUNE_IDX == ti].values],ignore_index=True)
curr_ti_combos = bs_combos[bs_combos.TUNE_IDX == ti]
for curr_rs_index in curr_ti_combos.RESAMPLE:
curr_rs_GUPIs = bs_rs_GUPIs[curr_rs_index - 1]
curr_in_sample_preds = ti_preds[ti_preds.GUPI.isin(curr_rs_GUPIs)].reset_index(drop=True)
prob_cols = [col for col in curr_in_sample_preds if col.startswith('Pr(GOSE')]
if curr_output == 'softmax':
curr_auroc = roc_auc_score(curr_in_sample_preds.TrueLabel.values, curr_in_sample_preds[prob_cols].values, multi_class='ovo')
elif curr_output == 'sigmoid':
label_cols = [col for col in curr_in_sample_preds if col.startswith('GOSE>')]
curr_auroc = roc_auc_score(curr_in_sample_preds[label_cols].values, curr_in_sample_preds[prob_cols].values, multi_class='macro')
compiled_auroc.append(pd.DataFrame({'TUNE_IDX':ti,'RESAMPLE':curr_rs_index,'val_AUROC':curr_auroc},index=[0]))
return pd.concat(compiled_auroc,ignore_index = True)
def collate_batch(batch):
(gupis, idx_list, y_list, pt_offsets) = ([], [], [], [0])
for (curr_GUPI, curr_Indices, curr_y) in batch:
gupis.append(curr_GUPI)
idx_list.append(torch.tensor(curr_Indices,dtype=torch.int64))
y_list.append(curr_y)
pt_offsets.append(len(curr_Indices))
idx_list = torch.cat(idx_list)
y_list = torch.tensor(y_list, dtype=torch.int64)
pt_offsets = torch.tensor(pt_offsets[:-1]).cumsum(dim=0)
return (gupis, idx_list, y_list, pt_offsets)
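
# --- Illustrative usage sketch (not part of the original pipeline) ---
# The names `curr_dataset`, `batch_size`, and `example_collate_usage` below are
# assumptions for illustration only. `collate_batch` flattens the variable-length token
# index lists of a batch into one tensor plus per-patient offsets, which is the layout
# an nn.EmbeddingBag-style aggregation layer expects.
def example_collate_usage(curr_dataset, batch_size=32):
    from torch.utils.data import DataLoader
    loader = DataLoader(curr_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_batch)
    for gupis, flat_indices, labels, offsets in loader:
        # flat_indices: concatenated token indices of every patient in the batch
        # offsets: starting position of each patient's tokens within flat_indices
        return gupis, flat_indices, labels, offsets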
def collect_agg_weights(model_ckpt_info, progress_bar = True, progress_bar_desc = ''):
if progress_bar:
iterator = tqdm(range(model_ckpt_info.shape[0]),desc=progress_bar_desc)
else:
iterator = range(model_ckpt_info.shape[0])
compiled_df = []
for i in iterator:
# Extract current checkpoint information
curr_file = model_ckpt_info.file[i]
curr_TUNE_IDX = model_ckpt_info.TUNE_IDX[i]
curr_repeat = model_ckpt_info.repeat[i]
curr_fold = model_ckpt_info.fold[i]
# Load current token dictionary
curr_vocab = cp.load(open('/home/sb2406/rds/hpc-work/CENTER-TBI_tokens/repeat'+str(curr_repeat).zfill(2)+'/fold'+str(curr_fold)+'/token_dictionary.pkl',"rb"))
# Load current model weights from checkpoint file
model = deepCENTERTBI.load_from_checkpoint(curr_file)
model.eval()
with torch.no_grad():
#curr_embedX = model.embedX.weight.numpy()
curr_embedW = np.exp(model.embedW.weight.numpy())
        # Get validation performance of current tuning index, repeat, and fold
        # NOTE: `val_performance` is not passed as an argument; it is expected to exist at module scope
        curr_val_ORC = val_performance[(val_performance.TUNE_IDX==curr_TUNE_IDX)&(val_performance.repeat==curr_repeat)&(val_performance.fold==curr_fold)].val_AUROC.values[0]
compiled_df.append(pd.DataFrame({'TUNE_IDX':curr_TUNE_IDX, 'Token':curr_vocab.get_itos(),'AggWeight':curr_embedW.reshape(-1), 'repeat':curr_repeat, 'fold':curr_fold, 'val_ORC':curr_val_ORC}))
return pd.concat(compiled_df,ignore_index=True)
def format_shap(shap_matrix,idx,token_labels,testing_set):
shap_df = pd.DataFrame(shap_matrix,columns=token_labels)
shap_df['GUPI'] = testing_set.GUPI
shap_df = shap_df.melt(id_vars = 'GUPI', var_name = 'Token', value_name = 'SHAP')
shap_df['label'] = idx
return shap_df
def collect_shap_values(shap_info_df, model_dir, progress_bar = True, progress_bar_desc = ''):
#shap_dfs = []
if progress_bar:
iterator = tqdm(range(shap_info_df.shape[0]),desc=progress_bar_desc)
else:
iterator = range(shap_info_df.shape[0])
for i in iterator:
# Extract current file, repeat, and fold information
curr_file = shap_info_df.file[i]
curr_repeat = shap_info_df.repeat[i]
curr_fold = shap_info_df.fold[i]
curr_output_type = shap_info_df.output_type[i]
# Define current fold directory based on current information
tune_dir = os.path.join(model_dir,'repeat'+str(curr_repeat).zfill(2),'fold'+str(curr_fold),'tune_0008')
# Load current token dictionary
curr_vocab = cp.load(open('/home/sb2406/rds/hpc-work/CENTER-TBI_tokens/repeat'+str(curr_repeat).zfill(2)+'/fold'+str(curr_fold)+'/token_dictionary.pkl',"rb"))
# Extract current testing set for current repeat and fold combination
testing_set = pd.read_pickle('/home/sb2406/rds/hpc-work/CENTER-TBI_tokens/repeat'+str(curr_repeat).zfill(2)+'/fold'+str(curr_fold)+'/testing_indices.pkl')
testing_set['seq_len'] = testing_set.Index.apply(len)
testing_set['unknowns'] = testing_set.Index.apply(lambda x: x.count(0))
# Number of columns to add
cols_to_add = testing_set['unknowns'].max() - 1
# Initialize empty dataframe for multihot encoding of testing set
multihot_matrix = np.zeros([testing_set.shape[0],len(curr_vocab)+cols_to_add])
# Encode testing set into multihot encoded matrix
        # use a separate loop variable so the outer file-loop index `i` is not shadowed
        for row_idx in range(testing_set.shape[0]):
            curr_indices = np.array(testing_set.Index[row_idx])
            if sum(curr_indices == 0) > 1:
                zero_indices = np.where(curr_indices == 0)[0]
                curr_indices[zero_indices[1:]] = [len(curr_vocab) + ii for ii in range(sum(curr_indices == 0)-1)]
            multihot_matrix[row_idx,curr_indices] = 1
# Define token labels
token_labels = curr_vocab.get_itos() + [curr_vocab.get_itos()[0]+'_'+str(i+1).zfill(3) for i in range(cols_to_add)]
token_labels[0] = token_labels[0]+'_000'
# Load current shap value matrix
shap_values = cp.load(open(os.path.join(tune_dir,'shap_arrays_'+curr_output_type+'.pkl'),"rb"))
# Convert each SHAP matrix to formatted dataframe and concatenate across labels
shap_df = pd.concat([format_shap(curr_matrix,idx,token_labels,testing_set) for idx,curr_matrix in enumerate(shap_values)],ignore_index=True)
shap_df['repeat'] = curr_repeat
shap_df['fold'] = curr_fold
# Convert multihot encoded matrix into formatted dataframe for token indicators
indicator_df = | pd.DataFrame(multihot_matrix,columns=token_labels) | pandas.DataFrame |
import json
from typing import List, Dict
from lppinstru.discovery import Discovery, c_int, trigsrcAnalogOut1
import time, datetime
import zmq, math
import sys, traceback
import functools
import numpy as np
import pandas as pds
import peakutils
import signal,atexit
from threading import Thread
from juice_scm_gse.analysis import noise,fft
from juice_scm_gse import config as cfg
from juice_scm_gse.utils import mkdir
from juice_scm_gse.utils import Q_,ureg
import logging as log
commands = {}
class DiscoCommand:
def __init__(self, func):
functools.update_wrapper(self, func)
self.func = func
        commands[func.__name__] = func  # Create a dictionary of the commands with their names as keys
def make_cmd(self, channel, **kwargs):
payload = {
"CMD": self.func.__name__,
"channel": channel,
"args": kwargs
}
log.info(f"Build cmd with {payload}")
return json.dumps(payload)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
class Disco_Driver(Discovery):
def __init__(self, card=-1):
super().__init__(card=card)
self.digital_io_output_enable(0x0001)
def turn_on(self): #to turn on & off the asic
self.digital_io = 1
def turn_off(self):
self.digital_io = 0
def set_dc_output(disco: Disco_Driver, dc_value):
disco.analog_out_gen(shape='DC', channel=0, offset=dc_value)
time.sleep(.2)
res = disco.analog_in_read(ch1=False, ch2=True, frequency=disco.max_sampling_buffer * 4,
samplesCount=disco.max_sampling_buffer, ch1range=10.)
return np.median(res[0][0])
def remove_offset(disco: Disco_Driver):
    # Measure the residual output offset at two DC input levels, fit a line offset = a*v + b,
    # then command the DC value -b/a (clamped to +/-5 V) that should null the offset
    v_min, v_max = 2.35, 2.65
    offset1 = set_dc_output(disco,v_min)
    offset2 = set_dc_output(disco,v_max)
    a = (offset2-offset1)/(v_max-v_min)
    b = offset2 - (a*v_max)
    command = max(min(-b/a,5.),-5.)
    offset = set_dc_output(disco, command)
    log.info(f"Minimized offset to {offset}V with {command}V")
    return command
@DiscoCommand
def do_psd(disco: Disco_Driver,progress_func, psd_output_dir, psd_snapshots_count=10, psd_sampling_freq=[100000], **kwargs):
mkdir(psd_output_dir)
snapshot_asic_out = []
sampling_freq_chx = int()
remove_offset(disco)
progress_func("psd",0.,"", 0.)
for f in psd_sampling_freq:
for step in range(psd_snapshots_count):
progress_func("psd",0.,f"Current snapshot {step}/{psd_snapshots_count}",float(step)/float(psd_snapshots_count))
res = disco.analog_in_read(ch1=False, ch2=True, frequency=f,
samplesCount=disco.max_sampling_buffer)
sampling_freq_chx = res[1]
snapshot_asic_out.append(res[0][0])
progress_func("psd analysis", 0.,"", 0.)
for i in range(len(snapshot_asic_out)):
progress_func("psd analysis", 0., f"Saving snapshot {i}/{len(snapshot_asic_out)}", float(i)/float(len(snapshot_asic_out)))
np.savetxt(psd_output_dir + f"/snapshot_psd_{f}Hz_{i}_asic_out.csv.gz", snapshot_asic_out[i])
freq_ch1, psd = noise.psd(snapshot_asic_out, sampling_freq_chx, window=True, removeMean=True)
progress_func("psd analysis", 0.,"", 1.)
df = pds.DataFrame(data={"PSD_ASIC_OUTPUT": psd}, index=freq_ch1)
df.to_csv(psd_output_dir + f"/psd_{f}Hz.csv.gz")
@DiscoCommand
def do_dynamic_tf(disco: Disco_Driver,progress_func, d_tf_output_dir, d_tf_frequencies=np.logspace(0,6,num=200), **kwargs):
mkdir(d_tf_output_dir)
tf_g = []
tf_phi = []
tf_f = []
progress_func("TF",0.,"", 0.)
i = 0.
dc = remove_offset(disco)
for f in d_tf_frequencies:
progress_func("TF", 0., f"Current frequency {f:.1f}Hz", i/len(d_tf_frequencies))
i+=1.
disco.analog_out_gen(frequency=f, shape='Sine', channel=0, amplitude=.2,offset=dc)
time.sleep(.3)
res = disco.analog_in_read(ch1=True, ch2=True, frequency=min(f*disco.max_sampling_buffer/10.,disco.max_sampling_freq), samplesCount=disco.max_sampling_buffer, ch1range=10.)
real_fs = res[1]
data = pds.DataFrame(data={"input": res[0][0],
"output": res[0][1]}, index=np.arange(0., disco.max_sampling_buffer/real_fs,
1. / real_fs))
data.to_csv(d_tf_output_dir + f"/dynamic_tf_snapshot_{f}Hz.csv.gz")
window = np.hanning(len(res[0][0]))
in_spect = fft.fft(waveform=res[0][0], sampling_frequency=real_fs, window=window, remove_mean=True)
out_spect = fft.fft(waveform=res[0][1], sampling_frequency=real_fs, window=window, remove_mean=True)
freq = in_spect["f"]
peaks = peakutils.indexes(in_spect["mod"], min_dist=2)
for peak in peaks:
f = in_spect["f"][peak]
g = 20. * np.log10(out_spect["mod"][peak] / in_spect["mod"][peak])
tf_phi.append(out_spect["phi"][peak] - in_spect["phi"][peak])
tf_g.append(g)
tf_f.append(f)
progress_func("TF done!", 0., "", 1.)
tf = pds.DataFrame(data={"G(dB)":tf_g,"Phi(rad)":tf_phi},index=tf_f)
tf.to_csv(d_tf_output_dir + f"/dynamic_tf.csv.gz")
disco.analog_out_disable(channel=0)
@DiscoCommand
def do_static_tf(disco: Disco_Driver,progress_func, s_tf_output_dir,s_tf_amplitude=.5,s_tf_steps=100, **kwargs):
mkdir(s_tf_output_dir)
tf_vin = []
tf_vout = []
v_min = 2.5-s_tf_amplitude
v_max = 2.5+s_tf_amplitude
progress_func("Static TF",0., "", 0.)
input_range = np.arange(v_min, v_max, (v_max-v_min)/s_tf_steps)
i = 0.
for step in input_range:
progress_func("Static TF", 0., f"Current voltage = {step:.3f}V", i/len(input_range))
i+=1.
disco.analog_out_gen(shape='DC', channel=0,offset=step)
time.sleep(.3)
res = disco.analog_in_read(ch1=True, ch2=True, frequency=disco.max_sampling_buffer*4, samplesCount=disco.max_sampling_buffer, ch1range=10.)
real_fs = res[1]
data = pds.DataFrame(data={"input": res[0][0],
"output": res[0][1]}, index=np.arange(0., disco.max_sampling_buffer / real_fs,
1. / real_fs))
data.to_csv(s_tf_output_dir + f"/static_tf_snapshot_{step}V.csv.gz")
tf_vin.append(np.mean(res[0][0]))
tf_vout.append(np.mean(res[0][1]))
progress_func("Static TF done!", 0., "",1.)
tf = | pds.DataFrame(data={"Vout": tf_vout}, index=tf_vin) | pandas.DataFrame |
# !/usr/bin/env python3
from math import isnan
import os
import shutil
import numpy as np
import random
from numpy.lib.function_base import average
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import cv2
# import simpledorff
from pandas.core.frame import DataFrame
from sklearn import decomposition, datasets
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import cohen_kappa_score
class analyse:
def __init__(self, dataPath, drive, mergedDataFile, modelDataFile, results_folder):
# Docstring
"Class container for analysing functions."
self.modelDataFile = modelDataFile
# Set paths
self.dataPath = dataPath
self.drive = drive
self.results_folder = results_folder
# Load data
self.merge_data = pd.read_csv(mergedDataFile)
print('Created analyse class')
def get_responses(self):
# Get response per frame
indices = []
all_responses = []
for key in self.merge_data.keys():
for stimulus_index in range(0, len(self.merge_data.keys())):
response_column = 'Stimulus %s: response' % stimulus_index
if response_column in key:
indices.append(int(stimulus_index))
all_responses.append(self.merge_data[response_column])
# Create response DataFrame
self.response_data = pd.DataFrame(all_responses, index=indices)
self.response_data.sort_index(inplace=True)
self.response_data.columns = self.merge_data['Meta:worker_code']
self.response_data = self.response_data.transpose()
# Get mean and std of responses
self.response_mean = self.response_data.mean(skipna=True)
self.response_std = self.response_data.std(skipna=True)
# Get normalized response
self.response_normal = (
self.response_data-self.response_mean.mean())/self.response_std.mean()
# Save responses
response_data = pd.DataFrame(self.response_data)
response_data.to_csv(self.results_folder +
'filtered_responses/' + 'response_data.csv')
# Get anonymous data
survey_data = pd.DataFrame(
self.merge_data.loc[:, 'about_how_many_kilometers_miles_did_you_drive_in_the_last_12_months':'which_input_device_are_you_using_now'])
survey_data.index = survey_data['Meta:worker_code']
survey_data.pop('Meta:worker_code')
self.survey_data = survey_data.join(response_data)
self.survey_data.to_csv(self.results_folder +
'filtered_responses/' + 'survey_data.csv')
# print(survey_data)
print('Got responses')
def find_outliers(self, thresh):
# Find outliers
self.bad_indices = []
for index in self.response_data.index:
if(self.response_data.loc[index].std(skipna=True) < thresh):
self.bad_indices.append(index)
self.response_data = self.response_data.drop(index)
print('Found outliers')
def info(self):
# Get mean and std of responses
self.response_mean = self.response_data.mean(skipna=True)
self.response_std = self.response_data.std(skipna=True)
# Get general info on data
self.data_description = self.response_data.describe()
self.data_description.to_csv(
self.results_folder + 'filtered_responses/' + 'self.data_description.csv')
# print(self.data_description)
print('Got info')
def split(self, useNorm=False):
# Chose usage of normalized data
if useNorm == True:
data = self.response_normal
else:
data = self.response_data
# Get mean and std of responses
self.response_mean = data.mean(skipna=True)
self.response_std = data.std(skipna=True)
# Find middle
middle = int(round(len(self.response_data)/2))
# Get first half of data
self.response_data_first = data[0:middle]
self.response_mean_first = self.response_data_first.mean(skipna=True)
self.response_std_first = self.response_data_first.std(skipna=True)
# Get last half of data
self.response_data_last = data[(middle+1):len(data)]
self.response_mean_last = self.response_data_last.mean(skipna=True)
self.response_std_last = self.response_data_last.std(skipna=True)
# Get correlation of first and last half
r_fl = self.response_mean_first.corr(self.response_mean_last)
r2_fl = r_fl*r_fl
        # print('{:<25}'.format('autocorrelation') + ': R^2 =',f'{r2_fl:.5f}')
# Plot correlation of first and last half
plt.figure(figsize=(10,4))
self.plot_correlation(self.response_mean_first, self.response_mean_last,
None, None,
'Group 1', 'Group 2', 'Autocorrelation', r=round(r_fl, 5))
# self.training_data = self.response_data
middle_frame = int(round(self.response_data.shape[1])/2)
self.data_training = self.response_data.iloc[:, 0:middle_frame]
self.data_testing = self.response_data.iloc[:,
middle_frame:self.response_data.shape[1]]
plt.tight_layout()
# plt.show()
print('Split data')
def random(self, useNorm=False, seed=100):
# Chose usage of normalized data
if useNorm == True:
data = self.response_normal
else:
data = self.response_data
# Get mean and std of responses
data_response_mean = data.mean(skipna=True)
data_response_std = data.std(skipna=True)
# Chose random person
random.seed(seed)
random_person = random.randrange(0, len(data), 1)
self.single_response = data.iloc[random_person]
# Get correlation of data and random person
correlations = []
for i in range(0, len(data)):
single_response = data.iloc[i]
r = single_response.corr(data_response_mean)
correlations.append(r)
r_sm = self.single_response.corr(data_response_mean)
r2_sm = r_sm*r_sm
        print('{:<30}'.format('Single random') + ': N' +
              '{:<4}'.format(str(random_person)) + " vs Human R^2 = " + str(r2_sm))
# Plot correlation of data and random person
# self.plot_correlation(self.single_response, data_response_mean,
# data_response_std, [],
# ('Person n'+str(random_person)), 'Response mean', 'random', r2_sm)
print('Got random correlation')
pd_corr = pd.DataFrame(correlations)
pd_corr.to_csv(self.results_folder + 'filtered_responses/' + 'individual_corr.csv')
# plt.figure()
# plt.boxplot(pd_corr)
pd_corr.plot.box(vert=False, figsize=(10,2))
parts = plt.vlines(0.5,1,1)
model = plt.vlines(0.58568,0.8,1.2,colors='orange')
# plt.title("Participant correlation")
# plt.ylabel("Participants")
plt.xlabel("Correlation")
plt.legend([parts, model],['Participants', 'Model'])
plt.grid()
plt.yticks([])
plt.tight_layout()
plt.savefig(self.results_folder + 'filtered_responses/' +
'individual_corr' + '.png')
# print(correlations)
print("Average over correlation: {}, stdev: ".format(pd_corr.median()))
def model(self, plotBool=True):
self.model_data = pd.read_csv(
self.results_folder + 'model_responses/' + self.modelDataFile)
# Get keys
self.parameter_keys = list(self.model_data)
self.parameter_keys.remove('general_frame_number')
self.model_data.pop('general_frame_number')
for parameter in self.parameter_keys:
# Get correlation
r = self.model_data[parameter].corr(self.response_mean)
# Print correlation
print('{:<25}'.format(parameter) + ': r =',f'{r:.5f}')
# Save figure correlation
if plotBool == True:
self.plot_correlation(self.model_data[parameter], self.response_mean,
None, self.response_std,
str(parameter), 'response_mean', parameter, r=round(r, 5))
# Check model cronbach alpha
# self.cronbach_alpha(self.model_data[['model_type', 'model_imminence', 'model_probability']])
# Add mean response to correlation matrix
self.model_data['response_mean_last'] = self.response_mean_last
# Get correlation matrix
corrMatrix = self.model_data.corr(method='pearson')
# corrMatrix = corrMatrix.sort_values(by='response_mean')
# Remove uppper triangle
mask = np.zeros_like(corrMatrix)
mask[np.triu_indices_from(mask, k=1)] = True
# Get eigenvalues and vectors
# Number of params
n = len(self.parameter_keys)
v = np.linalg.eig(corrMatrix.iloc[4:n, 4:n])
v_sum = np.sum(v[0])
v_csum = np.cumsum(v[0])
v_ccurve = v_csum/v_sum
v_cutoff = len(v_ccurve[(v_ccurve <= 0.8)])+1
# print(v_cutoff)
plt.clf()
plt.plot(v[0], marker="o")
plt.plot(np.ones(len(v[0])))
# plt.title('Scree plot')
plt.xlabel('Component')
plt.ylabel('Eigenvalue')
plt.grid()
# Save figure
plt.savefig(self.results_folder +
'regression/' + 'scree_plot' + '.png')
plt.clf()
plt.plot(v_ccurve, marker ="o")
# plt.title('Cumulative eigenvalue curve')
plt.xlabel('Component')
plt.ylabel('Cumulative eigenvalue')
plt.grid()
# Save figure
plt.savefig(self.results_folder +
'regression/' + 'ccum_eigen_plot' + '.png')
# Get significant params
p_keys = self.model_data.keys()[4:n]
# print(p_keys)
significant_parameters = set([])
# print(v_cutoff)
loading = v[1] * [v**0.5 for v in v[0]]
for column in range(0, v_cutoff):
for row in range(0, len(v[1])):
if (abs(v[1][row, column]) >= 0.4):
# if (abs(loading[row, column]) >= 0.8):
# if (row <= 3):
# pass
# else:
significant_parameters.add(p_keys[row])
self.sig_params = list(significant_parameters)
# Plot corr of sigs
# plt.clf()
# sn.heatmap(self.model_data[self.].corr(method='pearson'),vmax = 1,vmin = -1,cmap = 'RdBu_r', linewidths=.5, annot=True,yticklabels=self.)
# plt.title('Correlations of significant parameters')
# plt.show()
# Get eigenvector heatmap
# plt.figure()
# sn.heatmap(loading, vmax = 1,vmin = -1,cmap = 'RdBu_r', linewidths=.5, annot=True,yticklabels=p_keys,fmt='.2f')
# # plt.title('Eigenvectors')
# plt.xlabel('Loading of principle component')
# plt.ylabel('Values')
# plt.show()
# Save figure
# plt.savefig(self.results_folder + 'regression/' + 'eigenvector_matrix' + '.png')
# Plot correlation matrix
if (plotBool == True):
plt.clf()
sn.heatmap(corrMatrix, vmax=1, vmin=-1,
cmap='RdBu_r', linewidths=.5, annot=True)
# plt.show()
print('Got model data and correlations')
else:
pass
r = self.model_data['model_combination'].corr(self.response_mean)
return r**2
def risky_images(self, model=False):
# Get most risky and least risky images
if (model == True):
response_model_sorted = self.model_data['model_combination'].sort_values(
)
least_risky = response_model_sorted.index[0:5]
most_risky = response_model_sorted.tail(5).index[::-1]
else:
response_model_sorted = self.response_mean.sort_values()
least_risky = response_model_sorted.index[0:5]
most_risky = response_model_sorted.tail(5).index[::-1]
# Save most and least risky images
i = 1
for image in least_risky:
# os.path.join(self.dataPath + self.drive + '/image_02/data')
shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) + '.png',
self.results_folder + 'most_least_risky_images/' + 'least_risky_%s.png' % i)
i += 1
i = 1
for image in most_risky:
# os.path.join(self.dataPath + self.drive + '/image_02/data')
shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) + '.png',
self.results_folder + 'most_least_risky_images/' + 'most_risky_%s.png' % i)
i += 1
print('Got risky images')
def risk_ranking(self):
# Sort list of mean response values
response_mean_sorted = self.response_mean.sort_values()
# i = 0
# for image in response_mean_sorted.index:
# shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(
# image) + '.png', self.results_folder + 'risk_sorted_images/' + '%s.png' % i)
# i += 1
# Sort list of model combination values
response_model_sorted = pd.Series(
self.model_data['model_combination']).sort_values()
# i = 0
# for image in response_model_sorted.index:
# shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) +
# '.png', self.results_folder + 'risk_sorted_images/model' + '%s.png' % i)
# i += 1
r = round(np.corrcoef(self.response_mean, self.model_data['model_combination'])[1][0],4)
self.plot_correlation(self.response_mean, self.model_data['model_combination'], name1="Experiment result", name2="Model result", parameter="model_experiment", r=r)
print(np.corrcoef(response_mean_sorted.index,response_model_sorted.index))
print('Ranked images on risk')
def PCA(self):
print("Starting PCA analysis")
images = sorted(os.listdir(
self.dataPath + self.drive + '/image_02/data/'))
images_features_gray = []
images_features_blue = []
images_features_green = []
images_features_red = []
for image in images:
image_features_gray = []
image_features_blue = []
image_features_green = []
image_features_red = []
full_path = self.dataPath + self.drive + '/image_02/data/' + image
loaded_image = cv2.imread(full_path)
gray = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
blue = loaded_image[:, :, 0]
green = loaded_image[:, :, 1]
red = loaded_image[:, :, 2]
scaling = 1./2
gray_scaled = cv2.resize(gray, (0, 0), fx=(scaling), fy=(scaling))
blue_scaled = cv2.resize(blue, (0, 0), fx=(scaling), fy=(scaling))
green_scaled = cv2.resize(
green, (0, 0), fx=(scaling), fy=(scaling))
red_scaled = cv2.resize(red, (0, 0), fx=(scaling), fy=(scaling))
scaled_shape = gray_scaled.shape
for horizontal in gray_scaled:
image_features_gray = image_features_gray + list(horizontal)
images_features_gray.append(image_features_gray)
for horizontal in blue_scaled:
image_features_blue = image_features_blue + list(horizontal)
images_features_blue.append(image_features_blue)
for horizontal in green_scaled:
image_features_green = image_features_green + list(horizontal)
images_features_green.append(image_features_green)
for horizontal in red_scaled:
image_features_red = image_features_red + list(horizontal)
images_features_red.append(image_features_red)
# PCA decomposition
print("Running decomposition")
nc = 50 # number of model variables
pca = decomposition.PCA(n_components=nc)
std_gray = StandardScaler()
gray_std = std_gray.fit_transform(images_features_gray)
gray_pca = pca.fit_transform(gray_std)
eigen_frames_gray = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_blue = StandardScaler()
blue_std = std_blue.fit_transform(images_features_blue)
blue_pca = pca.fit_transform(blue_std)
eigen_frames_blue = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_green = StandardScaler()
green_std = std_green.fit_transform(images_features_green)
green_pca = pca.fit_transform(green_std)
eigen_frames_green = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_red = StandardScaler()
red_std = std_red.fit_transform(images_features_red)
red_pca = pca.fit_transform(red_std)
eigen_frames_red = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
# # Back tranform for check
# back_transform = pca.inverse_transform(gray_pca)
# back_transform_renormalize = std_gray.inverse_transform(back_transform)
# # Show before and after
# first_image = np.array(images_features[0]).reshape(scaled_shape)
# cv2.imshow('Before PCA',first_image)
# cv2.waitKey(0)
# # second_image = np.array(back_transform_renormalize[0]).reshape(scaled_shape)
# cv2.imshow('After PCA',second_image)
# cv2.waitKey(0)
gray_pca_df = pd.DataFrame(gray_pca)
blue_pca_df = pd.DataFrame(blue_pca)
green_pca_df = pd.DataFrame(green_pca)
red_pca_df = pd.DataFrame(red_pca)
self.pca = gray_pca_df
r = round(gray_pca_df[2].corr(self.response_mean),5)
self.plot_correlation(gray_pca_df[2],self.response_mean_last,name1='Gray pca component 2',name2='response_mean_last',r=r)
print("Saving images")
for i in range(0, nc):
print('Feature: ', i)
print('Gray correlation: ',
gray_pca_df[i].corr(self.response_mean))
print('Blue correlation: ',
blue_pca_df[i].corr(self.response_mean))
print('Green correlation: ',
green_pca_df[i].corr(self.response_mean))
print('Red correlation: ', red_pca_df[i].corr(self.response_mean))
max_pixel_gray = np.max(abs(eigen_frames_gray[i]))
max_pixel_blue = np.max(abs(eigen_frames_blue[i]))
max_pixel_green = np.max(abs(eigen_frames_green[i]))
max_pixel_red = np.max(abs(eigen_frames_red[i]))
gray_channel = eigen_frames_gray[i]*1/max_pixel_gray*255
blue_channel = eigen_frames_blue[i]*1/max_pixel_blue*255
green_channel = eigen_frames_green[i]*1/max_pixel_green*255
red_channel = eigen_frames_red[i]*1/max_pixel_red*255
bgr_image = np.zeros((scaled_shape[0], scaled_shape[1], 3))
bgr_image[:, :, 0] = blue_channel
bgr_image[:, :, 1] = green_channel
bgr_image[:, :, 2] = red_channel
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('color ' + str(i)+'.png')), bgr_image)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('gray ' + str(i)+'.png')), gray_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('blue ' + str(i)+'.png')), blue_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('green' + str(i)+'.png')), green_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('red ' + str(i)+'.png')), red_channel)
print('Performed PCA')
def multivariate_regression(self, pred='default'):
# train = pd.DataFrame(self.pca, columns= ['0','1','2','3','4','5','6','7','8','9','10','11','12','13'])
# train = self.pca.iloc[0:middle]
# test = self.pca.iloc[middle:len(self.pca)]
lr = LinearRegression(normalize=False, copy_X=True)
# lr = LinearRegression()
if (pred == 'default'):
predictor_keys = ['general_velocity', 'general_distance_mean',
'general_number_bjects', 'manual_breaklight', 'occluded_mean']
elif(pred == 'sig'):
predictor_keys = self.sig_params
elif(pred == 'all'):
predictor_keys = self.model_data.keys()
else:
print('Wrong input, changing to default')
predictor_keys = ['general_velocity', 'general_distance_mean',
'general_number_bjects', 'manual_breaklight', 'occluded_mean']
predictors = self.model_data[predictor_keys]
sc = StandardScaler()
predictors_stand = sc.fit_transform(predictors)
middle = int(round(predictors.shape[0]/2))
print(predictors_stand[middle])
# Fit regression
print("Fitting regression model")
print(self.sig_params)
lr.fit(predictors_stand[0:middle], self.response_mean[0:middle])
predictions = lr.predict(predictors_stand[middle:predictors.shape[0]])
# print(predictors[0:middle])
# print(lr.predict(predictors[0:middle]))
data = predictors_stand[0:middle] * lr.coef_
results = pd.DataFrame(data, columns=predictor_keys)
results.insert(0, "intercept", lr.intercept_)
results.to_csv(self.results_folder +
'regression/' + 'regression.csv')
data2 = predictors_stand[middle:predictors.shape[0]] * lr.coef_
results2 = pd.DataFrame(data2, columns=predictor_keys)
results2.insert(0, "intercept", lr.intercept_)
results2.to_csv(self.results_folder +
'regression/' + 'regression2.csv')
# Analyse result
r = np.corrcoef(
self.response_mean[middle:predictors.shape[0]], predictions)[0, 1]
print('Correlation = {}'.format(r))
self.plot_correlation(predictions, self.response_mean[middle:len(
self.response_mean)], name1="Multivariate regression", name2="Response test", parameter="regression_multivariate", r=round(r, 5))
print('Lr coef: {}'.format(lr.coef_))
print('Lr coef deep: {}'.format(lr.coef_[0]))
# self.cronbach_alpha(self.model_data[predictor_keys])
print('Performed multivariate regression')
def risk_accidents(self, plotBool=False):
# Get accident answers
accident_occurence = self.merge_data['how_many_accidents_were_you_involved_in_when_driving_a_car_in_the_last_3_years_please_include_all_accidents_regardless_of_how_they_were_caused_how_slight_they_were_or_where_they_happened']
# Filter no responses
accident_occurence = [-1 if value ==
'i_prefer_not_to_respond' else value for value in accident_occurence]
accident_occurence = [
6 if value == 'more_than_5' else value for value in accident_occurence]
        accident_occurence = [value if pd.isna(value) else float(
            value) for value in accident_occurence]
# Group by accidents
n_bins = 20
bins = np.linspace(0, 100, n_bins+1)
binned = []
for value in self.response_data.mean(axis=1):
for b in bins:
if (value <= b):
binned.append(b)
# print("Value:{} < bin:{}".format(value,b))
break
# Get accident occurence
average_score = list(self.response_data.mean(axis=1))
risk_accidents = pd.DataFrame(
{'Accidents': accident_occurence, 'Average_score': average_score})
r = risk_accidents.corr().values[0, 1]
self.plot_correlation( | pd.Series(accident_occurence) | pandas.Series |
"""
Module for interacting with the NHL's open but undocumented API.
"""
import streamlit as st
import pandas as pd
from pandas.io.json import json_normalize
import requests as rqsts
## data ingestion
def get_seasons(streamlit=False):
""" returns all seasons on record """
seasons_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/seasons')
try:
seasons_response.raise_for_status()
except rqsts.exceptions.HTTPError as e:
if streamlit:
st.write(e)
else:
print(e)
raise e
seasons = seasons_response.content
seasons_df = pd.read_json(seasons)
seasons_df = json_normalize(seasons_df.seasons)
seasons_df.set_index('seasonId', inplace=True)
return seasons_df
def get_current_season():
season_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/seasons/current')
season = season_response.content
season_df = pd.read_json(season)
season_df = json_normalize(season_df.seasons)
season_id = season_df.seasonId
season_start = season_df.regularSeasonStartDate
season_end = season_df.regularSeasonEndDate
return season_id, season_start, season_end
def get_teams(streamlit=False):
"""returns all teams FOR THE CURRENT SEASON"""
teams_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/teams')
try:
teams_response.raise_for_status()
except rqsts.exceptions.HTTPError as e:
if streamlit:
st.write(e)
else:
print(e)
raise e
teams = teams_response.content
teams_df = pd.read_json(teams)
teams_df = json_normalize(teams_df.teams)
return teams_df
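
# Minimal usage sketch of the helpers in this module (assumptions: network access to
# statsapi.web.nhl.com is available, and `get_schedule`, defined just below, returns the
# normalized schedule; the function name `example_nhl_usage` is illustrative only).
def example_nhl_usage():
    seasons_df = get_seasons()
    season_id, season_start, season_end = get_current_season()
    teams_df = get_teams()
    schedule_df = get_schedule(season_start.iloc[0], season_end.iloc[0])
    return seasons_df, season_id, teams_df, schedule_df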
def get_schedule(start_date, end_date):
# teams = get_teams()
# st.dataframe(teams)
# output_df = pd.DataFrame()
schedule_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/schedule?startDate={0}&endDate={1}'.format(start_date, end_date))
schedule = schedule_response.content
schedule = pd.read_json(schedule)
schedule = json_normalize(schedule.dates)
output_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from random import sample
def random_selection():
fields = ['Index', 'Is Success']
# read specific columns
mhm_path = './results/attack_mhm.csv'
gi_path = './results/attack_genetic.csv'
index_mhm = pd.read_csv(mhm_path, skipinitialspace=True, usecols=fields)
index_gi = pd.read_csv(gi_path, skipinitialspace=True, usecols=fields)
mhm_success = index_mhm[index_mhm['Is Success'] == 1]
gi_success = index_gi[index_gi['Is Success'] == 1]
print(type(gi_success))
intersect = list(set(mhm_success['Index'].values.tolist()).intersection(set(gi_success['Index'].values.tolist())))
print(len(intersect))
# samples = sample(intersect, 100)
#
# print(samples)
# print(len(set(samples)))
# return samples
return intersect
def filter_csv(index):
mhm_path = './results/attack_mhm.csv'
gi_path = './results/attack_genetic.csv'
index_mhm = pd.read_csv(mhm_path)
index_gi = pd.read_csv(gi_path)
mhm = index_mhm.loc[index_mhm['Index'].isin(index)]
gi = index_gi.loc[index_gi['Index'].isin(index)]
data = [gi["Index"], gi["Original Code"], gi["Adversarial Code"], gi["Extracted Names"], gi["Replaced Names"],
mhm["Adversarial Code"], mhm["Extracted Names"], mhm["Replaced Names"],]
headers = ["Index", "Original", "GA_Adversarial Code", "GA_Extracted Names", "GA_Replaced Names",
"mhm_Adversarial Code", "mhm_Extracted Names", "mhm_Replaced Names",]
gi.to_csv('gi.csv', index=False)
mhm.to_csv('mhm.csv', index=False)
print(mhm)
df3 = | pd.concat(data, axis=1, keys=headers) | pandas.concat |
#Z0096
# import standards
import pandas as pd
# import stats tools
from scipy.stats import chi2_contingency, ttest_ind
# import modeling tools
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import RFE
from sklearn.metrics import classification_report, confusion_matrix
#################### Explore Functions ####################
def ttest_report(data, sample_mask_one, sample_mask_two, target, alpha=0.05):
    '''
    Run an independent two-sample t-test on `target` between the rows of `data`
    selected by the boolean masks `sample_mask_one` and `sample_mask_two`, then
    print the test statistic, p-value, and decision at significance level `alpha`.
    '''
    # get tstat and p value from ttest between two samples
    t, p = ttest_ind(data[sample_mask_one][[target]], data[sample_mask_two][[target]])
# check p value relative to alpha
if p < alpha:
status = 'May reject'
else:
status = 'Fail to reject'
# print results and hypothesis statement
print(f'''
T-Stat: {t}
P-Value: {p}
{status} the null hypotheses that "{target}" is the same between the two samples.
''')
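
# Hedged example call (the dataframe and column names below are illustrative
# placeholders, not columns from this project): the masks are boolean Series
# selecting the two samples whose `target` means are compared.
def example_ttest_report(df):
    ttest_report(df, df.churn == 'Yes', df.churn == 'No', 'monthly_charges', alpha=0.05)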
def chi_test(cat, target, alpha=0.05):
    '''
    Run a chi-squared test of independence between the categorical series `cat`
    and `target` using their contingency table, at significance level `alpha`.
    '''
# set observed DataFrame with crosstab
observed = | pd.crosstab(cat, target) | pandas.crosstab |
import numpy as np
import pandas as pd
import glob
from pmdarima.arima import ndiffs
from pandas.tseries.offsets import QuarterBegin, QuarterEnd
from .hand_select import hand_select
import pandas_datareader.data as web
import xlrd, csv
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def set_date_as_index(df):
df.columns = [name.lower() for name in df.columns]
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True)
return df
def make_float(df):
df = df.replace(".", np.nan)
df = df.astype(float)
return df
def read_files(paths, fillna=True):
csv_list = []
xls_list = []
for path in paths:
csv_files = glob.glob(path + "/*.csv")
xls_files = glob.glob(path + "/*.xls")
for elt in csv_files:
df = pd.read_csv(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
csv_list.append(df)
for elt in xls_files:
try:
df = pd.read_excel(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
                xls_list.append(df)
except Exception:
pass
return csv_list, xls_list
def make_stationary(df):
df = hand_select(df)
df = df.dropna()
columns = df.columns
for name in columns:
x = df[name].values
d_kpss = ndiffs(x, test='kpss')
d_adf = ndiffs(x, test='adf')
d_pp = ndiffs(x, test='pp')
d_ = max(d_kpss, d_adf, d_pp)
if d_ > 0:
new_name = name + '_diff' + str(d_)
if d_ == 1:
df[new_name] = df[name].diff()
elif d_ == 2:
df[new_name] = df[name].diff().diff()
elif d_ > 2:
raise ValueError('High order differentiation')
else:
raise Exception('Some thing is wrong')
df = df.drop(columns=[name])
return df
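
# Illustrative sketch of how the helpers above compose (the paths and the function name
# are placeholder assumptions, not part of the original project): read every CSV/XLS
# series under the given folders, then difference each selected column until it passes
# the KPSS/ADF/PP stationarity checks.
def example_stationary_pipeline(paths=("data/csv_series", "data/xls_series")):
    csv_list, xls_list = read_files(list(paths), fillna=True)
    return [make_stationary(df) for df in csv_list]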
def open_xls_as_xlsx(filename):
# first open using xlrd
book = xlrd.open_workbook(filename)
index = 0
nrows, ncols = 0, 0
while nrows * ncols == 0:
sheet = book.sheet_by_index(index)
nrows = sheet.nrows
ncols = sheet.ncols
index += 1
# prepare a xlsx sheet
book1 = Workbook()
sheet1 = book1.active
    # copy cell values (note: these loops start at index 1, so the first row and
    # first column of the source sheet are not copied)
    for row in range(1, nrows):
        for col in range(1, ncols):
            sheet1.cell(row=row, column=col).value = sheet.cell_value(row, col)
return book1
def read_data(path, sheet=False, header='infer'):
file_format = path.split('.')[-1]
if 'msci' in path:
header = 6
if sheet is False:
# if file_format == 'csv':
# df = pd.read_csv(path, header=header)
# elif file_format == 'xls':
# df = open_xls_as_xlsx(path)
# else:
try:
df = | pd.read_excel(path, header=header, engine='openpyxl') | pandas.read_excel |
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Informations
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
total_error_1 = 0
total_error_2 = 0
total_error_comb = 0
randnum = np.arange(2,44,4)
num_trials = len(randnum)
record = ""
wrong_record = ""
run = 1
# %% Data cleaning
from sklearn.model_selection import train_test_split
X_white = pd.DataFrame()
X_not_white = pd.DataFrame()
y_white = pd.Series(dtype='float64')
y_not_white = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"stalk-root"] == "r":
X_white = X_white.append(X.iloc[i,:])
y_white = y_white.append(pd.Series(y.iloc[i]))
else:
X_not_white = X_not_white.append(X.iloc[i,:])
y_not_white = y_not_white.append(pd.Series(y.iloc[i]))
# %% Data cleaning pt2
X_green = pd.DataFrame()
X_not_green = pd.DataFrame()
y_green = pd.Series(dtype='float64')
y_not_green = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"odor"] == "a":
X_green = X_green.append(X.iloc[i,:])
y_green = y_green.append(pd.Series(y.iloc[i]))
else:
X_not_green = X_not_green.append(X.iloc[i,:])
y_not_green = y_not_green.append(pd.Series(y.iloc[i]))
# %%
for j in randnum:
X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
X_train_green = (X_train_not_green)
y_train_green = (y_train_not_green)
X_train_white = (X_train_not_white)
y_train_white = (y_train_not_white)
# %%
from sklearn.utils import shuffle
X_train_full1 = shuffle(X_train_white, random_state=j)
X_test = shuffle(X, random_state=j).iloc[4000:8000]
y_train_full1 = shuffle(y_train_white, random_state=j)
y_test = shuffle(y, random_state=j).iloc[4000:8000]
# %% [markdown]
# #### Validation Set
# %%
X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
# print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
# print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train1 = pipeline.fit_transform(X_train1)
X_valid1 = pipeline.fit_transform(X_valid1)
X_test1 = pipeline.fit_transform(X_test)
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
# %%
# tf.random.set_seed(j)
tf.random.set_random_seed(j)
# %%
model1 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
# %%
#model1.summary()
# %% [markdown]
# #### Compile the Model
# %%
model1.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model1 = model1.fit(X_train1, y_train1,
epochs=100,
validation_data=(X_valid1, y_valid1),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results1 = model1.evaluate(X_test1, y_test)
# print("test loss, test acc:", results1)
# %% [markdown]
# ### Make Some Predictions
# %%
X_new1 = X_test1[:5]
y_prob1 = model1.predict(X_new1)
# print(y_prob.round(3))
# %%
y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
# print(y_pred)
y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df1 = pd.DataFrame(model1.predict(X_test1))
y_test_pred1 = pd.DataFrame(y_test_pred).reset_index(drop=True)
X_df1 = pd.concat([X_df1, y_test_pred1], axis=1)
y_test1 = y_test.reset_index(drop=True)
X_df1 = pd.concat([X_df1, y_test1], axis=1)
X_df1.columns = ["X_pred","y_pred","y_actual"]
#print(X_df1)
# %%
import math
table1 = pd.DataFrame(columns=["KL_div","abs_distance","correctness"])
for i in range(0,len(X_df1)):
    # "KL divergence" here is the binary entropy -p*ln(p) - (1-p)*ln(1-p) of the
    # predicted probability: 0 for a fully confident prediction, ln(2) ~ 0.69 at p = 0.5
    p = X_df1.loc[i,"X_pred"]
    try:
        kl = -(p*math.log(p) + (1-p)*math.log(1-p))
    except ValueError:
        kl = 0  # entropy is 0 when p is exactly 0 or 1
    table1.loc[i,"KL_div"] = kl
# absolute distance
abs_dist = 2*abs(0.5-p)
table1.loc[i,"abs_distance"] = abs_dist
# correctness
y_pred1 = X_df1.loc[i,"y_pred"]
y_act1 = X_df1.loc[i,"y_actual"]
if y_pred1 == y_act1:
table1.loc[i,"correctness"] = 1 # correct prediction
else:
table1.loc[i,"correctness"] = 0 # wrong prediction
table1.loc[i,"y_pred"] = y_pred1
#print(table1)
# %%
table1["count"] = 1
correctness1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
correctness1["percent"] = 100*(correctness1["correctness"]/correctness1["count"])
#print(correctness1)
# %%
index = []
for i in (correctness1.index):
index.append(str(i))
plt.bar(index,correctness1["percent"], width=0.7)
for index,data in enumerate(correctness1["percent"]):
plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
plt.ylim(0,120)
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %% [markdown]
# ### Confidence
# %%
kl1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.80, 0.1))).apply(sum)
kl1["percent"] = (kl1["correctness"]/kl1["count"])
kl1.dropna(inplace=True)
plt.scatter(np.arange(0, 0.70, 0.1), kl1["percent"])
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %%
# Linear Regression
from sklearn.linear_model import LinearRegression
x_reg1 = np.arange(0, 0.70, 0.1).reshape((-1, 1))
y_reg1 = kl1["percent"]
reg_model1 = LinearRegression().fit(x_reg1,y_reg1)
# %%
# print('intercept(alpha):', reg_model1.intercept_)
# print('slope(theta):', reg_model1.coef_)
# %% [markdown]
# # NN2 Odor - Almond (a)
# %% [markdown]
# #### Train Set and Test Set
# %%
from sklearn.utils import shuffle
X_train_full2 = shuffle(X_train_green, random_state=j)
# X_test2 = shuffle(X_test_green, random_state=j)
y_train_full2 = shuffle(y_train_green, random_state=j)
# y_test2 = shuffle(y_test_green, random_state=j)
# %% [markdown]
# #### Validation Set
# %%
X_valid2, X_train2 = X_train_full2[:500], X_train_full2[500:]
y_valid2, y_train2 = y_train_full2[:500], y_train_full2[500:]
# print("X_train:", X_train2.shape[0], "y_train", y_train2.shape[0])
# print("X_valid: ", X_valid2.shape[0], "y_valid ", y_valid2.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train2 = pipeline.fit_transform(X_train2)
X_valid2 = pipeline.fit_transform(X_valid2)
X_test2 = pipeline.fit_transform(X_test)
y_test2 = y_test
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
tf.random.set_random_seed(j)
# %%
model2 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
# %%
#model2.summary()
# %% [markdown]
# #### Compile the Model
# %%
model2.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model2 = model2.fit(X_train2, y_train2,
epochs=100,
validation_data=(X_valid2, y_valid2),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results2 = model2.evaluate(X_test2, y_test2)
# print("test loss, test acc:", results2)
# %% [markdown]
# ### Make Some Predictions
# %%
# y_pred2 = (model2.predict(X_new2) > 0.5).astype("int32")
# print(y_pred2)
y_test_pred2 = (model2.predict(X_test2) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df2 = pd.DataFrame(model2.predict(X_test2))
y_test_pred2 = pd.DataFrame(y_test_pred2).reset_index(drop=True)
X_df2 = pd.concat([X_df2, y_test_pred2], axis=1)
y_test2 = y_test2.reset_index(drop=True)
X_df2 = pd.concat([X_df2, y_test2], axis=1)
X_df2.columns = ["X_pred","y_pred","y_actual"]
#print(X_df2)
# %%
import math
table2 = pd.DataFrame(columns=["KL_div","abs_distance","y_pred","correctness"])
for i in range(0,len(X_df2)):
    # binary entropy of the predicted probability, handled the same way as in the NN1 block
    p = X_df2.loc[i,"X_pred"]
    try:
        kl = -(p*math.log(p) + (1-p)*math.log(1-p))
    except ValueError:
        kl = 0  # entropy is 0 when p is exactly 0 or 1
    table2.loc[i,"KL_div"] = kl
# absolute distance
abs_dist = 2*abs(0.5-p)
table2.loc[i,"abs_distance"] = abs_dist
# correctness
y_pred = X_df2.loc[i,"y_pred"]
y_act = X_df2.loc[i,"y_actual"]
if y_pred == y_act:
table2.loc[i,"correctness"] = 1 # correct prediction
else:
table2.loc[i,"correctness"] = 0 # wrong prediction
table2.loc[i,"y_pred"] = y_pred
#print(table2)
# %%
table2["count"] = 1
correctness2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
correctness2["percent"] = 100*(correctness2["correctness"]/correctness2["count"])
#print(correctness2)
# %%
index = []
for i in (correctness2.index):
index.append(str(i))
plt.bar(index,correctness2["percent"], width=0.7)
for index,data in enumerate(correctness2["percent"]):
plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
plt.ylim(0,120)
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %% [markdown]
# ### Confidence
# %%
kl2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
kl2["percent"] = (kl2["correctness"]/kl2["count"])
kl2.dropna(inplace=True)
plt.scatter(np.arange(0, 0.70, 0.1), kl2["percent"])
# print(kl)
# print(np.arange(0, 0.7, 0.05))
# %%
# Linear Regression
from sklearn.linear_model import LinearRegression
x_reg2 = np.arange(0, 0.7, 0.1).reshape((-1, 1))
y_reg2 = kl2["percent"]
reg_model2 = LinearRegression().fit(x_reg2,y_reg2)
# %%
# print('intercept(alpha):', reg_model2.intercept_)
# print('slope(theta):', reg_model2.coef_)
# %% [markdown]
# ## Algorithm C: It = argmax(Ct,i)
# %%
# Correct answer
ans = | pd.DataFrame(X_df2["y_actual"]) | pandas.DataFrame |
# gsheets_data.py
from dotenv import load_dotenv
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import io
import sys
from wordcloud import WordCloud, STOPWORDS
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import sendgrid
from sendgrid.helpers.mail import *
def send_email(text, sub):
load_dotenv()
SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY", "OOPS, please set env var called 'SENDGRID_API_KEY'")
MY_EMAIL_ADDRESS = os.environ.get("MY_EMAIL_ADDRESS", "OOPS, please set env var called 'MY_EMAIL_ADDRESS'")
CLIENT_EMAIL_ADDRESS = os.environ.get("MY_EMAIL_ADDRESS", "OOPS, please set env var called 'CLIENT_EMAIL_ADDRESS'")
sg = sendgrid.SendGridAPIClient(apikey=SENDGRID_API_KEY)
### load email variables
from_email = Email(MY_EMAIL_ADDRESS)
to_email = Email(CLIENT_EMAIL_ADDRESS)
subject = sub
sub_text = text
message_text = f"Dear User, \nThis is an automated email response. \n {sub_text}"
content = Content("text/plain", message_text)
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
return response
def google_sheets_data(gsheet):
load_dotenv()
DOCUMENT_ID = os.environ.get("GOOGLE_SHEET_ID", "OOPS")
SHEET_NAME = gsheet
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
file_name = os.path.join(os.getcwd(), "google_credentials", "gcreds.json")
creds = ServiceAccountCredentials.from_json_keyfile_name(file_name, scope)
client = gspread.authorize(creds)
doc = client.open_by_key(DOCUMENT_ID)
sheet = doc.worksheet(SHEET_NAME)
data = sheet.get_all_records()
return data
def quality(x):
if x == 5:
return 'Exceptional'
elif x==4.5 or x==4:
return 'Very Good'
elif x==3.5 or x==3:
return 'Average'
else:
return 'Poor'
def savefile(fileandext):
file_name = os.path.join(os.getcwd(), "plot_images", fileandext)
if os.path.exists(file_name):
os.remove(file_name)
return file_name
def float_format(data):
x = "{0:.2f}".format(data)
return x
if __name__ == "__main__":
### Defining the initial datasets. THey will be used for the plots created below
business_search = google_sheets_data('business_search')
business_reviews = google_sheets_data('business_reviews')
bs_data = | pd.DataFrame(business_search) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.core.display import display
from cycler import cycler
import matplotlib as mpl
import re
from ads.common import logger
mpl.rcParams["image.cmap"] = "BuGn"
mpl.rcParams["axes.prop_cycle"] = cycler(
color=["teal", "blueviolet", "forestgreen", "peru", "y", "dodgerblue", "r"]
)
from ads.evaluations.evaluation_plot import EvaluationPlot
from ads.evaluations.statistical_metrics import ModelEvaluator
from ads.dataset.dataset_with_target import ADSDatasetWithTarget
from ads.common.model import ADSModel
from ads.common.decorator.runtime_dependency import runtime_dependency
class ADSEvaluator(object):
    """ADS Evaluator class. This class holds fields and methods for creating and using
ADS evaluator objects.
Attributes
----------
evaluations : list[DataFrame]
list of evaluations.
is_classifier : bool
Whether the model has a non-empty `classes_` attribute indicating the presence of class labels.
legend_labels : dict
List of legend labels. Defaults to `None`.
metrics_to_show : list[str]
Names of metrics to show.
models : list[ads.common.model.ADSModel]
The object built using `ADSModel.from_estimator()`.
positive_class : str or int
        The class to report metrics for a binary dataset, treated as the positive class.
show_full_name :bool
Whether to show the name of the evaluator in relevant contexts.
test_data : ads.common.data.ADSData
Test data to evaluate model on.
training_data : ads.common.data.ADSData
Training data to evaluate model.
Positive_Class_names : list
Class attribute listing the ways to represent positive classes
Methods
-------
add_metrics(func, names)
        Adds the listed metrics to the evaluator it is called on
del_metrics(names)
Removes listed metrics from the evaluator object it is called on
add_models(models, show_full_name)
Adds the listed models to the evaluator object
del_models(names)
Removes the listed models from the evaluator object
show_in_notebook(plots, use_training_data, perfect, baseline, legend_labels)
        Visualize evaluation plots in the notebook
calculate_cost(tn_weight, fp_weight, fn_weight, tp_weight, use_training_data)
Returns a cost associated with the input weights
"""
Positive_Class_Names = ["yes", "y", "t", "true", "1"]
def __init__(
self,
test_data,
models,
training_data=None,
positive_class=None,
legend_labels=None,
show_full_name=False,
):
"""Creates an ads evaluator object.
Parameters
----------
test_data : ads.common.data.ADSData instance
Test data to evaluate model on.
The object can be built using `ADSData.build()`.
models : list[ads.common.model.ADSModel]
The object can be built using `ADSModel.from_estimator()`.
Maximum length of the list is 3
training_data : ads.common.data.ADSData instance, optional
Training data to evaluate model on and compare metrics against test data.
The object can be built using `ADSData.build()`
positive_class : str or int, optional
            The class to report metrics for a binary dataset. If the target classes are True and False,
positive_class will be set to True by default. If the dataset is multiclass or multilabel,
this will be ignored.
legend_labels : dict, optional
List of legend labels. Defaults to `None`.
If legend_labels not specified class names will be used for plots.
show_full_name : bool, optional
Show the name of the evaluator object. Defaults to `False`.
Examples
--------
>>> train, test = ds.train_test_split()
>>> model1 = MyModelClass1.train(train)
>>> model2 = MyModelClass2.train(train)
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> legend_labels={'class_0': 'one', 'class_1': 'two', 'class_2': 'three'}
>>> multi_evaluator = ADSEvaluator(test, models=[model1, model2],
... legend_labels=legend_labels)
"""
self.evaluations = []
if isinstance(training_data, ADSDatasetWithTarget):
training_data, _ = training_data.train_test_split(test_size=0.0)
if isinstance(test_data, ADSDatasetWithTarget):
test_data, _ = test_data.train_test_split(test_size=0.0)
self.test_data = test_data
self.training_data = training_data
self.classes = []
self.is_classifier = (
hasattr(models[0], "classes_") and models[0].classes_ is not None
)
pclass = positive_class
if self.is_classifier:
self.classes = list(models[0].classes_)
if len(self.classes) == 2:
self.metrics_to_show = [
"accuracy",
"hamming_loss",
"precision",
"recall",
"f1",
"auc",
]
if positive_class is None or positive_class not in self.classes:
pclass = next(
(
x
for x in list(self.classes)
if str(x).lower() in ADSEvaluator.Positive_Class_Names
),
self.classes[0],
)
logger.info(
f"Using {pclass} as the positive class. Use `positive_class` to set this value."
)
else:
# Multi-class
self.metrics_to_show = [
"accuracy",
"hamming_loss",
"precision_weighted",
"precision_micro",
"recall_weighted",
"recall_micro",
"f1_weighted",
"f1_micro",
]
else:
# Regression
self.metrics_to_show = ["r2_score", "mse", "mae"]
self.positive_class = pclass
self.legend_labels = legend_labels
for m in models:
if not (isinstance(m, ADSModel)):
try:
m = ADSModel.from_estimator(m.est)
except:
logger.info("This model cannot be converted to an ADS Model.")
self.evaluations = [pd.DataFrame(), pd.DataFrame()]
self.model_names = []
self.add_models(models, show_full_name=show_full_name)
def add_metrics(self, funcs, names):
"""Adds the listed metrics to the evaluator object it is called on.
Parameters
----------
funcs : list
The list of metrics to be added. This function will be provided `y_true`
and `y_pred`, the true and predicted values for each model.
names : list[str])
The list of metric names corresponding to the functions.
Returns
-------
Nothing
Examples
--------
>>> def f1(y_true, y_pred):
... return np.max(y_true - y_pred)
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.add_metrics([f1], ['Max Residual'])
>>> evaluator.metrics
Output table will include the desired metric
"""
if len(funcs) != len(names):
raise ValueError("Could not find 1 unique name for each function")
for name, f in zip(names, funcs):
f_res = []
for m in self.evaluations[1].columns:
res = f(
self.evaluations[1][m]["y_true"], self.evaluations[1][m]["y_pred"]
)
f_res.append(res)
pd_res = pd.DataFrame(
[f_res], columns=self.evaluations[1].columns, index=[name]
)
self.evaluations[1] = pd.concat([self.evaluations[1], pd_res])
if self.evaluations[0].shape != (0, 0):
f_res = []
for m in self.evaluations[0].columns:
res = f(
self.evaluations[0][m]["y_true"],
self.evaluations[0][m]["y_pred"],
)
f_res.append(res)
pd_res = pd.DataFrame(
[f_res], columns=self.evaluations[0].columns, index=[name]
)
                self.evaluations[0] = pd.concat([self.evaluations[0], pd_res])
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
        with HDFStore(path, "r")
from numpy import mean
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plot
import matplotlib.mlab as mlab
import matplotlib.pylab as lab
import matplotlib.patches as patches
import matplotlib.ticker as plticker
from matplotlib import rcParams
from matplotlib import gridspec
from matplotlib import cm
import sys
import time
rcParams['font.sans-serif'] = 'Arial'
infile = sys.argv[1] # which file to go over
window = int(sys.argv[2]) # what size window was used?
slide = int(sys.argv[3]) # what slide
bottompanel = int(sys.argv[4]) # this is how often to plot points on the bottom panel
scaffold_file = sys.argv[5] # file with the scaffold and length
color_scheme = sys.argv[6] # what color scheme to use?
outdir = sys.argv[7] # where to output the pdf
# this is to process the file and output the shading correctly
def scale_dict(info_file):
info_read = open(info_file, "r")
info_dict = {}
final_lenny = 0
lenny = 0
for line in info_read:
linetab = (line.rstrip()).split("\t")
scaffy = linetab[0]
final_lenny = final_lenny + lenny
lenny = int(linetab[1])
info_dict[scaffy] = final_lenny
info_read.close()
return(info_dict)
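# Illustration (hedged, not part of the original script): for a scaffold file containing
#   chr1<TAB>100
#   chr2<TAB>250
# scale_dict returns {"chr1": 0, "chr2": 100}, i.e. the cumulative offset at which each
# scaffold starts along the concatenated genome axis used for plotting and shading.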
# processing the scaffold information
shader = scale_dict(scaffold_file)
maxx = sum(pd.read_csv(scaffold_file, sep="\t", header=None)[1])
print(maxx)
midwin = window/2
outfiley = open(outdir+"/connected_fragments.txt", "w")
b = pd.read_csv(infile)
comparisons = list(b.columns[3:])
strains = []
ncomps = len(comparisons)
for comp in comparisons:
strainies = comp.split(";")
if (strainies[0] in strains)==False:
strains.append(strainies[0])
if comp == comparisons[ncomps-1]:
strains.append(strainies[1])
nstrains = len(strains)
print("nstrains", nstrains)
grids = sum(range(nstrains+4))+25
print("grids", grids)
fig = plot.figure(figsize=(grids, grids), dpi=10000)
gs = gridspec.GridSpec(grids, grids)
begger = 1
mover = nstrains-1
ender = begger + mover
lastfirst = None
boxcount = pd.Series()
## let's do colors
## have to normalize the data set first so that they're continuous
norm = matplotlib.colors.Normalize(vmin=75, vmax=100)
m = cm.ScalarMappable(norm=norm, cmap=color_scheme)
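# m.to_rgba(v) turns a percent-identity value v (normalized against the 75-100 range)
# into an RGBA colour from the chosen colormap, e.g. colori = m.to_rgba(85).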
for comparison in comparisons:
print("%s: %s"%("processing", comparison))
compare = comparison.split(";")
compcol = b[comparison]
first = compare[0]
if lastfirst == None:
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
tickies = list(pd.Series(range(len(remains)))+0.5)
        # label offset needs to be .2 to work well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
        ax.title.set_fontsize(80) # this is very awkward, but it is the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
        # these are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
else:
if first == lastfirst:
ncomp = ncomp- 1
pass
else:
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
begger = ender + 4
mover = mover - 1
ender = begger + mover
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
tickies = list(pd.Series(range(len(remains)))+0.5)
        # label offset needs to be .2 to work well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
        ax.title.set_fontsize(80) # this is very awkward, but it is the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
        # these are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
second = compare[1]
############################## 75 ####################################
asel = compcol[(compcol >= 75) & (compcol < 83)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 75-83% similar")
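        # Run detection (comments added for clarity): samies holds the sorted row indices of the
        # windows falling in this identity bin. backsies/fronties hold each window's next/previous
        # index (with a -20 sentinel); positions where the difference is exactly 1 are adjacent
        # windows, from which beggies/endies record the first and last row index of every run of
        # consecutive windows, while windows not absorbed into a run remain in `singletons`.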
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
        for single in singletons:
            medwinpos = b.loc[single,'total_med']
            colori = m.to_rgba(int(b.loc[single, comparison]))  # colour singleton windows by their own identity value
            begpos = medwinpos-midwin
            endpos = medwinpos+midwin
            outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
            ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 83 ####################################
asel = compcol[(compcol >= 83) & (compcol < 90)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 83-90% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
        for single in singletons:
            medwinpos = b.loc[single,'total_med']
            colori = m.to_rgba(int(b.loc[single, comparison]))  # colour singleton windows by their own identity value
            begpos = medwinpos-midwin
            endpos = medwinpos+midwin
            outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
            ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 90 #############################################
asel = compcol[(compcol >= 90) & (compcol < 95)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 90-95% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(90)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
        for single in singletons:
            medwinpos = b.loc[single,'total_med']
            colori = m.to_rgba(int(b.loc[single, comparison]))  # colour singleton windows by their own identity value
            begpos = medwinpos-midwin
            endpos = medwinpos+midwin
            outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(90)))
            ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 95 #############################################
# these first, then the ones that are the same
asel = compcol[compcol >= 95]
samies = list(asel.index)
singletons = set(samies)
# let's plot these high values first
# start with a boolean approach to identify long fragments of similarity
if len(samies) > 0:
print("processing 95% similar and higher")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
keep_score = set([])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
########## this is for frequency plotting later
testbeg = begpos%bottompanel
if testbeg == 0:
shift = 0
else:
shift = bottompanel-testbeg
actualbeg = begpos + shift
#end
testend = endpos%bottompanel
shift = bottompanel-testend
actualend = endpos + shift
# now get the stuff
stellar = [i for i in range(actualbeg, actualend, bottompanel) if i not in keep_score]
            newvals = pd.Series(stellar)
import glob
import math
import uuid
from enum import Enum
from typing import Union, Optional, Tuple, Iterable, List, Dict
import numpy as np
import pandas as pd
import os
import ray
Data = Union[str, List[str], np.ndarray, pd.DataFrame, pd.Series]
class RayFileType(Enum):
CSV = 1
PARQUET = 2
class RayShardingMode(Enum):
INTERLEAVED = 1
BATCH = 2
class _RayDMatrixLoader:
def __init__(self,
data: Data,
label: Optional[Data] = None,
filetype: Optional[RayFileType] = None,
ignore: Optional[List[str]] = None,
**kwargs):
self.data = data
self.label = label
self.filetype = filetype
self.ignore = ignore
self.kwargs = kwargs
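        # Hedged usage note (path and column name below are assumptions, not from this module):
        # when a data path has no recognisable extension, pass the filetype explicitly, e.g.
        #   _RayDMatrixLoader("s3://bucket/train_data", label="target",
        #                     filetype=RayFileType.PARQUET)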
if isinstance(data, str):
if not self.filetype:
# Try to guess filetype from file ending
if data.endswith(".csv"):
self.filetype = RayFileType.CSV
elif data.endswith(".parquet"):
self.filetype = RayFileType.PARQUET
else:
                    raise ValueError(
                        "File or stream specified as data source, but "
                        "filetype could not be detected. "
                        "\nFIX THIS by passing "
"the `filetype` parameter to the RayDMatrix. Use the "
"`RayFileType` enum for this.")
def _split_dataframe(self, local_data: pd.DataFrame) -> \
Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
"""Split dataframe into `features`, `labels`"""
if self.label is not None:
if isinstance(self.label, str):
x = local_data[local_data.columns.difference([self.label])]
y = local_data[self.label]
else:
x = local_data
if not isinstance(self.label, pd.DataFrame):
y = pd.DataFrame(self.label)
else:
y = self.label
return x, y
return local_data, None
def _load_data_numpy(self, data: Data):
return pd.DataFrame(
data, columns=[f"f{i}" for i in range(data.shape[1])])
def _load_data_pandas(self, data: Data):
return data
def _load_data_csv(self, data: Data):
if isinstance(data, Iterable) and not isinstance(data, str):
return pd.concat([
| pd.read_csv(data_source, **self.kwargs) | pandas.read_csv |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
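        # self.ymd: daily sums carried by a 3-level (year, month, day) MultiIndex;
        # it is reused throughout the stack/unstack and level-selection tests below.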
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except Exception:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted( | zip(*arrays) | pandas.compat.zip |
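The tests in the row above repeatedly rebuild a MultiIndex from parallel level arrays and then recover those arrays again with zip(*...) (on Python 3, pandas.compat.lzip is essentially list(zip(...))). A minimal, self-contained sketch of that round trip, using made-up level values rather than the fixtures from the tests:

import numpy as np
import pandas as pd

arrays = [['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]
tuples = sorted(zip(*arrays))          # row tuples: ('bar', 'one'), ('bar', 'two'), ...
index = pd.MultiIndex.from_tuples(tuples)
# Recover one array per level from the tuples again, as the tests do with zip(*...).
levels_back = [np.array(level) for level in zip(*index.tolist())]
assert levels_back[0].tolist() == ['bar', 'bar', 'baz', 'baz']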
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [ | Timedelta("1 days") | pandas.Timedelta |
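test_iter_box above checks that iterating datetime- and timedelta-typed Series yields boxed pandas scalars rather than raw numpy values. A small standalone sketch of the timedelta case (the values here are arbitrary, not taken from the test):

from datetime import timedelta
import pandas as pd

s = pd.Series([timedelta(days=1), timedelta(days=2)])
assert s.dtype == "timedelta64[ns]"
for value in s:
    assert isinstance(value, pd.Timedelta)   # each element is boxed, not raw numpy
assert pd.Timedelta("1 days") == s.iloc[0]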
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
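# Note on the three header-reading methods above (file name is a placeholder):
#     fieldnames, fieldDict = self.getHeaderFromFile('header.csv', method=2)
# method=1 passes index_col=0 to pandas.read_csv, so the first header field becomes
# the index and is dropped from the returned names; method=2 (csv.DictReader) and
# method=3 keep every header field.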
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
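# Worked example of the multiplication trick above (illustrative values only):
#     x = numpy.float128([2.0, 4.0, 6.0])
#     numpy.average(x, weights=numpy.ones_like(x) / x.size)   # -> 4.0
# Each weight is 1/n and numpy.average normalises by the weight sum (1.0), so the
# result equals the ordinary mean without an explicit division by n.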
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
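# Illustrative numbers for the defaults above: with mean 100.0, sigma 5.0 and
# multiplierSigma 3.0, sigmaRangeValue is 15.0 and topValue is 115.0, i.e. the
# upper cut-off used below to drop samples beyond mean + 3 * sigma.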
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out: beyond mean + multiplierSigma * sd (the topValue cut-off)
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
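# Typical call (frame and column names are placeholders):
#     cleaned = self._cleanFrame(rawFrame, cleanColumn=True, columnName='cycles')
# which first drops zero/out-of-range cycle rows, then replaces NaT/inf/'NULL'
# markers with NaN and drops those rows.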
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": | pandas.StringDtype() | pandas.StringDtype |
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import datetime as dt
import urllib.request, json
import os
import numpy as np
import tensorflow as tf # This code has been tested with TensorFlow 1.6
from sklearn.preprocessing import MinMaxScaler
import sys
ds = None
if (len(sys.argv) < 2):
ds = 'kaggle'
elif (len(sys.argv) == 2):
if ((sys.argv[1] == "-k") or (sys.argv[1] == '--kaggle')):
ds = 'kaggle'
elif ((sys.argv[1] == "-a") or (sys.argv[1] == '--alphavantage')):
ds = 'alphavantage'
else:
exit(1)
#print (ds)
data_source = ds # alphavantage or kaggle
if data_source == 'alphavantage':
# ====================== Loading Data from Alpha Vantage ==================================
# TODO: read from file
api_key = '<KEY>'
# TODO: make kwarg
# American Airlines stock market prices
ticker = "AAL"
# JSON file with all the stock market data for AAL from the last 20 years
url_string = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=full&apikey=%s"%(ticker,api_key)
# Save data to this file
file_to_save = 'stock_market_data-%s.csv'%ticker
# If you haven't already saved data,
# Go ahead and grab the data from the url
# And store date, low, high, volume, close, open values to a Pandas DataFrame
if not os.path.exists(file_to_save):
with urllib.request.urlopen(url_string) as url:
data = json.loads(url.read().decode())
# extract stock market data
data = data['Time Series (Daily)']
df = pd.DataFrame(columns=['Date','Low','High','Close','Open'])
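# Hedged sketch of the rest of this branch (the block above is truncated): each daily
# entry in the Alpha Vantage "Time Series (Daily)" payload is appended as one row and
# the frame is cached to file_to_save. The '1. open'/'2. high' style keys follow the
# documented payload layout.
#   for date_str, values in data.items():
#       date = dt.datetime.strptime(date_str, '%Y-%m-%d')
#       df.loc[len(df)] = [date.date(), float(values['3. low']), float(values['2. high']),
#                          float(values['4. close']), float(values['1. open'])]
#   df.to_csv(file_to_save, index=False)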
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
import os
import re
from sklearn.model_selection import train_test_split
import random
import scorecardpy as sc
# split train into train data and test data
# os.chdir(r'D:\GWU\Aihan\DATS 6103 Data Mining\Final Project\Code')
def split_data(inpath, target_name, test_size):
df = pd.read_csv(inpath)
y = df[target_name]
#x = df1.loc[:,df1.columns!='loan_default']
x=df.drop(target_name,axis=1)
# set a random seed for the data, so that we could get the same train and test set
random.seed(12345)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=1, stratify=y)
training = pd.concat([X_train, y_train], axis=1)
testing = pd.concat([X_test, y_test], axis=1)
return training, testing
class PreProcessing():
def __init__(self, df):
self.Title = "Preprocessing Start"
self.df = df
# checking the null value and drop the null value
def Null_value(self):
self.df.isnull().sum()
self.df_new = self.df.dropna()
return self.df_new
# convert 'AVERAGE.ACCT.AGE' and 'CREDIT.HISTORY.LENGTH' from the 'Xyrs Xmon' format to a total number of months
def find_number(self, text):
num = re.findall(r'[0-9]+',text)
return int(num[0])*12 + int(num[1])
def comvert_format(self, colname):
colname_new = self.df[colname].apply(lambda x: self.find_number(x))
self.df[colname] = colname_new
# convert categorical string to numbers
def convert_cate_to_num(self, colname_list):
for colname in colname_list:
self.df[colname] = self.df[colname].astype('category')
cat_columns = self.df.select_dtypes(['category']).columns
self.df[cat_columns] = self.df[cat_columns].apply(lambda x: x.cat.codes)
def format_date(self, colname_list):
for colname in colname_list:
self.df[colname] = pd.to_datetime(self.df[colname], format = "%d-%m-%y",infer_datetime_format=True)
def format_age_disbursal(self):
self.df['Date.of.Birth'] = self.df['Date.of.Birth'].where(self.df['Date.of.Birth'] < pd.Timestamp('now'),
self.df['Date.of.Birth'] - np.timedelta64(100, 'Y'))
self.df['Age'] = (pd.Timestamp('now') - self.df['Date.of.Birth']).astype('<m8[Y]').astype(int)
self.df['Disbursal_months'] = ((pd.Timestamp('now') - self.df['DisbursalDate']) / np.timedelta64(1, 'M')).astype(int)
def bin_cutpoint(self, target_name, colname_list):
for colname in colname_list:
bins_disbursed_amount = sc.woebin(self.df, y=target_name, x=[colname])
sc.woebin_plot(bins_disbursed_amount)
pd.concat(bins_disbursed_amount)
list_break = pd.concat(bins_disbursed_amount).breaks.astype('float').to_list()
list_break.insert(0, float('-inf'))
# list_break
self.df[colname] = pd.cut(self.df[colname], list_break)
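# Hedged usage sketch for the helpers above (the path and column names are placeholders
# based on the vehicle-loan naming used in the comments):
#   train, test = split_data('train.csv', 'loan_default', test_size=0.2)
#   prep = PreProcessing(train)
#   train_clean = prep.Null_value()
#   prep.comvert_format('AVERAGE.ACCT.AGE')
#   prep.comvert_format('CREDIT.HISTORY.LENGTH')
#   prep.bin_cutpoint('loan_default', ['disbursed_amount'])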
#! /usr/bin/env python3
import argparse
import json
import logging
import logging.config
import os
import sys
import time
from concurrent import futures
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import ServerSideExtension_pb2 as SSE
import grpc
from SSEData_churn import FunctionType, \
get_func_type
from ScriptEval_churn import ScriptEval
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# LOAD THE SCALER
scaler = joblib.load('Scaler/scaler.sav')
class ExtensionService(SSE.ConnectorServicer):
"""
A simple SSE plugin created for the churn prediction example.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
self.scriptEval = ScriptEval()
if not os.path.exists('logs'):
os.mkdir('logs')
logging.config.fileConfig('logger.config')
logging.info('Logging enabled')
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_churn'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
"""
Implementation of added functions.
"""
@staticmethod
def _churn(request, context):
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# instantiate list
modelName = None
columnNames = None
dataList = []
for request_rows in request:
for row in request_rows.rows:
# model name - only grab once as it is repeated
if not modelName:
modelName = ''
modelName = 'Models/' + [d.strData for d in row.duals][0] + '.pkl'
# column names - only grab once as it is repeated
if not columnNames:
columnNames = ''
columnNames = [d.strData.replace('\\',' ').replace('[','').replace(']','') for d in row.duals][1]
columnNames = columnNames.split('|')
# pull duals from each row, and the strData from duals
data = [d.strData.split('|') for d in row.duals][2]
dataList.append(data)
churn_df = pd.DataFrame(dataList)
import boto3
import json
import pandas as pd
import numpy as np
import random
import re
import os
from global_variables import API_PARAMETERS_FILE
from global_variables import print_green, print_yellow, print_red
from global_variables import service_dict
from global_variables import default_feature_list
def read_api_calls_file():
with open(API_PARAMETERS_FILE, "r") as log_file:
data = json.load(log_file)
return data
def call_api_with_auto_generated_parameters(api, method, debug=True):
try:
client = boto3.client(api)
except ValueError as e:
print_red(str(e))
api_calls = read_api_calls_file()
# normal api calls
api_methods = json.loads(api_calls[api])
if debug:
print_green("Print list of methods belong to {0}".format(api))
for method in api_methods:
print(method)
response = None
try:
parameters = api_methods[method]
valued_params = {}
for parm in parameters:
valued_params[parm] = "random-value"
callable_api = getattr(client, method)
print_yellow(valued_params)
response = callable_api(**valued_params)
if debug:
print_yellow("Response after calling {0}.{1}:{2}".format(api,method,response))
except ValueError as e:
print_red(str(e))
return response
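# Hedged usage sketch (the service and method names are placeholders; valid choices
# depend on what API_PARAMETERS_FILE actually contains):
#   response = call_api_with_auto_generated_parameters('ec2', 'describe_instances', debug=False)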
def assign_random_user():
"""
75% user with temporary credential
25% root
"""
rand = random.randint(0, 3)  # 0..3 inclusive; >= 1 covers 3 of 4 outcomes (75%)
if rand >= 1:
user = "AssumedRole"
else:
user = "Root"
return user
def generate_api_calls_based_on_aws_cloudtrail_logs(log_file, number_of_replication, produce_random_sample=False):
"""
NOT TO USE THIS FUNCTION
"""
# read the log file
collected_logs = pd.read_csv(log_file)
columns = collected_logs.columns.tolist()
# first col: method, second api:request parameters
for log in collected_logs:
print(log)
for i in range(number_of_replication):
user = assign_random_user()
# region = assign_random_region()
for feature in columns:
call_api_with_auto_generated_parameters()
class AWSLogAnalyzer(object):
def __init__(self, log_file, log_dir, read_all_file=False):
self.log_file = log_dir + log_file
if read_all_file:
self.log_df = pd.read_csv(log_dir + log_file)
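# Hedged usage sketch (file and directory names are placeholders):
#   analyzer = AWSLogAnalyzer('cloudtrail_events.csv', '/tmp/aws_logs/', read_all_file=True)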
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2 / 3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp
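# Hedged usage sketch for lm() on synthetic data (names and values are illustrative):
#   xs = pd.Series(np.linspace(0.0, 10.0, 50))
#   ys = pd.Series(2.0 * xs + np.random.normal(scale=0.5, size=50))
#   x_fit, y_hat, ci_low, ci_upp = lm(xs, ys)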
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y)
y2 = pd.Series(upper * std + y)
from datetime import datetime
import pandas as pd
import pytest
def test_params_1():
d1 = {
"PIDN": [1, 1, 3],
"DCDate": [datetime(2001, 3, 2), datetime(2001, 3, 2), datetime(2001, 8, 1)],
"Col1": [7, 7, 9],
}
primary = pd.DataFrame(data=d1)
import csv
import os
import pandas as pd
import math
import numpy as np
POIEdges = {'Sathorn_Thai_1': ['L197#1', 'L197#2'],
'Sathorn_Thai_2': ['L30', 'L58#1', 'L58#2'],
'Charoenkrung_1': ['L30032'],
'Charoenkrung_2': ['L60', 'L73', 'L10149#1', 'L10149#2'],
'Charoenkrung_3': ['L67'],
'Silom_1': ['L138'],
'Silom_2': ['L133.25'],
'Silom_3': ['L49'],
'Mehasak': ['L64'],
'Surasak': ['L10130', 'L10189'],
'Charoen_Rat': ['L40']
}
percentage = ['100%','1%','5%','10%','15%','20%','25%','30%','35%','40%','45%','50%']
resolution = ['1','5','10','15','20','25','30','35','40','45','50','55','60']
def createFileForMean(fileNo):
# to get the current directory
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
os.mkdir(dirpath + '/' + str(fileNo) + '/statistics')
for time_resolution in resolution:
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
#print(len(percentage))
heading = ["Road Name",*percentage]
writer1.writerow(heading)
myfile1.close()
def createFileForStd(fileNo):
# to get the current directory
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for freq in resolution:
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(freq) + '_std.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
# print(len(percentage))
heading = ["Road Name", *percentage]
writer1.writerow(heading)
myfile1.close()
def parseFloat(str):
try:
return float(str)
except:
str = str.strip()
if str.endswith("%"):
return float(str.strip("%").strip()) / 100
raise Exception("Don't know how to parse %s" % str)
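# Hedged examples of parseFloat behaviour: parseFloat('5%') -> 0.05, parseFloat('60') -> 60.0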
def statisticsForResolution_And_Percentage(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
###############################################
#https://www.geeksforgeeks.org/program-implement-standard-error-mean/
# arr[] = {78.53, 79.62, 80.25, 81.05, 83.21, 83.46}
# mean = (78.53 + 79.62 + 80.25 + 81.05 + 83.21 + 83.46) / 6
# = 486.12 / 6
# = 81.02
# Sample Standard deviation = sqrt((78.53 – 81.02)
# 2 + (79.62 - 81.02)
# 2 + ...
# + (83.46 – 81.02)
# 2 / (6 – 1))
# = sqrt(19.5036 / 5)
# = 1.97502
# Standard error of mean = 1.97502 / sqrt(6)
# = 0.8063
###############################################
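# The same arithmetic expressed in code, for reference (matches the worked example above):
#   arr = np.array([78.53, 79.62, 80.25, 81.05, 83.21, 83.46])
#   sem = arr.std(ddof=1) / math.sqrt(arr.size)  # 1.97502 / sqrt(6) ~= 0.8063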
for time_resolution in resolution:
for edge, value in POIEdges.items():
meanSpeed = []
std = []
for pcent in percentage:
path = dirpath + '/'+str(fileNo)+'/' + edge + '_' + time_resolution + '_' + pcent + '.csv'
if (os.path.exists(path)):
link_df = pd.read_csv(path)
meanSpeed.append(link_df['Mean Speed (km/h)'].mean())
std.append(link_df['Mean Speed (km/h)'].std())
myfile = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow([edge, *meanSpeed])
myfile.close()
myfile = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow([edge, *std])
myfile.close()
def stasticsforLoop(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for time_resolution in resolution:
###################for standard deviation###############################
temp_1 =[]
temp_1.append('Mean of standard deviation for all edges')
temp_2 = []
temp_2.append('Standard Error of Mean')
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
road_df = pd.read_csv(path)
for column in (list(road_df)):
if column !='Road Name':
#print('Number of roads :', len(road_df))
temp_1.append(road_df[column].sum()/len(road_df)) # Mean of standard deviation for all edges
percent = parseFloat(column)
#print(time_resolution,column, road_df[column].sum())
#print(len(road_df))
temp_2.append((road_df[column].sum()/len(road_df))/math.sqrt(percent))# standard error of mean for all edges
road_df.loc[len(road_df)] = temp_1
road_df.loc[len(road_df)+1] = temp_2
road_df.to_csv(dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv',
index=False)
##################for mean ###############################
temp = []
temp.append('Mean Speed for all edges')
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv'
if (os.path.exists(path)):
road_df = pd.read_csv(path)
for column in (list(road_df)):
if column != 'Road Name':
temp.append(road_df[column].sum() / len(road_df))
road_df.loc[len(road_df)] = temp
road_df.to_csv(dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv',
index=False)
def readTotal(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
####################for mean ###########################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_mean.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution",*percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp =[]
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv'
if (os.path.exists(path)):
time_df = pd.read_csv(path)
temp = time_df.iloc[-1,:].values.tolist()
#print(temp)
temp.pop(0)
writer1.writerow([time_resolution,*temp])
myfile1.close()
##################for standard deviation #################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_std.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution", *percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp = []
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
time_df = pd.read_csv(path)
temp = time_df.iloc[-2,:].values.tolist()
temp.pop(0)
writer1.writerow([time_resolution,*temp])
myfile1.close()
###################for standard error of mean################################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_stdError.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution", *percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp = []
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
time_df = pd.read_csv(path)
temp = time_df.iloc[-1, :].values.tolist()
temp.pop(0)
writer1.writerow([time_resolution, *temp])
myfile1.close()
def plotAllMean(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
path = dirpath + '/'+str(fileNo)+'/statistics/All_mean.csv'
if (os.path.exists(path) ):
mean_all_df = pd.read_csv(path)
colors = ['#CB4335', '#808000', '#616A6B', '#009E73', '#ABB2B9', '#E69F00', '#800000']
#percentage.pop(0)
names = percentage
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
mean_df = mean_all_df.set_index("Time Resolution")
ax = mean_df[[*names]].plot(kind='bar', figsize=(15, 10), legend=True, color=colors)
ax.set_xlabel("Time resolution", fontsize=20, fontname='Times New Roman')
ax.set_ylabel("Mean", fontsize=20, fontname='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'family': 'Times New Roman', 'size': 20})
# plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname='Times New Roman', rotation=0)
plt.yticks(fontsize=20, fontname='Times New Roman')
# plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/All_mean.png',
width=1800, height=500)
def plotAllStd(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
path = dirpath + '/'+str(fileNo)+'/statistics/All_std.csv'
if (os.path.exists(path)):
std_all_df = pd.read_csv(path)
#colors = ['#E69F00', '#56B4E9', '#F0E442', '#009E73', '#D55E00', '#2E4053', '#0000FF']
colors = ['#CB4335', '#808000', '#616A6B', '#009E73', '#ABB2B9', '#E69F00', '#0000FF']
#percentage.pop(0)
names = percentage
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = std_all_df.set_index("Time Resolution")
ax = df[[*names]].plot(kind='bar', figsize=(15, 10), legend=True, color=colors)
ax.set_xlabel("Time resolution", fontsize=20, fontname='Times New Roman')
ax.set_ylabel("Standard deviation", fontsize=20, fontname='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'family': 'Times New Roman', 'size': 20})
# plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname='Times New Roman', rotation=0)
plt.yticks(fontsize=20, fontname='Times New Roman')
# plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/All_std.png',
width=1800, height=500)
def plotAllStandardErrorOfMean(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
path = dirpath + '/'+str(fileNo)+'/statistics/All_stdError.csv'
if (os.path.exists(path)):
stderror_all_df = pd.read_csv(path)
#colors = ['#E69F00', '#56B4E9', '#F0E442', '#009E73', '#D55E00', '#2E4053', '#0000FF']
#colors = ['#CB4335', '#808000', '#616A6B', '#009E73', '#ABB2B9', '#E69F00', '#0000FF']
colors = ['#FFA07A', '#FF7F50', '#FFFFE0', '#98FB98', '#E0FFFF', '#B0E0E6', '#E6E6FA', '#FFC0CB',
'#F0FFF0', '#DCDCDC', '#8B0000', '#FF8C00', '#FFFF00', '#6B8E23', '#008080',
'#483D8B', '#4B0082', '#C71585', '#000000', '#800000']
names = percentage
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = stderror_all_df.set_index("Time Resolution")
ax = df[[*names]].plot(kind='bar',figsize=(15, 10), legend=True, color=colors)
ax.set_xlabel("Time Resolution", fontsize=20, fontname ='Times New Roman')
ax.set_ylabel("Standard Error of Mean", fontsize=20, fontname ='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),prop={'family' :'Times New Roman','size':20})
#plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname = 'Times New Roman',rotation=0)
plt.yticks(fontsize=20,fontname = 'Times New Roman')
#plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/All_stdError.png',
width=1800, height=500)
def plotLineChart_AllStandardErrorOfMean(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
path = dirpath + '/'+str(fileNo)+'/statistics/All_stdError.csv'
if (os.path.exists(path)):
stderror_all_df = pd.read_csv(path)
#colors = ['#E69F00', '#56B4E9', '#F0E442', '#009E73', '#D55E00', '#2E4053', '#0000FF']
#colors = ['#CB4335', '#808000', '#616A6B', '#009E73', '#ABB2B9', '#E69F00', '#0000FF']
colors =['#FFA07A','#FF7F50','#FFFFE0','#98FB98','#E0FFFF','#B0E0E6','#E6E6FA','#FFC0CB',
'#F0FFF0','#DCDCDC','#8B0000','#FF8C00', '#FFFF00', '#6B8E23', '#008080',
'#483D8B', '#4B0082', '#C71585', '#000000', '#800000']
names = percentage
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = stderror_all_df.set_index("Time Resolution")
ax = df[[*names]].plot(kind='line',figsize=(15, 10), legend=True, color=colors)
ax.set_xlabel("Time Resolution", fontsize=20, fontname ='Times New Roman')
ax.set_ylabel("Standard Error of Mean", fontsize=20, fontname ='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),prop={'family' :'Times New Roman','size':20})
#plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname = 'Times New Roman',rotation=0)
plt.yticks(fontsize=20,fontname = 'Times New Roman')
#plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/All_stdError_Line.png',
width=1800, height=500)
################################################################
def diff(first, second):
second = set(second)
return [item for item in first if item not in second]
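# Hedged example: diff([0, 1, 2, 3], [1, 3]) returns [0, 2]; the order of `first` is preserved.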
# def detectionMetric():
# dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
# resolution = ['60']
#
# myfile1 = open(
# dirpath + '/'+str(fileNo)+'/statistics/detectionMetric.csv', 'w', newline='')
# writer1 = csv.writer(myfile1)
# # print(len(percentage))
# heading = ["Samples","NDPG","NTPG","NPFA","NTPA","DR_PG","FAR_PG","NDG","NTG","NFA","NTA","DR_G","FAR_G"]
# writer1.writerow(heading)
#
# #############################
# #NTPG = Number of total potential gridlock
# #NDPG = Number of detected potential gridlock
# #NTPA = Number of total potential gridlock alarm
# #NPFA = Number of potential gridlock false alarm
# #DR_PG = Detected rate of potential gridlock
# #FAR_PG = False alarm rate of potential gridlock
#
# # NTG = Number of total gridlock
# # NDG = Number of detected gridlock
# # NTA = Number of total gridlock alarm
# # NFA = Number of gridlock false alarm
# # DR_G = Detected rate of gridlock
# # FAR_G = False alarm rate of gridlock
# #############################
#
# actual_data = pd.read_csv(dirpath + '/'+str(fileNo)+'/cluster/60_100%_AllCluster.csv')
#
#
# NTPG_list = actual_data[actual_data['Gridlock']==1]
# #print(NTPG_list)
# NTPG = len(NTPG_list)
# actual_potential_Index = actual_data.query('Gridlock == 1').index.tolist()
#
#
# NTG_list = actual_data[actual_data['Gridlock']==2]
# NTG = len(NTG_list)
# actual_gridlock_Index = actual_data.query('Gridlock == 2').index.tolist()
#
#
#
# for time_resolution in resolution:
# #percentage.pop(0)
# #print(percentage)
#
# for pcent in percentage:
# predicted_data = pd.read_csv(dirpath + '/'+str(fileNo)+'/cluster/' + time_resolution + '_' + pcent + '_AllCluster.csv')
#
# ####################### for potential gridlock ####################
# predicted_potential_Index = predicted_data.query('Gridlock == 1').index.tolist()
# NDPG = len(set(actual_potential_Index) & set(predicted_potential_Index))
# NTPA = len(predicted_potential_Index)
# NPFA = len(diff(predicted_potential_Index,actual_potential_Index))
#
# DR_PG = 0
# FAR_PG = 0
#
# if NTPG !=0 and NTPA !=0:
# DR_PG = (NDPG/NTPG) *100
# FAR_PG = (NPFA/NTPA) *100
#
#
# #####################for gridlock #################################
# predicted_gridlock_Index = predicted_data.query('Gridlock == 2').index.tolist()
# NDG = len(set(actual_gridlock_Index) & set(predicted_gridlock_Index))
# NTA = len(predicted_gridlock_Index)
# NFA = len(diff(predicted_gridlock_Index, actual_gridlock_Index))
#
# DR_G = 0
# FAR_G = 0
#
# if NTG != 0 and NTA != 0:
# DR_G = (NDG / NTG) * 100
# FAR_G = (NFA / NTA) * 100#
# ##################writing file#####################
# writer1.writerow([pcent,NDPG,NTPG,NPFA,NTPA,DR_PG,FAR_PG,NDG,NTG,NFA,NTA,DR_G,FAR_G])
#
# myfile1.close()
def detectionMetricForEachLabel(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
resolution = ['60'] # To evaluate gridlock detection, only the 60s time resolution is used, with various sample sizes
for label in range(5):
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/detectionMetricForLabel_'+str(label+1)+'.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
# print(len(percentage))
heading = ["Samples","ND_"+str(label+1),"NT_"+str(label+1),"NFA_"+str(label+1),"NTA_"+str(label+1),"DR_"+str(label+1),"FAR_"+str(label+1)]
writer1.writerow(heading)
############################################
# NT = Number of total respective level
# ND = Number of detected respective level
# NTA = Number of total respective alarm
# NFA = Number of respective false alarm
# DR = Detected rate of respective level
# FAR = False alarm rate of respective level
############################################
actual_data = pd.read_csv(dirpath + '/'+str(fileNo)+'/cluster/60_100%_AllCluster.csv')
NT_list = actual_data[actual_data['Gridlock']==(label+1)]
NT = len(NT_list)
actual_Index = actual_data[actual_data['Gridlock'] ==(label+1)].index.tolist()
for time_resolution in resolution:
for pcent in percentage:
predicted_data = pd.read_csv(dirpath + '/'+str(fileNo)+'/cluster/' + time_resolution + '_' + pcent + '_AllCluster.csv')
#################################################
predicted_Index = predicted_data[predicted_data['Gridlock'] ==(label+1)].index.tolist()
ND = len(set(actual_Index) & set(predicted_Index))
NTA = len(predicted_Index)
NFA = len(diff(predicted_Index,actual_Index))
DR = 0
FAR = 0
if NT!=0 and NTA !=0:
DR = (ND/NT) *100
FAR = (NFA/NTA) *100
##################writing file####################
writer1.writerow([pcent,ND,NT,NFA,NTA,DR,FAR])
myfile1.close()
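# Hedged worked example for the metrics above: with NT = 40 true windows for a label,
# ND = 30 of them matched, NTA = 50 predicted alarms and NFA = 20 false ones, then
# DR = 30 / 40 * 100 = 75% and FAR = 20 / 50 * 100 = 40%.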
def plotDetectionRate(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for label in range(5):
path = dirpath + '/'+str(fileNo)+'/statistics/detectionMetricForLabel_'+str(label+1)+'.csv'
if (os.path.exists(path)):
all_df = pd.read_csv(path)
#colors = ['#616A6B','#ABB2B9']
colors = ['#616A6B']
#names = ["DR_PG", "DR_G"]
names = ["DR_"+str(label+1)]
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = all_df.set_index("Samples")
ax = df[[*names]].plot(kind='bar',figsize=(15, 10), legend=True, color=colors, width =0.8)
ax.set_xlabel("Data Samples", fontsize=20, fontname ='Times New Roman')
ax.set_ylabel("Detection Rate (%)", fontsize=20, fontname ='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),prop={'family' :'Times New Roman','size':20})
################################################################################
#https://robertmitchellv.com/blog-bar-chart-annotations-pandas-mpl.html
# create a list to collect the plt.patches data
totals = []
# find the values and append to list
for i in ax.patches:
totals.append(i.get_height())
# set individual bar lables using above list
for i in ax.patches:
# get_x pulls left or right; get_height pushes up or down
ax.text(i.get_x() - .001, i.get_height() + .5, \
str(int(round((i.get_height())))) + '%', fontsize=10,
color='red')
################################################################################
#plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname = 'Times New Roman',rotation=0)
plt.yticks(fontsize=20,fontname = 'Times New Roman')
#plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/DetectionRate_'+str(label+1)+'_.png',
width=1800, height=500)
def plotFalseAlarmRate(fileNo):
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for label in range(5):
# print('False Alarm rate for class label :'+str(label+1))
path = dirpath + '/'+str(fileNo)+'/statistics/detectionMetricForLabel_'+str(label+1)+'.csv'
if (os.path.exists(path)):
all_df = pd.read_csv(path)
#colors = ['#009E73', '#E69F00']
colors = ['#009E73']
#names = ["FAR_PG","FAR_G"]
names = ["FAR_"+str(label+1)]
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = all_df.set_index("Samples")
ax = df[[*names]].plot(kind='bar',figsize=(15, 10), legend=True, color=colors, width =0.8)
ax.set_xlabel("Data Samples", fontsize=20, fontname ='Times New Roman')
ax.set_ylabel("False Alarm Rate (%)", fontsize=20, fontname ='Times New Roman')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),prop={'family' :'Times New Roman','size':20})
################################################################################
#https://robertmitchellv.com/blog-bar-chart-annotations-pandas-mpl.html
# create a list to collect the plt.patches data
totals = []
# find the values and append to list
for i in ax.patches:
totals.append(i.get_height())
# set individual bar lables using above list
for i in ax.patches:
# get_x pulls left or right; get_height pushes up or down
ax.text(i.get_x() - .001, i.get_height() + .5, \
str(int(round((i.get_height())))) + '%', fontsize=10,
color='red')
################################################################################
#plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname = 'Times New Roman',rotation=0)
plt.yticks(fontsize=20,fontname = 'Times New Roman')
#plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(
dirpath + '/'+str(fileNo)+'/statistics/FalseAlarmRate_'+str(label+1)+'_.png',
width=1800, height=500)
#========================================================================================
def StandardErrorofMeanforEachEdge_TimeResolutionFileIndex(): # In my dataset, I used edge and road interchangeably, but both are same.
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for time_resolution in resolution:
temp_df = pd.DataFrame()
temp_df['Road Name']= POIEdges.keys()
for pcent in percentage:
temp_1 = list()
for edge, value in POIEdges.items():
percent = parseFloat(pcent)
temp_2 = list()
for fileNo in range(1, 101):
path = dirpath + '/' + str(fileNo) + '/statistics/' + time_resolution + '_std.csv'
if (os.path.exists(path)):
df = pd.read_csv(path)
#print(df[df['Road Name'] == edge][pcent].values[0])
temp_2.append(df[df['Road Name'] == edge][pcent].values/math.sqrt(percent)) # standard error of mean of speed of vehicles on this edge
temp_1.append(np.mean(temp_2))
temp_df[pcent] = temp_1
temp_df.to_csv(dirpath + '/StandardErrorOfMeanForAllEdges/' + str(time_resolution) + '_std_ForAllEdges.csv',
index=False)
def plotStandardErrorOfMeanForEachLink_TimeResolutionFileIndex():
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID/'
print(dirpath)
percentage.pop(0)
for time_resoultion in resolution:
path = dirpath + '/StandardErrorOfMeanForAllEdges/StandardErrorOfMeanForEachLink_TimeResolutionFileIndex/'+str(time_resoultion)+'_std_ForAllEdges.csv'
if (os.path.exists(path)):
stderror_all_df = pd.read_csv(path)
colors = ['#CD5C5C', '#808080', '#808000', '#0000FF', '#000080', '#BDB76B', '#3CB371', '#D8BFD8']
names = percentage
#https://stackoverflow.com/questions/29498652/plot-bar-graph-from-pandas-dataframe
df = stderror_all_df.set_index("Road Name")
#print(df)
ax = df[[*names]].plot(kind='bar',figsize=(15, 10), legend=True, color=colors)
ax.set_xlabel("Link Name", fontsize=20, fontname ='Times New Roman')
ax.set_ylabel("Standard Error of Mean", fontsize=20, fontname ='Times New Roman')
ax.legend(title = "Samples", loc='center left', bbox_to_anchor=(1, 0.5),prop={'family' :'Times New Roman','size':20})
#plt.legend(prop={'family' :'Times New Roman'})
plt.xticks(fontsize=20, fontname = 'Times New Roman',rotation=90)
plt.yticks(fontsize=20,fontname = 'Times New Roman')
plt.title('Standard Error of Mean Speed of Vehicles on each Link with time resoultion '+time_resoultion +'s',fontsize=20, fontname='Times New Roman')
#plt.rcParams.update({'font.family':'Times New Roman'})
plt.tight_layout()
plt.savefig(dirpath + '/StandardErrorOfMeanForAllEdges/' + str(time_resoultion) + '_std_ForAllEdges.png',
width=1800, height=500)
def StandardErrorofMeanforEachEdge_SamplesFileIndex(): # In my dataset, I used edge and road interchangeably, but both are same.
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for pcent in percentage:
temp_df = pd.DataFrame()
temp_df['Road Name']= POIEdges.keys()
for time_resolution in resolution:
temp_1 = list()
for edge, value in POIEdges.items():
percent = parseFloat(pcent)
temp_2 = list()
for fileNo in range(1, 101):
path = dirpath + '/' + str(fileNo) + '/statistics/' + time_resolution + '_std.csv'
if (os.path.exists(path)):
df = pd.read_csv(path)
#print(df[df['Road Name'] == edge][pcent].values[0])
temp_2.append(df[df['Road Name'] == edge][pcent].values/math.sqrt(percent)) # standard error of mean of speed of vehicles on this edge
temp_1.append(np.mean(temp_2))
temp_df[time_resolution] = temp_1
temp_df.to_csv(dirpath + '/StandardErrorOfMeanForAllEdges/' + str(pcent) + '_std_ForAllEdges.csv',
index=False)
def plotStandardErrorOfMeanForEachLink_SamplesFileIndex():
import pandas as pd
import matplotlib.pyplot as plt
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID/'
percentage.insert(0,'100%')
for pcent in percentage:
path = dirpath + '/StandardErrorOfMeanForAllEdges/StandardErrorOfMeanForEachLink_SamplesFileIndex/'+pcent+'_std_ForAllEdges - Copy.csv'
if (os.path.exists(path)):
print(percentage)
stderror_all_df = pd.read_csv(path)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `hotelling` package."""
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
from hotelling.stats import hotelling_t2
def test_hotelling_test_array_two_sample():
x = np.asarray([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]])
y = np.asarray([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]])
res = hotelling_t2(x, y)
assert round(res[0], 4) == 11.1037 # T2
assert round(res[1], 4) == 2.7759 # F
assert round(res[2], 5) == 0.15004 # p value
def test_hotelling_test_df_two_sample():
x = pd.DataFrame([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]])
y = pd.DataFrame([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]])
res = hotelling_t2(x, y)
assert round(res[0], 4) == 11.1037 # T2
assert round(res[1], 4) == 2.7759 # F
assert round(res[2], 5) == 0.15004 # p value
def test_hotelling_test_df_two_sample_no_bessel():
x = pd.DataFrame([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]])
y = pd.DataFrame([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]])
res = hotelling_t2(x, y, bessel=False)
assert round(res[0], 4) == 11.1037 # T2
assert round(res[1], 4) == 2.2207 # F
assert round(res[2], 5) == 0.17337
def test_nutrients_data_integrity_means_procedure():
df = pd.read_csv('data/nutrient.txt', delimiter=' ', skipinitialspace=True, index_col=0)
res = df.describe().T
assert (res['count'] == [737, 737, 737, 737, 737]).all()
# mean
assert_series_equal(res['mean'],
pd.Series([624.0492537, 11.1298996, 65.8034410, 839.6353460, 78.9284464],
name='mean',
index=pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')),
check_less_precise=7)
# for the next one, SAS displays 1633.54 for 'a' - that is an error, inconsistent. everything else is 7 digits
# standard deviation
assert_series_equal(res['std'],
pd.Series([397.2775401, 5.9841905, 30.5757564, 1633.5398283, 73.5952721],
name='std',
index=pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')),
check_less_precise=7)
# min
assert_series_equal(res['min'],
pd.Series([7.4400000, 0, 0, 0, 0],
name='min',
index=pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')),
check_less_precise=7)
# max
assert_series_equal(res['max'],
pd.Series([2866.44, 58.6680000, 251.0120000, 34434.27, 433.3390000],
name='max',
index=pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')),
check_less_precise=7)
def test_nutrient_data_corr_procedure():
df = pd.read_csv('data/nutrient.txt', delimiter=' ', skipinitialspace=True, index_col=0)
# Covariance matrix
cov = df.cov()
assert_frame_equal(cov,
pd.DataFrame([[157829.444, 940.089, 6075.816, 102411.127, 6701.616],
[940.089, 35.811, 114.058, 2383.153, 137.672],
[6075.816, 114.058, 934.877, 7330.052, 477.200],
[102411.127, 2383.153, 7330.052, 2668452.371, 22063.249],
[6701.616, 137.672, 477.200, 22063.249, 5416.264]
],
index=pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import datetime
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_df = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
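# Hedged next step (a typical first pass over the loaded frame; no specific columns assumed):
#   print(data_df.shape)
#   print(data_df.isnull().sum().sort_values(ascending=False).head())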
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
class TestConcatSort:
def test_concat_sorts_columns(self, sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(self, sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]},
index=["a", "b", "c"],
columns=["a", "b"],
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(self, sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame(
{"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]
)
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort(self):
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat(
[df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True
)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise(self):
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
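# Standalone illustration of the behaviour pinned down by the tests above: with
# sort=False the non-concatenation axis keeps the order of the first frame,
# while sort=True sorts it (a sketch, independent of the test class).
left = DataFrame({"b": [1], "a": [2]}, columns=["b", "a"])
right = DataFrame({"a": [3], "c": [4]})
assert list(pd.concat([left, right], sort=False).columns) == ["b", "a", "c"]
assert list(pd.concat([left, right], sort=True).columns) == ["a", "b", "c"]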
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = | pd.date_range('2010-01-01', periods=2, freq='m') | pandas.date_range |
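# Standalone illustration of the list-like semantics exercised by test_insert
# above (a sketch, separate from the TestIndex fixtures):
lst = ['b', 'c', 'd']
lst.insert(-1, 'e')  # Python lists insert *before* the given slot
assert lst == ['b', 'c', 'e', 'd']
assert list(Index(['b', 'c', 'd']).insert(-1, 'e')) == ['b', 'c', 'e', 'd']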
# -*- coding: utf-8 -*-
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Module containing utility constants, functions and classes.
"""
# Standard packages
import math
import logging
import numpy as np
import pandas as pd
from copy import copy
from typing import Union
from scipy import sparse
from shap import Explainer
from functools import wraps
from collections import deque
from datetime import datetime
from inspect import signature
from collections.abc import MutableMapping
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
MaxAbsScaler,
RobustScaler,
)
from sklearn.ensemble import IsolationForest
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.cluster import DBSCAN, OPTICS
from sklearn.utils import _print_elapsed_time
# Encoders
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.basen import BaseNEncoder
from category_encoders.binary import BinaryEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.polynomial import PolynomialEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
# Balancers
from imblearn.under_sampling import (
CondensedNearestNeighbour,
EditedNearestNeighbours,
RepeatedEditedNearestNeighbours,
AllKNN,
InstanceHardnessThreshold,
NearMiss,
NeighbourhoodCleaningRule,
OneSidedSelection,
RandomUnderSampler,
TomekLinks,
)
from imblearn.over_sampling import (
ADASYN,
BorderlineSMOTE,
KMeansSMOTE,
RandomOverSampler,
SMOTE,
SMOTENC,
SMOTEN,
SVMSMOTE,
)
from imblearn.combine import SMOTEENN, SMOTETomek
# Sklearn
from sklearn.metrics import (
SCORERS,
make_scorer,
confusion_matrix,
matthews_corrcoef,
)
from sklearn.utils import _safe_indexing
from sklearn.inspection._partial_dependence import (
_grid_from_X,
_partial_dependence_brute,
)
# Plotting
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Global constants ================================================= >>
SEQUENCE = (list, tuple, np.ndarray, pd.Series)
# Variable types
SCALAR = Union[int, float]
SEQUENCE_TYPES = Union[SEQUENCE]
X_TYPES = Union[iter, dict, list, tuple, np.ndarray, sparse.spmatrix, pd.DataFrame]
Y_TYPES = Union[int, str, SEQUENCE_TYPES]
# Non-sklearn models
OPTIONAL_PACKAGES = dict(XGB="xgboost", LGB="lightgbm", CatB="catboost")
# Attributes shared between atom and a pd.DataFrame
DF_ATTRS = (
"size",
"head",
"tail",
"loc",
"iloc",
"describe",
"iterrows",
"dtypes",
"at",
"iat",
)
# List of available distributions
DISTRIBUTIONS = (
"beta",
"expon",
"gamma",
"invgauss",
"lognorm",
"norm",
"pearson3",
"triang",
"uniform",
"weibull_min",
"weibull_max",
)
# List of custom metrics for the evaluate method
CUSTOM_METRICS = (
"cm",
"tn",
"fp",
"fn",
"tp",
"lift",
"fpr",
"tpr",
"fnr",
"tnr",
"sup",
)
# Acronyms for some common scorers
SCORERS_ACRONYMS = dict(
ap="average_precision",
ba="balanced_accuracy",
auc="roc_auc",
logloss="neg_log_loss",
ev="explained_variance",
me="max_error",
mae="neg_mean_absolute_error",
mse="neg_mean_squared_error",
rmse="neg_root_mean_squared_error",
msle="neg_mean_squared_log_error",
mape="neg_mean_absolute_percentage_error",
medae="neg_median_absolute_error",
poisson="neg_mean_poisson_deviance",
gamma="neg_mean_gamma_deviance",
)
# All available scaling strategies
SCALING_STRATS = dict(
standard=StandardScaler,
minmax=MinMaxScaler,
maxabs=MaxAbsScaler,
robust=RobustScaler,
)
# All available encoding strategies
ENCODING_STRATS = dict(
backwarddifference=BackwardDifferenceEncoder,
basen=BaseNEncoder,
binary=BinaryEncoder,
catboost=CatBoostEncoder,
# hashing=HashingEncoder,
helmert=HelmertEncoder,
jamesstein=JamesSteinEncoder,
leaveoneout=LeaveOneOutEncoder,
mestimate=MEstimateEncoder,
# onehot=OneHotEncoder,
ordinal=OrdinalEncoder,
polynomial=PolynomialEncoder,
sum=SumEncoder,
target=TargetEncoder,
woe=WOEEncoder,
)
# All available pruning strategies
PRUNING_STRATS = dict(
iforest=IsolationForest,
ee=EllipticEnvelope,
lof=LocalOutlierFactor,
svm=OneClassSVM,
dbscan=DBSCAN,
optics=OPTICS,
)
# All available balancing strategies
BALANCING_STRATS = dict(
# clustercentroids=ClusterCentroids,
condensednearestneighbour=CondensedNearestNeighbour,
editednearestneighbours=EditedNearestNeighbours,
repeatededitednearestneighbours=RepeatedEditedNearestNeighbours,
allknn=AllKNN,
instancehardnessthreshold=InstanceHardnessThreshold,
nearmiss=NearMiss,
neighbourhoodcleaningrule=NeighbourhoodCleaningRule,
onesidedselection=OneSidedSelection,
randomundersampler=RandomUnderSampler,
tomeklinks=TomekLinks,
randomoversampler=RandomOverSampler,
smote=SMOTE,
smotenc=SMOTENC,
smoten=SMOTEN,
adasyn=ADASYN,
borderlinesmote=BorderlineSMOTE,
kmeanssmote=KMeansSMOTE,
svmsmote=SVMSMOTE,
smoteenn=SMOTEENN,
smotetomek=SMOTETomek,
)
# Functions ======================================================== >>
def flt(item):
"""Utility to reduce sequences with just one item."""
if isinstance(item, SEQUENCE) and len(item) == 1:
return item[0]
else:
return item
def lst(item):
"""Utility used to make sure an item is iterable."""
if isinstance(item, (dict, CustomDict, *SEQUENCE)):
return item
else:
return [item]
def it(item):
"""Utility to convert rounded floats to int."""
try:
is_equal = int(item) == float(item)
except ValueError: # Item may not be numerical
return item
return int(item) if is_equal else float(item)
def divide(a, b):
"""Divide two numbers and return 0 if division by zero."""
return np.divide(a, b) if b != 0 else 0
def merge(X, y):
"""Merge a pd.DataFrame and pd.Series into one dataframe."""
return X.merge(y.to_frame(), left_index=True, right_index=True)
def variable_return(X, y):
"""Return one or two arguments depending on which is None."""
if y is None:
return X
elif X is None:
return y
else:
return X, y
def is_multidim(df):
"""Check if the dataframe contains a multidimensional column."""
return df.columns[0] == "Multidimensional feature" and len(df.columns) <= 2
def check_dim(cls, method):
"""Raise an error if the dataset has more than two dimensions."""
if is_multidim(cls.X):
raise PermissionError(
f"The {method} method is not available for "
f"datasets with more than two dimensions!"
)
def check_goal(cls, method, goal):
"""Raise an error if the goal is invalid."""
if not goal.startswith(cls.goal):
raise PermissionError(
f"The {method} method is only available for {goal} tasks!"
)
def check_binary_task(cls, method):
"""Raise an error if the task is invalid."""
if not cls.task.startswith("bin"):
raise PermissionError(
f"The {method} method is only available for binary classification tasks!"
)
def check_predict_proba(models, method):
"""Raise an error if a model doesn't have a predict_proba method."""
for m in [m for m in models if m.name != "Vote"]:
if not hasattr(m.estimator, "predict_proba"):
raise AttributeError(
f"The {method} method is only available for "
f"models with a predict_proba method, got {m.name}."
)
def get_proba_attr(model):
"""Get the predict_proba, decision_function or predict method."""
for attr in ("predict_proba", "decision_function", "predict"):
if hasattr(model.estimator, attr):
return attr
def check_scaling(X):
"""Check if the data is scaled to mean=0 and std=1."""
mean = X.mean(numeric_only=True).mean()
std = X.std(numeric_only=True).mean()
return bool(abs(mean) < 0.05 and 0.93 < std < 1.07)
def get_corpus(X):
"""Get text column from dataframe."""
try:
return [col for col in X if col.lower() == "corpus"][0]
except IndexError:
raise ValueError("The provided dataset does not contain a text corpus!")
def get_pl_name(name, steps, counter=1):
"""Add a counter to a pipeline name if already in steps."""
og_name = name
while name.lower() in [elem[0] for elem in steps]:
counter += 1
name = og_name + str(counter)
return name.lower()
def get_best_score(item, metric=0):
"""Returns the bootstrap or test score of a model.
Parameters
----------
item: model or pd.Series
Model instance or row from the results dataframe.
metric: int, optional (default=0)
Index of the metric to use.
"""
if getattr(item, "mean_bootstrap", None):
return lst(item.mean_bootstrap)[metric]
else:
return lst(item.metric_test)[metric]
def time_to_str(t_init):
"""Convert time integer to string.
Convert a time duration to a string of format 00h:00m:00s
or 1.000s if under 1 min.
Parameters
----------
t_init: datetime
Time to convert (in seconds).
Returns
-------
time: str
Time representation.
"""
t = datetime.now() - t_init
h = t.seconds // 3600
m = t.seconds % 3600 // 60
s = t.seconds % 3600 % 60 + t.microseconds / 1e6
if not h and not m: # Only seconds
return f"{s:.3f}s"
elif not h: # Also minutes
return f"{m}m:{s:02.0f}s"
else: # Also hours
return f"{h}h:{m:02.0f}m:{s:02.0f}s"
def to_df(data, index=None, columns=None, dtypes=None):
"""Convert a dataset to pd.Dataframe.
Parameters
----------
data: list, tuple, dict, np.array, sps.matrix, pd.DataFrame or None
Dataset to convert to a dataframe. If already a dataframe
or None, return unchanged.
index: sequence or Index
Values for the dataframe's index.
columns: sequence or None, optional (default=None)
Name of the columns. Use None for automatic naming.
dtypes: str, dict, np.dtype or None, optional (default=None)
Data types for the output columns. If None, the types are
inferred from the data.
Returns
-------
df: pd.DataFrame or None
Transformed dataframe.
"""
if data is not None and not isinstance(data, pd.DataFrame):
if not isinstance(data, dict): # Dict already has column names
if sparse.issparse(data):
data = data.toarray()
if columns is None:
columns = [f"Feature {str(i)}" for i in range(1, len(data[0]) + 1)]
data = pd.DataFrame(data, index=index, columns=columns)
if dtypes is not None:
data = data.astype(dtypes)
return data
def to_series(data, index=None, name="target", dtype=None):
"""Convert a column to pd.Series.
Parameters
----------
data: sequence or None
Data to convert. If None, return unchanged.
index: sequence or Index, optional (default=None)
Values for the indices.
name: string, optional (default="target")
Name of the target column.
dtype: str, np.dtype or None, optional (default=None)
Data type for the output series. If None, the type is
inferred from the data.
Returns
-------
series: pd.Series or None
Transformed series.
"""
if data is not None and not isinstance(data, pd.Series):
data = | pd.Series(data, index=index, name=name, dtype=dtype) | pandas.Series |
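# Usage sketch for the converters above (illustrative only; assumes to_df,
# to_series and merge are imported from this utils module):
#   X = to_df(np.zeros((3, 2)))   # -> columns "Feature 1", "Feature 2"
#   y = to_series([0, 1, 0])      # -> pd.Series named "target"
#   data = merge(X, y)            # -> one dataframe with the target as last column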
import os
import warnings
from six import BytesIO
from six.moves import cPickle
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pandas as pd
import pandas.util.testing as tm
import pytest
from sm2 import datasets
from sm2.regression.linear_model import OLS
from sm2.tsa.arima_model import AR, ARMA, ARIMA
from sm2.tsa.arima_process import arma_generate_sample
from sm2.tools.sm_exceptions import MissingDataError
from .results import results_arma, results_arima
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
current_path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_path, 'results', 'y_arma_data.csv')
y_arma = pd.read_csv(path, float_precision='high').values
cpi_dates = pd.PeriodIndex(start='1959q1', end='2009q3', freq='Q')
sun_dates = pd.PeriodIndex(start='1700', end='2008', freq='A')
cpi_predict_dates = pd.PeriodIndex(start='2009q3', end='2015q4', freq='Q')
sun_predict_dates = pd.PeriodIndex(start='2008', end='2033', freq='A')
@pytest.mark.not_vetted
@pytest.mark.skip(reason="fa, Arma not ported from upstream")
def test_compare_arma():
# dummies to avoid flake8 warnings until porting
fa = None
Arma = None
# import statsmodels.sandbox.tsa.fftarma as fa
# from statsmodels.tsa.arma_mle import Arma
# this is a preliminary test to compare
# arma_kf, arma_cond_ls and arma_cond_mle
# the results returned by the fit methods are incomplete
# for now without random.seed
np.random.seed(9876565)
famod = fa.ArmaFft([1, -0.5], [1., 0.4], 40)
x = famod.generate_sample(nsample=200, burnin=1000)
modkf = ARMA(x, (1, 1))
reskf = modkf.fit(trend='nc', disp=-1)
dres = reskf
modc = Arma(x)
resls = modc.fit(order=(1, 1))
rescm = modc.fit_mle(order=(1, 1), start_params=[0.4, 0.4, 1.], disp=0)
# decimal 1 corresponds to threshold of 5% difference
# still different sign corrected
assert_almost_equal(resls[0] / dres.params,
np.ones(dres.params.shape),
decimal=1)
# TODO: Is the next comment still accurate? It is retained from upstream
# where there was a commented-out assertion after the comment
# rescm also contains variance estimate as last element of params
assert_almost_equal(rescm.params[:-1] / dres.params,
np.ones(dres.params.shape),
decimal=1)
@pytest.mark.not_vetted
class CheckArmaResultsMixin(object):
"""
res2 are the results from gretl. They are in results/results_arma.
res1 are from sm2
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params,
self.res2.params,
self.decimal_params)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic,
self.res2.aic,
self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic,
self.res2.bic,
self.decimal_bic)
decimal_arroots = DECIMAL_4
def test_arroots(self):
assert_almost_equal(self.res1.arroots,
self.res2.arroots,
self.decimal_arroots)
decimal_maroots = DECIMAL_4
def test_maroots(self):
assert_almost_equal(self.res1.maroots,
self.res2.maroots,
self.decimal_maroots)
decimal_bse = DECIMAL_2
def test_bse(self):
assert_almost_equal(self.res1.bse,
self.res2.bse,
self.decimal_bse)
decimal_cov_params = DECIMAL_4
def test_covparams(self):
assert_almost_equal(self.res1.cov_params(),
self.res2.cov_params,
self.decimal_cov_params)
decimal_hqic = DECIMAL_4
def test_hqic(self):
assert_almost_equal(self.res1.hqic,
self.res2.hqic,
self.decimal_hqic)
decimal_llf = DECIMAL_4
def test_llf(self):
assert_almost_equal(self.res1.llf,
self.res2.llf,
self.decimal_llf)
decimal_resid = DECIMAL_4
def test_resid(self):
assert_almost_equal(self.res1.resid,
self.res2.resid,
self.decimal_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues,
self.res2.fittedvalues,
self.decimal_fittedvalues)
decimal_pvalues = DECIMAL_2
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues,
self.res2.pvalues,
self.decimal_pvalues)
decimal_t = DECIMAL_2 # only 2 decimal places in gretl output
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues,
self.res2.tvalues,
self.decimal_t)
decimal_sigma2 = DECIMAL_4
def test_sigma2(self):
assert_almost_equal(self.res1.sigma2,
self.res2.sigma2,
self.decimal_sigma2)
@pytest.mark.smoke
def test_summary(self):
self.res1.summary()
@pytest.mark.not_vetted
class CheckForecastMixin(object):
decimal_forecast = DECIMAL_4
def test_forecast(self):
assert_almost_equal(self.res1.forecast_res,
self.res2.forecast,
self.decimal_forecast)
decimal_forecasterr = DECIMAL_4
def test_forecasterr(self):
assert_almost_equal(self.res1.forecast_err,
self.res2.forecasterr,
self.decimal_forecasterr)
@pytest.mark.not_vetted
class CheckDynamicForecastMixin(object):
decimal_forecast_dyn = 4
def test_dynamic_forecast(self):
assert_almost_equal(self.res1.forecast_res_dyn,
self.res2.forecast_dyn,
self.decimal_forecast_dyn)
#def test_forecasterr(self):
# assert_almost_equal(self.res1.forecast_err_dyn,
# self.res2.forecasterr_dyn,
# DECIMAL_4)
@pytest.mark.not_vetted
class CheckArimaResultsMixin(CheckArmaResultsMixin):
def test_order(self):
assert self.res1.k_diff == self.res2.k_diff
assert self.res1.k_ar == self.res2.k_ar
assert self.res1.k_ma == self.res2.k_ma
decimal_predict_levels = DECIMAL_4
def test_predict_levels(self):
assert_almost_equal(self.res1.predict(typ='levels'),
self.res2.linear,
self.decimal_predict_levels)
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 0]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend='nc', disp=-1)
fc_res, fc_err, ci = cls.res1.forecast(10)
cls.res1.forecast_res = fc_res
cls.res1.forecast_err = fc_err
cls.res2 = results_arma.Y_arma11()
# TODO: share with test_ar? other test classes?
def test_pickle(self):
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa:E721
# TODO: Test equality instead of just type equality?
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 1]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma14()
@pytest.mark.not_vetted
@pytest.mark.slow
class Test_Y_ARMA41_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
decimal_maroots = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 2]
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma41()
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 3]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma22()
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 4]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50()
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 5]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma02()
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 7]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma14c()
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const(CheckArmaResultsMixin, CheckForecastMixin):
decimal_cov_params = DECIMAL_3
decimal_fittedvalues = DECIMAL_3
decimal_resid = DECIMAL_3
decimal_params = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 8]
cls.res2 = results_arma.Y_arma41c()
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c", disp=-1,
start_params=cls.res2.params)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 9]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma22c()
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 10]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50c()
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 11]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma02c()
# cov_params and tvalues are off still but not as much vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA11_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 0]
cls.res1 = ARMA(endog, order=(1, 1)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma11("css")
# better vs. R
@pytest.mark.not_vetted
class Test_Y_ARMA14_NoConst_CSS(CheckArmaResultsMixin):
decimal_fittedvalues = DECIMAL_3
decimal_resid = DECIMAL_3
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 1]
cls.res1 = ARMA(endog, order=(1, 4)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma14("css")
# bse, etc. better vs. R
# maroot is off because maparams is off a bit (adjust tolerance?)
@pytest.mark.not_vetted
class Test_Y_ARMA41_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_pvalues = 0
decimal_cov_params = DECIMAL_3
decimal_maroots = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 2]
cls.res1 = ARMA(endog, order=(4, 1)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma41("css")
# same notes as above
@pytest.mark.not_vetted
class Test_Y_ARMA22_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_resid = DECIMAL_3
decimal_pvalues = DECIMAL_1
decimal_fittedvalues = DECIMAL_3
@classmethod
def setup_class(cls):
endog = y_arma[:, 3]
cls.res1 = ARMA(endog, order=(2, 2)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma22("css")
# NOTE: gretl just uses least squares for AR CSS
# so BIC, etc. is
# -2*res1.llf + np.log(nobs)*(res1.q+res1.p+res1.k)
# with no adjustment for p and no extra sigma estimate
# NOTE: so our tests use x-12 arima results which agree with us and are
# consistent with the rest of the models
@pytest.mark.not_vetted
class Test_Y_ARMA50_NoConst_CSS(CheckArmaResultsMixin):
decimal_t = 0
decimal_llf = DECIMAL_1 # looks like rounding error?
@classmethod
def setup_class(cls):
endog = y_arma[:, 4]
cls.res1 = ARMA(endog, order=(5, 0)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma50("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 5]
cls.res1 = ARMA(endog, order=(0, 2)).fit(method="css",
trend="nc", disp=-1)
cls.res2 = results_arma.Y_arma02("css")
# NOTE: our results are close to --x-12-arima option and R
@pytest.mark.not_vetted
class Test_Y_ARMA11_Const_CSS(CheckArmaResultsMixin):
decimal_params = DECIMAL_3
decimal_cov_params = DECIMAL_3
decimal_t = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARMA(endog, order=(1, 1)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma11c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA14_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_pvalues = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 7]
cls.res1 = ARMA(endog, order=(1, 4)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma14c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA41_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_cov_params = DECIMAL_1
decimal_maroots = DECIMAL_3
decimal_bse = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 8]
cls.res1 = ARMA(endog, order=(4, 1)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma41c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA22_Const_CSS(CheckArmaResultsMixin):
decimal_t = 0
decimal_pvalues = DECIMAL_1
@classmethod
def setup_class(cls):
endog = y_arma[:, 9]
cls.res1 = ARMA(endog, order=(2, 2)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma22c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA50_Const_CSS(CheckArmaResultsMixin):
decimal_t = DECIMAL_1
decimal_params = DECIMAL_3
decimal_cov_params = DECIMAL_2
@classmethod
def setup_class(cls):
endog = y_arma[:, 10]
cls.res1 = ARMA(endog, order=(5, 0)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma50c("css")
@pytest.mark.not_vetted
class Test_Y_ARMA02_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setup_class(cls):
endog = y_arma[:, 11]
cls.res1 = ARMA(endog, order=(0, 2)).fit(trend="c",
method="css", disp=-1)
cls.res2 = results_arma.Y_arma02c("css")
@pytest.mark.not_vetted
class Test_ARIMA101(CheckArmaResultsMixin):
# just make sure this works
@classmethod
def setup_class(cls):
endog = y_arma[:, 6]
cls.res1 = ARIMA(endog, (1, 0, 1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
cls.res2.k_diff = 0
cls.res2.k_ar = 1
cls.res2.k_ma = 1
@pytest.mark.not_vetted
class Test_ARIMA111(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
decimal_llf = 3
decimal_aic = 3
decimal_bic = 3
decimal_cov_params = 2 # this used to be better?
decimal_t = 0
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1)
cls.res2 = results_arima.ARIMA111()
# make sure endog names changes to D.cpi
(cls.res1.forecast_res,
cls.res1.forecast_err,
conf_int) = cls.res1.forecast(25)
# TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
typ='levels',
dynamic=True)
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.0000], 4)
assert_almost_equal(self.res1.mafreq, [0.0000], 4)
@pytest.mark.not_vetted
class Test_ARIMA111CSS(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
decimal_forecast = 2
decimal_forecast_dyn = 2
decimal_forecasterr = 3
decimal_arroots = 3
decimal_cov_params = 3
decimal_hqic = 3
decimal_maroots = 3
decimal_t = 1
decimal_fittedvalues = 2 # because of rounding when copying
decimal_resid = 2
decimal_predict_levels = DECIMAL_2
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 1)).fit(disp=-1, method='css')
cls.res2 = results_arima.ARIMA111(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
(fc_res, fc_err, conf_int) = cls.res1.forecast(25)
cls.res1.forecast_res = fc_res
cls.res1.forecast_err = fc_err
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164 + 63,
typ='levels',
dynamic=True)
@pytest.mark.not_vetted
class Test_ARIMA112CSS(CheckArimaResultsMixin):
decimal_llf = 3
decimal_aic = 3
decimal_bic = 3
decimal_arroots = 3
decimal_maroots = 2
decimal_t = 1
decimal_resid = 2
decimal_fittedvalues = 3
decimal_predict_levels = DECIMAL_3
@classmethod
def setup_class(cls):
cpi = datasets.macrodata.load_pandas().data['cpi'].values
cls.res1 = ARIMA(cpi, (1, 1, 2)).fit(disp=-1, method='css',
start_params=[.905322, -.692425,
1.07366, 0.172024])
cls.res2 = results_arima.ARIMA112(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
#(cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
#cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226,
# typ='levels',
# dynamic=True)
# TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
#cls.res1.forecast_res_dyn = self.predict(start=164, end=164+63,
# typ='levels', dynamic=True)
# since we got from gretl don't have linear prediction in differences
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.5000], 4)
assert_almost_equal(self.res1.mafreq, [0.5000, 0.5000], 4)
#class Test_ARIMADates(CheckArmaResults, CheckForecast, CheckDynamicForecast):
# @classmethod
# def setup_class(cls):
# cpi = datasets.macrodata.load_pandas().data['cpi'].values
# dates = pd.date_range('1959', periods=203, freq='Q')
# cls.res1 = ARIMA(cpi, dates=dates, freq='Q').fit(order=(1, 1, 1),
# disp=-1)
# cls.res2 = results_arima.ARIMA111()
# # make sure endog names changes to D.cpi
# cls.decimal_llf = 3
# cls.decimal_aic = 3
# cls.decimal_bic = 3
# (cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
@pytest.mark.not_vetted
@pytest.mark.slow
def test_start_params_bug():
data = np.array([
1368., 1187, 1090, 1439, 2362, 2783, 2869, 2512, 1804,
1544, 1028, 869, 1737, 2055, 1947, 1618, 1196, 867, 997, 1862, 2525,
3250, 4023, 4018, 3585, 3004, 2500, 2441, 2749, 2466, 2157, 1847,
1463, 1146, 851, 993, 1448, 1719, 1709, 1455, 1950, 1763, 2075, 2343,
3570, 4690, 3700, 2339, 1679, 1466, 998, 853, 835, 922, 851, 1125,
1299, 1105, 860, 701, 689, 774, 582, 419, 846, 1132, 902, 1058, 1341,
1551, 1167, 975, 786, 759, 751, 649, 876, 720, 498, 553, 459, 543,
447, 415, 377, 373, 324, 320, 306, 259, 220, 342, 558, 825, 994,
1267, 1473, 1601, 1896, 1890, 2012, 2198, 2393, 2825, 3411, 3406,
2464, 2891, 3685, 3638, 3746, 3373, 3190, 2681, 2846, 4129, 5054,
5002, 4801, 4934, 4903, 4713, 4745, 4736, 4622, 4642, 4478, 4510,
4758, 4457, 4356, 4170, 4658, 4546, 4402, 4183, 3574, 2586, 3326,
3948, 3983, 3997, 4422, 4496, 4276, 3467, 2753, 2582, 2921, 2768,
2789, 2824, 2482, 2773, 3005, 3641, 3699, 3774, 3698, 3628, 3180,
3306, 2841, 2014, 1910, 2560, 2980, 3012, 3210, 3457, 3158, 3344,
3609, 3327, 2913, 2264, 2326, 2596, 2225, 1767, 1190, 792, 669,
589, 496, 354, 246, 250, 323, 495, 924, 1536, 2081, 2660, 2814, 2992,
3115, 2962, 2272, 2151, 1889, 1481, 955, 631, 288, 103, 60, 82, 107,
185, 618, 1526, 2046, 2348, 2584, 2600, 2515, 2345, 2351, 2355,
2409, 2449, 2645, 2918, 3187, 2888, 2610, 2740, 2526, 2383, 2936,
2968, 2635, 2617, 2790, 3906, 4018, 4797, 4919, 4942, 4656, 4444,
3898, 3908, 3678, 3605, 3186, 2139, 2002, 1559, 1235, 1183, 1096,
673, 389, 223, 352, 308, 365, 525, 779, 894, 901, 1025, 1047, 981,
902, 759, 569, 519, 408, 263, 156, 72, 49, 31, 41, 192, 423, 492,
552, 564, 723, 921, 1525, 2768, 3531, 3824, 3835, 4294, 4533, 4173,
4221, 4064, 4641, 4685, 4026, 4323, 4585, 4836, 4822, 4631, 4614,
4326, 4790, 4736, 4104, 5099, 5154, 5121, 5384, 5274, 5225, 4899,
5382, 5295, 5349, 4977, 4597, 4069, 3733, 3439, 3052, 2626, 1939,
1064, 713, 916, 832, 658, 817, 921, 772, 764, 824, 967, 1127, 1153,
824, 912, 957, 990, 1218, 1684, 2030, 2119, 2233, 2657, 2652, 2682,
2498, 2429, 2346, 2298, 2129, 1829, 1816, 1225, 1010, 748, 627, 469,
576, 532, 475, 582, 641, 605, 699, 680, 714, 670, 666, 636, 672,
679, 446, 248, 134, 160, 178, 286, 413, 676, 1025, 1159, 952, 1398,
1833, 2045, 2072, 1798, 1799, 1358, 727, 353, 347, 844, 1377, 1829,
2118, 2272, 2745, 4263, 4314, 4530, 4354, 4645, 4547, 5391, 4855,
4739, 4520, 4573, 4305, 4196, 3773, 3368, 2596, 2596, 2305, 2756,
3747, 4078, 3415, 2369, 2210, 2316, 2263, 2672, 3571, 4131, 4167,
4077, 3924, 3738, 3712, 3510, 3182, 3179, 2951, 2453, 2078, 1999,
2486, 2581, 1891, 1997, 1366, 1294, 1536, 2794, 3211, 3242, 3406,
3121, 2425, 2016, 1787, 1508, 1304, 1060, 1342, 1589, 2361, 3452,
2659, 2857, 3255, 3322, 2852, 2964, 3132, 3033, 2931, 2636, 2818, 3310,
3396, 3179, 3232, 3543, 3759, 3503, 3758, 3658, 3425, 3053, 2620, 1837,
923, 712, 1054, 1376, 1556, 1498, 1523, 1088, 728, 890, 1413, 2524,
3295, 4097, 3993, 4116, 3874, 4074, 4142, 3975, 3908, 3907, 3918, 3755,
3648, 3778, 4293, 4385, 4360, 4352, 4528, 4365, 3846, 4098, 3860, 3230,
2820, 2916, 3201, 3721, 3397, 3055, 2141, 1623, 1825, 1716, 2232, 2939,
3735, 4838, 4560, 4307, 4975, 5173, 4859, 5268, 4992, 5100, 5070, 5270,
4760, 5135, 5059, 4682, 4492, 4933, 4737, 4611, 4634, 4789, 4811, 4379,
4689, 4284, 4191, 3313, 2770, 2543, 3105, 2967, 2420, 1996, 2247, 2564,
2726, 3021, 3427, 3509, 3759, 3324, 2988, 2849, 2340, 2443, 2364, 1252,
623, 742, 867, 684, 488, 348, 241, 187, 279, 355, 423, 678, 1375, 1497,
1434, 2116, 2411, 1929, 1628, 1635, 1609, 1757, 2090, 2085, 1790, 1846,
2038, 2360, 2342, 2401, 2920, 3030, 3132, 4385, 5483, 5865, 5595, 5485,
5727, 5553, 5560, 5233, 5478, 5159, 5155, 5312, 5079, 4510, 4628, 4535,
3656, 3698, 3443, 3146, 2562, 2304, 2181, 2293, 1950, 1930, 2197, 2796,
3441, 3649, 3815, 2850, 4005, 5305, 5550, 5641, 4717, 5131, 2831, 3518,
3354, 3115, 3515, 3552, 3244, 3658, 4407, 4935, 4299, 3166, 3335, 2728,
2488, 2573, 2002, 1717, 1645, 1977, 2049, 2125, 2376, 2551, 2578, 2629,
2750, 3150, 3699, 4062, 3959, 3264, 2671, 2205, 2128, 2133, 2095, 1964,
2006, 2074, 2201, 2506, 2449, 2465, 2064, 1446, 1382, 983, 898, 489,
319, 383, 332, 276, 224, 144, 101, 232, 429, 597, 750, 908, 960, 1076,
951, 1062, 1183, 1404, 1391, 1419, 1497, 1267, 963, 682, 777, 906,
1149, 1439, 1600, 1876, 1885, 1962, 2280, 2711, 2591, 2411])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ARMA(data, order=(4, 1)).fit(start_ar_lags=5, disp=-1)
@pytest.mark.not_vetted
def test_arima_predict_mle_dates():
cpi = datasets.macrodata.load_pandas().data['cpi'].values
res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1)
path = os.path.join(current_path, 'results',
'results_arima_forecasts_all_mle.csv')
arima_forecasts = pd.read_csv(path).values
fc = arima_forecasts[:, 0]
fcdyn = arima_forecasts[:, 1]
fcdyn2 = arima_forecasts[:, 2]
start, end = 2, 51
fv = res1.predict('1959Q3', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates,
cpi_dates[start:end + 1])
start, end = 202, 227
fv = res1.predict('2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates,
cpi_predict_dates)
# make sure dynamic works
start, end = '1960q2', '1971q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[5:51 + 1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_mle_dates():
sunspots = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
mod = ARMA(sunspots, (9, 0), dates=sun_dates, freq='A')
mod.method = 'mle'
with pytest.raises(ValueError):
mod._get_prediction_index('1701', '1751', True)
start, end = 2, 51
mod._get_prediction_index('1702', '1751', False)
tm.assert_index_equal(mod.data.predict_dates, sun_dates[start:end + 1])
start, end = 308, 333
mod._get_prediction_index('2008', '2033', False)
tm.assert_index_equal(mod.data.predict_dates, sun_predict_dates)
@pytest.mark.not_vetted
def test_arima_predict_css_dates():
cpi = datasets.macrodata.load_pandas().data['cpi'].values
res1 = ARIMA(cpi, (4, 1, 1), dates=cpi_dates, freq='Q').fit(disp=-1,
method='css',
trend='nc')
params = np.array([1.231272508473910,
-0.282516097759915,
0.170052755782440,
-0.118203728504945,
-0.938783134717947])
path = os.path.join(current_path, 'results',
'results_arima_forecasts_all_css.csv')
arima_forecasts = pd.read_csv(path).values
fc = arima_forecasts[:, 0]
fcdyn = arima_forecasts[:, 1]
fcdyn2 = arima_forecasts[:, 2]
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates, cpi_dates[start:end + 1])
start, end = 202, 227
fv = res1.model.predict(params, '2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end + 1], DECIMAL_4)
tm.assert_index_equal(res1.data.predict_dates, cpi_predict_dates)
# make sure dynamic works
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels',
dynamic=True)
assert_almost_equal(fv, fcdyn[start:end + 1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227 + 1], DECIMAL_4)
@pytest.mark.not_vetted
def test_arma_predict_css_dates():
# TODO: GH reference?
sunspots = datasets.sunspots.load_pandas().data['SUNACTIVITY'].values
mod = ARMA(sunspots, (9, 0), dates=sun_dates, freq='A')
mod.method = 'css'
with pytest.raises(ValueError):
mod._get_prediction_index('1701', '1751', False)
def test_arima_wrapper():
# test that names get attached to res.params correctly
# TODO: GH reference?
cpi = datasets.macrodata.load_pandas().data['cpi']
cpi.index = pd.Index(cpi_dates)
res = ARIMA(cpi, (4, 1, 1), freq='Q').fit(disp=-1)
expected_index = pd.Index(['const', 'ar.L1.D.cpi', 'ar.L2.D.cpi',
'ar.L3.D.cpi', 'ar.L4.D.cpi',
'ma.L1.D.cpi'])
assert expected_index.equals(res.params.index)
| tm.assert_index_equal(res.params.index, expected_index) | pandas.util.testing.assert_index_equal |
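# Note on the expected names above (descriptive only): statsmodels labels ARIMA
# coefficients as "<term>.L<lag>.<endog>"; the endog part shows up as "D.cpi"
# because the (4, 1, 1) order differences the series once before fitting.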
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.types.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning
class TestIX(tm.TestCase):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = | DataFrame({'a': [0, 1, 2]}) | pandas.DataFrame |
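# Standalone sketch of the migration the deprecation warning above points to:
# label-based .loc (or positional .iloc) replaces .ix; only the public API is
# used here, not the test fixtures.
df = DataFrame({'A': [1, 2, 3]})
assert df.loc[1, 'A'] == 2    # the .loc equivalent of the deprecated df.ix[1, 'A']
assert df.iloc[1]['A'] == 2   # purely positional alternative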
# -*- coding: utf-8 -*-
"""Untitled0.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1uPsIhY5eetnUG-xeLtHmKvq5K0mIr6wW
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
dataset = | pd.read_csv('Churn_Modelling.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Dynamic Allocation
#
# We really don't save money this way (fixed amounts every month). In reality we would be better off setting up a budget and allocating to savings whatever is left over above and beyond our spending habits.
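# %%
# A rough sketch of the idea above (the names here are illustrative and not part
# of the app): whatever is left after spending and rent gets split across the
# accounts in proportion to their contribution weights.
def allocate_leftover(income, spend, rent, weights):
    leftover = max(income - spend - rent, 0)
    total = sum(weights.values()) or 1
    return {name: leftover * w / total for name, w in weights.items()}
# e.g. allocate_leftover(3000, 1200, 900, {"tfsa": 2, "rrsp": 1})
# -> {'tfsa': 600.0, 'rrsp': 300.0}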
# %%
import numpy as np
import pandas as pd
import plotly.express as px
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
]
)
app.title = "Savings"
# %% {"code_folding": [0]}
acct_info = html.Div(
[
html.Div([
html.H4(
"Savings"
),
html.P(
"Contribution Weight",
className="control_label"
),
dcc.Input(
id='hi-contrib',
type="number",
value=1,
placeholder="contribution weight",
className="dcc_control"
),
html.P(
"Expected Return",
className="control_label"
),
dcc.Input(
id="hi-interest",
type="number",
value=2,
placeholder="interest (%)",
className="dcc_control"
),
html.P(
"Starting Value",
className="control_label"
),
dcc.Input(
id='hi-starting',
type="number",
placeholder="current balance",
className="dcc_control"
),
html.P(
"End Value",
className="control_label"
),
dcc.Input(
id='hi-limit',
type="number",
placeholder="hold the value here",
className="dcc_control"
),
html.H4(
"Unregistered",
style={"padding-top":"15px"}
),
html.P(
"Expected Return"
),
dcc.Input(
id="unreg-interest",
type="number",
value=3,
placeholder="interest (%)",
className="dcc_control"
),
], className="one-third column"),
html.Div([
html.H4(
"TFSA"
),
html.P(
"Contribution Weight",
className="control_label"
),
dcc.Input(
id='tfsa-contrib-weight',
type="number",
placeholder="contribution weight",
className="dcc_control"
),
html.P(
"Expected Return",
className="control_label"
),
dcc.Input(
id="tfsa-interest",
type="number",
placeholder="interest (%)",
className="dcc_control"
),
html.P(
"Starting Value",
className="control_label"
),
dcc.Input(
id='tfsa-starting',
type="number",
placeholder="current balance",
className="dcc_control"
),
html.P(
"End Value",
className="control_label"
),
dcc.Input(
id="tfsa-limit",
type="number",
placeholder="hold the value here",
className="dcc_control"
),
html.P(
"Contribution Room",
className="control_label"
),
dcc.Input(
id="tfsa-contrib-room",
type="number",
placeholder="current room",
className="dcc_control"
),
html.P(
"Contribution Reset",
className="control_label"
),
dcc.Input(
id="tfsa-contrib-reset",
type="number",
value=5000,
placeholder="added room each year",
className="dcc_control"
),
], className="one-third column"),
html.Div(children=[
html.H4(
"RRSP"
),
html.P(
"Contribution Weight",
className="control_label"
),
dcc.Input(
id='rrsp-contrib',
type="number",
placeholder="contribution weight",
className="dcc_control"
),
html.P(
"Expected Return",
className="control_label"
),
dcc.Input(
id="rrsp-interest",
type="number",
placeholder="interest (%)",
className="dcc_control"
),
html.P(
"Starting Value",
className="control_label"
),
dcc.Input(
id='rrsp-starting',
type="number",
placeholder="current balance",
className="dcc_control"
),
html.P(
"End Value",
className="control_label"
),
dcc.Input(
id="rrsp-limit",
type="number",
value=35000,
placeholder="hold the value here",
className="dcc_control"
),
html.P(
"Contribution Room",
className="control_label"
),
dcc.Input(
id="rrsp-contrib-room",
type="number",
value=26500,
placeholder="current room",
className="dcc_control"
),
html.P(
"Contribution Reset",
className="control_label"
),
dcc.Input(
id="rrsp-contrib-reset",
type="number",
value=26500,
placeholder="added room each year",
className="dcc_control"
),
], className="one-third column"),
],
style={"padding-top":"20px"}
)
# %% {"code_folding": [0]}
main_view = dcc.Tabs(
[
dcc.Tab(label="Accounts", children=[
acct_info
]),
dcc.Tab(label="Savings", value="savings", children=[
html.Div(
[
dcc.Graph(
id='savings_graph',
figure=px.area(pd.DataFrame(), width=600)
)
],
id="countGraphContainer",
style={"minHeight":"600px"}
)
])
],
id="input-tabs",
)
# %% {"code_folding": [0]}
summary_stats = [
html.Div(
[
html.P("Total Saved"),
html.H6(
id="total-saved",
className="info_text"
)
],
className="mini_container",
style={"flex":"4"}
),
html.Div(
[
html.P("Avg Saved"),
html.H6(
id="avg-saved",
className="info_text"
)
],
className="mini_container",
style={"flex":"4"}
),
html.Div(
[
html.P("Capital Gains Tax"),
html.H6(
id="total-tax",
className="info_text"
)
],
className="mini_container",
style={"flex":"4"}
),
html.Div(
[
html.P("Rent"),
html.H6(
id="total-rent",
className="info_text"
)
],
className="mini_container",
style={"flex":"4"}
),
]
# %% {"code_folding": [24, 98, 103]}
# Create app layout
app.layout = html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Img(
src="assets/stonks.png",
height=75,
className="flex-display",
style={"marginRight":"5px"}
),
html.H2(
'When can I buy that house?',
className="flex-display",
),
],className="flex-display"),
html.H4(
'Investment Calculator',
)
],
className='eight columns'
),
],
id="header",
className='row',
),
html.Div(
[
html.Div(
[
html.H3("A little about you:"),
html.P(
"How much is a paycheck?",
className="control_label"
),
dcc.Input(
id='biweekly_income',
type="number",
placeholder="biweekly income",
className="dcc_control"
),
html.P(
'How much will that grow each year(%)?',
className="control_label"
),
dcc.Input(
id='expected_raise_pct',
type="number",
placeholder="percentage growth (%)",
className="dcc_control"
),
html.P(
'Is there a max(%)?',
className="control_label"
),
dcc.Input(
id='salary_cap_pct',
type="number",
placeholder="salary cap (%)",
className="dcc_control"
),
html.P(
'How much do you spend per paycheck?',
className="control_label"
),
dcc.Input(
id='biweekly_spend',
type="number",
placeholder="biweekly spend ($)",
className="dcc_control"
),
html.P(
"What's rent every month?",
className="control_label"
),
dcc.Input(
id='monthly_rent',
type="number",
placeholder="monthly rent ($)",
className="dcc_control"
),
html.P(
"And how many years should be projected?",
className="control_label"
),
dcc.Input(
id='projected_years',
type="number",
placeholder="years",
className="dcc_control"
),
html.H6(
"We are going to invest all of your unspent money each month. Where it ends up depends on some limits and weights you define under 'Accounts'."
),
],
className="pretty_container four columns"
),
html.Div(
[
html.Div(
summary_stats,
id="info-container",
className="row container-display"
),
html.Div(className="pretty_container", children=
[
main_view
]
)
],
id="right-column",
className="eight columns"
)
],
className="row flex-display",
)],
id="mainContainer"
)
# %% [markdown]
# ## Callbacks
#
# We made the wireframe, but now here's the hard part.
#
# We want to update the graph whenever:
#
# - Value on the left is changed (personal info)
# - Clicked onto the "savings" tab
#
# We also want to make it easier (and more obvious) to switch to the "Savings" tab to view the results. Maybe that tab should simply be called "Results"?
# %%
from dash.dependencies import Output, Input, State
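# %%
# Hedged aside (not part of the original app): if the callback should later skip
# recomputing unless the "Savings" tab triggered it, dash.callback_context reports which
# input fired. The helper below is an illustrative sketch and only works when called
# from inside a running callback.
import dash

def which_input_triggered():
    ctx = dash.callback_context
    triggered = ctx.triggered[0]["prop_id"] if ctx.triggered else ""
    # prop_id looks like "input-tabs.value"; keep just the component id part
    return triggered.split(".")[0] or None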
# %%
personal_info_inputs = [
Input("biweekly_income","value"),
Input("expected_raise_pct","value"),
Input("salary_cap_pct","value"),
Input("biweekly_spend","value"),
Input("monthly_rent","value"),
Input("projected_years","value")
]
# %%
acct_info_ids = [child.id for column in acct_info.children for child in column.children if isinstance(child, dcc.Input)]
account_info_inputs = [ State(component_id,"value") for component_id in acct_info_ids]
# %% {"code_folding": [0, 17, 26, 29, 37, 67]}
@app.callback(
[
Output(component_id="savings_graph", component_property="figure"),
Output(component_id="total-saved", component_property="children"),
Output(component_id="avg-saved", component_property="children"),
Output(component_id="total-tax", component_property="children"),
Output(component_id="total-rent", component_property="children"),
],
[
*personal_info_inputs,
Input(component_id="input-tabs", component_property="value")
],
[
*account_info_inputs,
State(component_id="savings_graph", component_property="figure")
]
)
def calculate_cashflows(
biweekly_income,expected_raise_pct,salary_cap_pct,biweekly_spend,monthly_rent,projected_years,
tab,
hi_contrib,hi_interest,hi_starting,hi_limit, unreg_interest,
tfsa_contrib,tfsa_interest,tfsa_starting,tfsa_limit,tfsa_contrib_room,tfsa_contrib_reset,
rrsp_contrib,rrsp_interest,rrsp_starting,rrsp_limit,rrsp_contrib_room,rrsp_contrib_reset,
figure
):
#if tab != "savings":
# return [figure, "$—", "$—", "$—", "$—"]
me = {
'biweekly_income': (biweekly_income or 0),
'expected_raise_pct': (expected_raise_pct or 0) * 0.01,
'salary_cap_pct': (salary_cap_pct or 0) * 0.01,
'biweekly_spend': (biweekly_spend or 0),
'monthly_rent': (monthly_rent or 0)
}
accounts_definition = [
{
'name':'tfsa',
'registered':True,
'rate': (tfsa_interest or 0)*0.01,
'starting_balance': (tfsa_starting or 0),
'biweekly_contribution': (tfsa_contrib or 0),
'contribution_room': (tfsa_contrib_room or 0),
'max_value': tfsa_limit or pd.np.inf,
'yearly_contrib': tfsa_contrib_reset or 0 # contrib room added each year
},
{
'name':'rrsp',
'registered':True,
'rate': (rrsp_interest or 0)*0.01,
'starting_balance': rrsp_starting or 0,
'biweekly_contribution': rrsp_contrib or 0,
'max_value': rrsp_limit or pd.np.inf, #for first-time home buy
'contribution_room': rrsp_contrib_room or 0,
'yearly_contrib':rrsp_contrib_reset or 0
},
{
'name':'high_interest_savings',
'rate': (hi_interest or 0)*0.01,
'starting_balance': hi_starting or 0,
'max_value': hi_limit or pd.np.inf, #hold at this value
'biweekly_contribution':hi_contrib or 0
}
]
unregistered = {
'name':'unregistered',
'rate': (unreg_interest or 0)*0.01,
'starting_balance': 0,
}
    BW = projected_years * 26 if projected_years else 5 * 26  # number of biweekly periods in the projection (26 per year)
## Income
#Pure salary income, with option for it to grow annualy
income = pd.np.ones(BW)*me.get("biweekly_income", 0)
for year in range(BW//26):
if me.get("salary_cap_pct") and (1 + me.get("salary_cap_pct", 0)) * me.get("biweekly_income", 0) < income[year*26]:
break
else:
income[year*26:]*= 1 + me.get("expected_raise_pct",0)
unregistered_balance = pd.np.ones(BW)*unregistered.get('starting_balance',0)
spending = pd.np.ones(BW) * me.get("biweekly_spend",0)
    rent = pd.np.ones(BW) * me.get("monthly_rent",0) * 12/26  # convert monthly rent to a biweekly amount (12 months / 26 pay periods)
income -= spending + rent # this is our takehome
invest = sum([acct.get("biweekly_contribution") for acct in accounts_definition])# amount invested biweekly
for account in accounts_definition:
amount = account.get("biweekly_contribution",0)/invest if invest else 0
account['ratio'] = amount
### Rules for distribution
accts = []
taxes = 0
for account in accounts_definition:
balance = pd.np.ones(BW)*account.get('starting_balance',0)
contribution = pd.np.zeros(BW)
interest = | pd.np.zeros(BW) | pandas.np.zeros |
import os
import pandas as pd
FOLDER = 'data'
FILENAME = 'gl.csv'
COLUMNS = ['GL_Account', 'GL_Description', 'Amount']
class GeneralLedger():
def __init__(self, folder=FOLDER, filename=FILENAME, columns=COLUMNS):
base_folder = os.path.abspath(os.path.dirname(__file__))
self.columns = columns
self.filename = filename
self.full_path = os.path.join(base_folder, folder)
self.file_path = os.path.join(self.full_path, self.filename)
if not os.path.exists(self.full_path) or not os.path.isdir(self.full_path):
os.mkdir(self.full_path)
if os.path.isfile(self.file_path):
try:
self.df = pd.read_csv(os.path.join(self.full_path, self.filename), index_col=0)
except Exception as e:
print('WARNING! Found gl file but could not load. {}. Creating empty gl.'.format(e))
self.df = pd.DataFrame()
else:
self.df = pd.DataFrame()
def __repr__(self):
return str(self.df)
def save(self):
self.df.to_csv(self.file_path)
def record(self, new_row):
self.df = self.df.append( | pd.DataFrame([new_row], columns=self.columns) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
Age,
EmailAddressToDomain,
IsFreeEmailDomain,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives
)
def test_time_since():
time_since = TimeSince()
    # datetime.datetime(year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])
times = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2019, 3, 1, 0, 0, 1, 0),
datetime(2019, 3, 1, 0, 2, 0, 0)])
cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, -120])
time_since = TimeSince(unit='nanoseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(round, values)) == [-1000, -1000000000, -120000000000])
time_since = TimeSince(unit='milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Years')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, 0, 0])
times_y = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2020, 3, 1, 0, 0, 1, 0),
datetime(2017, 3, 1, 0, 0, 0, 0)])
time_since = TimeSince(unit='Years')
values = time_since(array=times_y, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, 1])
error_text = 'Invalid unit given, make sure it is plural'
with pytest.raises(ValueError, match=error_text):
time_since = TimeSince(unit='na')
time_since(array=times, time=cutoff_time)
def test_age():
age = Age()
dates = pd.Series(datetime(2010, 2, 26))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.005] # .005 added due to leap years
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_two_years_quarterly():
age = Age()
dates = pd.Series(pd.date_range('2010-01-01', '2011-12-31', freq='Q'))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [9.915, 9.666, 9.414, 9.162, 8.915, 8.666, 8.414, 8.162]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_leap_year():
age = Age()
dates = pd.Series([datetime(2016, 1, 1)])
ages = age(dates, time=datetime(2016, 3, 1))
correct_ages = [(31 + 29) / 365.0]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
# born leap year date
dates = pd.Series([datetime(2016, 2, 29)])
ages = age(dates, time=datetime(2020, 2, 29))
correct_ages = [4.0027] # .0027 added due to leap year
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_nan():
age = Age()
dates = pd.Series([datetime(2010, 1, 1), np.nan, datetime(2012, 1, 1)])
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.159, np.nan, 8.159]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_week_no_deprecation_message():
dates = [datetime(2019, 1, 3),
datetime(2019, 6, 17, 11, 10, 50),
datetime(2019, 11, 30, 19, 45, 15)
]
with pytest.warns(None) as record:
week = Week()
week(dates).tolist()
assert not record
def test_url_to_domain_urls():
url_to_domain = URLToDomain()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org'])
correct_urls = ['play.google.com',
'mplay.google.co.in',
'lplay.google.co.in',
'play.google.co.in',
'tplay.google.co.in',
'google.co.in',
'google.co.in',
'google.com',
'compzets.com',
'compzets.com',
'facebook.com',
'compzets.net',
'featuretools.org']
np.testing.assert_array_equal(url_to_domain(urls), correct_urls)
def test_url_to_domain_long_url():
url_to_domain = URLToDomain()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['chart.apis.google.com']
results = url_to_domain(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_domain_nan():
url_to_domain = URLToDomain()
urls = pd.Series(['www.featuretools.com', np.nan], dtype='object')
correct_urls = pd.Series(['featuretools.com', np.nan], dtype='object')
results = url_to_domain(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_urls():
url_to_protocol = URLToProtocol()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'https://featuretools.com'])
correct_urls = pd.Series(['https',
'http',
'http',
np.nan,
'http',
'https',
np.nan,
np.nan,
'https',
'http',
'https'])
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_long_url():
url_to_protocol = URLToProtocol()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['http']
results = url_to_protocol(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_protocol_nan():
url_to_protocol = URLToProtocol()
urls = pd.Series(['www.featuretools.com', np.nan, ''], dtype='object')
correct_urls = pd.Series([np.nan, np.nan, np.nan], dtype='object')
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_tld_urls():
url_to_tld = URLToTLD()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.dev?asd=10',
'www.compzets.com?asd=10',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'featuretools.org'])
correct_urls = ['com',
'in',
'in',
'in',
'in',
'in',
'in',
'com',
'dev',
'com',
'net',
'org',
'org']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_long_url():
url_to_tld = URLToTLD()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['com']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_nan():
url_to_tld = URLToTLD()
urls = pd.Series(['www.featuretools.com', np.nan, 'featuretools', ''], dtype='object')
correct_urls = pd.Series(['com', np.nan, np.nan, np.nan], dtype='object')
results = url_to_tld(urls)
pd.testing.assert_series_equal(results, correct_urls, check_names=False)
def test_is_free_email_domain_valid_addresses():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', 'free<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
| pd.testing.assert_series_equal(answers, correct_answers) | pandas.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Jan 5 09:20:37 2022
Compiles NDFD data into SQLite DB
@author: buriona,tclarkin
"""
import sys
from pathlib import Path
import pandas as pd
import sqlalchemy as sql
import sqlite3
import zipfile
from zipfile import ZipFile
# Load directories and defaults
this_dir = Path(__file__).absolute().resolve().parent
#this_dir = Path('C:/Programs/shread_dash/database/SHREAD')
ZIP_IT = False
ZIP_FRMT = zipfile.ZIP_LZMA
DEFAULT_DATE_FIELD = 'Date_Valid'
DEFAULT_CSV_DIR = Path(this_dir, 'data')
DEFAULT_DB_DIR = this_dir
COL_TYPES = {
'Date_Valid':str,'Date_Init':str,'Type':str,'Source':str,'OBJECTID':int,
'Join_Count':int,'TARGET_FID':int,'pointid':int,"grid_code":int,
'elev_ft': int, 'slope_d': int,'aspct': int, 'nlcd': int,
'LOCAL_ID': str,"POLY_SOURC":str,"TOTAL_ID":str,"TOTAL_NAME":str,
'LOCAL_NAME':str,'min':float,'max':float,'mean':float,'median':float
}
# Define functions
def get_dfs(data_dir=DEFAULT_CSV_DIR, verbose=False):
"""
Get and merge dataframes imported using shread.py
"""
mint_df_list = []
maxt_df_list = []
rhm_df_list = []
pop12_df_list = []
qpf_df_list = []
snow_df_list = []
sky_df_list = []
print('Preparing .csv files for database creation...')
for data_file in data_dir.glob('ndfd*.csv'):
if verbose:
print(f'Adding {data_file.name} to dataframe...')
df = pd.read_csv(
data_file,
usecols=COL_TYPES.keys(),
parse_dates=[DEFAULT_DATE_FIELD],
dtype=COL_TYPES
)
if not df.empty:
df = df.drop(axis=1,columns=["Source","Join_Count","TARGET_FID","pointid","grid_code","POLY_SOURC","TOTAL_ID","TOTAL_NAME","min","max","median"])
df = df.rename(columns={"Date_Valid":"Date"})
mint_df_list.append(
df[df['Type'] == 'mint'].drop(columns='Type').copy()
)
maxt_df_list.append(
df[df['Type'] == 'maxt'].drop(columns='Type').copy()
)
rhm_df_list.append(
df[df['Type'] == 'rhm'].drop(columns='Type').copy()
)
pop12_df_list.append(
df[df['Type'] == 'pop12'].drop(columns='Type').copy()
)
qpf_df_list.append(
df[df['Type'] == 'qpf'].drop(columns='Type').copy()
)
snow_df_list.append(
df[df['Type'] == 'snow'].drop(columns='Type').copy()
)
sky_df_list.append(
df[df['Type'] == 'sky'].drop(columns='Type').copy()
)
df_mint = pd.concat(mint_df_list)
df_mint.name = 'mint'
df_maxt = pd.concat(maxt_df_list)
df_maxt.name = 'maxt'
df_rhm = pd.concat(rhm_df_list)
df_rhm.name = 'rhm'
df_pop12 = pd.concat(pop12_df_list)
df_pop12.name = 'pop12'
df_qpf = pd.concat(qpf_df_list)
df_qpf.name = 'qpf'
df_snow = pd.concat(snow_df_list)
df_snow.name = 'snow'
df_sky = pd.concat(sky_df_list)
df_sky.name = 'sky'
print(' Success!!!\n')
return {'mint':df_mint,'maxt':df_maxt,'rhm':df_rhm,'pop12':df_pop12,'qpf':df_qpf,'snow':df_snow,'sky':df_sky}
def get_unique_dates(tbl_name, db_path, date_field=DEFAULT_DATE_FIELD):
"""
Get unique dates from shread data, to ensure no duplicates
"""
if not db_path.is_file():
return pd.DataFrame(columns=[DEFAULT_DATE_FIELD])
db_con_str = f'sqlite:///{db_path.as_posix()}'
eng = sql.create_engine(db_con_str)
with eng.connect() as con:
try:
unique_dates = pd.read_sql(
f'select distinct {date_field} from {tbl_name}',
con
).dropna()
except Exception:
return | pd.DataFrame(columns=[DEFAULT_DATE_FIELD]) | pandas.DataFrame |
from __future__ import division
import torch
import numpy as np
import os
import math
import argparse
import logging
from collections import OrderedDict
import pandas as pd
import json
'''
Histogram of similarities:
a) positive
b) Top-k percent
'''
def histogram(sim, top_k_percents, writer, i_epoch, name):
K = np.array([int(sim.size(0) * top_k_percent) for top_k_percent in top_k_percents])
max_K = K.max()
y, yi = torch.topk(sim, int(max_K), largest=True, sorted=True)
try:
for i, top_k_percent in enumerate(top_k_percents):
writer.add_histogram(
'{}/top_{}'.format(name, int(top_k_percent*100)),
y[:int(K[i])],
i_epoch,
)
    except Exception:
        print('histogram wrong')
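# Hedged usage note: `sim` is expected to be a 1-D torch tensor of similarity scores and
# `writer` a tensorboard SummaryWriter, e.g.
#   histogram(sim, top_k_percents=[0.01, 0.05], writer=writer, i_epoch=epoch, name='pos_sim')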
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_data = next(self.loader)
except StopIteration:
self.next_data = None
return
with torch.cuda.stream(self.stream):
for i in range(2):
self.next_data[i] = self.next_data[i].cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
data = self.next_data
self.preload()
return data
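# Hedged usage sketch for data_prefetcher: `train_loader` and `step_fn` are placeholders,
# and the loader is assumed to yield two CUDA-able tensors (matching the range(2) loop above).
def run_with_prefetcher(train_loader, step_fn):
    prefetcher = data_prefetcher(train_loader)
    batch = prefetcher.next()
    while batch is not None:
        inputs, targets = batch
        step_fn(inputs, targets)  # e.g. forward/backward/optimizer step
        batch = prefetcher.next()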
def batch_addscalar(writer, allloss, lossname, i):
for loss, lossname in zip(allloss, lossname):
writer.add_scalar(lossname, loss, i)
def batch_logging(allloss, lossname, i):
for loss, lossname in zip(allloss, lossname):
logging.info('[Epoch: {}] {}: {:.4f}'.format(i, lossname, loss))
def colorful(text):
return '\033[1;33m {} \033[0m'.format(text)
def exclude_bn_weight_bias_from_weight_decay(model, weight_decay):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# if len(param.shape) == 1 or name in skip_list:
if 'bn' in name:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}
]
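# Hedged example of wiring the param groups above into an optimizer; the learning rate,
# momentum and weight decay below are placeholder values, not taken from any config here.
def build_sgd_without_bn_decay(model, lr=0.1, weight_decay=1e-4):
    param_groups = exclude_bn_weight_bias_from_weight_decay(model, weight_decay)
    return torch.optim.SGD(param_groups, lr=lr, momentum=0.9)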
class GroupAvgMeter(object):
def __init__(self, n, name=''):
self.n = n
self.group_names = [name + '_' + str(i) for i in range(n)]
self.avg_meters = {}
for i in range(len(self.group_names)):
self.avg_meters.update({self.group_names[i]: AvgMeter()})
def add(self, values):
for value, group_name in zip(values, self.group_names):
self.avg_meters[group_name].add(value)
def get(self, group_name):
return self.avg_meters[group_name].get()
def get_all(self):
return [self.avg_meters[group_name].get() for group_name in self.group_names]
def s(self):
return ','.join(['{:.4f}'.format(value) for value in self.get_all()])
class AvgMeter(object):
def __init__(self):
self.clear()
def add(self, value):
self.value += value
self.n += 1
def get(self):
if self.n == 0:
return 0
return self.value/self.n
def clear(self):
self.n = 0
self.value = 0.
class AccuracyMeter(object):
def __init__(self):
self.clear()
def add(self, correct, total):
self.correct += correct
self.total += total
def get(self):
return self.correct/self.total
def clear(self):
self.correct = 0.
self.total = 0.
def getLogger(path):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
fh = logging.FileHandler(os.path.join(path, 'logs', 'log.txt'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
return logger
def beautify(dic):
if type(dic) == argparse.Namespace:
dic = vars(dic)
return json.dumps(dic, indent=4, sort_keys=True)
def get_expidentifier(keys, args):
args = vars(args)
all_pairs = []
for key in keys:
all_pairs.append(key)
all_pairs.append(args[key])
print(all_pairs)
ret = ('[{}={}]'*len(keys)).format(*all_pairs)
return ret
def save_result(args):
res_path = args.res_path
args = sorted(vars(args).items(), key=lambda obj: obj[0])
a = OrderedDict()
for key, value in args:
a[key] = [value,]
df = | pd.DataFrame(a) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import time as tm
import os, cx_Oracle
from datetime import *
import numpy as np
import pandas as pd
pt = os.getcwd() + "\\book1.csv"
df = pd.read_csv(pt)
df = df.astype(str)
df = df.rename(columns=str.upper)
df1 = df[['SERIAL','SUMMARY','CUSTOMATTR15','CUSTOMATTR11','LASTOCCURRENCE']]
df1 = df1.assign(DHM='0')
df1['DHM'] = df.apply(lambda x: | pd.to_datetime(x['LASTOCCURRENCE'], dayfirst=True) | pandas.to_datetime |
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column_name = "Modification Type(s)"
observed_mz = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
regex_glycan_number_pattern = "\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = "\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile("(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile("(\w+)\((\d+)\)")
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
with open(fasta_file_path, "rt") as fasta_file:
result = {}
current_seq = ""
for line in fasta_file:
line = line.strip()
if line.startswith(">"):
if selected:
if selected_prefix + line[1:] in selected:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[current_seq] += line
return result
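# Hedged usage note (path and header below are placeholders): load_fasta returns a
# {header: sequence} dict; pass `selected` (a set of headers, optionally combined with
# `selected_prefix`) to keep only those records, e.g.
#   library = load_fasta("library.fasta", selected={"sp|P01857|IGHG1_HUMAN"})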
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def calculate_proportion(self, occupancy=True):
df = self.df.copy()
#print(df)
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = [# "Isoform",
"Peptides", "Position"]
else:
gr = [# "Isoform",
"Position"]
for _, g in df.groupby(gr):
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
return df
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
if df is None:
df = self.df
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index([# "Isoform",
"Position", "Glycans"])
else:
temp = df.set_index([# "Isoform",
"Peptides", "Glycans", "Position"])
temp.rename(columns={"Value": name}, inplace=True)
return temp
class GlypnirOComponent:
def __init__(self, filename, area_filename, replicate_id, condition_id, protein_name, minimum_score=0, trust_byonic=False, legacy=False):
if type(filename) == pd.DataFrame:
data = filename.copy()
else:
data = pd.read_excel(filename, sheet_name="Spectra")
if type(area_filename) == pd.DataFrame:
file_with_area = area_filename
else:
if area_filename.endswith("xlsx"):
file_with_area = pd.read_excel(area_filename)
else:
file_with_area = pd.read_csv(area_filename, sep="\t")
data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract("scan=(\d+)", expand=False))
data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
self.protein_name = protein_name
self.data = data.sort_values(by=['Area'], ascending=False)
self.replicate_id = replicate_id
self.condition_id = condition_id
self.data = data[data["Area"].notnull()]
self.data = self.data[(self.data["Score"] >= minimum_score) &
(self.data[protein_column_name].str.contains(protein_name))
# (data["Protein Name"] == ">"+protein_name) &
]
self.data = self.data[~self.data[protein_column_name].str.contains(">Reverse")]
if len(self.data.index) > 0:
self.empty = False
else:
self.empty = True
self.row_to_glycans = {}
self.glycan_to_row = {}
self.trust_byonic = trust_byonic
self.legacy = legacy
self.sequon_glycosites = set()
self.glycosylated_seq = set()
def calculate_glycan(self, glycan):
current_mass = 0
current_string = ""
for i in glycan:
current_string += i
if i == ")":
s = glycan_regex.search(current_string)
if s:
name = s.group(1)
amount = s.group(2)
current_mass += glycan_block_dict[name]*int(amount)
current_string = ""
return current_mass
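    # Illustrative note (the composition string below is hypothetical): for a glycan
    # written like "HexNAc(2)Hex(5)", calculate_glycan would add 2x and 5x the matching
    # block masses looked up in sequal's glycan_block_dict.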
def process(self):
# entries_number = len(self.data.index)
# if analysis == "N-glycan":
# expand_window = 2
# self.data["total_number_of_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_n-linked_sequon"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_hexnac"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_deamidation"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_asn"] = pd.Series([0] * entries_number, index=self.data.index, dtype=int)
# elif analysis == "O-glycan":
# self.data["total_number_of_hex"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_ser_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_ser_or_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["o_glycosylation_status"] = pd.Series([False]*entries_number, index=self.data.index, dtype=bool)
for i, r in self.data.iterrows():
glycan_dict = {}
search = sequence_regex.search(r[sequence_column_name])
seq = Sequence(search.group(0))
stripped_seq = seq.to_stripped_string()
# modifications = {}
# if pd.notnull(r[modifications_column_name]):
#
# for mod in r[modifications_column_name].split(","):
# number = 1
# if "*" in mod:
# m = mod.split("*")
# minimod = Sequence(m[0].strip())
# number = int(m[1].strip())
#
# else:
# minimod = Sequence(mod.strip())
# for mo in minimod[0].mods:
# if mo.value not in modifications:
# modifications[mo.value] = {}
# modifications[mo.value][minimod[0].value] = {"mod": deepcopy(mo),
# "number": number}
# #if minimod[0].mods[0].value not in modifications:
# # modifications[minimod[0].mods[0].value] = {}
# #modifications[minimod[0].mods[0].value][minimod[0].value] = {"mod": deepcopy(minimod[0].mods[0]),
# # "number": number}
#
# if minimod[0].value == "N":
# if analysis == "N-glycan":
# for mo in minimod[0].mods:
# if mo.value == 1:
# #if minimod[0].mods[0].value == 1:
# self.data.at[i, "total_number_of_deamidation"] += number
# self.data.at[i, "total_number_of_modded_asn"] += number
# elif minimod[0].value in "ST":
# if analysis == "O-glycan":
# for mo in minimod[0].mods:
# self.data.at[i, "total_number_of_modded_ser_thr"] += number
glycans = []
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
if search:
self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
origin_seq = r[starting_position_column_name] - 1
glycan_reordered = []
self.data.at[i, "origin_start"] = origin_seq
self.data.at[i, "Ending Position"] = r[starting_position_column_name] + len(self.data.at[i, "stripped_seq"])
self.data.at[i, "position_to_glycan"] = ""
if self.trust_byonic:
n_site_status = {}
p_n = r[protein_column_name].lstrip(">")
# print(self.protein_name, p_n)
# motifs = [match for match in seq.find_with_regex(motif, ignore=seq.gaps())]
# if self.analysis == "N-glycan":
# if len(fasta_library[p_n]) >= origin_seq + expand_window:
# if expand_window:
# expanded_window = Sequence(fasta_library[p_n][origin_seq: origin_seq + len(self.data.at[i, "stripped_seq"]) + expand_window])
# expanded_window_motifs = [match for match in expanded_window.find_with_regex(motif, ignore=expanded_window.gaps())]
# origin_map = [i.start + origin_seq for i in expanded_window_motifs]
# if len(expanded_window_motifs) > len(motifs):
# self.data.at[i, "expanded_motif"] = str(expanded_window[expanded_window_motifs[-1]])
# self.data.at[i, "expanded_aa"] = str(expanded_window[-expand_window:])
#
# else:
# origin_map = [i.start + origin_seq for i in motifs]
# else:
# origin_map = [i.start + origin_seq for i in motifs]
#
# if analysis == "N-glycan":
# self.data.at[i, "total_number_of_asn"] = seq.count("N", 0, len(seq))
# if expand_window:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(expanded_window_motifs)
# else:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(motifs)
# self.data.at[i, "total_number_of_unmodded_asn"] = self.data.at[i, "total_number_of_asn"] - self.data.at[i, "total_number_of_modded_asn"]
# elif analysis == "O-glycan":
# self.data.at[i, "total_number_of_ser_thr"] = seq.count("S", 0, len(seq)) + seq.count("T", 0, len(seq))
# self.data.at[i, "total_number_of_unmodded_ser_or_thr"] = self.data.at[i, "total_number_of_modded_ser_thr"] - self.data.at[i, "total_number_of_modded_ser_thr"]
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
self.glycan_to_row[g] = i
glycosylated_site = []
for aa in range(1, len(seq) - 1):
if seq[aa].mods:
mod_value = float(seq[aa].mods[0].value)
round_mod_value = round(mod_value)
# str_mod_value = seq[aa].mods[0].value[0] + str(round_mod_value)
#if str_mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
# if seq[aa].value in modifications[str_mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[str_mod_value][seq[aa].value]['number'] > 0:
# modifications[str_mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = mod_value
round_3 = round(mod_value, 3)
if str(round_3) in glycan_dict:
seq[aa].extra = "Glycosylated"
pos = int(r[starting_position_column_name]) + aa - 2
self.sequon_glycosites.add(pos + 1)
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = seq[aa].value + str(pos + 1)
glycosylated_site.append(self.data.at[i, position] + "_" + str(round_mod_value))
glycosylation_count += 1
glycan_reordered.append(glycan_dict[str(round_3)])
if glycan_reordered:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if self.trust_byonic:
# if not in origin_map:
#
# # position = "{}_position".format(str(glycosylation_count))
# # self.data.at[i, position] = seq[aa].value + str(
# # r[starting_position_column_name]+aa)
# # self.data.at[i, position + "_match"] = "H"
# # glycosylation_count += 1
# self.data.at[i, "total_number_of_hexnac"] += 1
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
# if mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
#
# if seq[aa].value in modifications[mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[mod_value][seq[aa].value]['number'] > 0:
# modifications[mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = float(seq[aa].mods[0].value)
#
# if max_glycans and current_glycan != max_glycans:
#
# seq[aa].mods[0].value = glycans[current_glycan]
# seq[aa].extra = "Glycosylated"
#
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if "hexnac" in glycans[current_glycan].lower():
# self.data.at[i, "total_number_of_hexnac"] += 1
#
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
#
# current_glycan += 1
#if current_glycan == max_glycans:
#break
# for n in origin_map:
# position = "{}_position".format(str(glycosylation_count))
# self.data.at[i, position] = seq[n-origin_seq+1].value + str(
# n + 1)
#
# if seq[n-origin_seq+1].extra == "Glycosylated":
# self.data.at[i, position + "_match"] = "H"
# elif seq[n-origin_seq+1].extra == "Deamidated":
# self.data.at[i, position + "_match"] = "D"
# else:
# self.data.at[i, position + "_match"] = "U"
#
# if analysis == "N-glycan":
# if self.legacy:
# if self.data.at[i, "total_number_of_n-linked_sequon"] != self.data.at[i, "total_number_of_hexnac"]:
# if seq[n-origin_seq+1].extra == "Deamidated":
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "D"
# else:
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# if self.data.at[i, "total_number_of_deamidation"] == 0:
# self.data.at[i, position + "_match"] = "H"
# else:
# self.data.at[i, position + "_match"] ="D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# if not seq[n-origin_seq+1].extra:
# if self.data.at[i, "total_number_of_hexnac"] > 0 and self.data.at[i, "total_number_of_deamidation"]> 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# elif self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "U"
# glycosylation_count += 1
else:
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
glycans.sort()
self.data.at[i, glycans_column_name] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False):
result = []
temp = self.data.sort_values(["Area", "Score"], ascending=False)
temp[glycans_column_name] = temp[glycans_column_name].fillna("None")
out = []
if self.trust_byonic:
seq_glycosites = list(self.sequon_glycosites)
seq_glycosites.sort()
# print(seq_glycosites)
# if self.analysis == "N-glycan":
# if max_sites == 0:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"])]
# else:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"]) & (temp["total_number_of_n-linked_sequon"]<= max_sites) ]
for i, g in temp.groupby(["stripped_seq", "z", "glycoprofile", observed_mz]):
seq_within = []
unique_row = g.loc[g["Area"].idxmax()]
#
# glycan = 0
# first_site = ""
if seq_glycosites:
for n in seq_glycosites:
if unique_row[starting_position_column_name] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][n-unique_row[starting_position_column_name]]+str(n))
# print(unique_row)
# if self.legacy:
# for c in range(len(unique_row.index)):
# if unique_row.index[c].endswith("_position"):
#
# if pd.notnull(unique_row[unique_row.index[c]]):
# if not first_site:
# first_site = unique_row[unique_row.index[c]]
# if unique_row[unique_row.index[c]] not in result:
# result[unique_row[unique_row.index[c]]] = {}
#
# if "U" in unique_row[unique_row.index[c+1]]:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# elif "D" in unique_row[unique_row.index[c+1]]:
# if combine_d_u:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# else:
# if "D" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["D"] = 0
# result[unique_row[unique_row.index[c]]]["D"] += unique_row["Area"]
# else:
# if splitting_sites or unique_row["total_number_of_hexnac"] == 1:
#
# if self.row_to_glycans[unique_row.name][glycan] not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][self.row_to_glycans[unique_row.name][glycan]] = 0
# result[unique_row[unique_row.index[c]]][
# self.row_to_glycans[unique_row.name][glycan]] += unique_row["Area"]
# glycan += 1
#
# else:
# if unique_row["total_number_of_hexnac"] > 1 and not splitting_sites:
# temporary_glycan = ";".join(self.row_to_glycans[unique_row.name][glycan])
#
# if temporary_glycan not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][temporary_glycan] = unique_row["Area"]
# break
# else:
glycosylation_count = 0
glycans = unique_row["position_to_glycan"].split(",")
for c in range(len(unique_row.index)):
if unique_row.index[c].endswith("_position"):
if pd.notnull(unique_row[unique_row.index[c]]):
pos = unique_row[unique_row.index[c]]
result.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
ind = seq_within.index(pos)
seq_within.pop(ind)
glycosylation_count += 1
if seq_within:
for s in seq_within:
result.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
# if N_combo:
#
# N_combo.sort()
# sequons = ";".join(N_combo)
#
# # working_isoform = unique_row["isoform"]
# # if working_isoform not in result:
# # # if working_isoform != 1.0 and 1.0 in result:
# # # if sequons in result[working_isoform][1.0]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][sequons] or "U" in result[working_isoform][1.0][sequons]:
# # # working_isoform = 1.0
# # # else:
# # result[working_isoform] = {}
# if sequons not in result[working_isoform]:
# result[working_isoform][sequons] = {}
# #if pd.notnull(unique_row[glycans_column_name]):
# if unique_row[glycans_column_name] != "None":
# if unique_row[glycans_column_name] not in result[working_isoform][sequons]:
# result[working_isoform][sequons][unique_row[glycans_column_name]] = 0
# result[working_isoform][sequons][unique_row[glycans_column_name]] += unique_row["Area"]
# else:
# if "U" not in result[working_isoform][sequons]:
# result[working_isoform][sequons]["U"] = 0
# result[working_isoform][sequons]["U"] += unique_row["Area"]
# #print(result)
if result:
result = pd.DataFrame(result)
group = result.groupby(["Position", "Glycans"])
out = group.agg(np.sum).reset_index()
else:
                out = pd.DataFrame([], columns=["Position", "Glycans", "Value"])
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# out.append({"Isoform": k, "Position": k2, "Glycans": k3, "Value": result[k][k2][k3]})
else:
# result_total = {}
# if max_sites != 0:
# temp = temp[temp['total_number_of_hex'] <= max_sites]
for i, g in temp.groupby(["stripped_seq", "z", glycans_column_name, starting_position_column_name, observed_mz]):
unique_row = g.loc[g["Area"].idxmax()]
if unique_row[glycans_column_name] != "None":
result.append({"Peptides": i[0], "Glycans": i[2], "Value": unique_row["Area"], "Position": i[3]})
else:
result.append({"Peptides": i[0], "Glycans": "U", "Value": unique_row["Area"], "Position": i[3]})
result = pd.DataFrame(result)
group = result.groupby(["Peptides", "Position", "Glycans"])
out = group.agg(np.sum).reset_index()
# working_isoform = unique_row["isoform"]
# if working_isoform not in result:
# # if working_isoform != 1.0 and 1.0 in result:
# # if unique_row["stripped_seq"] in result[working_isoform][1.0]:
# # #if i[3] in result[working_isoform][1.0][unique_row["stripped_seq"]]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]] or "U" in \
# # # result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]]:
# # working_isoform = 1.0
# # else:
# result[working_isoform] = {}
#
# if unique_row["stripped_seq"] not in result[working_isoform]:
# result[working_isoform][unique_row["stripped_seq"]] = {}
# # result_total[unique_row["isoform"]][unique_row["stripped_seq"]] = 0
# if i[3] not in result[working_isoform][unique_row["stripped_seq"]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]] = {}
# if i[2] == "None":
# if "U" not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] += unique_row["Area"]
#
# else:
# # if splitting_sites:
# # for gly in self.row_to_glycans[unique_row.name]:
# # if gly not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] = 0
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] += unique_row["Area"]
# # else:
# if unique_row[glycans_column_name] not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] += unique_row["Area"]
#
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# for k4 in result[k][k2][k3]:
# out.append({"Isoform": k, "Peptides": k2, "Glycans": k4, "Value": result[k][k2][k3][k4], "Position": k3})
return Result(out)
class GlypnirO:
def __init__(self, trust_byonic=False, get_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data = pd.DataFrame([])
self.get_uniprot = get_uniprot
def add_component(self, filename, area_filename, replicate_id, sample_id):
component = GlypnirOComponent(filename, area_filename, replicate_id, sample_id)
def add_batch_component(self, component_list, minimum_score, protein=None, combine_uniprot_isoform=True, legacy=False):
self.load_dataframe(component_list)
protein_list = []
if protein is not None:
self.components["Protein"] = pd.Series([protein]*len(self.components.index), index=self.components.index)
for i, r in self.components.iterrows():
comp = GlypnirOComponent(r["filename"], r["area_filename"], r["replicate_id"], condition_id=r["condition_id"], protein_name=protein, minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
self.components.at[i, "component"] = comp
print("{} - {}, {} peptides has been successfully loaded".format(r["condition_id"], r["replicate_id"], str(len(comp.data.index))))
else:
components = []
for i, r in self.components.iterrows():
data = pd.read_excel(r["filename"], sheet_name="Spectra")
protein_id_column = protein_column_name
if combine_uniprot_isoform:
protein_id_column = "master_id"
for i2, r2 in data.iterrows():
search = uniprot_regex.search(r2[protein_column_name])
if not r2[protein_column_name].startswith(">Reverse") and not r2[protein_column_name].endswith("(Common contaminant protein)"):
if search:
data.at[i2, "master_id"] = search.groupdict(default="")["accession"]
if not self.get_uniprot:
protein_list.append([search.groupdict(default="")["accession"], r2[protein_column_name]])
if search.groupdict(default="")["isoform"] != "":
data.at[i2, "isoform"] = int(search.groupdict(default="")["isoform"][1:])
else:
data.at[i2, "isoform"] = 1
else:
data.at[i2, "master_id"] = r2[protein_column_name]
data.at[i2, "isoform"] = 1
else:
data.at[i2, "master_id"] = r2[protein_column_name]
data.at[i2, "isoform"] = 1
if r["area_filename"].endswith("xlsx"):
file_with_area = pd.read_excel(r["area_filename"])
else:
file_with_area = pd.read_csv(r["area_filename"], sep="\t")
for index, g in data.groupby([protein_id_column]):
u = index
if not u.startswith(">Reverse") and not u.endswith("(Common contaminant protein)"):
comp = GlypnirOComponent(g, file_with_area, r["replicate_id"],
condition_id=r["condition_id"], protein_name=u,
minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
if not comp.empty:
components.append({"filename": r["filename"], "area_filename": r["area_filename"], "condition_id": r["condition_id"], "replicate_id": r["replicate_id"], "Protein": u, "component": comp})
yield i, r
print(
"{} - {} peptides has been successfully loaded".format(r["condition_id"],
r["replicate_id"]))
self.components = pd.DataFrame(components, columns=list(self.components.columns) + ["component", "Protein"])
if not self.get_uniprot:
protein_df = pd.DataFrame(protein_list, columns=["Entry", "Protein names"])
self.uniprot_parsed_data = protein_df
#print(self.uniprot_parsed_data)
def load_dataframe(self, component_list):
if type(component_list) == list:
self.components = | pd.DataFrame(component_list) | pandas.DataFrame |
from collections import Counter
import altair as alt
import pandas as pd
import streamlit as st
def stat_explorer(num_players):
global data, board_spaces
st.title('Mpoly Junior Game statistics explorer')
st.write("""
We play 5,000,000 games and see what we can find.
Use the radio buttons at the sidebar to change the number of players in the game.
""")
with st.expander('Notes'):
st.write("""
* The games are played under advanced rules -- if a player is bankrupt, the player can sell property
to the creditor. Games end when all players are truly bankrupt or the games last longer than
300 rounds (a draw).
* Some algorithms are in place to mimic decisions made in an actual game. For example:
* If a player has a choice to move to a space where there is liability for paying rent, and one that
does not, player chooses the free space.
* If a player has to sell property to a bank or player, it chooses the least expensive property first.
* If a player gets a choice to pick a property to buy, it picks the most expensive one that would make
a pair.
""")
@st.cache
def load_data(no_of_players: int):
data_to_be_loaded = pd.read_csv(f'games_{no_of_players}p_5m.csv.gz', compression='gzip')
data_to_be_loaded.index.name = 'Game'
return data_to_be_loaded
with st.spinner('Loading data...'):
data = load_data(num_players)
st.header('Win Percentage')
st.write("""
What are the odds of 1 player winning? This shows how significant the 1st player's advantage is, and how
many games end in a draw (more than 300 rounds).
(Incidentally, the youngest player is designated the first player in the junior game.)
""")
@st.cache
def compile_win_percentages(players: int, data, games=5000000):
winners = data['winner'].value_counts().to_dict()
if num_players == 2:
result = pd.DataFrame({'player': ['Player 1', 'Player 2', 'Draw'],
'Win Count': [winners[0], winners[1], winners[-1]]})
elif num_players == 3:
result = pd.DataFrame({'player': ['Player 1', 'Player 2', 'Player 3', 'Draw'],
'Win Count': [winners[0], winners[1], winners[2], winners[-1]]})
elif num_players == 4:
result = pd.DataFrame({'player': ['Player 1', 'Player 2', 'Player 3', 'Player 4', 'Draw'],
'Win Count': [winners[0], winners[1], winners[2], winners[3], winners[-1]]})
result['win_percent'] = result['Win Count'].map(lambda x: f"{round(x / games * 100, 2)}%")
return result
def make_win_chart(source):
chart = alt.Chart(source).properties(height=350, width=700, title='Win Count(%) by player')
win_chart = chart.mark_bar().encode(
y='player',
x='Win Count',
color=alt.Color('player', scale=alt.Scale(scheme='dark2'))
)
text = win_chart.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='win_percent'
)
return win_chart + text
st.altair_chart(make_win_chart(compile_win_percentages(num_players, data)))
st.header('Number of rounds to complete game')
st.write("""
Shows how long a game would typically take.
""")
@st.cache
def compile_rounds(rounds_series):
rounds_container = [value for _, value in rounds_series.items()]
rounds_counter = Counter(rounds_container)
return pd.DataFrame({'Rounds': list(rounds_counter.keys()), 'count': list(rounds_counter.values())})
def make_rounds_chart(source):
base = alt.Chart(source).encode(
x=alt.X('Rounds', bin=alt.Bin(step=5)),
)
rounds_bar = base.mark_bar().encode(
y='count',
color=alt.condition(alt.datum.Rounds > 300, alt.value('orange'), alt.value('slateblue'))
).properties(width=700, height=500)
return rounds_bar
st.altair_chart(make_rounds_chart(compile_rounds(data['rounds'])))
st.header('Most visited spaces')
st.write("""
This shows you how often a place is visited during the 5 million games.
Wouldn't you consider trying for the hottest space?
""")
board_spaces = ['GO', 'CHANCE', 'JAIL', 'FREE PARKING', 'TACO TRUCK', 'PIZZA HOUSE', 'BAKERY',
'ICE CREAM PARLOUR',
'MUSEUM', 'LIBRARY', 'GO-KARTS', 'SWIMMING POOL', 'FERRIS WHEEL', 'ROLLER COASTER', 'TOY SHOP',
'PET SHOP',
'AQUARIUM', 'THE ZOO', 'PARK LANE', 'BOARDWALK']
@st.cache
def compile_spaces():
visit_count = []
for space in board_spaces:
if space == 'CHANCE':
visit_count.append(data[space].sum() / 4)
else:
visit_count.append(data[space].sum())
return | pd.DataFrame({'Space': board_spaces, 'Visit Count': visit_count}) | pandas.DataFrame |
"""
This script preprocesses data and prepares data to be actually used in training
"""
import re
import os
import pickle
import unicodedata
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import logging
logging.basicConfig(filename="memo_1.txt", level=logging.INFO)
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def normalizeString(s):
"""
Lowercase, trim, and remove non-letter characters
"""
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
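# Hedged worked example of the two helpers above (the input string is illustrative only):
#   normalizeString("Héllo, world!!") -> "hello world ! !"
# (accents stripped via unicodeToAscii, punctuation padded with a space, everything else
# collapsed to single spaces).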
def transcribe_sessions():
file2transcriptions = {}
useful_regex = re.compile(r'^(\w+)', re.IGNORECASE)
transcript_path = '/home/Data/IEMOCAP_session_only/Session{}/dialog/transcriptions/'
for sess in range(1, 6):
transcript_path_i = transcript_path.format(sess)
for f in os.listdir(transcript_path_i):
with open('{}{}'.format(transcript_path_i, f), 'r') as f:
all_lines = f.readlines()
for l in all_lines:
logging.info(l)
audio_code = useful_regex.match(l).group()
transcription = l.split(':')[-1].strip()
# assuming that all the keys would be unique and hence no `try`
file2transcriptions[audio_code] = transcription
with open('../data/t2e/audiocode2text.pkl', 'wb') as file:
pickle.dump(file2transcriptions, file)
return file2transcriptions
def prepare_text_data(audiocode2text):
# Prepare text data
df = | pd.read_csv('../data/pre-processed/audio_features.csv') | pandas.read_csv |
from itertools import product
import numpy as np
from numpy import ma
import pandas as pd
import pytest
from scipy import sparse as sp
from scipy.sparse import csr_matrix, issparse
from anndata import AnnData
from anndata.tests.helpers import assert_equal, gen_adata
# some test objects that we use below
adata_dense = AnnData(np.array([[1, 2], [3, 4]]))
adata_dense.layers["test"] = adata_dense.X
adata_sparse = AnnData(
csr_matrix([[0, 2, 3], [0, 5, 6]]),
dict(obs_names=["s1", "s2"], anno1=["c1", "c2"]),
dict(var_names=["a", "b", "c"]),
)
def test_creation():
AnnData(np.array([[1, 2], [3, 4]]))
AnnData(np.array([[1, 2], [3, 4]]), {}, {})
AnnData(ma.array([[1, 2], [3, 4]]), uns=dict(mask=[0, 1, 1, 0]))
AnnData(sp.eye(2))
X = np.array([[1, 2, 3], [4, 5, 6]])
adata = AnnData(
X=X,
obs=dict(Obs=["A", "B"]),
var=dict(Feat=["a", "b", "c"]),
obsm=dict(X_pca=np.array([[1, 2], [3, 4]])),
raw=dict(X=X, var=dict(var_names=["a", "b", "c"])),
)
assert adata.raw.X.tolist() == X.tolist()
assert adata.raw.var_names.tolist() == ["a", "b", "c"]
with pytest.raises(ValueError):
AnnData(np.array([[1, 2], [3, 4]]), dict(TooLong=[1, 2, 3, 4]))
# init with empty data matrix
shape = (3, 5)
adata = AnnData(None, uns=dict(test=np.array((3, 3))), shape=shape)
assert adata.X is None
assert adata.shape == shape
assert "test" in adata.uns
def test_create_with_dfs():
X = np.ones((6, 3))
obs = pd.DataFrame(dict(cat_anno=pd.Categorical(["a", "a", "a", "a", "b", "a"])))
obs_copy = obs.copy()
adata = AnnData(X=X, obs=obs)
assert obs.index.equals(obs_copy.index)
assert obs.index.astype(str).equals(adata.obs.index)
def test_create_from_df():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
ad = AnnData(df)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
def test_create_from_sparse_df():
s = sp.random(20, 30, density=0.2)
obs_names = [f"obs{i}" for i in range(20)]
var_names = [f"var{i}" for i in range(30)]
df = pd.DataFrame.sparse.from_spmatrix(s, index=obs_names, columns=var_names)
a = AnnData(df)
b = AnnData(s, obs=pd.DataFrame(index=obs_names), var=pd.DataFrame(index=var_names))
assert_equal(a, b)
assert issparse(a.X)
def test_create_from_df_with_obs_and_var():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
obs = pd.DataFrame(np.ones((3, 1)), index=df.index, columns=["C"])
var = pd.DataFrame(np.ones((2, 1)), index=df.columns, columns=["D"])
ad = AnnData(df, obs=obs, var=var)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
assert obs.equals(ad.obs)
assert var.equals(ad.var)
with pytest.raises(ValueError, match=r"Index of obs must match index of X."):
AnnData(df, obs=obs.reset_index())
with pytest.raises(ValueError, match=r"Index of var must match columns of X."):
AnnData(df, var=var.reset_index())
def test_from_df_and_dict():
df = pd.DataFrame(dict(a=[0.1, 0.2, 0.3], b=[1.1, 1.2, 1.3]))
adata = AnnData(df, dict(species=pd.Categorical(["a", "b", "a"])))
assert adata.obs["species"].values.tolist() == ["a", "b", "a"]
def test_df_warnings():
df = pd.DataFrame(dict(A=[1, 2, 3], B=[1.0, 2.0, 3.0]), index=["a", "b", "c"])
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata = AnnData(df)
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata.X = df
def test_attr_deletion():
full = gen_adata((30, 30))
# Empty has just X, obs_names, var_names
empty = AnnData(None, obs=full.obs[[]], var=full.var[[]])
for attr in ["X", "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]:
delattr(full, attr)
assert_equal(getattr(full, attr), getattr(empty, attr))
assert_equal(full, empty, exact=True)
def test_names():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
assert adata.obs_names.tolist() == "A B".split()
assert adata.var_names.tolist() == "a b c".split()
adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]), var=dict(var_names=["a", "b"]))
assert adata.var_names.tolist() == ["a", "b"]
@pytest.mark.parametrize(
"names,after",
[
pytest.param(["a", "b"], None, id="list"),
pytest.param(
pd.Series(["AAD", "CCA"], name="barcodes"), "barcodes", id="Series-str"
),
pytest.param(pd.Series(["x", "y"], name=0), None, id="Series-int"),
],
)
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names(names, after, attr):
adata = adata_dense.copy()
assert getattr(adata, attr).name is None
setattr(adata, attr, names)
assert getattr(adata, attr).name == after
if hasattr(names, "name"):
assert names.name is not None
# Testing for views
new = adata[:, :]
assert new.is_view
setattr(new, attr, names)
assert_equal(new, adata, exact=True)
assert not new.is_view
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names_error(attr):
orig = adata_sparse[:2, :2]
adata = adata_sparse[:2, :2]
assert getattr(adata, attr).name is None
with pytest.raises(ValueError, match=fr"AnnData expects \.{attr[:3]}\.index\.name"):
setattr(adata, attr, pd.Index(["x", "y"], name=0))
assert adata.is_view
assert getattr(adata, attr).tolist() != ["x", "y"]
assert getattr(adata, attr).tolist() == getattr(orig, attr).tolist()
assert_equal(orig, adata, exact=True)
@pytest.mark.parametrize("dim", ["obs", "var"])
def test_setting_dim_index(dim):
index_attr = f"{dim}_names"
mapping_attr = f"{dim}m"
orig = gen_adata((5, 5))
orig.raw = orig
curr = orig.copy()
view = orig[:, :]
new_idx = pd.Index(list("abcde"), name="letters")
setattr(curr, index_attr, new_idx)
pd.testing.assert_index_equal(getattr(curr, index_attr), new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr)["df"].index, new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr).dim_names, new_idx)
| pd.testing.assert_index_equal(curr.obs_names, curr.raw.obs_names) | pandas.testing.assert_index_equal |
#! /usr/bin/env python3
#SBATCH -J get_csv
#SBATCH -t 4:0:0
#SBATCH --mem=5G
### Get one csv with the normalized expression data
# This Python script makes a csv of the output data from cuffdiff. It extracts the gene_id and expression values for each analysis and writes them to one csv file.
# This script needs os and pandas.
# 00: import system commands and pandas (for DF management)
import os
import pandas as pd
#
# set path where the data is
path = '/home/uu_bio_fg/rbrouns/data/ant_fungus/TC6/data/ophio/cuffdiff_out/'
# 01: Create a list with all the files in the directory where the gene_ex data is
ls_gene_ex_files = (os.listdir(path))
# sort the list
ls_gene_ex_files.sort()
# somehow the sample for file 20 does not contain data, so that one is removed from the list
ls_gene_ex_files.remove('gene_exp_02Avs20A.csv')
### 02: Create DF with row names and without expression values
#
# get first filename of list (thus file of sample 1)
sample1 = (ls_gene_ex_files[0])
# create df with the data from sample 1
file0 = pd.read_csv(f'{path}{sample1}', sep='\t')
# extract the columns gene_id, gene, locus into new DF
TC6_gene_ex = file0[['gene_id', 'gene', 'locus']]
### 03: Append expression values as new columns matching the row names
#
for sample in ls_gene_ex_files:
# create df with the data from sample
file = pd.read_csv(f'{path}{sample}', sep='\t')
# extract the gene_id and expression value_2 columns
value_2 = file[['gene_id','value_2']]
# rename header of expression value to sample name
s_header = sample.split('.')[0]
ss_header = 'sample_' + s_header.split('vs')[1]
value_2.columns = ['gene_id',ss_header]
# append TC6 DF with expression values DF
TC6_gene_ex = | pd.concat([TC6_gene_ex, value_2.iloc[:,1]], axis=1) | pandas.concat |
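# A minimal sketch of the final step described in the header comment (writing
# everything to one csv file), assuming the loop above completes as written;
# the output filename is a placeholder, not taken from the original script:
TC6_gene_ex.to_csv(path + 'TC6_gene_expression_normalized.csv', index=False)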
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 14:52:26 2021
@author: IneR
"""
#set inputs
#Folder (Adapt!!)
Folder = 'I:\\Las\\InputRF\\ReferenceData\\FixedDistance\\'
#%%
#import modules
import os
import glob
import pandas as pd
import numpy
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectFromModel
import sklearn.model_selection as ms
Test = "Velm"
#%%
########################## Import variables ###################################
###############################################################################
###############################################################################
#import True positive
TruePos_H = pd.read_csv(Folder + 'TP_Huldenberg_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
TruePos_T = pd.read_csv(Folder + 'TP_Tervuren_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
TruePos_V = pd.read_csv(Folder + 'TP_Velm_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
#import True negative
TrueNeg_H = pd.read_csv(Folder + 'TN_Huldenberg_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
TrueNeg_T = pd.read_csv(Folder + 'TN_Tervuren_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
TrueNeg_V = pd.read_csv(Folder + 'TN_Velm_Features_fixedDist_MoreFeatures_r20.csv', index_col=0)
#
dataRF_H = pd.concat([TruePos_H, TrueNeg_H])
dataRF_V = pd.concat([TruePos_V, TrueNeg_V])
dataRF_T = pd.concat([TruePos_T, TrueNeg_T])
#Get feature names --> drop the non-feature columns (listed in cols below)
cols = [0, 1, 2, 3, 5, 7, 8, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
156, 157, 158, 159, 160, 161]
features_df = dataRF_H.drop(dataRF_H.columns[cols],axis=1)
features = features_df.columns[:125] #column names feat
#dataRF columns: 1,2,3 = x,y,z; 4,6,9:160 = features; 161 = response
cols2 = [0, 5, 7, 8, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161] #all columns but xyz and features
if Test == "Huldenberg":
Train1 = "Tervuren"
Train2 = "Velm"
dataRF_train = pd.concat([TruePos_T, TrueNeg_T, TruePos_V, TrueNeg_V])
dataRF_train1 = dataRF_T
dataRF_train2 = dataRF_V
dataRF_test = dataRF_H
X_xyz_values_train = dataRF_train.drop(dataRF_train.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_train1 = dataRF_T.drop(dataRF_T.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_train2 = dataRF_V.drop(dataRF_V.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_test = dataRF_H.drop(dataRF_H.columns[cols2], axis=1) #only keep xyz and features
X_test = X_xyz_values_test.iloc[ : , 3:157]
X_train1 = X_xyz_values_train1.iloc[ : , 3:157]
X_train2 = X_xyz_values_train2.iloc[ : , 3:157]
y_test = dataRF_H.iloc[:,161].values
y_train1 = dataRF_T.iloc[:,161].values
y_train2 = dataRF_V.iloc[:,161].values
if Test == "Tervuren":
Train1 = "Huldenberg"
Train2 = "Velm"
dataRF_train = pd.concat([TruePos_H, TrueNeg_H, TruePos_V, TrueNeg_V])
dataRF_train1 = dataRF_H
dataRF_train2 = dataRF_V
dataRF_test = dataRF_T
X_xyz_values_train = dataRF_train.drop(dataRF_train.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_train1 = dataRF_H.drop(dataRF_H.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_train2 = dataRF_V.drop(dataRF_V.columns[cols2], axis=1) #only keep xyz and features
X_xyz_values_test = dataRF_T.drop(dataRF_T.columns[cols2], axis=1) #only keep xyz and features
X_test = X_xyz_values_test.iloc[ : , 3:157]
X_train1 = X_xyz_values_train1.iloc[ : , 3:157]
X_train2 = X_xyz_values_train2.iloc[ : , 3:157]
y_test = dataRF_T.iloc[:,161].values
y_train1 = dataRF_H.iloc[:,161].values
y_train2 = dataRF_V.iloc[:,161].values
if Test =="Velm":
Train1 = "Huldenberg"
Train2 = "Tervuren"
dataRF_train = | pd.concat([TruePos_T, TrueNeg_T, TruePos_H, TrueNeg_H]) | pandas.concat |
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import Series, Timestamp
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize("val,expected", [
(2**63 - 1, 3),
(2**63, 4),
])
def test_loc_uint64(val, expected):
# see gh-19399
s = Series({2**63 - 1: 3, 2**63: 4})
assert s.loc[val] == expected
def test_loc_getitem(test_data):
inds = test_data.series.index[[3, 4, 7]]
assert_series_equal(
test_data.series.loc[inds],
test_data.series.reindex(inds))
assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])
# slice with indices
d1, d2 = test_data.ts.index[[5, 15]]
result = test_data.ts.loc[d1:d2]
expected = test_data.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = test_data.series > test_data.series.median()
assert_series_equal(test_data.series.loc[mask], test_data.series[mask])
# ask for index value
assert test_data.ts.loc[d1] == test_data.ts[d1]
assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
d1, d2 = test_data.ts.index[[5, 15]]
ts2 = test_data.ts[::2][[1, 2, 0]]
msg = r"Timestamp\('2000-01-10 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2]
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11] = 0
def test_loc_getitem_iterator(test_data):
idx = iter(test_data.series.index[:10])
result = test_data.series.loc[idx]
assert_series_equal(result, test_data.series[:10])
def test_loc_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
result = test_data.series.copy()
result.loc[mask] = 0
expected = test_data.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_loc_setitem_corner(test_data):
inds = list(test_data.series.index[[5, 8, 12]])
test_data.series.loc[inds] = 5
msg = r"\['foo'\] not in index"
with pytest.raises(KeyError, match=msg):
test_data.series.loc[inds + ['foo']] = 5
def test_basic_setitem_with_labels(test_data):
indices = test_data.ts.index[[5, 10, 15]]
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
cp[inds] = 0
exp.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
cp[arr_inds] = 0
exp.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
msg = r"\[5\] not contained in the index"
with pytest.raises(ValueError, match=msg):
s[inds_notfound] = 0
with pytest.raises(Exception, match=msg):
s[arr_inds_notfound] = 0
# GH12089
# with tz for values
s = Series( | pd.date_range("2011-01-01", periods=3, tz="US/Eastern") | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
# ### Explore processed pan-cancer data
# In[1]:
import os
import sys
import numpy as np; np.random.seed(42)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
DATA_TYPE = 'mut_sigs'
# load gene/classification info and sample/cancer type info
print('Loading gene label data...', file=sys.stderr)
genes_df = du.load_vogelstein()
sample_info_df = du.load_sample_info(DATA_TYPE, verbose=True)
# load mutation info
# this returns a tuple of dataframes, unpack it below
pancancer_data = du.load_pancancer_data(verbose=True)
(sample_freeze_df,
mutation_df,
copy_loss_df,
copy_gain_df,
mut_burden_df) = pancancer_data
# In[3]:
# load relevant data
data_df = du.load_raw_data(DATA_TYPE, verbose=True)
# standardize columns of expression dataframe
if DATA_TYPE in cfg.standardize_data_types:
print('Standardizing columns of {} data...'.format(DATA_TYPE),
file=sys.stderr)
data_df[data_df.columns] = StandardScaler().fit_transform(data_df[data_df.columns])
print(data_df.shape)
data_df.iloc[:5, :5]
# First, let's look at the low-dimensional representation of the chosen data type.
#
# We'll choose a few cancer types that are similar to one another (LUSC/LUAD, LGG/GBM) and a few that should be dissimilar (BRCA, THCA).
# In[25]:
assert sample_info_df.index.equals(data_df.index)
# data_cancer_types = sorted(sample_info_df.cancer_type.unique())
data_cancer_types = ['LUAD', 'LUSC', 'THCA', 'LGG', 'GBM', 'BRCA']
data_types_df = (data_df
.merge(sample_info_df, left_index=True, right_index=True)
.query('cancer_type in @data_cancer_types')
.drop(columns=['sample_type', 'id_for_stratification'])
.reset_index()
)
print(data_types_df.cancer_type.unique())
data_types_df.iloc[:5, -5:]
# In[26]:
from sklearn.decomposition import PCA
from umap import UMAP
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
pca = PCA(n_components=2)
X_proj_pca = pca.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
reducer = UMAP(n_components=2, random_state=42)
X_proj_umap = reducer.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
for i, cancer_type in enumerate(data_cancer_types):
ixs = data_types_df.index[data_types_df.cancer_type == cancer_type].tolist()
axarr[0].scatter(X_proj_pca[ixs, 0], X_proj_pca[ixs, 1], label=cancer_type, s=5)
axarr[1].scatter(X_proj_umap[ixs, 0], X_proj_umap[ixs, 1], label=cancer_type, s=5)
axarr[0].set_xlabel('PC1')
axarr[0].set_ylabel('PC2')
axarr[0].set_title('PCA projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[0].legend()
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[1].legend()
# Now we want to dig a bit deeper into LGG and GBM, using expression and methylation data. It's fairly well-known that IDH1 mutation status defines distinct subtypes in both classes of brain tumors. We'll compare methylation and gene expression in IDH1-mutated vs. non-mutated samples, expecting to see a separation in our low dimensional representation.
#
# IDH1 plays a direct role in DNA methylation, so we anticipate that this separation between mutated and non-mutated samples will be slightly clearer in the methylation data.
# In[5]:
# load relevant data
rnaseq_df = du.load_raw_data('expression', verbose=True)
print('Standardizing columns of expression data...', file=sys.stderr)
rnaseq_df[rnaseq_df.columns] = StandardScaler().fit_transform(rnaseq_df[rnaseq_df.columns])
methylation_df = du.load_raw_data('me_27k', verbose=True)
print(methylation_df.shape)
methylation_df.iloc[:5, :5]
# In[6]:
from mpmp.utilities.tcga_utilities import process_y_matrix
def generate_labels(gene, classification):
# process the y matrix for the given gene or pathway
y_mutation_df = mutation_df.loc[:, gene]
# include copy number gains for oncogenes
# and copy number loss for tumor suppressor genes (TSG)
include_copy = True
if classification == "Oncogene":
y_copy_number_df = copy_gain_df.loc[:, gene]
elif classification == "TSG":
y_copy_number_df = copy_loss_df.loc[:, gene]
else:
y_copy_number_df = | pd.DataFrame() | pandas.DataFrame |
def get_default_fitkwargs(dataset=None):
return {'d': 10, 'n_iters': 1000, 'max_n': 3000, 'batch_size': 100, 'lr': 1e-2, 'stop_iters':50, 'norm': True, 'ybar_bias': True}
#=====================================For things implemented in DJKP
import pandas as pd
import numpy as np
import os
import ipdb
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_diabetes, fetch_california_housing
import torch
from models.baselines.djkp.dj_models.static import *
from models.baselines.djkp.dj_models.PBP import *
import _settings #from main repo
TRAIN = 'train'
VALID = 'val'
TEST = 'test'
VALID_1 = 'val1'
VALID_2 = 'val2'
SEED_OFFSETS = {TRAIN: 101, VALID: 202, VALID_1: 203, VALID_2: 204, TEST: 303}
def my_split(idx, split_ratio=[0.8, 0.2], seed=10):
assert len(split_ratio) == 2, "for this task"
np.random.seed(seed)
n = len(idx)
perm = np.random.permutation(n)
split_ratio = np.concatenate([[0.], np.cumsum(split_ratio) / sum(split_ratio)])
splits = np.round(split_ratio * n).astype(int)
idxs = [idx[perm[splits[i]:splits[i + 1]]] for i in range(len(split_ratio) - 1)]
return idxs[0], idxs[1]
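# Minimal usage sketch for my_split (illustrative only): an 80/20 split of 100
# indices with a fixed seed, wrapped in a helper so it does not run on import.
def _example_my_split():
    idx = np.arange(100)
    train_idx, valid_idx = my_split(idx, split_ratio=[0.8, 0.2], seed=10)
    assert len(train_idx) == 80 and len(valid_idx) == 20
    return train_idx, valid_idx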
def load_dataset(dataset='UCI_Yacht', seed=7, data_path=_settings.DATA_PATH):
if dataset == _settings.HOUSING_NAME:
X, y = load_boston(return_X_y=True)
elif dataset == _settings.ENERGY_NAME:
fpath = os.path.join(data_path, dataset, 'ENB2012_data.xlsx')
raw_df = pd.read_excel(fpath, engine='openpyxl')
# raw_df = raw_df.iloc[:, :10]
# raw_df.columns = ["X%d" % d for d in range(self.raw_df.shape[1] - 2)] + ['Y0', 'Y1']
raw_df = raw_df.iloc[:, :9]
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
X = raw_df.iloc[:, :-1].values
y = raw_df.iloc[:, -1].values
elif dataset == _settings.YACHT_NAME:
fpath = os.path.join(data_path, 'UCI_Yacht', 'yacht_hydrodynamics.data')
df = pd.read_fwf(fpath, header=None)
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
elif dataset == _settings.KIN8NM_NAME:
fpath = os.path.join(data_path, 'Kin8nm', 'dataset_2175_kin8nm.csv')
df = pd.read_csv(fpath)
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
elif dataset == _settings.BIKE_NAME:
fpath = os.path.join(data_path, 'UCI_BikeSharing', 'Bike-Sharing-Dataset.zip')
from zipfile import ZipFile
archive = ZipFile(fpath)
#raw_df = pd.read_csv(archive.open('day.csv')).set_index('dteday', verify_integrity=True).drop('instant',axis=1)
raw_df = pd.read_csv(archive.open('hour.csv')).set_index('instant', verify_integrity=True)
drop_cols = ['yr', 'mnth', 'dteday']
enum_cols = ['season', 'hr', 'weekday', 'weathersit']
raw_df = raw_df.drop(drop_cols, axis=1)
for enum_col in enum_cols:
ser = raw_df[enum_col]
tdf = pd.get_dummies(ser).rename(columns=lambda x: "%s%d"%(enum_col, x))
raw_df = | pd.concat([raw_df, tdf], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 12:48:08 2020
@author: smith
"""
import spacy
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import os
import multiprocessing
import csv
import re
import pandas as pd
from time import time
from datetime import datetime
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
import logging
import gensim
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
w2v_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/w2v_model/model071520/'
w2v_model = Word2Vec.load(os.path.join(w2v_dir, 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model'))
modelName = '_w2v071520_'
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/'
clusters = ['Cluster' + str(x) for x in range(20)]
category = 'CellTypes'
comparison = 'MarkerGenes'
termIndex = pd.read_excel(os.path.join(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison + '_Frequency.xlsx'), index_col=0)
termIndex = termIndex.sort_values(by='Combined Occurances', ascending=False)
enrichIndex = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx', index_col=0)
enrIndex = enrichIndex.iloc[:,::4]
def calcTopSimilarities(cluster, category, min_freq=5, topn=2000, save=False):
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')
clusterNum=cluster.replace('Cluster', '')
genesDf = pd.read_excel('/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx')
genesList = genesDf[str(clusterNum) + '_n'].tolist()
# lower-case the gene symbols
genes = [gene.lower() for gene in genesList]
# words = pd.read_excel(os.path.join(resultDirectory, str(cluster) + '_' + comparison + '_Results/' + category + '_' + cluster + '_Frequency.xlsx'), index_col=0)
# words = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster0_EnrichedFunctions_onlyTest.xlsx', index_col=0)
# wordsRedacted = words.loc[words['Occurances'] > min_freq]['word'].tolist()
words = enrIndex
wordsRedacted = words[cluster + ' term'].tolist()[:-1]
if category == 'CellTypes':
wordsRedacted = termIndex['word'].tolist()[:150]
newWords = []
for item in wordsRedacted:
try:
item = item.replace(' ', '_')
newWords.append(item)
except AttributeError:
pass
cat = pd.DataFrame()
catX = | pd.DataFrame() | pandas.DataFrame |
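# The similarity computation itself is not shown above. A hedged sketch of one
# plausible way to score (gene, term) pairs with the loaded word2vec model
# (illustrative only, not the author's implementation); pairwise_similarities
# is a hypothetical helper name:
def pairwise_similarities(model, genes, terms, topn=2000):
    rows = []
    for gene in genes:
        if gene not in model.wv:
            continue
        for term in terms:
            if term in model.wv:
                rows.append((gene, term, model.wv.similarity(gene, term)))
    sim_df = pd.DataFrame(rows, columns=['gene', 'term', 'similarity'])
    return sim_df.sort_values('similarity', ascending=False).head(topn)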
from sklearn.metrics import roc_auc_score, roc_curve, auc
import pandas as pd
from typing import Dict, List
from progress.bar import Bar
import os
import pickle
from prismx.utils import read_gmt, load_correlation, load_feature
from prismx.loaddata import get_genes
def calculate_set_auc(prediction: pd.DataFrame, library: Dict, min_lib_size: int=1) -> pd.DataFrame:
aucs = []
setnames = []
gidx = prediction.index
for se in library:
if len(library[se]) >= min_lib_size:
lenc = [x.encode('utf-8') for x in library[se]]
gold = [i in lenc for i in gidx]
fpr, tpr, _ = roc_curve(list(gold), list(prediction.loc[:,se]))
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
setnames.append(se)
aucs = pd.DataFrame(aucs, index=setnames)
return(aucs)
def calculate_gene_auc(prediction: pd.DataFrame, rev_library: Dict, min_lib_size: int=1) -> List[float]:
aucs = []
gidx = prediction.index
for se in rev_library:
gold = [i in rev_library[se] for i in prediction.columns]
if len(rev_library[se]) >= min_lib_size and se.encode("UTF-8") in gidx:
fpr, tpr, _ = roc_curve(list(gold), list(prediction.loc[se.encode("UTF-8"),:]))
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
return(aucs)
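# Minimal usage sketch (illustrative only): a tiny synthetic prediction matrix
# (genes x sets) with a byte-encoded gene index, matching how the functions
# above compare library genes against the prediction index.
def _example_auc_usage():
    genes = [b"tp53", b"brca1", b"egfr", b"kras"]
    prediction = pd.DataFrame(
        [[0.9, 0.1], [0.8, 0.2], [0.2, 0.7], [0.1, 0.3]],
        index=genes, columns=["set_a", "set_b"],
    )
    library = {"set_a": ["tp53", "brca1"], "set_b": ["egfr"]}
    rev_library = {"tp53": ["set_a"], "brca1": ["set_a"], "egfr": ["set_b"]}
    return calculate_set_auc(prediction, library), calculate_gene_auc(prediction, rev_library)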
def benchmark_gmt(gmt_file: str, workdir: str, prediction_file: str, intersect: bool=False, verbose=False):
genes = [x.decode("UTF-8") for x in get_genes(workdir)]
library, rev_library, unique_genes = read_gmt(gmt_file, genes, verbose=verbose)
if intersect:
ugenes = list(set(sum(library.values(), [])))
genes = list(set(ugenes) & set(genes))
prediction_files = os.listdir(workdir+"/features")
lk = list(range(0, len(prediction_files)-1))
lk.append("global")
geneAUC = pd.DataFrame()
setAUC = pd.DataFrame()
if verbose: bar = Bar('AUC calculation', max=len(lk))
for i in lk:
prediction = load_feature(workdir, i).loc[genes,:]
prediction.index = [x.decode("UTF-8") for x in prediction.index]
prediction = prediction.loc[genes,:]
geneAUC[i] = calculate_gene_auc(prediction, rev_library)
setAUC[i] = calculate_set_auc(prediction, library)[0]
if verbose: bar.next()
if verbose: bar.finish()
prediction = pd.read_feather(prediction_file).set_index("index").loc[genes,:]
geneAUC["prismx"] = calculate_gene_auc(prediction, rev_library)
geneAUC.index = unique_genes
setAUC["prismx"] = calculate_set_auc(prediction, library)[0]
return([geneAUC, setAUC])
def benchmarkGMTfast(gmt_file: str, correlationFolder: str, predictionFolder: str, prismxPrediction: str, minLibSize: int=1, intersect: bool=False, verbose=False):
genes = get_genes(correlationFolder)
library, rev_library, unique_genes = read_gmt(gmt_file, genes, verbose=verbose)
if intersect:
ugenes = list(set(sum(library.values(), [])))
genes = list(set(ugenes) & set(genes))
unique_genes = [x.encode('utf-8') for x in unique_genes]
prediction_files = os.listdir(predictionFolder)
geneAUC = pd.DataFrame()
setAUC = pd.DataFrame()
prediction = loadPrediction(predictionFolder, "global").loc[unique_genes,:]
geneAUC["global"] = calculateGeneAUC(prediction, rev_library)
setAUC["global"] = calculateSetAUC(prediction, library)[0]
prediction = pd.read_feather(prismxPrediction).set_index("index").loc[unique_genes,:]
geneAUC["prismx"] = calculateGeneAUC(prediction, rev_library)
geneAUC.index = unique_genes
setAUC["prismx"] = calculateSetAUC(prediction, library)[0]
return([geneAUC, setAUC])
def benchmark_gmt_fast(gmt_file: str, workdir: str, prediction_file: str, intersect: bool=False, verbose=False):
genes = [x.decode("UTF-8") for x in get_genes(workdir)]
library, rev_library, unique_genes = read_gmt(gmt_file, genes, verbose=verbose)
if intersect:
ugenes = list(set(sum(library.values(), [])))
genes = list(set(ugenes) & set(genes))
prediction_files = os.listdir(workdir+"/features")
lk = list(range(0, len(prediction_files)-1))
lk.append("global")
geneAUC = pd.DataFrame()
setAUC = | pd.DataFrame() | pandas.DataFrame |
"""
Author: <NAME>, <NAME>
"""
import math
import pandas as pd
from bloomberg import BBG
from pandas.tseries.offsets import BDay
class BondFutureTracker(object):
futures_ticker_dict = {'US': 'TY',
'DE': 'RX',
'FR': 'OAT',
'IT': 'IK',
'JP': 'JB',
'AU': 'XM',
'GB': 'G ',
'CA': 'CN'}
fx_dict = {'DE': 'EURUSD Curncy',
'GB': 'GBPUSD Curncy',
'CA': 'CADUSD Curncy',
'JP': 'JPYUSD Curncy',
'AU': 'AUDUSD Curncy',
'FR': 'EURUSD Curncy',
'IT': 'EURUSD Curncy',
'US': 'USD Curncy'}
def __init__(self, country, start_date, end_date):
assert country in list(self.futures_ticker_dict.keys()), 'Country not yet supported'
self.bbg = BBG()
self.country = country
self.start_date = self._assert_date_type(start_date)
self.end_date = self._assert_date_type(end_date)
self.generic_tickers = [self.futures_ticker_dict[country] + str(x) + ' Comdty' for x in range(1, 4)]
self.df_generics = self._get_generic_future_series()
self.df_uc = self._get_underlying_contracts()
self.contract_list = self._get_contracts_list()
self.df_fn = self._get_first_notice_dates()
self.df_prices = self._get_all_prices()
self.df_tracker = self._build_tracker()
self.tr_index = self.df_tracker[['er_index']]
self.fh_ticker = 'fibf ' + self.country.lower() + ' 10y'
self.df_roll_info = self.df_tracker[['contract_rolling_out', 'roll_out_date', 'holdings']].dropna(how='any')
self.df_metadata = self._get_metadata()
self.df_tracker = self._get_tracker_melted()
def _get_tracker_melted(self):
df = self.df_tracker[['er_index']].rename({'er_index': self.fh_ticker}, axis=1)
df['time_stamp'] = df.index.to_series()
df = df.melt(id_vars='time_stamp', var_name='fh_ticker', value_name='value')
df = df.dropna()
return df
def _get_generic_future_series(self):
df = self.bbg.fetch_series(securities=self.generic_tickers,
fields='PX_LAST',
startdate=self.start_date,
enddate=self.end_date)
return df
def _get_underlying_contracts(self):
df = self.bbg.fetch_series(securities=self.generic_tickers,
fields='FUT_CUR_GEN_TICKER',
startdate=self.start_date,
enddate=self.end_date)
df = df.reindex(self.df_generics.index).fillna(method='ffill')
return df
def _get_contracts_list(self):
contract_list = self.bbg.fetch_futures_list(generic_ticker=self.futures_ticker_dict[self.country] + '1 Comdty')
return contract_list
def _get_first_notice_dates(self):
df = self.bbg.fetch_contract_parameter(securities=self.contract_list,
field='FUT_NOTICE_FIRST').sort_values('FUT_NOTICE_FIRST')
return df
def _get_all_prices(self):
tickers = self.contract_list + [self.fx_dict[self.country]]
df = self.bbg.fetch_series(securities=tickers,
fields='PX_LAST',
startdate=self.start_date,
enddate=self.end_date)
df = df.reindex(self.df_generics.index).fillna(method='ffill')
return df
def _build_tracker(self):
df_tracker = pd.DataFrame(index=self.df_generics.index,
columns=['contract_rolling_out', 'er_index', 'roll_out_date', 'holdings'])
# set the values for the initial date
dt_ini = self.df_uc.dropna(how='all').index[0]
df_tracker.loc[dt_ini, 'er_index'] = 100
contract_rolling_out = self.df_uc.loc[dt_ini, self.futures_ticker_dict[self.country] + '2 Comdty'] + ' Comdty'
df_tracker.loc[dt_ini, 'contract_rolling_out'] = contract_rolling_out
holdings = df_tracker.loc[dt_ini, 'er_index'] / (self.df_generics.loc[dt_ini, self.futures_ticker_dict[self.country] + '2 Comdty'] * self.df_prices[self.fx_dict[self.country]].loc[dt_ini])
df_tracker.loc[dt_ini, 'holdings'] = holdings
roll_out_date = self.df_fn.loc[df_tracker.loc[dt_ini, 'contract_rolling_out'], 'FUT_NOTICE_FIRST'] - | BDay(1) | pandas.tseries.offsets.BDay |
import time
import numpy as np
import pandas as pd
from sklearn import pipeline
from sklearn.calibration import CalibratedClassifierCV
from sklearn.kernel_approximation import (RBFSampler)
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
seed = 7
np.random.seed(seed)
start_time = time.time()
print('Start running..................')
datadir= './data/'
predictions = "./predictions/"
logs = './logs/'
df_train = | pd.read_csv(datadir + "numerai_training_data.csv") | pandas.read_csv |
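# The script stops right after loading the training data. Given the imports, a
# hedged sketch of how the pieces could be wired together (an illustration, not
# the author's actual model); the feature/target column names are assumptions.
def build_calibrated_rbf_svc(df, feature_prefix='feature', target_col='target'):
    features = [c for c in df.columns if c.startswith(feature_prefix)]
    X = df[features].values
    y = df[target_col].values
    X_fit, X_eval, y_fit, y_eval = train_test_split(X, y, test_size=0.2, random_state=seed)
    svc = pipeline.make_pipeline(RBFSampler(random_state=seed), LinearSVC())
    model = CalibratedClassifierCV(svc, method='sigmoid', cv=3)
    model.fit(X_fit, y_fit)
    print('validation log loss:', log_loss(y_eval, model.predict_proba(X_eval)))
    return model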
from infomemes.utils import media_color_schema
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import json
def read_sim_results(sim, step_filtered=0):
"""
Basic analysis of a simulation.
sim: simulation object or string with path to json file.
step_filtered: int
Produces a boolean array 'filtered' with True for all media that were
active at any step > step_filtered.
"""
if isinstance(sim, str):
with open(sim, 'r') as f:
sim = json.loads(f.read())
# metadata
duration = sim['metadata']['duration']
media_reproduction_rate = sim['metadata']['media_reproduction_rate']
media_deactivation_rate = sim['metadata']['media_deactivation_rate']
covariance_punishment = sim['metadata']['covariance_punishment']
individuals_xy = np.array(sim['metadata']['individuals_xy'])
individual_renewal_rate = sim['metadata']['individual_renewal_rate']
individual_mui = sim['metadata']['individual_mui']
individual_mcr = sim['metadata']['individual_mcr']
max_reward = sim['metadata']['max_reward']
# data
activated = np.array(list(sim['data']['activated'].values()))
deactivated = np.array(list(sim['data']['deactivated'].values()))
active = np.array([t == duration for t in deactivated])
survival_times = np.array(deactivated) - np.array(activated)
position_x = np.array(list(sim['data']['position_x'].values()))
position_y = np.array(list(sim['data']['position_y'].values()))
cov_x = np.array(list(sim['data']['cov_x'].values()))
cov_y = np.array(list(sim['data']['cov_y'].values()))
cov_diagonal = cov_x + cov_y
cov_xy = np.array(list(sim['data']['cov_xy'].values()))
mpr = np.array(list(sim['data']['meme_production_rate'].values()))
filtered = np.array([deact > step_filtered for deact in deactivated])
else:
survival_times = []
position_x = []
position_y = []
cov_diagonal = []
cov_xy = []
mpr = []
for m in sim.all_media:
if m.active:
survival_times.append(sim.current_step - m.activated)
else:
survival_times.append(m.deactivated - m.activated)
position_x.append(m.x)
position_y.append(m.y)
cov_diagonal.append(m.cov[0, 0] + m.cov[1, 1])
cov_xy.append(m.cov[0, 1])
mpr.append(m.meme_production_rate)
results = {
'duration': duration,
'media_reproduction_rate': media_reproduction_rate,
'media_deactivation_rate': media_deactivation_rate,
'max_reward': max_reward,
'covariance_punishment': covariance_punishment,
'individuals_xy': individuals_xy,
'individual_renewal_rate': individual_renewal_rate,
'individual_mui': individual_mui,
'individual_mcr': individual_mcr,
'activated': activated[filtered],
'deactivated': deactivated[filtered],
'active': active[filtered],
'survival_times': survival_times[filtered].astype('int'),
'position_x': position_x[filtered],
'position_y': position_y[filtered],
'cov_x': cov_x[filtered],
'cov_y': cov_y[filtered],
'cov_diagonal': cov_diagonal[filtered],
'cov_xy': cov_xy[filtered],
'meme_production_rate': mpr[filtered],
'step_filtered': filtered
}
return results
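# Minimal usage sketch (illustrative only; the json path below is a placeholder):
def _example_read_sim(path='simulation_0.json'):
    results = read_sim_results(path, step_filtered=100)
    return results['survival_times'].mean()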
def all_sims_summary(sims_list, step_filtered=0):
"""
Parameters
----------
sims_list: list
List of json files with simulation results
step_filtered: int
Produces a boolean array 'filtered' with True for all media that were
active at any step > step_filtered
Returns
-------
df_sims: Pandas DataFrame with results by simulation
df_media: Pandas DataFrame with results by media
df_clusters: Pandas DataFrame with results by cluster
"""
# Organize Simulations DataFrame
df_sims = pd.DataFrame({
'covariance_punishment': pd.Series([], dtype='float'),
'media_reproduction_rate': pd.Series([], dtype='float'),
'media_deactivation_rate': pd.Series([], dtype='float'),
'individual_mui': pd.Series([], dtype='float'),
'individual_renewal_rate': | pd.Series([], dtype='float') | pandas.Series |
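# The clustering step promised by the docstring (df_clusters) is not shown
# above. Given the DBSCAN import at the top of this file, a hedged sketch of
# how media positions could be grouped into clusters (illustrative only, not
# necessarily the author's implementation):
def cluster_media_positions(results, eps=0.1, min_samples=5):
    X = np.column_stack([results['position_x'], results['position_y']])
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(X)
    return pd.DataFrame({
        'position_x': results['position_x'],
        'position_y': results['position_y'],
        'cluster': labels,
    })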
"""Step 1: Solving the problem in a deterministic manner."""
import cvxpy as cp
import fledge
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import shutil
def main():
# Settings.
scenario_name = 'course_project_step_1'
results_path = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'results', 'step_1')
run_primal = True
run_dual = True
run_kkt = True
# Clear / instantiate results directory.
try:
if os.path.isdir(results_path):
shutil.rmtree(results_path)
os.mkdir(results_path)
except PermissionError:
pass
# STEP 1.0: SETUP MODELS.
# Read scenario definition into FLEDGE.
# - Data directory from this repository is first added as additional data path.
fledge.config.config['paths']['additional_data'].append(
os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'data')
)
fledge.data_interface.recreate_database()
# Obtain data & models.
# Flexible loads.
der_model_set = fledge.der_models.DERModelSet(scenario_name)
# Thermal grid.
thermal_grid_model = fledge.thermal_grid_models.ThermalGridModel(scenario_name)
thermal_grid_model.cooling_plant_efficiency = 10.0 # Change model parameter to incentivize use of thermal grid.
thermal_power_flow_solution_reference = fledge.thermal_grid_models.ThermalPowerFlowSolution(thermal_grid_model)
linear_thermal_grid_model = (
fledge.thermal_grid_models.LinearThermalGridModel(thermal_grid_model, thermal_power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_head_vector_minimum = 1.5 * thermal_power_flow_solution_reference.node_head_vector
branch_flow_vector_maximum = 10.0 * thermal_power_flow_solution_reference.branch_flow_vector
# Electric grid.
electric_grid_model = fledge.electric_grid_models.ElectricGridModelDefault(scenario_name)
power_flow_solution_reference = fledge.electric_grid_models.PowerFlowSolutionFixedPoint(electric_grid_model)
linear_electric_grid_model = (
fledge.electric_grid_models.LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_voltage_magnitude_vector_minimum = 0.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
node_voltage_magnitude_vector_maximum = 1.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
branch_power_magnitude_vector_maximum = 10.0 * electric_grid_model.branch_power_vector_magnitude_reference
# Energy price.
price_data = fledge.data_interface.PriceData(scenario_name)
# Obtain time step index shorthands.
scenario_data = fledge.data_interface.ScenarioData(scenario_name)
timesteps = scenario_data.timesteps
timestep_interval_hours = (timesteps[1] - timesteps[0]) / pd.Timedelta('1h')
# Invert sign of losses.
# - Power values of loads are negative by convention. Hence, sign of losses should be negative for power balance.
# Thermal grid.
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= -1.0
linear_thermal_grid_model.thermal_power_flow_solution.pump_power *= -1.0
# Electric grid.
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= -1.0
linear_electric_grid_model.power_flow_solution.loss *= -1.0
# Apply base power / voltage scaling.
# - Scale values to avoid numerical issues.
base_power = 1e6 # in W, i.e. 1 MW.
base_voltage = 1e3 # in V, i.e. 1 kV.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
der_model.mapping_active_power_by_output *= 1 / base_power
der_model.mapping_reactive_power_by_output *= 1 / base_power
der_model.mapping_thermal_power_by_output *= 1 / base_power
# Thermal grid.
linear_thermal_grid_model.sensitivity_node_head_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= 1
# Electric grid.
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active *= base_power / base_voltage
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive *= base_power / base_voltage
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= 1
linear_electric_grid_model.power_flow_solution.der_power_vector *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_1 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_2 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.loss *= 1 / base_power
linear_electric_grid_model.power_flow_solution.node_voltage_vector *= 1 / base_voltage
# Limits.
node_voltage_magnitude_vector_minimum /= base_voltage
node_voltage_magnitude_vector_maximum /= base_voltage
branch_power_magnitude_vector_maximum /= base_power
# Energy price.
# - Conversion of price values from S$/kWh to S$/p.u. for convenience. Currency S$ is SGD.
# - Power values of loads are negative by convention. Hence, sign of price values is inverted here.
price_data.price_timeseries *= -1.0 * base_power / 1e3 * timestep_interval_hours
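# Worked example of the conversion above (assuming 0.5 h time steps): a price
# of 0.10 S$/kWh is multiplied by -1.0 * 1e6 / 1e3 * 0.5 = -500, i.e. it
# becomes -50 S$ per per-unit power over one time step.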
# STEP 1.1: SOLVE PRIMAL PROBLEM.
if run_primal or run_kkt: # Primal constraints are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
primal_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space vectors.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
primal_problem.state_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.control_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.output_vector = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
primal_problem.state_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].states)
))
)
primal_problem.control_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].controls)
))
)
primal_problem.output_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
# Flexible loads: Power vectors.
primal_problem.der_thermal_power_vector = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
primal_problem.der_active_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
primal_problem.der_reactive_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Source variables.
primal_problem.source_thermal_power = cp.Variable((len(timesteps), 1))
primal_problem.source_active_power = cp.Variable((len(timesteps), 1))
primal_problem.source_reactive_power = cp.Variable((len(timesteps), 1))
# Define constraints.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
# Initial state.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][0, :]
==
der_model.state_vector_initial.values
)
# State equation.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][1:, :]
==
cp.transpose(
der_model.state_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name][:-1, :])
+ der_model.control_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name][:-1, :])
+ der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.iloc[:-1, :].values)
)
)
# Output equation.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
==
cp.transpose(
der_model.state_output_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name])
+ der_model.control_output_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name])
+ der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
)
# Output limits.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
>=
der_model.output_minimum_timeseries.values
)
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
<=
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
)
# Power mapping.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
primal_problem.constraints.append(
primal_problem.der_active_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_active_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_reactive_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_reactive_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_thermal_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_thermal_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
# Thermal grid.
# Node head limit.
primal_problem.constraints.append(
np.array([node_head_vector_minimum.ravel()])
<=
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Branch flow limit.
primal_problem.constraints.append(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
<=
np.array([branch_flow_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
thermal_grid_model.cooling_plant_efficiency ** -1
* (
primal_problem.source_thermal_power
+ cp.sum(-1.0 * (
primal_problem.der_thermal_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
)
==
cp.transpose(
linear_thermal_grid_model.sensitivity_pump_power_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Electric grid.
# Voltage limits.
primal_problem.constraints.append(
np.array([node_voltage_magnitude_vector_minimum.ravel()])
<=
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
# Branch flow limits.
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
primal_problem.source_active_power
+ cp.sum(-1.0 * (
primal_problem.der_active_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.real(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
primal_problem.source_reactive_power
+ cp.sum(-1.0 * (
primal_problem.der_reactive_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.imag(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
# Define objective.
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_thermal_power
* thermal_grid_model.cooling_plant_efficiency ** -1
)
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_active_power
)
if run_primal:
# Solve problem.
fledge.utils.log_time('primal solution')
primal_problem.solve()
fledge.utils.log_time('primal solution')
# Obtain results.
# Flexible loads.
primal_state_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.states)
primal_control_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.controls)
primal_output_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
for der_name in der_model_set.flexible_der_names:
primal_state_vector.loc[:, (der_name, slice(None))] = (
primal_problem.state_vector[der_name].value
)
primal_control_vector.loc[:, (der_name, slice(None))] = (
primal_problem.control_vector[der_name].value
)
primal_output_vector.loc[:, (der_name, slice(None))] = (
primal_problem.output_vector[der_name].value
)
# Thermal grid.
primal_der_thermal_power_vector = (
pd.DataFrame(
primal_problem.der_thermal_power_vector.value,
columns=linear_thermal_grid_model.thermal_grid_model.ders,
index=timesteps
)
)
primal_source_thermal_power = (
pd.DataFrame(
primal_problem.source_thermal_power.value,
columns=['total'],
index=timesteps
)
)
# Electric grid.
primal_der_active_power_vector = (
pd.DataFrame(
primal_problem.der_active_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_der_reactive_power_vector = (
pd.DataFrame(
primal_problem.der_reactive_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_source_active_power = (
pd.DataFrame(
primal_problem.source_active_power.value,
columns=['total'],
index=timesteps
)
)
primal_source_reactive_power = (
pd.DataFrame(
primal_problem.source_reactive_power.value,
columns=['total'],
index=timesteps
)
)
# Additional results.
primal_node_head_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.nodes
)
)
primal_branch_flow_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.branches
)
)
primal_node_voltage_vector = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
primal_branch_power_vector_1 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
primal_branch_power_vector_2 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
primal_node_head_vector_per_unit = (
primal_node_head_vector
/ thermal_grid_model.node_head_vector_reference
)
primal_branch_flow_vector_per_unit = (
primal_branch_flow_vector
/ thermal_grid_model.branch_flow_vector_reference
)
primal_node_voltage_vector_per_unit = (
primal_node_voltage_vector * base_voltage
/ np.abs(electric_grid_model.node_voltage_vector_reference)
)
primal_branch_power_vector_1_per_unit = (
primal_branch_power_vector_1 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
primal_branch_power_vector_2_per_unit = (
primal_branch_power_vector_2 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
# Store results.
primal_state_vector.to_csv(os.path.join(results_path, 'primal_state_vector.csv'))
primal_control_vector.to_csv(os.path.join(results_path, 'primal_control_vector.csv'))
primal_output_vector.to_csv(os.path.join(results_path, 'primal_output_vector.csv'))
primal_der_thermal_power_vector.to_csv(os.path.join(results_path, 'primal_der_thermal_power_vector.csv'))
primal_source_thermal_power.to_csv(os.path.join(results_path, 'primal_source_thermal_power.csv'))
primal_der_active_power_vector.to_csv(os.path.join(results_path, 'primal_der_active_power_vector.csv'))
primal_der_reactive_power_vector.to_csv(os.path.join(results_path, 'primal_der_reactive_power_vector.csv'))
primal_source_active_power.to_csv(os.path.join(results_path, 'primal_source_active_power.csv'))
primal_source_reactive_power.to_csv(os.path.join(results_path, 'primal_source_reactive_power.csv'))
primal_node_head_vector.to_csv(os.path.join(results_path, 'primal_node_head_vector.csv'))
primal_branch_flow_vector.to_csv(os.path.join(results_path, 'primal_branch_flow_vector.csv'))
primal_node_voltage_vector.to_csv(os.path.join(results_path, 'primal_node_voltage_vector.csv'))
primal_branch_power_vector_1.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1.csv'))
primal_branch_power_vector_2.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2.csv'))
primal_node_head_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_head_vector_per_unit.csv'))
primal_branch_flow_vector_per_unit.to_csv(os.path.join(results_path, 'primal_branch_flow_vector_per_unit.csv'))
primal_node_voltage_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_voltage_vector_per_unit.csv'))
primal_branch_power_vector_1_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1_per_unit.csv'))
primal_branch_power_vector_2_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2_per_unit.csv'))
# Obtain variable count / dimensions.
primal_variable_count = (
sum(np.multiply(*primal_problem.state_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.control_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.output_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ np.multiply(*primal_problem.der_thermal_power_vector.shape)
+ np.multiply(*primal_problem.der_active_power_vector.shape)
+ np.multiply(*primal_problem.der_reactive_power_vector.shape)
+ np.multiply(*primal_problem.source_thermal_power.shape)
+ np.multiply(*primal_problem.source_active_power.shape)
+ np.multiply(*primal_problem.source_reactive_power.shape)
)
print(f"primal_variable_count = {primal_variable_count}")
# Print objective.
primal_objective = pd.Series(primal_problem.objective.value, index=['primal_objective'])
primal_objective.to_csv(os.path.join(results_path, 'primal_objective.csv'))
print(f"primal_objective = {primal_objective.values}")
# STEP 1.2: SOLVE DUAL PROBLEM.
if run_dual or run_kkt: # Primal constraints are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
dual_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space equations.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
dual_problem.lambda_initial_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_output_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_minimum = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_maximum = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
dual_problem.lambda_initial_state_equation[der_name] = (
cp.Variable((
1,
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_state_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps[:-1]),
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_output_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
dual_problem.mu_output_minimum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
dual_problem.mu_output_maximum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
# Flexible loads: Power equations.
dual_problem.lambda_thermal_power_equation = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
dual_problem.lambda_active_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
dual_problem.lambda_reactive_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Thermal grid.
        dual_problem.mu_node_head_minimum = (
cp.Variable((len(timesteps), len(thermal_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_flow_maximum = (
cp.Variable((len(timesteps), len(thermal_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_pump_power_equation = (
cp.Variable((len(timesteps), 1))
)
# Electric grid.
dual_problem.mu_node_voltage_magnitude_minimum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_node_voltage_magnitude_maximum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_1 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_2 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_loss_active_equation = cp.Variable((len(timesteps), 1))
dual_problem.lambda_loss_reactive_equation = cp.Variable((len(timesteps), 1))
# Define constraints.
for der_model in der_model_set.flexible_der_models.values():
# Differential with respect to state vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_initial_state_equation[der_model.der_name]
- (
dual_problem.lambda_state_equation[der_model.der_name][:1, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][0:-1, :]
- (
dual_problem.lambda_state_equation[der_model.der_name][1:, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][1:-1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][-1:, :]
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.state_output_matrix.values
)
)
)
# Differential with respect to control vector.
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_state_equation[der_model.der_name]
@ der_model.control_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:-1, :]
@ der_model.control_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.control_output_matrix.values
)
)
)
# Differential with respect to output vector.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_output_equation[der_model.der_name]
- dual_problem.mu_output_minimum[der_model.der_name]
+ dual_problem.mu_output_maximum[der_model.der_name]
- (
dual_problem.lambda_thermal_power_equation[:, [der_index]]
@ der_model.mapping_thermal_power_by_output.values
)
- (
dual_problem.lambda_active_power_equation[:, [der_index]]
@ der_model.mapping_active_power_by_output.values
)
- (
dual_problem.lambda_reactive_power_equation[:, [der_index]]
@ der_model.mapping_reactive_power_by_output.values
)
)
)
# Differential with respect to thermal power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_thermal_power_equation
- (
                    dual_problem.mu_node_head_minimum
@ linear_thermal_grid_model.sensitivity_node_head_by_der_power
)
+ (
dual_problem.mu_branch_flow_maximum
@ linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
)
- (
dual_problem.lambda_pump_power_equation
@ (
thermal_grid_model.cooling_plant_efficiency ** -1
* np.ones(linear_thermal_grid_model.sensitivity_pump_power_by_der_power.shape)
+ linear_thermal_grid_model.sensitivity_pump_power_by_der_power
)
)
)
)
# Differential with respect to active power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_active_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
)
- (
dual_problem.lambda_loss_active_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_active_by_der_power_active.shape)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
)
)
- (
dual_problem.lambda_loss_reactive_equation
@ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
)
)
)
# Differential with respect to reactive power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_reactive_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
)
- (
dual_problem.lambda_loss_active_equation
@ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
)
- (
dual_problem.lambda_loss_reactive_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive.shape)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
)
)
)
)
# Differential with respect to thermal source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_pump_power_equation
)
)
# Differential with respect to active source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_loss_active_equation
)
)
        # Differential with respect to reactive source power.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_loss_reactive_equation
)
)
if run_dual:
# Define objective.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_initial_state_equation[der_model.der_name],
np.array([der_model.state_vector_initial.values])
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_state_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values[:-1, :])
)
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_output_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_output_minimum[der_model.der_name],
der_model.output_minimum_timeseries.values
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.mu_output_maximum[der_model.der_name],
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
))
)
# Thermal grid.
dual_problem.objective += (
cp.sum(cp.multiply(
                dual_problem.mu_node_head_minimum,
(
np.array([node_head_vector_minimum])
# - node_head_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_node_head_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_flow_maximum,
(
# - branch_flow_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
# @ der_thermal_power_vector_reference
# )
- 1.0
* np.array([branch_flow_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_pump_power_equation,
(
0.0
# - pump_power_reference
# + (
# linear_thermal_grid_model.sensitivity_pump_power_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
# Electric grid.
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_minimum,
(
np.array([node_voltage_magnitude_vector_minimum])
- np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_maximum,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([node_voltage_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_1,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_2,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_active_equation,
(
-1.0
* np.array([np.real(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_reactive_equation,
(
-1.0
* np.array([np.imag(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
# Invert sign of objective for maximisation.
dual_problem.objective *= -1.0
# Solve problem.
fledge.utils.log_time('dual solution')
dual_problem.solve()
fledge.utils.log_time('dual solution')
# Obtain results.
# Flexible loads.
dual_lambda_initial_state_equation = | pd.DataFrame(0.0, index=der_model_set.timesteps[:1], columns=der_model_set.states) | pandas.DataFrame |
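# A minimal self-contained sketch of the same primal/dual pattern: for a small
# LP, CVXPY exposes the multipliers directly via `constraint.dual_value`, which
# can be used to cross-check a hand-built dual like the one above. The names
# below are local to this sketch and do not touch any fledge objects.
import cvxpy as cp

x = cp.Variable(2, nonneg=True)
capacity = x[0] + 2.0 * x[1] <= 10.0   # inequality constraint -> multiplier mu
balance = x[0] + x[1] == 4.0           # equality constraint -> multiplier lambda
problem = cp.Problem(cp.Minimize(3.0 * x[0] + 2.0 * x[1]), [capacity, balance])
problem.solve()
print(x.value)                                   # primal solution
print(capacity.dual_value, balance.dual_value)   # dual variables (shadow prices)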
import torch
import pathlib
import pandas as pd
import pytorch_lightning as pl
from datetime import datetime
from collections import OrderedDict
class CSVLogger(pl.Callback):
"""Custom metric logger and model checkpoint."""
def __init__(self, output_path=None):
super(CSVLogger, self).__init__()
self._epoch = None
if output_path is None:
self.logger_path = None
else:
self.logger_path = pathlib.Path(output_path)
self.logger_path.mkdir(parents=True, exist_ok=True)
def metrics(self, interval):
if interval == 'epoch':
return self.epoch_metrics
elif interval in ['step', 'batch']:
return self.batch_metrics
@property
def batch_metrics(self):
metrics_path = self.logger_path / 'metrics_batch.csv'
return pd.read_csv(metrics_path)
@property
def epoch_metrics(self):
metrics_path = self.logger_path / 'metrics_epoch.csv'
return pd.read_csv(metrics_path)
def _extract_metrics(self, trainer, interval):
metrics = trainer.callback_metrics
metric_keys = list(metrics.keys())
data_dict = OrderedDict()
        if interval == 'epoch':
            if 'epoch' in metric_keys:
                metric_keys.remove('epoch')
                data_dict['epoch'] = metrics['epoch']
            else:
                # fall back when 'epoch' is not among the callback metrics
                data_dict['epoch'] = trainer.current_epoch
            data_dict['time'] = str(datetime.now())
elif interval in ['step', 'batch']:
remove_list = ['train', 'val', 'epoch']
for m in metrics.keys():
if any(sub in m for sub in remove_list):
metric_keys.remove(m)
data_dict[interval] = trainer.global_step
for k in metric_keys:
if isinstance(metrics[k], dict):
for j in metrics[k].keys():
data_dict[j] = metrics[k][j]
else:
data_dict[k] = metrics[k]
# cleanup
for k in data_dict.keys():
try:
data_dict[k] = float(data_dict[k].cpu())
except Exception:
pass
return data_dict
def _log_csv(self, trainer, metrics_path, interval):
data_dict = self._extract_metrics(trainer, interval)
new_metrics = pd.DataFrame.from_records([data_dict], index=interval)
if metrics_path.exists():
config = dict(header=False, mode='a')
old_metrics = self.metrics(interval).set_index(interval)
if not new_metrics.columns.equals(old_metrics.columns):
new_metrics = pd.concat([old_metrics, new_metrics])
config = dict(header=True, mode='w')
else:
config = dict(header=True, mode='w')
new_metrics.to_csv(metrics_path, **config)
def on_init_start(self, trainer):
"""Called when the trainer initialization begins, model has not yet been set."""
pass
def on_init_end(self, trainer):
"""Called when the trainer initialization ends, model has not yet been set."""
if self.logger_path is None:
            checkpoint_path = pathlib.Path(trainer.checkpoint_callback.dirpath)  # dirpath may be a plain str
# checkpoint_path = trainer.logger.log_dir
self.logger_path = checkpoint_path.parent / 'logging'
self.logger_path.mkdir(parents=True, exist_ok=True)
def on_batch_end(self, trainer, pl_module):
"""Called when the training batch ends."""
if trainer.global_step > 1:
metrics_path = self.logger_path / 'metrics_batch.csv'
self._log_csv(trainer, metrics_path, interval='batch')
def on_epoch_end(self, trainer, pl_module):
"""Called when the epoch ends."""
metrics_path = self.logger_path / 'metrics_epoch.csv'
self._log_csv(trainer, metrics_path, interval='epoch')
def on_sanity_check_start(self, trainer, pl_module):
"""Called when the validation sanity check starts."""
pass
def on_sanity_check_end(self, trainer, pl_module):
"""Called when the validation sanity check ends."""
pass
def on_epoch_start(self, trainer, pl_module):
"""Called when the epoch begins."""
pass
def on_batch_start(self, trainer, pl_module):
"""Called when the training batch begins."""
pass
def on_validation_batch_start(self, trainer, pl_module):
"""Called when the validation batch begins."""
pass
def on_validation_batch_end(self, trainer, pl_module):
"""Called when the validation batch ends."""
pass
def on_test_batch_start(self, trainer, pl_module):
"""Called when the test batch begins."""
pass
def on_test_batch_end(self, trainer, pl_module):
"""Called when the test batch ends."""
pass
def on_train_start(self, trainer, pl_module):
"""Called when the train begins."""
pass
def on_train_end(self, trainer, pl_module):
"""Called when the train ends."""
pass
def on_validation_start(self, trainer, pl_module):
"""Called when the validation loop begins."""
pass
def on_validation_end(self, trainer, pl_module):
"""Called when the validation loop ends."""
pass
def on_test_start(self, trainer, pl_module):
"""Called when the test begins."""
pass
def on_test_end(self, trainer, pl_module):
"""Called when the test ends."""
pass
class PandasLogger(pl.Callback):
"""PandasLogger metric logger and model checkpoint."""
def __init__(self, save_path=None):
super(PandasLogger, self).__init__()
self.batch_metrics = pd.DataFrame()
self.epoch_metrics = pd.DataFrame()
self._epoch = 0
def _extract_metrics(self, trainer, interval):
metrics = trainer.callback_metrics
metric_keys = list(metrics.keys())
data_dict = OrderedDict()
# setup required metrics depending on interval
if interval == 'epoch':
if interval in metric_keys:
metric_keys.remove('epoch')
data_dict['epoch'] = metrics['epoch']
else:
data_dict['epoch'] = self._epoch
data_dict['time'] = str(datetime.now())
self._epoch += 1
elif interval in ['step', 'batch']:
remove_list = ['train', 'val', 'epoch']
for m in metrics.keys():
if any(sub in m for sub in remove_list):
metric_keys.remove(m)
data_dict[interval] = trainer.global_step
# populate ordered dictionary
for k in metric_keys:
if isinstance(metrics[k], dict):
continue
else:
data_dict[k] = float(metrics[k])
# dataframe with a single row (one interval)
metrics = | pd.DataFrame.from_records([data_dict], index=interval) | pandas.DataFrame.from_records |
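# A minimal sketch of the CSV persistence pattern the callbacks above rely on:
# one-row DataFrames are appended to a metrics file, with a header only on the
# first write. The file name below is arbitrary.
import pandas as pd
from collections import OrderedDict

first = pd.DataFrame.from_records([OrderedDict(epoch=0, train_loss=0.42)], index='epoch')
first.to_csv('metrics_epoch.csv', mode='w', header=True)     # first write
update = pd.DataFrame.from_records([OrderedDict(epoch=1, train_loss=0.31)], index='epoch')
update.to_csv('metrics_epoch.csv', mode='a', header=False)   # subsequent appends
print(pd.read_csv('metrics_epoch.csv'))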
"""
Holt-Winters from statsmodels
"""
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from hyperopt import hp, fmin, tpe, Trials
# local module
from foresee.models import models_util
from foresee.models import param_optimizer
from foresee.scripts import fitter
def holt_winters_fit_forecast(ts, fcst_len, params=None, args=None):
"""[summary]
Parameters
----------
ts : [type]
[description]
fcst_len : [type]
[description]
freq : [type]
[description]
params : [type]
[description]
Returns
-------
[type]
[description]
"""
freq = args['FREQ']
try:
if params is None:
hw_model = ExponentialSmoothing(
endog = ts,
seasonal_periods = freq
).fit(optimized=True)
else:
hw_model = ExponentialSmoothing(
endog = ts,
trend = params['trend'],
seasonal = params['seasonal'],
seasonal_periods = freq
).fit(optimized=True)
hw_fittedvalues = hw_model.fittedvalues
hw_forecast = hw_model.predict(
start = len(ts),
end = len(ts) + fcst_len - 1
)
err = None
except Exception as e:
hw_fittedvalues = None
hw_forecast = None
err = str(e)
return hw_fittedvalues, hw_forecast, err
def holt_winters_tune(ts_train, ts_test, params=None, args=None):
model = 'holt_winters'
try:
options = ['add', 'mul']
space = {
'trend': hp.choice('trend', options),
'seasonal': hp.choice('seasonal', options)
}
f = fitter.model_loss(model)
f_obj = lambda params: f.fit_loss(ts_train, ts_test, params, args)
trials = Trials()
best_raw = fmin(f_obj, space, algo=tpe.suggest, trials=trials, max_evals=10, show_progressbar=False, verbose=False)
best = {'trend': options[best_raw['trend']], 'seasonal': options[best_raw['seasonal']]}
err = None
except Exception as e:
err = str(e)
best = None
return best, err
def holt_winters_main(data_dict, param_config, model_params):
"""[summary]
Parameters
----------
data_dict : [type]
[description]
freq : [type]
[description]
fcst_len : [type]
[description]
model_params : [type]
[description]
run_type : [type]
[description]
tune : [type]
[description]
epsilon : [type]
[description]
Returns
-------
[type]
[description]
"""
model = 'holt_winters'
fcst_len = param_config['FORECAST_LEN']
output_format = param_config['OUTPUT_FORMAT']
tune = param_config['TUNE']
epsilon = param_config['EPSILON']
freq = param_config['FREQ']
complete_fact = data_dict['complete_fact']
# dataframe to hold fitted values
fitted_fact = pd.DataFrame()
fitted_fact['y'] = complete_fact['y']
fitted_fact['data_split'] = complete_fact['data_split']
# dataframe to hold forecast values
forecast_fact = | pd.DataFrame() | pandas.DataFrame |
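# A minimal sketch of the fit/forecast call that holt_winters_fit_forecast()
# wraps, on a synthetic monthly series with explicit additive trend/seasonality.
import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import ExponentialSmoothing

t = np.arange(120)
ts = pd.Series(10 + 0.05 * t + 2 * np.sin(2 * np.pi * t / 12) + np.random.normal(0, 0.3, 120))
model = ExponentialSmoothing(ts, trend='add', seasonal='add', seasonal_periods=12).fit(optimized=True)
forecast = model.predict(start=len(ts), end=len(ts) + 11)   # 12-step-ahead forecast
print(forecast.round(2))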
import pandas as pd
import numpy as np
import networkx as nx
import scipy.sparse as sparse
from base import BaseFeature
class PageRank(BaseFeature):
def import_columns(self):
return ["engaged_user_id", "engaging_user_id", "engagee_follows_engager"]
def make_features(self, df_train_input, df_test_input):
self._make_graph(df_train_input, df_test_input)
pagerank = nx.pagerank_scipy(self.G)
df_train_features = pd.DataFrame()
df_test_features = | pd.DataFrame() | pandas.DataFrame |
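# A minimal sketch of the underlying computation: PageRank over a toy directed
# engagement graph (user names below are made up).
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('user_a', 'user_b'), ('user_c', 'user_b'), ('user_b', 'user_a')])
scores = nx.pagerank(G, alpha=0.85)          # dict: node -> PageRank score
print(sorted(scores.items(), key=lambda kv: -kv[1]))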
from unittest import TestCase
import pandas as pd
import numpy as np
import pandas_validator as pv
from pandas_validator.core.exceptions import ValidationError
class BaseSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.BaseSeriesValidator(series_type=np.int64)
def test_is_valid_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_should_return_true_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertIsNone(self.validator.validate(series))
def test_should_return_false_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertRaises(ValidationError, self.validator.validate, series)
class IntegerSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.IntegerSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0, 1, 2])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-1, 0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0, 1, 2, 3])
self.assertFalse(self.validator.is_valid(series))
class FloatSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.FloatSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0., 1., 2.])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_integer_series(self):
series = pd.Series([0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-0.1, 0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0., 1., 2.1])
self.assertFalse(self.validator.is_valid(series))
class CharSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.CharSeriesValidator(min_length=0, max_length=4)
def test_is_valid(self):
series = pd.Series(['', 'ab', 'abcd'])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_integer_series(self):
series = | pd.Series([0, 1, 2]) | pandas.Series |
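# A minimal usage sketch outside the unittest harness, mirroring the constructor
# arguments exercised in the tests above.
import pandas as pd
import pandas_validator as pv

ages = pd.Series([23, 35, 41])
validator = pv.IntegerSeriesValidator(min_value=0, max_value=120)
print(validator.is_valid(ages))   # True for an in-range int64 series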
#<NAME> Data Mining Project B00721425
'''
from surprise import SVD
from surprise import Dataset
from surprise.model_selection import GridSearchCV
from surprise import Reader
import pandas as pd #https://pandas.pydata.org/docs/reference/index.html all the methods I used I searched from here
from surprise import NMF
from surprise import SVDpp
#pulling the data from text file and making it a dataframe
data = pd.read_csv ('train.txt', header = None, sep=' ')
data.columns = ['User_ID', 'Movie_ID', 'Ratings']
reader = Reader(rating_scale=(1,5))
dataset = Dataset.load_from_df(data[['User_ID', 'Movie_ID', 'Ratings']], reader)
#https://surprise.readthedocs.io/en/stable/matrix_factorization.html I found the documentation for parameters for above methods here.
#from https://surprise.readthedocs.io/en/stable/getting_started.html I found how to do GridSearchCV
#With the code below I tested the SVD, NMF and SVDpp functions to see which one gives
#the best RMSE value; you can comment and uncomment parts to see for yourself.
#I used this code to find the best parameters to run SVD with,
#so that the lowest possible RMSE helps us predict ratings more accurately.
#I changed the parameters a lot; this is not the only setup I tried.
#I ran the test at least 5 times to make sure I chose the right parameters.
'''
'''
name = 'SVD: '
#n_epochs: The number of iteration of the SGD procedure.
#lr_all: The learning rate for all parameters.
#reg_all: The regularization term for all parameters.
param_grid_SVD = {'n_epochs': [20], 'lr_all': [0.005],
'reg_all': [0.02,0.1,0.05,0.2]}
#the code below splits the data into 5 random, equally sized folds and uses 1 of them as the test set
#and the others as the training set, to see which n_epochs, lr_all and reg_all values are the best
#choice for a low RMSE.
gs = GridSearchCV(SVD, param_grid_SVD, measures=['rmse'], cv=5)
'''
'''
name = 'NMF: '
param_grid_NMF = {'n_epochs': [20]}
gs = GridSearchCV(NMF, param_grid_NMF, measures=['rmse'], cv=5)
'''
'''
name = 'SVDpp: '
#n_epochs: The number of iteration of the SGD procedure.
#lr_all: The learning rate for all parameters.
#reg_all: The regularization term for all parameters.
param_grid_SVDpp = {'n_epochs': [20], 'lr_all': [0.005,0.007],
'reg_all': [0.1,0.05]}
gs = GridSearchCV(SVDpp, param_grid_SVDpp, measures=['rmse'], cv=5)
'''
'''
gs.fit(dataset)
# best RMSE score
print(name + str(gs.best_score['rmse']))
# combination of parameters that gave the best RMSE score
print(gs.best_params['rmse'])
'''
'''
#I wanted to play with the test and train sizes between SVDpp and SVD to find which one gives a better RMSE
import pandas as pd
from surprise import SVD
from surprise import SVDpp
from surprise import accuracy
from surprise.model_selection import train_test_split
from surprise import Reader
from surprise import Dataset
data = pd.read_csv ('train.txt', header = None, sep=' ')
data.columns = ['User_ID', 'Movie_ID', 'Ratings']
reader = Reader(rating_scale=(1,5))
dataset = Dataset.load_from_df(data[['User_ID', 'Movie_ID', 'Ratings']], reader)
trainset, testset = train_test_split(dataset, test_size = .25) #https://surprise.readthedocs.io/en/stable/getting_started.html here is the doc I found for this part I figured out Train-test split method section
model = SVDpp(n_epochs= 20, lr_all= 0.007, reg_all= 0.1)
model.fit(trainset)
model_SVD = SVD(n_epochs = 20, lr_all = 0.005, reg_all = 0.1)
model_SVD.fit(trainset)
predictions = model.test(testset)
print('SVDpp')
accuracy.rmse(predictions)
predictions = model_SVD.test(testset)
print('SVD')
accuracy.rmse(predictions)
'''
#I found all the functions that I used from surprise library is here => https://surprise.readthedocs.io/en/v1.0.1/index.html
from surprise import SVDpp #After doing lots of testing on SVD, SVDpp and NMF,
#I concluded that SVDpp gives the best RMSE value with test and train splits.
#That's why I chose SVDpp for this project to predict values.
#you can check SVDpp algorithm from https://surprise.readthedocs.io/en/stable/matrix_factorization.html
from surprise import Dataset #to create dataset
import pandas as pd #pandas for dataframe #https://pandas.pydata.org/docs/reference/index.html all the methods I used I searched from here
from surprise import accuracy # to calculate rmse value
from surprise import Reader #Reader object for Surprise to be able to parse the file or the dataframe.
data = pd.read_csv('train.txt', header = None, sep=' ')#reading the train.txt file and converting it to a dataframe
data.columns = ['User_ID', 'Movie_ID', 'Ratings'] #since train.txt does not have column names, we assign them to the dataframe
reader = Reader(rating_scale=(1,5))# A reader is still needed but only the rating_scale param is required.
dataset = Dataset.load_from_df(data[['User_ID', 'Movie_ID', 'Ratings']], reader)#Creating dataset https://surprise.readthedocs.io/en/stable/dataset.html
#I made this matrix to use as a condition in the output phase. If the user already rated the movie, we don't want to predict it again; we only need to pull the
#rating that the user gave for that specific movie. That's why this matrix helped me pull the data I need from train.txt
user_item_matrix = data.pivot_table(index= 'User_ID', columns= 'Movie_ID', values = "Ratings")
print('Data Frame Created')
#finding the biggest user_ID to be able to reach the last index of user_id list
user_id_list = data.User_ID.unique()
user_id_list.sort()
#finding the bigget movie_id to be able to reach the last index of movie_id
movie_id_list = data.Movie_ID.unique()
movie_id_list.sort()
model = SVDpp(n_epochs = 20, lr_all = 0.007, reg_all = 0.1)#selecting the model with the specified parameters
print('Fitting the model to dataset')
model.fit(dataset.build_full_trainset())#fitting dataset to model
print('Creating the submit_sample.txt')
file = open("submit_sample.txt","w")#creating submit_sample.txt as output
for user_id in range(user_id_list[-1]):#user_id_list[-1] is equal to 943 and user id loops from 0 to 942 in this case
for movie_id in range(1,movie_id_list[-1]+1):#for loop goes from (1 to 1682) in this case
        #I used try/except here because the user_item_matrix has some missing columns, which would raise an error when we access
        #(user_item_matrix.iloc[user_id][movie_id]), since there is no data for the missing columns.
        #Since my model can predict every column even though it is missing, I wrote the except: part to write the predictions of
        #those missing columns straight to the output file. This way I did not need to create the actual columns for the missing data.
        #Instead I output the prediction from model.predict to the output file.
try:
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iloc.html for iloc[][] usage.
if | pd.isnull(user_item_matrix.iloc[user_id][movie_id]) | pandas.isnull |
import time
import datetime as dt
import pandas as pd
import numpy as np
import logging
import coloredlogs
import pytz
from typing import List, Dict, Tuple, Any
from polygon import RESTClient
from trader.common.helpers import dateify
class PolygonFinancials():
def __init__(self, financials: pd.DataFrame, dividends: pd.DataFrame, splits: pd.DataFrame):
self.financials = financials
self.splits = splits
self.dividends = dividends
class PolygonListener():
def __init__(self,
key: str = '<KEY>',
limit: int = 50000,
request_sleep: int = 0):
self.client: RESTClient = RESTClient(key)
self.request_sleep = request_sleep
self.limit = limit
def date(self, date_time: dt.datetime) -> str:
return dt.datetime.strftime(date_time, '%Y-%m-%d')
def date_from_ts(self, unixtimestamp: int, zone: str = 'America/New_York') -> dt.datetime:
return dt.datetime.fromtimestamp(unixtimestamp / 1000.0, tz=pytz.timezone(zone))
def date_from_nanots(self, unixtimestamp: int, zone: str = 'America/New_York') -> dt.datetime:
return dt.datetime.fromtimestamp(unixtimestamp / 1000000000.0, tz=pytz.timezone(zone))
def round_to_second(self, unixtimestamp: int, zone: str = 'America/New_York') -> np.int64:
d = self.date_from_nanots(unixtimestamp, zone)
date_time = dt.datetime(d.year, d.month, d.day, d.hour, d.minute, d.second)
return np.int64(date_time.timestamp())
def get_all_equity_symbols(self):
yesterday = dt.datetime.now() - dt.timedelta(days=1)
result = self.client.stocks_equities_grouped_daily('US', 'STOCKS', self.date(yesterday))
symbols = [r['T'] for r in result.results] # type: ignore
return symbols
def get_grouped_daily(self, market: str, date_time: dt.datetime) -> pd.DataFrame:
resp = self.client.stocks_equities_grouped_daily('US', market, date=self.date(date_time))
columns = {
'T': 'symbol',
'v': 'volume',
'o': 'open',
'c': 'close',
'h': 'high',
'l': 'low',
't': 'date',
'vw': 'vwap',
'n': 'items'
}
df = pd.DataFrame(resp.results) # type: ignore
df = df.rename(columns=columns) # type: ignore
return df
def get_aggregates(self,
symbol: str,
multiplier: int,
timespan: str,
start_date: dt.datetime,
end_date: dt.datetime) -> pd.DataFrame:
timespans = ['day', 'minute', 'hour', 'week', 'month', 'quarter', 'year']
if timespan not in timespans:
raise ValueError('incorrect timespan, must be {}'.format(timespans))
start_date = dateify(start_date, timezone='America/New_York')
end_date = dateify(end_date + dt.timedelta(days=1), timezone='America/New_York')
logging.info('get_aggregates {} mul: {} timespan: {} {} {}'.format(symbol,
multiplier,
timespan,
start_date,
end_date))
result = self.client.stocks_equities_aggregates(symbol,
multiplier,
timespan,
self.date(start_date),
self.date(end_date),
**{'limit': self.limit})
time.sleep(self.request_sleep)
columns = {
'T': 'symbol',
'v': 'volume',
'o': 'open',
'c': 'close',
'h': 'high',
'l': 'low',
't': 'date',
'n': 'items',
'vw': 'vwap'
}
df = pd.DataFrame(result.results) # type: ignore
df = df.rename(columns=columns) # type: ignore
df['symbol'] = symbol
df['date'] = df['date'].apply(self.date_from_ts) # convert to proper timezone
df.index = df['date']
df.drop(columns=['date'], inplace=True)
df['volume'] = df['volume'].astype(int)
df = df.reindex(['symbol', 'open', 'close', 'high', 'low', 'volume', 'vwap', 'items'], axis=1)
# we have more work to do
if len(df) == 50000:
last_date = df.index[-1].to_pydatetime()
combined = df.append(self.get_aggregates(symbol, multiplier, timespan, last_date, end_date))
result = combined[~combined.index.duplicated()]
return result[(result.index >= start_date) & (result.index <= end_date)]
return df[(df.index >= start_date) & (df.index <= end_date)]
def get_aggregates_as_ib(self,
symbol: str,
multiplier: int,
timespan: str,
start_date: dt.datetime,
end_date: dt.datetime) -> pd.DataFrame:
result = self.get_aggregates(symbol, multiplier, timespan, start_date, end_date)
# interactive brokers history mapping
mapping = {
'open_trades': 'open',
'high_trades': 'high',
'low_trades': 'low',
'close_trades': 'close',
'volume_trades': 'volume',
'average_trades': 'vwap',
'open_bid': 'open',
'high_bid': 'high',
'low_bid': 'low',
'close_bid': 'close',
'open_ask': 'open',
'high_ask': 'high',
'low_ask': 'low',
'close_ask': 'close',
'barCount_trades': 'items',
}
for key, value in mapping.items():
result[key] = result[value]
result.rename_axis(None, inplace=True)
result.drop(columns=['symbol', 'open', 'close', 'high', 'low', 'volume', 'vwap', 'items'], inplace=True)
result = result.reindex(
['high_ask', 'high_trades', 'close_trades', 'low_bid', 'average_trades',
'open_trades', 'low_trades', 'barCount_trades', 'open_bid', 'volume_trades',
'low_ask', 'high_bid', 'close_ask', 'close_bid', 'open_ask'], axis=1) # type: ignore
return result
def get_financials(self, symbol: str) -> PolygonFinancials:
financials = self.client.reference_stock_financials(symbol, **{'limit': self.limit}).results
dividends = self.client.reference_stock_dividends(symbol, **{'limit': self.limit}).results
splits = self.client.reference_stock_splits(symbol, **{'limit': self.limit}).results
result = PolygonFinancials(pd.DataFrame(financials), pd.DataFrame(dividends), | pd.DataFrame(splits) | pandas.DataFrame |
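# A usage sketch for the listener above; it needs a valid Polygon API key and
# network access, so it is left commented out and the key is a placeholder.
# listener = PolygonListener(key='MY_POLYGON_KEY')
# bars = listener.get_aggregates('AAPL', 1, 'day',
#                                dt.datetime(2021, 1, 4), dt.datetime(2021, 1, 8))
# print(bars[['open', 'close', 'volume']].head())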
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
# Libraries
import pandas as pd
import numpy as np
from numpy import std, mean, sqrt
from scipy.stats import linregress
import os
#plotting
import seaborn as sns
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
from matplotlib.ticker import MaxNLocator
pd.options.mode.chained_assignment = None
#%% ==========================================================
# scarhrd vs probability plot
# ============================================================
def make_graph(df, goi, other_val, fs):
scatsize = 10
prob_column = f"{goi}_prob_of_true"
#color schemes
#define all the colors
color_list = list(sns.color_palette().as_hex())
blue = color_list[0] #DRwt
orange = color_list[1] #atm
green = color_list[2] #cdk12
red = color_list[3] #brca2
purple = color_list[4] #mmr
brc = df.query('(label == "BRCA2d")')
cdk = df.query('(label == "CDK12d")')
atm = df.query('(label == "ATMd")')
mmr = df.query('(label == "MMRd")')
DRwt = df.query('(label == "DRwt")')
all_brcap = df.query('(label != "BRCA2d")')
def cohen_d(x,y):
nx = len(x)
ny = len(y)
dof = nx + ny - 2
return (mean(x) - mean(y)) / sqrt(((nx-1)*std(x, ddof=1) ** 2 + (ny-1)*std(y, ddof=1) ** 2) / dof)
fig, ax = plt.subplots(figsize=(3.25,2.5), ncols=2, nrows=2, gridspec_kw={'height_ratios':[1,5],'width_ratios':[5,1]})
#first do scatter plot
ax[1,0].scatter(x=DRwt[prob_column], y=DRwt[other_val], color=blue, s=scatsize, alpha=0.6, zorder=10, linewidth=0, label="DRwt")
ax[1,0].scatter(x=atm[prob_column], y=atm[other_val], color=orange, s=scatsize, alpha=0.7, zorder=20, linewidth=0, label="ATMd")
ax[1,0].scatter(x=cdk[prob_column], y=cdk[other_val], color=green, s=scatsize, alpha=0.7, zorder=20, linewidth=0, label="CDK12d")
ax[1,0].scatter(mmr[prob_column], mmr[other_val], color=purple, s=scatsize, alpha=0.8, zorder=30, linewidth=0, label="MMRd")
ax[1,0].scatter(x=brc[prob_column], y=brc[other_val], color=red, s=scatsize, alpha=0.9, zorder=40, linewidth=0, label="BRCA2d")
ax[1,0].tick_params(axis='x', which='both', length=3, pad=2, labelsize=fs)
ax[1,0].tick_params(axis='y', which='both', length=3, pad=2, labelsize=fs)
ax[1,0].grid(which='both', axis='both', color='0.6', linewidth=0.7, linestyle='dotted', zorder=-100)
ax[1,0].set_ylabel(other_val, fontsize=fs, labelpad=4, verticalalignment='center')
# ax[1,0].yaxis.set_label_coords(-0.12, 0.5)
ax[1,0].set_xlabel(f"Probability of {goi}", fontsize=fs, labelpad=5, verticalalignment='center')
slope_, intercept_, r_value, p_value, std_err_ = linregress(df[prob_column].values, df[other_val].values)
ax[1,0].text(0.5, 0, f"R={r_value.round(2)}", fontsize=fs, color="k", va="bottom", ha="center", ma='center', alpha=0.9, transform=ax[1,0].transAxes)
# do ax[0,0] top plot
ax[0,0].get_shared_x_axes().join(ax[0,0], ax[1,0])
ax[0,0].xaxis.set_ticklabels([])
ax[0,0].get_xaxis().set_visible(False)
ax[0,0].yaxis.set_ticklabels([])
ax[0,0].get_yaxis().set_visible(False)
sns.kdeplot(all_brcap[prob_column], color="grey", shade=True, ax=ax[0,0])
sns.kdeplot(brc[prob_column], color=red, shade=True, ax=ax[0,0])
# test conditions
c0 = all_brcap[prob_column]
c1 = brc[prob_column]
cd = cohen_d(c1,c0)
ax[0,0].text(0.5, 0.15, f"Cohen's D: {round(cd, 2)}", fontsize=fs, color="k", va="bottom", ha="center", ma='center', alpha=0.9, transform=ax[0,0].transAxes)
ax[1,1].get_shared_y_axes().join(ax[1,1], ax[1,0])
ax[1,1].xaxis.set_ticklabels([])
ax[1,1].get_xaxis().set_visible(False)
ax[1,1].yaxis.set_ticklabels([])
ax[1,1].get_yaxis().set_visible(False)
    sns.kdeplot(y=all_brcap[other_val], color="grey", shade=True, ax=ax[1,1])
    sns.kdeplot(y=brc[other_val], color=red, shade=True, ax=ax[1,1])
    c0 = all_brcap[other_val]
    c1 = brc[other_val]
cd = cohen_d(c1,c0)
ax[1,1].text(0.01, 0.01, f"Cohen's D:\n{round(cd, 2)}", fontsize=fs, color="k", va="bottom", ha="left", ma='center', alpha=0.9, transform=ax[1,1].transAxes)
ax[1,0].yaxis.set_label_coords(-0.12, 0.5)
ax[1,0].set_xlim(-0.001,1.01)
ax[1,0].set_ylim(-0.001,df[other_val].max()+1)
ax[1,0].yaxis.set_major_locator(MaxNLocator(integer=True))
ax[0,1].xaxis.set_ticklabels([])
ax[0,1].get_xaxis().set_visible(False)
ax[0,1].yaxis.set_ticklabels([])
ax[0,1].get_yaxis().set_visible(False)
sns.despine(ax=ax[0,0], top=True, right=True, left=True, bottom=True)
sns.despine(ax=ax[1,1], top=True, right=True, left=True, bottom=True)
sns.despine(ax=ax[0,1], top=True, right=True, left=True, bottom=True)
sns.despine(ax=ax[1,0], top=True, right=True, left=False, bottom=False)
fig.subplots_adjust(hspace=0.01, wspace=0.01, left=0.11, right=0.98, top=0.99, bottom=0.11)
return fig,ax
def plot_legend_scatter(figdir, fs=6, ss=6):
color_list = list(sns.color_palette().as_hex())
blue = color_list[0] #DRwt
orange = color_list[1] #atm
green = color_list[2] #cdk12
red = color_list[3] #brca2
purple = color_list[4] #mmr
fig, ax = plt.subplots(figsize=(3.1,0.15))
handles = []
handles.append(mlines.Line2D([], [], color=blue, markeredgecolor=blue, marker='o', lw=0, markersize=ss, label='DRwt'))
handles.append(mlines.Line2D([], [], color=orange, markeredgecolor=orange, marker='o', lw=0, markersize=ss, label='ATMd'))
handles.append(mlines.Line2D([], [], color=green, markeredgecolor=green, marker='o', lw=0, markersize=ss, label='CDK12d'))
handles.append(mlines.Line2D([], [], color=red, markeredgecolor=red, marker='o', lw=0, markersize=ss, label='BRCA2d'))
handles.append(mlines.Line2D([], [], color=purple, markeredgecolor=purple, marker='o', lw=0, markersize=ss, label='MMRd'))
ax.axis('off')
plt.grid(b=False, which='both')
plt.legend(handles=handles,loc='center', edgecolor='0.5', fancybox=True, frameon=False, facecolor='white', ncol=5, fontsize=fs, labelspacing=0.1, handletextpad=-0.2, columnspacing=0.5)
fig.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01, hspace=0, wspace=0)
plt.savefig(os.path.join(figdir, "scarhrd_vs_darcsign_scatter_legend.pdf"))
plt.savefig(os.path.join(figdir, "scarhrd_vs_darcsign_scatter_legend.png"), dpi=500, transparent=False, facecolor="w")
def plot_legend_kde(figdir, fs=6, ss=7):
color_list = list(sns.color_palette().as_hex())
red = color_list[3] #brca2
# purple = color_list[4] #mmr
fig, ax = plt.subplots(figsize=(1.5,0.15))
handles = []
handles.append(mlines.Line2D([], [], color=red, markeredgecolor=red, marker='s', lw=0, markersize=ss, label='BRCA2d'))
handles.append(mlines.Line2D([], [], color="grey", markeredgecolor="grey", marker='s', lw=0, markersize=ss, label='BRCA2p'))
ax.axis('off')
plt.grid(b=False, which='both')
plt.legend(handles=handles,loc='center', edgecolor='0.5', fancybox=True, frameon=False, facecolor='white', ncol=2, fontsize=fs, labelspacing=0.1, handletextpad=-0.2, columnspacing=0.5)
fig.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01, hspace=0, wspace=0)
plt.savefig(os.path.join(figdir, "scarhrd_vs_darcsign_kde_legend.pdf"))
plt.savefig(os.path.join(figdir, "scarhrd_vs_darcsign_kde_legend.png"), dpi=500, transparent=False, facecolor="w")
#%% ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#files from paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "sup_fig10")
datadir = os.path.join(rootdir, "data")
# cohort_data = os.path.join(datadir, "cohort.tsv")
shrd_path = os.path.join(datadir, "scarhrd.tsv")
prob_path = os.path.join(datadir, "darcsign_probability.tsv")
shrd = pd.read_csv(shrd_path, sep='\t', low_memory=False)
prob = | pd.read_csv(prob_path, sep='\t', low_memory=False) | pandas.read_csv |
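# A minimal sketch of the pooled-standard-deviation Cohen's d used inside
# make_graph(), applied to two small synthetic samples.
import numpy as np
from numpy import mean, std, sqrt

def _cohen_d_demo(x, y):
    nx_, ny_ = len(x), len(y)
    dof = nx_ + ny_ - 2
    pooled = sqrt(((nx_ - 1) * std(x, ddof=1) ** 2 + (ny_ - 1) * std(y, ddof=1) ** 2) / dof)
    return (mean(x) - mean(y)) / pooled

print(round(_cohen_d_demo(np.array([0.9, 0.8, 0.95]), np.array([0.2, 0.35, 0.3])), 2))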
import train
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
le = preprocessing.LabelEncoder()
def main():
# test = os.environ.get("TEST_DATA")
# train_data = os.environ.get("TRAINING_DATA")
TRAINING_DATA_DIR = os.environ.get("TRAINING_DATA")
TEST_DATA = os.environ.get("TEST_DATA")
train_data = pd.read_csv(TRAINING_DATA_DIR)
test = pd.read_csv(TEST_DATA)
add_columns = train.addingColumns(train_data,test)
data,country_dict,all_data = train.addingWolrd(add_columns)
# le = preprocessing.LabelEncoder()
    # Select train (real) data from March 1 to March 24th
dates_list = ['2020-03-01', '2020-03-02', '2020-03-03', '2020-03-04', '2020-03-05', '2020-03-06', '2020-03-07', '2020-03-08', '2020-03-09',
'2020-03-10', '2020-03-11','2020-03-12','2020-03-13','2020-03-14','2020-03-15','2020-03-16','2020-03-17','2020-03-18',
'2020-03-19','2020-03-20','2020-03-21','2020-03-22','2020-03-23', '2020-03-24']
    # Filter the selected country and run the Linear Regression workflow
    # e.g. country_name = "Spain"
country_name = os.environ.get("COUNTRY")
day_start = 39
data_country = data[data['Country/Region']==country_dict[country_name]]
data_country = data_country.loc[data_country['Day_num']>=day_start]
X_train, Y_train_1, Y_train_2, X_test = train.split_data(data_country)
model, pred = train.lin_reg(X_train, Y_train_1, X_test)
# Create a df with both real cases and predictions (predictions starting on March 12th)
X_train_check = X_train.copy()
X_train_check['Target'] = Y_train_1
X_test_check = X_test.copy()
X_test_check['Target'] = pred
X_final_check = | pd.concat([X_train_check, X_test_check]) | pandas.concat |
from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
gnd_top_df = pd.DataFrame()
for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
gnd_top_df = gnd_top_df.append(pd.read_csv(file, index_col=None))
return gnd_top_df
def sachbegriff_cloud():
    #word cloud of the top 100 subject headings for a selectable day among the last 10 working days
st.header('TOP 100 Sachbegriffe pro Tag')
st.write('Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
files = glob.glob(f'{path}/../stats/*Ts-count.csv')
daten = [x[-23:-13] for x in files]
daten.sort()
daten_filter = st.select_slider('Wählen Sie ein Datum', options=daten, value=daten[-1])
df = pd.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
dict = df.to_dict(orient='records')
worte = {}
for record in dict:
worte.update({record['sachbegriff']:record['count']})
wc = WordCloud(background_color="white", max_words=100, width=2000, height=800, colormap='tab20')
wc.generate_from_frequencies(worte)
return st.image(wc.to_array())
def wirkungsorte():
    #ranking and map of the most frequently used places of activity of all persons in the GND
df = pd.read_csv(f'{path}/wirkungsorte-top50.csv')
df.drop(columns=['id'], inplace=True)
df.rename(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
st.header('TOP Wirkungsorte von GND-Personen')
st.markdown('Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
    #Bar chart
orte_filt = st.slider('Zeige Top …', min_value=3, max_value=len(df), value=10, step=1)
graph_count = alt.Chart(df.nlargest(orte_filt, 'Anzahl', keep='all')).mark_bar().encode(
alt.X('Name:N', sort='y'),
alt.Y('Anzahl'),
alt.Color('Name:N', legend=alt.Legend(columns=2)),
tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
)
st.altair_chart(graph_count, use_container_width=True)
    #Map
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
scatterplotlayer = pdk.Layer(
"ScatterplotLayer",
df,
pickable=True,
opacity=0.5,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
line_width_min_pixels=1,
get_position='[lon, lat]',
get_radius="Anzahl",
get_fill_color=[255, 140, 0],
get_line_color=[0, 0, 0]
)
st.pydeck_chart(pdk.Deck(
scatterplotlayer,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
    #analysis of GND music works, music-related persons and their places of activity, filtered by decade between 1400 and 2010, and the centres of musical culture derived from them, shown on a map
musiker_orte = pd.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
st.header('Wirkungszentren der Musik 1400–2010')
st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
limiter = st.slider('Jahresfilter', min_value=1400, max_value=int(musiker_orte['jahrzehnt'].max()), value=(1900), step=10)
musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
musik_filt['norm']=(musik_filt['count']-musik_filt['count'].min())/(musik_filt['count'].max()-musik_filt['count'].min())
    #Map
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
musiker_scatter = pdk.Layer(
"ScatterplotLayer",
musik_filt,
opacity=0.8,
get_position='[lon, lat]',
pickable=True,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
radiusscale=100,
line_width_min_pixels=1,
get_radius="norm*50000",
get_fill_color=[50, 168, 92],
get_line_color=[39, 71, 51]
)
st.pydeck_chart(pdk.Deck(
musiker_scatter,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{name}</b>"}))
st.subheader(f'TOP 10 Wirkungszentren der {limiter}er')
col1, col2 = st.beta_columns(2)
i = 1
for index, row in musik_filt.nlargest(10, 'norm').iterrows():
if i <= 5:
with col1:
st.write(f'{i}. {row["name"]}')
elif i > 5:
with col2:
st.write(f'{i}. {row["name"]}')
i += 1
def gesamt_entity_count():
    #total number of GND entities
with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
entities = f'{int(f.read()):,}'
return st.write(f"GND-Entitäten gesamt: {entities.replace(',','.')}")
def relationen():
    #top 10 GND relation codes
rels = pd.read_csv(f'{path}/../stats/gnd_codes_all.csv', index_col=False)
st.subheader('Relationen')
st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).')
rels_filt = st.slider('Zeige Top ...', 5, len(rels), 10, 1)
relation_count = alt.Chart(rels.nlargest(rels_filt, 'count', keep='all')).mark_bar().encode(
alt.X('code', title='Relationierungs-Code', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('code', sort='-y', title='Relationierungscode'),
tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
)
st.altair_chart(relation_count, use_container_width=True)
with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
relations = f'{int(f.read()):,}'
st.write(f"Relationen zwischen Entitäten gesamt: {relations.replace(',','.')}")
def systematik():
    #ranking of the most frequently used GND classification notations
classification = | pd.read_csv(f'{path}/../stats/gnd_classification_all.csv', index_col=False) | pandas.read_csv |
import gc
import numpy as np
import pandas as pd
import xgboost as xgb
from pandas import Categorical  # pandas.core.categorical is no longer importable in recent pandas
from scipy.sparse import csr_matrix, hstack
categorical_features = ['having_IP_Address','URL_Length','Shortining_Service','having_At_Symbol','double_slash_redirecting','Prefix_Suffix','having_Sub_Domain','SSLfinal_State','Domain_registeration_length','Favicon','port','HTTPS_token','Request_URL','URL_of_Anchor','Links_in_tags','SFH','Submitting_to_email','Abnormal_URL','Redirect','on_mouseover','RightClick','popUpWidnow','Iframe','age_of_domain','DNSRecord','web_traffic','Page_Rank','Google_Index','Links_pointing_to_page','Statistical_report']
numerical_features = []
column_names = ['having_IP_Address','URL_Length','Shortining_Service','having_At_Symbol','double_slash_redirecting','Prefix_Suffix','having_Sub_Domain','SSLfinal_State','Domain_registeration_length','Favicon','port','HTTPS_token','Request_URL','URL_of_Anchor','Links_in_tags','SFH','Submitting_to_email','Abnormal_URL','Redirect','on_mouseover','RightClick','popUpWidnow','Iframe','age_of_domain','DNSRecord','web_traffic','Page_Rank','Google_Index','Links_pointing_to_page','Statistical_report','Result']
def sparse_dummies(df, column):
'''Returns sparse OHE matrix for the column of the dataframe'''
categories = Categorical(df[column])
column_names = np.array(["{}_{}".format(column, str(i)) for i in range(len(categories.categories))])
N = len(categories)
    row_numbers = np.arange(N, dtype=np.int64)  # np.int is deprecated; use an explicit integer dtype
ones = np.ones((N,))
return csr_matrix((ones, (row_numbers, categories.codes))), column_names
data = np.loadtxt('../../../data/phishing_website/train.txt', dtype=int, delimiter=',', converters={30: lambda x: int(int(x) == 1)})
df_train = | pd.DataFrame(data, columns=column_names) | pandas.DataFrame |
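# A minimal sketch of the one-hot CSR construction performed by sparse_dummies(),
# for a single column of -1/0/1 flags.
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix

col = pd.Categorical(pd.Series([-1, 0, 1, 1, -1]))
n = len(col)
ohe = csr_matrix((np.ones(n), (np.arange(n), col.codes)))   # shape (5, 3), one column per category
print(ohe.toarray())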
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
LABEL_PROBLEM_PRIMARY = {
"ext_attr": [
"avalanche_problem_type_id",
"avalanche_problem_type_name",
"avalanche_type_id",
"avalanche_type_name",
"avalanche_ext_id",
"avalanche_ext_name"
],
"values": {
_NONE: [0, "", 0, "", 0, ""],
"new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
"wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
"new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
"glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
}
}
LABEL_PROBLEM = {
"cause": {
"ext_attr": ["aval_cause_id", "aval_cause_name"],
"values": {
"0": [0, ""],
"new-snow": [10, "Nedføyket svakt lag med nysnø"],
"hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
"facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
"crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
"snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
"ground-facet": [16, "Kantkornet snø ved bakken"],
"crust-above-facet": [18, "Kantkornet snø over skarelag"],
"crust-below-facet": [19, "Kantkornet snø under skarelag"],
"ground-water": [20, "Vann ved bakken/smelting fra bakken"],
"water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
"loose": [24, "Ubunden snø"]
}
},
"dsize": {
"ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "1 - Små"],
'2': [2, "2 - Middels"],
'3': [3, "3 - Store"],
'4': [4, "4 - Svært store"],
'5': [5, "5 - Ekstremt store"]
}
},
"prob": {
"ext_attr": ["aval_probability_id", "aval_probability_name"],
"values": {
'0': [0, "Ikke gitt"],
'2': [2, "Lite sannsynlig"],
'3': [3, "Mulig"],
'5': [5, "Sannsynlig"],
}
},
"trig": {
"ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
"values": {
'0': [0, "Ikke gitt"],
'10': [10, "Stor tilleggsbelastning"],
'21': [21, "Liten tilleggsbelastning"],
'22': [22, "Naturlig utløst"]
}
},
"dist": {
"ext_attr": ["aval_distribution_id", "aval_distribution_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "Få bratte heng"],
'2': [2, "Noen bratte heng"],
'3': [3, "Mange bratte heng"],
'4': [4, "De fleste bratte heng"]
}
},
"lev_fill": {
"ext_attr": ["exposed_height_fill"],
"values": {
'0': [0],
'1': [1],
'2': [2],
'3': [3],
'4': [4],
}
}
}
LABEL_PROBLEM_MULTI = {
"aspect": {
"ext_attr": "valid_expositions",
}
}
LABEL_PROBLEM_REAL = {
"lev_max": {
"ext_attr": "exposed_height_1",
},
"lev_min": {
"ext_attr": "exposed_height_2",
}
}
LABEL_GLOBAL = {
"danger_level": {
"ext_attr": ["danger_level", "danger_level_name"],
"values": {
'1': [1, "1 liten"],
'2': [2, "2 Moderat"],
'3': [3, "3 Betydelig"],
'4': [4, "4 Stor"],
'5': [5, "5 Meget stor"]
}
},
"emergency_warning": {
"ext_attr": ["emergency_warning"],
"values": {
"Ikke gitt": ["Ikke gitt"],
"Naturlig utløste skred": ["Naturlig utløste skred"],
}
}
}
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
"""
Object contains aggregated data used to generate labeled datasets.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param seasons: Tuple/list of string representations of avalanche seasons to fetch.
"""
self.seasons = sorted(list(set(seasons)))
self.date = None
self.regobs_types = regobs_types
self.weather = {}
self.regobs = {}
self.varsom = {}
self.labels = {}
self.use_label = True
for season in seasons:
varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
self.varsom = merge(self.varsom, varsom)
self.labels = merge(self.labels, labels)
regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
self.regobs = merge(self.regobs, regobs)
weather = _get_weather_obs(season, max_file_age=max_file_age)
self.weather = merge(self.weather, weather)
@staticmethod
def date(regobs_types, date: dt.date, days, use_label=True):
"""
Create a dataset containing just a given day's data.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param date: Date to fetch and create dataset for.
:param days: How many days to fetch before date. This will be max for .label()'s days parameter.
"""
self = ForecastDataset(regobs_types, [])
self.date = date
self.use_label = use_label
self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)
self.labels = {}
for label_keys, label in labels.items():
if label_keys not in self.labels:
self.labels[label_keys] = {}
for (label_date, label_region), label_data in label.items():
if label_date == date.isoformat():
subkey = (label_date, label_region)
self.labels[label_keys][subkey] = label_data
return self
def label(self, days, with_varsom=True):
"""Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
a time series.
        :param days: How far back in time data should be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
:return: LabeledData
"""
table = {}
row_weight = {}
df = None
df_weight = None
df_label = pd.DataFrame(self.labels, dtype="U")
days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
days_r = days + 1
varsom_index = pd.DataFrame(self.varsom).index
weather_index = pd.DataFrame(self.weather).index
if len(df_label.index) == 0 and self.use_label:
raise NoBulletinWithinRangeError()
if self.date and not self.use_label:
season = gm.get_season_from_date(self.date)
regions = gm.get_forecast_regions(year=season, get_b_regions=True)
date_region = [(self.date.isoformat(), region) for region in regions]
else:
date_region = df_label.index
for monotonic_idx, entry_idx in enumerate(date_region):
date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]
def prev_key(day_dist):
return (date - dt.timedelta(days=day_dist)).isoformat(), region_id
# Just check that we can use this entry.
try:
if with_varsom:
for n in range(1, days_v):
if prev_key(n) not in varsom_index:
raise KeyError()
for n in range(0, days_w):
if prev_key(n) not in weather_index:
raise KeyError()
add_row = True
                # We don't check for RegObs, as it is more of a nice-to-have kind of data
except KeyError:
add_row = False
if add_row:
row = {}
for region in REGIONS:
row[(f"region_id_{region}", "0")] = float(region == region_id)
if with_varsom:
for column in self.varsom.keys():
for n in range(1, days_v):
# We try/except an extra time since single dates may run without a forecast.
row[(column, str(n))] = self.varsom[column][prev_key(n)]
for column in self.weather.keys():
for n in range(0, days_w):
try:
row[(column, str(n))] = self.weather[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
for column in self.regobs.keys():
for n in range(2, days_r):
try:
row[(column, str(n))] = self.regobs[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
try:
weight_sum = self.regobs['accuracy'][prev_key(0)]
if weight_sum < 0:
row_weight[entry_idx] = 1 / 2
elif weight_sum == 0:
row_weight[entry_idx] = 1
elif weight_sum > 0:
row_weight[entry_idx] = 2
except KeyError:
row_weight[entry_idx] = 1
# Some restructuring to make DataFrame parse the dict correctly
for key in row.keys():
if key not in table:
table[key] = {}
table[key][entry_idx] = row[key]
# Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
df_weight_new = pd.Series(row_weight)
df = df_new if df is None else pd.concat([df, df_new])
                df_weight = df_weight_new if df_weight is None else pd.concat([df_weight, df_weight_new])
table = {}
row_weight = {}
if df is None or len(df.index) == 0:
raise NoDataFoundError()
if self.use_label:
df_label = df_label.loc[df.index]
df_label.sort_index(axis=0, inplace=True)
df_label.sort_index(axis=1, inplace=True)
df.sort_index(axis=0, inplace=True)
df_weight.sort_index(axis=0, inplace=True)
else:
df_label = None
return LabeledData(df, df_label, df_weight, days, self.regobs_types, with_varsom, self.seasons)
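# Usage sketch (assumed, not part of the original module): build a dataset for one or
# more seasons, then call .label() to get a LabeledData object. "Faretegn" is just one
# example of a RegObs observation type.
#
#   ds = ForecastDataset(regobs_types=("Faretegn",), seasons=("2018-19", "2019-20"))
#   labeled = ds.label(days=3, with_varsom=True).normalize()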
class LabeledData:
is_normalized = False
with_regions = True
elevation_class = (False, False)
scaler = StandardScaler()
def __init__(self, data, label, row_weight, days, regobs_types, with_varsom, seasons=False):
"""Holds labels and features.
:param data: A DataFrame containing the features of the dataset.
:param label: DataFrame of labels.
:param row_weight: Series containing row weights
        :param days: How far back in time data should be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
        :param regobs_types: A tuple/list of strings naming the types of observations to fetch from RegObs,
            e.g. `("Faretegn",)`.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
"""
self.data = data
self.row_weight = row_weight
if label is not None:
self.label = label
self.label = self.label.replace(_NONE, 0)
self.label = self.label.replace(np.nan, 0)
try: self.label['CLASS', _NONE] = self.label['CLASS', _NONE].replace(0, _NONE).values
except KeyError: pass
try: self.label['MULTI'] = self.label['MULTI'].replace(0, "0").values
except KeyError: pass
            try: self.label['REAL'] = self.label['REAL'].astype(float)
except KeyError: pass
self.pred = label.copy()
for col in self.pred.columns:
self.pred[col].values[:] = 0
try: self.pred['CLASS', _NONE] = _NONE
except KeyError: pass
try: self.pred['MULTI'] = "0"
except KeyError: pass
else:
self.label = None
self.pred = None
self.days = days
self.with_varsom = with_varsom
self.regobs_types = regobs_types
if self.data is not None:
self.scaler.fit(self.data.values)
self.single = not seasons
self.seasons = sorted(list(set(seasons if seasons else [])))
def normalize(self, by=None):
"""Normalize the data feature-wise using MinMax.
:return: Normalized copy of LabeledData
"""
by = by if by is not None else self
if not self.is_normalized:
ld = self.copy()
data = by.scaler.transform(self.data.values)
            ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
import os
""" First change the following directory link to where all input files do exist """
os.chdir("D:\\Book writing\\Codes\\Chapter 5")
import numpy as np
import pandas as pd
# KNN Curse of Dimensionality
import random,math
def random_point_gen(dimension):
return [random.random() for _ in range(dimension)]
def distance(v,w):
vec_sub = [v_i-w_i for v_i,w_i in zip(v,w)]
sum_of_sqrs = sum(v_i*v_i for v_i in vec_sub)
return math.sqrt(sum_of_sqrs)
def random_distances_comparison(dimension,number_pairs):
return [distance(random_point_gen(dimension),random_point_gen(dimension))
for _ in range(number_pairs)]
def mean(x):
return sum(x) / len(x)
dimensions = range(1, 201, 5)
avg_distances = []
min_distances = []
dummyarray = np.empty((20,4))
dist_vals = pd.DataFrame(dummyarray)
dist_vals.columns = ["Dimension","Min_Distance","Avg_Distance","Min/Avg_Distance"]
random.seed(34)
i = 0
for dims in dimensions:
distances = random_distances_comparison(dims, 1000)
avg_distances.append(mean(distances))
min_distances.append(min(distances))
dist_vals.loc[i,"Dimension"] = dims
dist_vals.loc[i,"Min_Distance"] = min(distances)
dist_vals.loc[i,"Avg_Distance"] = mean(distances)
dist_vals.loc[i,"Min/Avg_Distance"] = min(distances)/mean(distances)
print(dims, min(distances), mean(distances), min(distances)*1.0 / mean(distances))
i = i+1
# Ploting Average distances for Various Dimensions
import matplotlib.pyplot as plt
plt.figure()
#plt.title('Avg. Distance Change with Number of Dimensions for 1K Obs')
plt.xlabel('Dimensions')
plt.ylabel('Avg. Distance')
plt.plot(dist_vals["Dimension"],dist_vals["Avg_Distance"])
plt.legend(loc='best')
plt.show()
# 1-Dimension Plot
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
one_d_data = np.random.rand(60,1)
one_d_data_df = pd.DataFrame(one_d_data)
one_d_data_df.columns = ["1D_Data"]
one_d_data_df["height"] = 1
plt.figure()
plt.scatter(one_d_data_df['1D_Data'],one_d_data_df["height"])
plt.yticks([])
plt.xlabel("1-D points")
plt.show()
# 2- Dimensions Plot
two_d_data = np.random.rand(60,2)
two_d_data_df = pd.DataFrame(two_d_data)
two_d_data_df.columns = ["x_axis","y_axis"]
plt.figure()
plt.scatter(two_d_data_df['x_axis'],two_d_data_df["y_axis"])
plt.xlabel("x_axis");plt.ylabel("y_axis")
plt.show()
# 3- Dimensions Plot
three_d_data = np.random.rand(60,3)
three_d_data_df = pd.DataFrame(three_d_data)
three_d_data_df.columns = ["x_axis","y_axis","z_axis"]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(three_d_data_df['x_axis'],three_d_data_df["y_axis"],three_d_data_df["z_axis"])
ax.set_xlabel('x_axis')
ax.set_ylabel('y_axis')
ax.set_zlabel('z_axis')
plt.show()
# KNN CLassifier - Breast Cancer
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score,classification_report
breast_cancer = pd.read_csv("Breast_Cancer_Wisconsin.csv")
print (breast_cancer.head())
breast_cancer['Bare_Nuclei'] = breast_cancer['Bare_Nuclei'].replace('?', np.NAN)
breast_cancer['Bare_Nuclei'] = breast_cancer['Bare_Nuclei'].fillna(breast_cancer['Bare_Nuclei'].value_counts().index[0])
breast_cancer['Cancer_Ind'] = 0
breast_cancer.loc[breast_cancer['Class']==4,'Cancer_Ind'] = 1
x_vars = breast_cancer.drop(['ID_Number','Class','Cancer_Ind'],axis=1)
y_var = breast_cancer['Cancer_Ind']
from sklearn.preprocessing import StandardScaler
x_vars_stdscle = StandardScaler().fit_transform(x_vars.values)
from sklearn.model_selection import train_test_split
x_vars_stdscle_df = pd.DataFrame(x_vars_stdscle, index=x_vars.index, columns=x_vars.columns)
x_train,x_test,y_train,y_test = train_test_split(x_vars_stdscle_df,y_var,train_size = 0.7,random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn_fit = KNeighborsClassifier(n_neighbors=3,p=2,metric='minkowski')
knn_fit.fit(x_train,y_train)
print ("\nK-Nearest Neighbors - Train Confusion Matrix\n\n",pd.crosstab(y_train,knn_fit.predict(x_train),rownames = ["Actuall"],colnames = ["Predicted"]) )
print ("\nK-Nearest Neighbors - Train accuracy:",round(accuracy_score(y_train,knn_fit.predict(x_train)),3))
print ("\nK-Nearest Neighbors - Train Classification Report\n",classification_report(y_train,knn_fit.predict(x_train)))
print ("\n\nK-Nearest Neighbors - Test Confusion Matrix\n\n",pd.crosstab(y_test,knn_fit.predict(x_test),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nK-Nearest Neighbors - Test accuracy:",round(accuracy_score(y_test,knn_fit.predict(x_test)),3))
print ("\nK-Nearest Neighbors - Test Classification Report\n",classification_report(y_test,knn_fit.predict(x_test)))
# Tuning of K- value for Train & Test data
dummyarray = np.empty((5,3))
k_valchart = pd.DataFrame(dummyarray)
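# A minimal sketch of the K-tuning loop (assumed: a grid of K=1..5 to match the 5-row
# dummy array above; the column names are illustrative, not from the original script).
k_valchart.columns = ["K_value", "Train_acc", "Test_acc"]
for i, k in enumerate([1, 2, 3, 4, 5]):
    knn_fit = KNeighborsClassifier(n_neighbors=k, p=2, metric='minkowski')
    knn_fit.fit(x_train, y_train)
    k_valchart.loc[i, "K_value"] = k
    k_valchart.loc[i, "Train_acc"] = round(accuracy_score(y_train, knn_fit.predict(x_train)), 3)
    k_valchart.loc[i, "Test_acc"] = round(accuracy_score(y_test, knn_fit.predict(x_test)), 3)
print(k_valchart)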
#Lib for Streamlit
# Copyright(c) 2021 - AilluminateX LLC
# This is main Sofware... Screening and Tirage
# Customized to general Major Activities
# Make all the School Activities- st.write(DataFrame) ==> (outputs) Commented...
# The reason, since still we need the major calculations.
# Also the Computing is not that expensive.. So, no need to optimize at this point
import streamlit as st
import pandas as pd
#Change website title (set_page_config)
#==============
from PIL import Image
image_favicon=Image.open('Logo_AiX.jpg')
st.set_page_config(page_title='AilluminateX - Covid Platform', page_icon = 'Logo_AiX.jpg') #, layout = 'wide', initial_sidebar_state = 'auto'), # layout = 'wide',)
# favicon being an object of the same kind as the one you should provide st.image() with
#(ie. a PIL array for example) or a string (url or local file path)
#==============
#Hide footer and customize the text
#=========================
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
footer:after {
content:'Copyright(c) 2021 - AilluminateX LLC and Ailysium - Covid19 Bio-Forecasting Platform | https://www.aillumiante.com';
visibility: visible;
display: block;
position: relative;
#background-color: gray;
padding: 5px;
top: 2px;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#==============================
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from yellowbrick.classifier import ClassificationReport
from sklearn.metrics import accuracy_score
#import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import plotly.express as px
import numpy as np
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import altair as alt
import plotly.figure_factory as ff
import matplotlib
from matplotlib import cm
import seaborn as sns; sns.set()
from PIL import Image
import statsmodels.api as sm
import statsmodels.formula.api as smf
#from sklearn import model_selection, preprocessing, metrics, svm,linear_model
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, cross_validate, StratifiedKFold
from sklearn.feature_selection import SelectKBest, chi2
#from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc, roc_auc_score, roc_curve, explained_variance_score, precision_recall_curve,average_precision_score,accuracy_score, classification_report
#from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import boxcox
from matplotlib import pyplot
import pickle
#from sklearn.externals import joblib
import joblib
# Load Image & Logo
#====================
st.image("Logo_AiX.jpg") # Change to MSpace Logo
#st.write("https://www.ailluminate.com")
#st.image("LogoAiX1.jpg") # Change to MSpace Logo
st.markdown("<h1 style='text-align: left; color: turquoise;'>Ailysium: BioForecast Platform</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Train AI BioForecast Model (Realtime)</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Opening-Economy & Society</h1>", unsafe_allow_html=True)
#df_forecast= pd.read_csv("2021-03-27-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
df_forecast=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Load Data - The last/most recent Forecast and latest Data
#=====================
# The last two, most recent forecast
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-15"
#Forecasted_dates=["3/20/2021", "3/27/2021", "4/03/2021", "4/10/2021" ]
#df_forecast= pd.read_csv("2021-03-22-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-22"
#Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
#==========================================
df_forecast_previous= pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
#================
#initialize the data
#=======================
#Models
#====================
#st.success("What Forecast Model Data to Load?")
forecast_model_Options= ['Reference Model',
'Ensemble',
'UGA-CEID',
'Columbia',
'ISU',
'UVA',
'LNQ',
'Facebook',
'JHU-APL',
'UpstateSU',
'JHU-IDD',
'LANL',
'Ensemble']
#st.success("What Date Forecast Data to Load?")
data_dates_options=['2021-01-04', '2021-01-11', '2021-01-18',
'2021-01-25', '2021-02-01', '2021-02-08',
'2021-02-15', '2021-02-22', '2021-03-01',
'2021-03-08', '2021-03-15', '2021-03-22',
'2021-03-29']
data_dates_options=['2021-03-29',
'2021-03-22', '2021-03-15', '2021-03-08',
'2021-03-01', '2021-02-22', '2021-02-15',
'2021-02-08', '2021-02-01', '2021-01-25',
'2021-01-18', '2021-01-11', '2021-01-04']
data_dates_options=['2021-04-14']
load_ai_model_options=['Reference Model',
'AI Model 1',
'AI Model 2 (L)',
'AI Model 3 (Fast)',
'AI Model 4 (Fast) (L)',
'AI Model 5',
'AI Model 6',
'AI Model 7 (VERY Slow - Do Not Use unless You have to!)',
'AI Model 8',
'AI Model 9 (Slow)',
'AI Model 10',
'AI Model 11 (L)',
'AI Model 12',
'AI Model 13',
'AI Model 14 (L)',
'AI Model 15',
'AI Model 16 (L)',
'AI Model (aggregator)']
train_ai_model_options=load_ai_model_options
#===========================
#Selectt Option Section
#============================
select_options=["AiX-ai-Forecast-Platform",
"Load Forecast Data", #Simply Check the Forecast Data
"Load AI Model",
"Train AI Model",
"AiX-Platform"]
select_options=["AiX-ai-Forecast-Platform"]
your_option=select_options
st.sidebar.success("Please Select your Option" )
option_selectbox = st.sidebar.selectbox( "Select your Option:", your_option)
select_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
#if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
if select_Name=='AiX-ai-Forecast-Platform' or select_Name!='AiX-ai-Forecast-Platform':
#Models
#====================
#st.success("What Forecast Model Data to Load?")
your_option=forecast_model_Options
st.sidebar.info("Please Select Forecast Model" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Model:", your_option)
if option_selectbox =='Reference Model':
option_selectbox='Reference Model'
option_selectbox='Ensemble'
forecast_model_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
#st.success("What Date Forecast Data to Load?")
your_option=data_dates_options
st.sidebar.warning("Please Select Forecast Date" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Date:", your_option)
#if option_selectbox=='2021-03-22':
# option_selectbox= '2021-03-15'
data_dates_Name=option_selectbox
if option_selectbox==data_dates_Name:
your_option=["One(1) Week Ahead", "Two(2) Weeks Ahead", "Three(3) Weeks Ahead", "Four(4) Weeks Ahead"]
st.sidebar.warning("Please Select Forecast Week" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Weeks Ahead:", your_option)
data_week_Name=option_selectbox
if data_week_Name !="One(1) Week Ahead":
st.write("Two(2), Three(3), and Four(4) Weeks Ahead are being calculated offline currently and are not presented as realtime")
#if option_selectbox=='Load AI Model':
if select_Name=='Load AI Model':
your_option=load_ai_model_options
st.sidebar.error("Please Select AI Model to load" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Load:", your_option)
ai_load_Name=option_selectbox
#if option_selectbox=='Train AI Model':
if select_Name=='Train AI Model':
your_option=train_ai_model_options
st.sidebar.success("Please Select AI Model to Train" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Train:", your_option)
ai_train_Name=option_selectbox
#load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write("Data to load: ", load_data_csv)
#Load Models and Sidebar Selection
#===================================================================================# Load AI Models
#if option_selectbox=='AiX Platform':
if select_Name=='AiX Platform':
model2load=pd.read_csv('model2load.csv', engine='python', dtype=str) # dtype={"Index": int})
model_index=model2load
model_names_option=model_index.AI_Models.values
st.sidebar.success("Please Select your AI Model!" )
model_selectbox = st.sidebar.selectbox( "Select AI Model", model_names_option)
Model_Name=model_selectbox
Index_model=model2load.Index[model2load.AI_Models==Model_Name].values[0]
Index_model=int(Index_model)
pkl_model_load=model2load.Pkl_Model[model2load.AI_Models==Model_Name].values[0]
#Load Data and Model
Pkl_Filename = pkl_model_load #"Pickle_RForest.pkl"
#st.write(Pkl_Filename)
# Load the Model back from file
#****with open(Pkl_Filename, 'rb') as file: # This line to load the file
#*** Pickle_LoadModel = pickle.load(file) # This line to load the file
# Pickle_RForest = pickle.load(file)
#RForest=Pickle_RForest
load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
load_data_csv="recent-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
#Forecast Data is being loaded and alll sort of sidebars also created.
#===================================================
#import pandas as pd
# Load Reference Model Forecast Ensemble - Only For Visualization Purpose
#=============================================================================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_ref=pd.DataFrame()
df_forecast_ref=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_ref.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
#forecast_model_Name="Ensemble"
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_ref=pd.DataFrame()
df_forecast_Ensemble_ref=df_forecast_Ensemble.copy()
# Load Previous Forecast
#=========================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_previous=pd.DataFrame()
df_forecast_previous=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_previous.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_previous=pd.DataFrame()
df_forecast_Ensemble_previous=df_forecast_Ensemble.copy()
#Load Most Recent Forecast
#====================
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_recent=pd.DataFrame()
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_recent.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()  # keep consistent with the two blocks above: use the county-only frame
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_recent=pd.DataFrame()
df_forecast_Ensemble_recent=df_forecast_Ensemble.copy()
#Load Actual Cases
#==========================
df_actual_cases=pd.DataFrame()
df_actual_cases=pd.read_csv("covid_confirmed_usafacts_forecast.csv", engine='python', dtype={'fips': str})
#======================Visulaization of data =======================
# ======================Compare the Forecast with actula data ================"
df_ref_temp=pd.DataFrame(np.array(df_forecast_Ensemble_ref.iloc[:,[6,7]].values), columns=["fips", "Forecast_Reference"]) # 6,7: fips and point
df_model_temp=pd.DataFrame(np.array(df_forecast_Ensemble_previous.iloc[:,[6,7]].values), columns=["fips", "Forecast_Model"]) # 6,7: fips and point
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-2]].values), columns=["fips", "Actual_Target"]) # 0, -2: fips and most recent actual-target
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-7,-6,-5,-4,-3, -2]].values),
columns=["fips", "TimeN5", "TimeN4", "TimeN3", "TimeN2", "TimeN1", "Actual_Target"]) # 0, -2: fips and most recent actual-target
#st.write("Last 6 Total Weekly Cases, ", df_actual_temp.head(20))
data_merge= pd.DataFrame() #df_ref_temp.copy()
data_merge= pd.merge(df_ref_temp, df_model_temp, on="fips")
data_merge_left=data_merge.copy()
data_merge= pd.merge(data_merge_left, df_actual_temp, on="fips")
#st.write("df_actual_temp:, ", data_merge.head())
#st.error("Stop for checking how many is loaded")
data_merge.iloc[:,1:] = data_merge.iloc[:,1:].astype(float)
#st.write("Data Merged: ", data_merge.head())
#data_merge = data_merge.iloc[:,[1,2,3]].astype(float)
df_forecast_target=data_merge.copy()
#df_forecast_target_Scaled = df_forecast_target_Scaled.astype(float)
len_data=len(df_forecast_target)
df_population= pd.read_csv("covid_county_population_usafacts.csv", engine='python', dtype={'fips': str, 'fips_1': str})
df_forecast_target_Scaled = df_forecast_target.copy()
i=0
while i <len_data:
    fips = df_forecast_target['fips'].iloc[i]  # fips of the current row's county
population=df_population.population[df_population.fips==fips].values[0]
df_forecast_target_Scaled.iloc[i,1:]=df_forecast_target.iloc[i,1:]/population*1000
i=i+1
df_forecast_target_Scaled.iloc[:,1:] = df_forecast_target_Scaled.iloc[:,1:].astype(float)
#st.write("df_forecast_target_Scaled", df_forecast_target_Scaled.head())
data_viz=df_forecast_target_Scaled.copy()
#Delete All The Data Frames that we do not need!
#=======================Delete all the DataFrame we do not need ==================
df_forecast_target_Scaled=pd.DataFrame()
data_merge=pd.DataFrame()
df_forecast_target=pd.DataFrame()
df_forecast_Ensemble_previous=pd.DataFrame()
df_forecast_Ensemble_recent=pd.DataFrame()
df_forecast_Ensemble_ref=pd.DataFrame()
df_forecast=pd.DataFrame()
df_ref_temp=pd.DataFrame()
df_model_temp=pd.DataFrame()
df_actual_temp = pd.DataFrame()
import numpy as np
import pandas as pd
from scipy.stats import poisson
from cooltools.api.dotfinder import (
histogram_scored_pixels,
determine_thresholds,
annotate_pixels_with_qvalues,
extract_scored_pixels,
)
# helper functions for BH-FDR copied from www.statsmodels.org
def _fdrcorrection(pvals, alpha=0.05):
"""
pvalue correction for false discovery rate.
This covers Benjamini/Hochberg for independent or positively correlated tests.
Parameters
----------
pvals : np.ndarray
Sorted set of p-values of the individual tests.
alpha : float, optional
Family-wise error rate. Defaults to ``0.05``.
Returns
-------
rejected : ndarray, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : ndarray
pvalues adjusted for multiple hypothesis testing to limit FDR
"""
ntests = len(pvals)
# empirical Cumulative Distribution Function for pvals:
ecdffactor = np.arange(1, ntests + 1) / float(ntests)
reject = pvals <= ecdffactor * alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
pvals_corrected[pvals_corrected > 1] = 1
return reject, pvals_corrected
def multipletests(pvals, alpha=0.1, is_sorted=False):
"""
Test results and p-value correction for multiple tests
Using FDR Benjamini-Hochberg method (non-negative)
Parameters
----------
pvals : array_like, 1-d
uncorrected p-values. Must be 1-dimensional.
alpha : float
FWER, family-wise error rate, e.g. 0.1
is_sorted : bool
If False (default), the p_values will be sorted, but the corrected
pvalues are in the original order. If True, then it assumed that the
pvalues are already sorted in ascending order.
Returns
-------
reject : ndarray, boolean
true for hypothesis that can be rejected for given alpha
pvals_corrected : ndarray
p-values corrected for multiple tests
Notes
-----
the p-value correction is independent of the
alpha specified as argument. In these cases the corrected p-values
can also be compared with a different alpha
All procedures that are included, control FWER or FDR in the independent
case, and most are robust in the positively correlated case.
"""
pvals = np.asarray(pvals)
if not is_sorted:
sortind = np.argsort(pvals)
pvals = np.take(pvals, sortind)
reject, pvals_corrected = _fdrcorrection(pvals, alpha=alpha)
if is_sorted:
return reject, pvals_corrected
else:
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[sortind] = reject
return reject_, pvals_corrected_
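# Quick illustrative check (not part of the original tests): BH-FDR on a small sorted set
# of p-values; with alpha=0.05 only the two smallest ones survive the correction.
_demo_pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205, 0.212, 0.216])
_demo_reject, _demo_qvals = multipletests(_demo_pvals, alpha=0.05, is_sorted=True)
assert _demo_reject[0] and _demo_reject[1] and not _demo_reject[2:].any()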
# mock input data to perform some p-value calculation and correction on
num_pixels = 2500
max_value = 99
# fake kernels just for the sake of their names 'd' and 'v':
fake_kernels = {
"d": np.random.randint(2, size=9).reshape(3, 3),
"v": np.random.randint(2, size=9).reshape(3, 3),
}
# table with the "scored" pixels (as if they were returned by the dotfinder scoring function)
pixel_dict = {}
# inflate the fake counts so that more of the calls come out significant
pixel_dict["count"] = np.random.randint(max_value, size=num_pixels) + 9
for k in fake_kernels:
pixel_dict[f"la_exp.{k}.value"] = max_value * np.random.random(num_pixels)
scored_df = pd.DataFrame(pixel_dict)
# design lambda-bins as in dot-calling:
num_lchunks = 6
ledges = np.r_[[-np.inf], np.linspace(0, max_value, num_lchunks), [np.inf]]
# set FDR parameter
FDR = 0.1
# helper functions working on a chunk of counts or pvals
# associated with a given lambda-bin:
def get_pvals_chunk(counts_series_lchunk):
"""
Parameters:
-----------
counts_series_lchunk : pd.Series(int)
Series of raw pixel counts where the name of the Series
is pd.Interval of the lambda-bin where the pixel belong.
I.e. counts_series_lchunk.name.right - is the upper limit of the chunk
and is used as "expected" in Poisson distribution to estimate p-value.
Returns:
--------
pvals: ndarray[float]
array of p-values for each pixel
Notes:
------
poisson.sf = 1.0 - poisson.cdf
"""
return poisson.sf(counts_series_lchunk.values, counts_series_lchunk.name.right)
def get_qvals_chunk(pvals_series_lchunk):
"""
Parameters:
-----------
pvals_series_lchunk : pd.Series(float)
Series of p-values calculated for each pixel, where the name
of the Series is pd.Interval of the lambda-bin where the pixel belong.
Returns:
--------
qvals: ndarray[float]
array of q-values, i.e. p-values corrected with the multiple hypothesis
testing procedure BH-FDR, for each pixel
Notes:
------
    - level of False Discovery Rate (FDR) is fixed for testing
"""
_, qvals = multipletests(pvals_series_lchunk.values, alpha=FDR, is_sorted=False)
return qvals
def get_reject_chunk(pvals_series_lchunk):
"""
Parameters:
-----------
pvals_series_lchunk : pd.Series(float)
Series of p-values calculated for each pixel, where the name
of the Series is pd.Interval of the lambda-bin where the pixel belong.
Returns:
--------
rej: ndarray[bool]
array of rejection statuses, i.e. for every p-values return if corresponding
null hypothesis can be rejected or not, using multiple hypothesis testing
procedure BH-FDR.
Notes:
------
- pixels with rejected status (not null) are considered as significantly enriched
    - level of False Discovery Rate (FDR) is fixed for testing
"""
rej, _ = multipletests(pvals_series_lchunk.values, alpha=FDR, is_sorted=False)
return rej
# for the fake scored-pixel table calculate p-vals, q-vals, l-chunk where they belong
# and rejection status using introduced statsmodels-based helper functions:
for k in fake_kernels:
    lbin = pd.cut(scored_df[f"la_exp.{k}.value"], ledges)
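    # A minimal sketch of the remaining steps (assumed): group pixels by their lambda-bin
    # and apply the helpers chunk-wise via groupby/transform.
    scored_df[f"{k}.lbin"] = lbin
    scored_df[f"{k}.pval"] = scored_df.groupby(lbin)["count"].transform(get_pvals_chunk)
    scored_df[f"{k}.qval"] = scored_df.groupby(lbin)[f"{k}.pval"].transform(get_qvals_chunk)
    scored_df[f"{k}.rejected"] = scored_df.groupby(lbin)[f"{k}.pval"].transform(get_reject_chunk)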
##############################################################################################
# PURPOSE
# Read STARLIGHT output files
#
# CREATED BY:
# <NAME> (in R)
#
# ADAPTED BY:
# <NAME> (Conversion from R to Python and adaptation)
#
# CALLING SEQUENCE
# python read_starlight_output.py --> In terminal
#
# INPUT PARAMETERS
# output_dir --> Output directory name
# output_file --> Output filename
# starlight_output_dir --> Directory of input files
# spectra_z_file --> List of objects
# BaseAges --> Base file
# DR = 12 --> Spectra file name: DR12 or DR7 format
# age_format --> Units of ages in the base file (yr or Gyr)
# met_format = 'Zsun' --> Units of metallicities in the base file (logZ_Zsun or Z)
# Z_sun = 0.019 --> Solar metallicity
#
# OUTPUT
# STARLIGHT_MAIN_RESULTS.csv
# Starlight_SFH_Mcor.csv
# Starlight_SFH_Mcor_cumul.csv
# Starlight_SFH_Mini.csv
# Starlight_SFH_Mini_cumul.csv
# Starlight_CEH_Lum.csv
# Starlight_CEH_Mcor.csv
# Starlight_CEH_Mini.csv
#
# REQUIRED SCRIPTS
# cosmodist.py
#
# COMMENTS
#
##############################################################################################
##############################################################################################
# INPUTS
##############################################################################################
import numpy as np
import pandas as pd
from os import path
import time
def read_starlight_sfh(starlight_file):
met_L_age = pd.DataFrame([np.zeros(n_age)]).T
met_M_age = pd.DataFrame([np.zeros(n_age)]).T
alpha_L_age = pd.DataFrame([np.zeros(n_age)]).T
alpha_M_age = pd.DataFrame([np.zeros(n_age)]).T
frac_age = pd.DataFrame([np.zeros(n_age)]).T
mini_frac_age = pd.DataFrame([np.zeros(n_age)]).T
mcor_frac_age = pd.DataFrame([np.zeros(n_age)]).T
for aa in range(n_age):
xx = abs(age/1e9 - ages[aa]/1e9) < 0.001
frac_age[0][aa] = sum(frac[xx])
mini_frac_age[0][aa] = sum(mini_frac[xx])
mcor_frac_age[0][aa] = sum(mcor_frac[xx])
if frac_age[0][aa] > 0:
met_L_age[0][aa] = (np.log10(sum(frac[xx] * met[xx]/0.019) / sum(frac[xx])))
alpha_L_age[0][aa] = (np.log10(sum(frac[xx] * 10**alpha[xx]) / sum(frac[xx])))
met_M_age[0][aa] = (np.log10(sum(mcor_frac[xx] * met[xx]/0.019) / sum(mcor_frac[xx])))
alpha_M_age[0][aa] = (np.log10(sum(mcor_frac[xx] * 10**alpha[xx]) / sum(mcor_frac[xx])))
mcor_frac_age = mcor_frac_age/sum(mcor_frac_age[0])
mini_frac_age = mini_frac_age/sum(mini_frac_age[0])
### Cumulative ###
mini_cumul_age = pd.DataFrame([np.zeros(n_age)]).T
mcor_cumul_age = pd.DataFrame([np.zeros(n_age)]).T
for aa in range(n_age):
mini_cumul_age[0][aa] = sum(mini_frac_age[0][ages >= ages[aa]])
mcor_cumul_age[0][aa] = sum(mcor_frac_age[0][ages >= ages[aa]])
### Write the output files ###
mini_frac_age = mini_frac_age.T
mini_cumul_age = mini_cumul_age.T
mcor_frac_age = mcor_frac_age.T
mcor_cumul_age = mcor_cumul_age.T
mini_frac_age.to_csv(output_dir+output_file_Mini, mode='a', sep=' ', index=False, header=False)
mini_cumul_age.to_csv(output_dir+output_file_Mini_cumul, mode='a', sep=' ', index=False, header=False)
mcor_frac_age.to_csv(output_dir+output_file_Mcor, mode='a', sep=' ', index=False, header=False)
mcor_cumul_age.to_csv(output_dir+output_file_Mcor_cumul, mode='a', sep=' ', index=False, header=False)
list = dir()
del list
### FILES & FOLDERS ###
starlight_output_dir = '../files_output_starlight/' # Directory of input files
spectra_z_file = '../aux_files/galaxy_list.csv' # List of objects and their redshifts
BaseAges = pd.read_csv('../starlight/csv_Base_MILES15_v10.0', sep=',', header=None) # Base file # Spectra file name: DR12 or DR7 format
output_dir = '../results_starlight/'
### OUTPUT FILES ###
output_file = 'STARLIGHT_MAIN_RESULTS.csv' # Output file
output_file_Mini_SFH = 'Starlight_SFH_Mini.out' #Output File
output_file_Mini_cumul_SFH = 'Starlight_SFH_Mini_cumul.out' #Output File
output_file_Mcor_SFH = 'Starlight_SFH_Mcor.out' #Output File
output_file_Mcor_cumul_SFH = 'Starlight_SFH_Mcor_cumul.out' #Output File
output_file_Mini_CEH = 'Starlight_CEH_Mini.out' # Output file
output_file_Mcor_CEH = 'Starlight_CEH_Mcor.out' # Output file
output_file_Lum_CEH = 'Starlight_CEH_Lum.out' # Output file
### UNITS OF MEASUREMENT ###
age_format = 'yr' # yr or Gyr
met_format = 'Z' # logZ_Zsun or Z
Z_sun = 0.019
DR = 12
flag_sdss = 0 # Just initializing the variable
exec(open("../aux_files/cosmodist.py").read())
### BASE ###
t = pd.DataFrame(BaseAges)
t.columns = ['V1','V2','V3','V4','V5','V6','V7']
ages = np.array(t.V2[np.array(t.V3) == t.V3[1]])
n_age = len(ages)
Nbase = len(t)
######################
### DATA SELECTION ###
######################
data = pd.read_csv(spectra_z_file, sep=',') # Lista de spectros e redshifts
selection = (data['onoff'] == 1) & (data['extension'] < 100) # Select only the SDSS LCGs and the Gemini ones with 3 arcsec aperture
data = data[selection]
data.index = range(len(data))
######################
### CREATING MAIN OUTPUT FILE ###
Nobj = len(data)
vvv = np.full(Nobj, 'NaN', float) # Create a vector of NaNs with length Nobj, to be filled in later
temp = pd.DataFrame(['lcgID', 'extension', 'ra', 'dec', 'ageL_mean', 'metL_mean', 'aFeL_mean', 'ageM_mean', 'metM_mean', 'aFeM_mean',
'ageL_gmean', 'metL_gmean', 'aFeL_gmean', 'ageM_gmean', 'metM_gmean', 'aFeM_gmean',
'chisqrt', 'Mini_log_Mo', 'Mcor_log_Mo', 'magFrac_r', 'v0', 'vd', 'av', 'z',
'DM_Mpc', 'DA_Mpc', 'DL_Mpc', 'VC_Gpc3', 'TL_Gyr', 'DM_mag',
'SN1', 'SN2', 'SN3', 'SN4']).T
temp.to_csv(output_dir+output_file, header=False, index=False, sep=',', mode='w')
### CREATING SFH & CEH OUTPUT FILES ###
temp2 = pd.DataFrame([], columns=[-1,-1,-1, ages[0]/1e9, ages[1]/1e9, ages[2]/1e9, ages[3]/1e9, ages[4]/1e9, ages[5]/1e9, ages[6]/1e9, ages[7]/1e9, ages[8]/1e9, ages[9]/1e9, ages[10]/1e9, ages[11]/1e9, ages[12]/1e9, ages[13]/1e9, ages[14]/1e9])
temp2.to_csv(output_dir+output_file_Mini_SFH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Mini_cumul_SFH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Mcor_SFH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Mcor_cumul_SFH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Mini_CEH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Mcor_CEH, mode='w', sep=' ', index=False)
temp2.to_csv(output_dir+output_file_Lum_CEH, mode='w', sep=' ', index=False)
progress = np.around(Nobj * np.linspace(0, 1, 101)).astype(int)
### FILE PICKER ###
for i in range(0,Nobj):
    if (data.extension[i] > 10) & (data.extension[i] < 20): # STARLIGHT output names are all <10, but we do not want to lose the ordering; change the STARLIGHT output naming in the future
#for r in range(0,2):
# if flag_cut == 0:
# filename = (starlight_output_dir + '/output_starlight_sdss_LCG' + str(data.lcgID[i]) + '_' + str(data.extension[i]) + '.csv')
data.extension[i] = data.extension[i] - 10
flag_sdss = 1
if data.flag_sdss[i] == 0: filename = (starlight_output_dir + '/output_starlight_gemini_LCG' + str(data.lcgID[i]) + '_' + str(data.extension[i]) + '.csv')
if data.flag_sdss[i] == 1: filename = (starlight_output_dir + '/output_starlight_sdss_LCG' + str(data.lcgID[i]) + '_' + str(data.extension[i]) + '.csv')
starlight_file = filename
check = path.exists(starlight_file)
print(starlight_file)
if check == True:
################################################
# MAIN CODE
################################################
        t = pd.read_csv(starlight_file, skiprows=63, nrows=75, delim_whitespace=True, engine='python', header=None)
import logging
import pickle
import uuid
from datetime import datetime
from warnings import warn
from typing import List
import numpy as np
import pandas as pd
from aequilibrae.starts_logging import logger
from .__version__ import binary_version as VERSION
class Graph(object):
"""
Graph class
"""
def __init__(self):
self.logger = logging.getLogger("aequilibrae")
self.__integer_type = np.int64
self.__float_type = np.float64
self.required_default_fields = ["link_id", "a_node", "b_node", "direction", "id"]
self.__required_default_types = [
self.__integer_type,
self.__integer_type,
self.__integer_type,
np.int8,
self.__integer_type,
]
self.other_fields = ""
self.mode = ""
self.date = str(datetime.now())
self.description = "No description added so far"
self.num_links = -1
self.num_nodes = -1
self.num_zones = -1
self.compact_num_links = -1
self.compact_num_nodes = -1
self.network = pd.DataFrame([]) # This method will hold ALL information on the network
self.graph = pd.DataFrame([]) # This method will hold an array with ALL fields in the graph.
self.compact_graph = pd.DataFrame([]) # This method will hold an array with ALL fields in the graph.
# These are the fields actually used in computing paths
self.all_nodes = np.array(0) # Holds an array with all nodes in the original network
self.nodes_to_indices = np.array(0) # Holds the reverse of the all_nodes
self.fs = np.array([]) # This method will hold the forward star for the graph
self.cost = np.array([]) # This array holds the values being used in the shortest path routine
self.skims = None
self.compact_all_nodes = np.array(0) # Holds an array with all nodes in the original network
self.compact_nodes_to_indices = np.array(0) # Holds the reverse of the all_nodes
self.compact_fs = np.array([]) # This method will hold the forward star for the graph
self.compact_cost = np.array([]) # This array holds the values being used in the shortest path routine
self.compact_skims = None
self.capacity = np.array([]) # Array holds the capacity for links
self.free_flow_time = np.array([]) # Array holds the free flow travel time by link
        # these fields are kept as plain arrays for the sake of the Cython code
self.skim_fields = [] # List of skim fields to be used in computation
self.cost_field = False # Name of the cost field
self.block_centroid_flows = True
self.penalty_through_centroids = np.inf
self.centroids = None # NumPy array of centroid IDs
        self.g_link_crosswalk = np.array([])  # for a link ID in the BIG graph, a corresponding link in the compressed one
self.__version__ = VERSION
        # Randomly generate a unique Graph ID
self.__id__ = uuid.uuid4().hex
def default_types(self, tp: str):
"""
Returns the default integer and float types used for computation
Args:
tp (:obj:`str`): data type. 'int' or 'float'
"""
if tp == "int":
return self.__integer_type
elif tp == "float":
return self.__float_type
else:
raise ValueError("It must be either a int or a float")
def prepare_graph(self, centroids: np.ndarray) -> None:
"""
Prepares the graph for a computation for a certain set of centroids
Under the hood, if sets all centroids to have IDs from 1 through **n**,
which should correspond to the index of the matrix being assigned.
This is what enables having any node IDs as centroids, and it relies on
the inference that all links connected to these nodes are centroid
connectors.
Args:
centroids (:obj:`np.ndarray`): Array with centroid IDs. Mandatory type Int64, unique and positive
"""
self.__network_error_checking__()
# Creates the centroids
if centroids is None or not isinstance(centroids, np.ndarray):
raise ValueError("Centroids need to be a NumPy array of integers 64 bits")
if not np.issubdtype(centroids.dtype, np.integer):
raise ValueError("Centroids need to be a NumPy array of integers 64 bits")
if centroids.shape[0] == 0:
raise ValueError("You need at least one centroid")
if centroids.min() <= 0:
raise ValueError("Centroid IDs need to be positive")
if centroids.shape[0] != np.unique(centroids).shape[0]:
raise ValueError("Centroid IDs are not unique")
self.centroids = np.array(centroids, np.uint32)
properties = self.__build_directed_graph(self.network, centroids)
self.all_nodes, self.num_nodes, self.nodes_to_indices, self.fs, self.graph = properties
# We generate IDs that we KNOW will be constant across modes
self.graph.sort_values(by=["link_id", "direction"], inplace=True)
self.graph.loc[:, "__supernet_id__"] = np.arange(self.graph.shape[0]).astype(self.__integer_type)
self.graph.sort_values(by=["a_node", "b_node"], inplace=True)
self.num_links = self.graph.shape[0]
self.__build_derived_properties()
self.__build_compressed_graph()
self.compact_num_links = self.compact_graph.shape[0]
def __build_compressed_graph(self):
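        # In short: non-centroid nodes of degree 2 are treated as pass-through nodes, and
        # chains of links joined by such nodes are collapsed into single compressed links
        # while keeping track of which directions survive, so that path computations can
        # run on a much smaller graph.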
# Build link index
        link_idx = np.empty(self.network.link_id.max() + 1).astype(np.int64)
link_idx[self.network.link_id] = np.arange(self.network.shape[0])
nodes = np.hstack([self.network.a_node.values, self.network.b_node.values])
links = np.hstack([self.network.link_id.values, self.network.link_id.values])
counts = np.bincount(nodes)
idx = np.argsort(nodes)
all_nodes = nodes[idx]
all_links = links[idx]
links_index = np.empty(all_nodes.max() + 2, np.int64)
links_index.fill(-1)
nlist = np.arange(all_nodes.max() + 2)
y, x, _ = np.intersect1d(all_nodes, nlist, assume_unique=False, return_indices=True)
links_index[y] = x[:]
links_index[-1] = all_links.shape[0]
for i in range(all_nodes.max() + 1, 0, -1):
links_index[i - 1] = links_index[i] if links_index[i - 1] == -1 else links_index[i - 1]
# We keep all centroids for sure
counts[self.centroids] = 999
        truth = (counts == 2).astype(np.int64)
link_edge = truth[self.network.a_node.values] + truth[self.network.b_node.values]
link_edge = self.network.link_id.values[link_edge == 1]
simplified_links = np.repeat(-1, self.network.link_id.max() + 1)
        simplified_directions = np.zeros(self.network.link_id.max() + 1, np.int64)
        compressed_dir = np.zeros(self.network.link_id.max() + 1, np.int64)
        compressed_a_node = np.zeros(self.network.link_id.max() + 1, np.int64)
        compressed_b_node = np.zeros(self.network.link_id.max() + 1, np.int64)
slink = 0
major_nodes = {}
tot = 0
tot_graph_add = 0
for pre_link in link_edge:
if simplified_links[pre_link] >= 0:
continue
ab_dir = 1
ba_dir = 1
lidx = link_idx[pre_link]
a_node = self.network.a_node.values[lidx]
b_node = self.network.b_node.values[lidx]
drc = self.network.direction.values[lidx]
n = a_node if counts[a_node] == 2 else b_node
first_node = b_node if counts[a_node] == 2 else a_node
ab_dir = 0 if (first_node == a_node and drc < 0) or (first_node == b_node and drc > 0) else ab_dir
ba_dir = 0 if (first_node == a_node and drc > 0) or (first_node == b_node and drc < 0) else ba_dir
while counts[n] == 2:
# assert (simplified_links[pre_link] >= 0), "How the heck did this happen?"
simplified_links[pre_link] = slink
simplified_directions[pre_link] = -1 if a_node == n else 1
# Gets the link from the list that is not the link we are coming from
lnk = [all_links[k] for k in range(links_index[n], links_index[n + 1]) if pre_link != all_links[k]][0]
pre_link = lnk
lidx = link_idx[pre_link]
a_node = self.network.a_node.values[lidx]
b_node = self.network.b_node.values[lidx]
drc = self.network.direction.values[lidx]
ab_dir = 0 if (n == a_node and drc < 0) or (n == b_node and drc > 0) else ab_dir
ba_dir = 0 if (n == a_node and drc > 0) or (n == b_node and drc < 0) else ba_dir
n = (
self.network.a_node.values[lidx]
if n == self.network.b_node.values[lidx]
else self.network.b_node.values[lidx]
)
if max(ab_dir, ba_dir) < 1:
tot += 1
tot_graph_add += ab_dir + ba_dir
simplified_links[pre_link] = slink
simplified_directions[pre_link] = -1 if a_node == n else 1
last_node = b_node if counts[a_node] == 2 else a_node
major_nodes[slink] = [first_node, last_node]
# Available directions are NOT indexed like the other arrays
compressed_a_node[slink] = first_node
compressed_b_node[slink] = last_node
if ab_dir > 0:
if ba_dir > 0:
compressed_dir[slink] = 0
else:
compressed_dir[slink] = 1
elif ba_dir > 0:
compressed_dir[slink] = -1
else:
compressed_dir[slink] = -999
slink += 1
links_to_remove = np.argwhere(simplified_links >= 0)
df = pd.DataFrame(self.network, copy=True)
df = df[~df.link_id.isin(links_to_remove[:, 0])]
df = df[df.a_node != df.b_node]
comp_lnk = pd.DataFrame(
{
"a_node": compressed_a_node[:slink],
"b_node": compressed_b_node[:slink],
"direction": compressed_dir[:slink],
"link_id": np.arange(slink),
}
)
max_link_id = self.network.link_id.max() * 10
comp_lnk.loc[:, "link_id"] += max_link_id
df = pd.concat([df, comp_lnk])
df = df[["id", "link_id", "a_node", "b_node", "direction"]]
properties = self.__build_directed_graph(df, self.centroids)
self.compact_all_nodes = properties[0]
self.compact_num_nodes = properties[1]
self.compact_nodes_to_indices = properties[2]
self.compact_fs = properties[3]
self.compact_graph = properties[4]
crosswalk = pd.DataFrame(
{
"link_id": np.arange(simplified_directions.shape[0]),
"link_direction": simplified_directions,
"compressed_link": simplified_links,
"compressed_direction": np.ones(simplified_directions.shape[0]).astype(np.int),
}
)
crosswalk = crosswalk[crosswalk.compressed_link >= 0]
crosswalk.loc[:, "compressed_link"] += max_link_id
        cw2 = pd.DataFrame(crosswalk, copy=True)
# This code is part of the epytope distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
.. module:: Core.AResult
:synopsis: Contains relevant classes describing results of predictions.
.. moduleauthor:: schubert
"""
__author__ = 'schubert'
import abc
import numpy
from numpy.lib.arraysetops import isin
import pandas
from epytope.Core.Allele import Allele
from epytope.Core.Peptide import Peptide
from copy import deepcopy
from sys import exit
import logging
class AResult(pandas.DataFrame, metaclass=abc.ABCMeta):
"""
A :class:`~epytope.Core.Result.AResult` object is a :class:`pandas.DataFrame` with with multi-indexing.
    This class is used as an interface and can be extended with custom short-cuts for the often
    tedious calls in pandas
"""
@abc.abstractmethod
def filter_result(self, expressions):
"""
Filter result based on a list of expressions
:param list((str, comparator, float)) expressions: A list of triples consisting of (method_name, comparator, threshold)
:return: A new filtered AResult object
:rtype: :class:`~epytope.Core.Result.AResult`
"""
raise NotImplementedError()
@abc.abstractmethod
def merge_results(self, others):
"""
Merges results of the same type and returns a merged result
:param others: A (list of) :class:`~epytope.Core.Result.AResult` object(s) of the same class
:type others: list(:class:`~epytope.Core.Result.AResult`)/:class:`~epytope.Core.Result.AResult`
:return: A new merged :class:`~epytope.Core.Result.AResult` object
:rtype: :class:`~epytope.Core.Result.AResult`
"""
raise NotImplementedError()
class EpitopePredictionResult(AResult):
"""
A :class:`~epytope.Core.Result.EpitopePredictionResult` object is a DataFrame with multi-indexing, where column
Ids are the prediction model (i.e HLA :class:`~epytope.Core.Allele.Allele` for epitope prediction), row ID the
target of the prediction (i.e. :class:`~epytope.Core.Peptide.Peptide`) and the second row ID the predictor
(e.g. BIMAS)
EpitopePredictionResult
+----------------+-------------------------------+-------------------------------+
| Allele | Allele Obj 1 | Allele Obj 2 |
+- - - - - - - - +- - - - - - - -+- - - - - - - -+- - - - - - - -+- - - - - - - -+
| Method | Method 1 | Method 2 | Method 1 | Method 2 |
+- - - - - - - - +- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+
| ScoreType | Score | Rank | Score | Rank | Score | Rank | Score | Rank |
+- - - - - - - - +- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+- - - -+
| Peptides | | | | | | | | |
+================+=======+=======+=======+=======+=======+=======+=======+=======+
| Peptide Obj 1 | 0.03 | 57.4 | 0.05 | 51.1 | 0.08 | 49.4 | 0.73 | 3.12 |
+----------------+-------+-------+-------+-------+-------+-------+-------+-------+
| Peptide Obj 2 | 0.32 | 13.2 | 0.31 | 14.1 | 0.25 | 22.1 | 0.11 | 69.1 |
+----------------+-------+-------+-------+-------+-------+-------+-------+-------+
"""
def filter_result(self, expressions, scoretype='Score'):
"""
Filters a result data frame based on a specified expression consisting of a list of triple with
(method_name, comparator, threshold) and a str of the methods scoretype to be filtered.
The expression is applied to each row. If any of the columns fulfill the criteria the row remains.
:param list((str, comparator, float)) expressions: A list of triples consisting of (method_name, comparator, threshold)
:param str scoretype: Indicates which scoretype of the specified method should be filtered
:return: Filtered result object
:rtype: :class:`~epytope.Core.Result.EpitopePredictionResult`
"""
if isinstance(expressions, tuple):
expressions = [expressions]
df = deepcopy(self)
methods = list(set(df.columns.get_level_values(1)))
scoretypes = list(set(df.columns.get_level_values(2)))
if scoretype not in scoretypes:
raise ValueError("Specified ScoreType {} does not match ScoreTypes of data frame {}.".format(scoretype, scoretypes))
for expr in expressions:
method, comp, thr = expr
if method not in methods:
raise ValueError("Specified method {} does not match methods of data frame {}.".format(method, methods))
else:
filt = comp(df.xs(method, axis = 1, level = 1).xs(scoretype, axis = 1, level = 1), thr).values
# Only keep rows which contain values fulfilling the comparators logic in the specified method
                keep_row = [row.any() for row in filt]
df = df.loc[keep_row]
return EpitopePredictionResult(df)
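    # A minimal usage sketch (not part of the original module; the method name and threshold are
    # illustrative): assuming `result` is an EpitopePredictionResult returned by a predictor,
    # rows can be filtered on the "Score" score type with a standard comparator, e.g.
    #
    #     import operator
    #     filtered = result.filter_result([("BIMAS", operator.ge, 0.5)], scoretype="Score")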
def merge_results(self, others):
"""
Merges results of type :class:`~epytope.Core.Result.EpitopePredictionResult` and returns the merged result
:param others: Another (list of) :class:`~epytope.Core.Result.EpitopePredictionResult`(s)
:type others: list(:class:`~epytope.Core.Result.EpitopePredictionResult`)/:class:`~epytope.Core.Result.EpitopePredictionResult`
:return: A new merged :class:`~epytope.Core.Result.EpitopePredictionResult` object
:rtype: :class:`~epytope.Core.Result.EpitopePredictionResult`
"""
df = self.copy(deep=False)
if type(others) == type(self):
others = [others]
# Concatenates self and to be merged dataframe(s)
for other in others:
df = pandas.concat([df, other], axis=1)
# Merge result of multiple predictors in others per allele
df_merged = pandas.concat([group[1] for group in df.groupby(level=[0,1], axis=1)], axis=1)
return EpitopePredictionResult(df_merged)
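    # Hedged example (illustrative only): results for the same peptides from different
    # predictors can be combined column-wise, either one at a time or as a list, e.g.
    #
    #     merged = result_a.merge_results(result_b)
    #     merged = result_a.merge_results([result_b, result_c])
    #
    # result_a, result_b and result_c are hypothetical EpitopePredictionResult objects.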
    @staticmethod
    def from_dict(d, peps, method):
"""
Create :class:`~epytope.Core.Result.EpitopePredictionResult` object from dictionary holding scores for alleles, peptides and a specified method
:param d: dict with following structure: {allele: {scoretype: {peptide: score}}}
:param peps: list of :class:`~epytope.Core.Peptide.Peptide`
:param method: str specifying the prediction method
:return: A new :class:`~epytope.Core.Result.EpitopePredictionResult` object
:rtype: :class:`~epytope.Core.Result.EpitopePredictionResult`
"""
scoreType = numpy.asarray([list(m.keys()) for m in [metrics for a, metrics in d.items()]]).flatten()
alleles = numpy.asarray([numpy.repeat(a, len(set(scoreType))) for a in d]).flatten()
meth = numpy.repeat(method, len(scoreType))
multi_cols = pandas.MultiIndex.from_arrays([alleles, meth, scoreType], names=["Allele", "Method", "ScoreType"])
df = pandas.DataFrame(float(0),index= | pandas.Index(peps) | pandas.Index |
# Diffusion Maps Framework implementation as part of MSc Data Science Project of student
# <NAME> at University of Southampton, MSc Data Science course
# Script 3: Principal Component Analysis
import os, math
import string
import openpyxl
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn import preprocessing
from sklearn.decomposition import PCA
import datetime
matplotlib.style.use('ggplot')
matplotlib.rcParams['legend.scatterpoints'] = 1
datasource = './data/normalised/sqrtData.xlsx'
dtsource = './data/datetimes.xlsx'
def main():
# Read data, get names of the 6 sheets, for different types of bacteria
xlData = pd.ExcelFile(datasource)
sheetNames = xlData.sheet_names
dtExcel = pd.ExcelFile(dtsource)
# writer = pd.ExcelWriter('./data/pcaData.xlsx')
no = 321
fig = plt.figure()
for bactName in sheetNames:
# no += licycle.next()
worksheet = xlData.parse(bactName)
# Read time-date from file
dtDf = dtExcel.parse(bactName)
        dt = pd.DataFrame(dtDf).to_numpy()
# Keep only the actual timeseries data, last 30 columns, and transpose
X = | pd.DataFrame(worksheet.ix[:,:29]) | pandas.DataFrame |
## GitHub: dark-teal-coder
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from fpdf import FPDF
import datetime
import string
import os
## Get datetime information
current_datetime = datetime.datetime.now()
current_year = current_datetime.year
## Get the running script path
script_path = os.path.dirname(os.path.abspath(__file__))
## Get the current working directory
cwd = os.path.abspath(os.getcwd())
# print(script_path, cwd)
def read_noc(noc_filepath):
"""This function reads a data file containing a table of National Occupational Classification (NOC) codes related to computer
science and information technology jobs and returns the data in DataFrame format."""
try:
## Use Pandas to read in csv file
## Python parsing engine for RegEx delimiters
df_noc = pd.read_csv(noc_filepath, sep=', ', header=0, engine='python')
except FileNotFoundError:
print(f"The following file cannot be found:", noc_filepath, sep='\n')
except:
print("An unknown error occurs while reading in the following file causing the program to exit prematurely:", noc_filepath,
sep='\n')
else:
## Unify the headers
df_noc.columns = df_noc.columns.str.lower()
## Trim leading and ending spaces in the headers
## Ref.: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rename.html
## (inplace=True) means not to return a new DataFrame
df_noc.rename(columns=lambda x: x.strip(), inplace=True)
# print(df_noc)
return df_noc
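# Illustrative call (a sketch; "noc_codes.csv" is a hypothetical file whose columns are
# separated by a comma and a space, as the parser above expects):
#
#     df_noc = read_noc("noc_codes.csv")
#     print(df_noc.columns.tolist())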
def get_page(url_code, c):
"""This function scrapes wage data of 10 tech occupations classified by NOC from Job Bank and returns the data list."""
url_base = "https://www.jobbank.gc.ca/wagereport/occupation/"
## Add URL code to the end of the base URL to go to different wage report pages on Job Bank
url = url_base + str(url_code[c])
html_response = requests.get(url)
## The .content attribute holds raw bytes, which can be decoded better than the .text attribute.
html_doc = BeautifulSoup(html_response.content, 'html.parser')
# print(html_doc)
data_list = []
# wage_table = html_doc.find(id="wage-occ-report")
# print(wage_table)
nation_wages = html_doc.find("tr", class_="areaGroup national")
data_list.append(nation_wages.text.strip().split())
province_wages = html_doc.find_all("tr", class_="areaGroup province prov")
for prov_wage in province_wages:
data_list.append(prov_wage.text.strip().rsplit(maxsplit=3))
# print([row for row in data_list])
return data_list
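# Hedged sketch of how get_page() is driven by write_excel() below: url_code is assumed to be
# an indexable collection of Job Bank wage-report codes (the real codes come from the NOC file,
# the value here is only a placeholder):
#
#     rows = get_page([22542], 0)
#     # rows[0] is the national wage line; the remaining rows are one per province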
def write_excel(filepath_in, df_noc, url_code):
writer = pd.ExcelWriter(filepath_in, engine='xlsxwriter')
headers_nation = ['NOC', 'Occupation', 'URL Code', 'Low', 'Mid', 'High']
headers_province = ['Province', 'Low', 'Mid', 'High']
## Each iteration will scrape a webpage and change the data for 1 NOC into a DataFrame
df_tech_wages_ca = pd.DataFrame()
df_tech_wages_prov = pd.DataFrame()
for i in range(len(url_code)):
noc = f"NOC{df_noc.loc[i, 'noc']}"
data_list = get_page(url_code, i)
# print(df_noc.loc[i])
df_wage_table = pd.DataFrame(data_list, columns =['area', 'low', 'mid', 'high'])
# df_wage_table = pd.to_numeric(df_wage_table, errors='coerce')
# df_wage_table = df_wage_table.astype({'low': 'float64', 'mid': 'float64', 'high': 'float64'}, errors='ignore')
print(df_wage_table)
## Get the national wage data from the 1st row of each DataFrame
df_can_wage = df_wage_table.iloc[[0]]
df_career = df_noc.iloc[[i]].reset_index(drop=True)
df_can_wage_career = pd.concat([df_career, df_can_wage], axis=1)
        df_tech_wages_ca = pd.concat([df_tech_wages_ca, df_can_wage_career], ignore_index=True)
## Drop the 1st row containing national wage data
df_wage_table = df_wage_table.drop(0)
df_wage_table.to_excel(writer, sheet_name=noc, header=headers_province, index=False)
df_wage_table['high'] = | pd.to_numeric(df_wage_table['high'], errors='coerce') | pandas.to_numeric |
'''
pyjade
A program to export, curate, and transform data from the MySQL database used by the Jane Addams Digital Edition.
'''
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement,DB)
self.omek_items = self.omek_items.set_index('item_id',drop=False)
self.objects = self.omek_items.copy()
self.objects['item_id'] = self.objects['item_id'].apply(
lambda x: self.convert_to_jade_id(x))
self.objects.rename(columns={'item_id': 'jade_id'},inplace=True)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects = self.objects[self.objects['jade_type'].isin(
['Text','Event','Person','Organization','Publication']
)]
        # Noise is an alternate dataset to record property values that don't fit the regular usage
self.noise = self.objects.copy()
        self.noise = self.noise.drop('jade_type', axis=1)
        self.noise = self.noise.drop('jade_collection', axis=1)
def ingest(self,limit=None):
'''
Get the item element texts
'''
statement = f'''
SELECT et.id AS id, et.record_id AS record_id,
et.element_id AS element_id, et.`text` AS el_text,
items.item_type_id AS item_type
FROM omek_element_texts as et
JOIN omek_items AS items ON et.record_id = items.id
WHERE record_type = "Item"
ORDER BY id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.element_texts = pd.read_sql(statement,DB)
# Load environment variables
ELEMENT_IDS = list(ITEM_ELEMENTS.keys())
# Set data structure:
data = {}
noise = {}
# Iterate through the element_texts
iter = tqdm(self.element_texts.iterrows())
iter.set_description("Ingesting item attributes")
for tup in iter:
row = tup[1]
element_id = str(row.loc['element_id'])
if row.loc['record_id'] in self.omek_items.index.values:
jade_type = self.omek_items.loc[row.loc['record_id'],'jade_type']
jade_id = self.convert_to_jade_id(row.loc['record_id'])
# Filter element texts through environment variables
if element_id in ELEMENT_IDS:
if jade_type in TYPES.values():
element_label = ITEM_ELEMENTS[element_id]
# Filters property values through the sets designated in the options
if element_label in INCLUDE_PROPS[jade_type]:
compile_json(data,jade_id,element_label,row.loc['el_text'])
else:
compile_json(noise,jade_id,element_label,row.loc['el_text'])
# if CRUMBS:
# print('Excluded',element_label,'in type',jade_type)
# Add accumulated data to DataFrame
new_df = pd.DataFrame.from_dict(data,orient='index')
new_noise_df = pd.DataFrame.from_dict(noise,orient='index')
self.objects = pd.concat([self.objects,new_df],axis=1)
self.noise = pd.concat([self.noise,new_noise_df],axis=1)
# Add URLs
base_url = "https://digital.janeaddams.ramapo.edu/items/show/"
self.objects.insert(loc=1,column='jade_url',value=[
base_url+id.split('_')[-1] for id in self.objects.index.values
])
self.add_collections(limit)
self.add_tags(limit)
# Remove records with no title fields found
self.objects = self.objects.dropna(subset=['dcterms_title'])
def convert_to_jade_id(self,item_id):
'''
Prepend the type string to the SQL primary key so that locations and items are unique in the same set of relations
'''
        if not isinstance(item_id, str):
if item_id in self.omek_items.index.values:
the_type = self.omek_items.at[item_id,"jade_type"]
if the_type in list(TYPES.values()):
return the_type.lower()+"_"+str(item_id)
else:
return "unspecified_"+str(item_id)
else:
return "unpublished_"+str(item_id)
else:
return item_id
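    # Worked example (a sketch; the numeric ids are illustrative): an integer primary key that
    # is present in omek_items with jade_type "Text" is returned as a prefixed string id,
    # while an unknown id is flagged as unpublished:
    #
    #     self.convert_to_jade_id(101)     # -> "text_101"
    #     self.convert_to_jade_id(999999)  # -> "unpublished_999999"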
def add_tags(self,limit):
'''
Pull tags from the database
'''
statement = f'''
SELECT * FROM omek_records_tags
JOIN omek_tags on omek_records_tags.tag_id = omek_tags.id;
'''
self.tag_df = | pd.read_sql(statement,DB) | pandas.read_sql |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
| tm.assert_equal(result, expected) | pandas._testing.assert_equal |
# common.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""Common variables and functions used across flowsa"""
import shutil
import os
import yaml
import pandas as pd
import numpy as np
from dotenv import load_dotenv
from esupy.processed_data_mgmt import create_paths_if_missing
import flowsa.flowsa_yaml as flowsa_yaml
import flowsa.exceptions
from flowsa.schema import flow_by_activity_fields, flow_by_sector_fields, \
flow_by_sector_collapsed_fields, flow_by_activity_mapped_fields, \
flow_by_activity_wsec_fields, flow_by_activity_mapped_wsec_fields, \
activity_fields
from flowsa.settings import datapath, MODULEPATH, logoutputpath, \
sourceconfigpath, log, flowbysectormethodpath
# Sets default Sector Source Name
SECTOR_SOURCE_NAME = 'NAICS_2012_Code'
flow_types = ['ELEMENTARY_FLOW', 'TECHNOSPHERE_FLOW', 'WASTE_FLOW']
sector_level_key = {"NAICS_2": 2,
"NAICS_3": 3,
"NAICS_4": 4,
"NAICS_5": 5,
"NAICS_6": 6}
# withdrawn keyword changed to "none" over "W"
# because unable to run calculation functions with text string
WITHDRAWN_KEYWORD = np.nan
def load_api_key(api_source):
"""
Loads an API Key from "API_Keys.env" file using the
'api_name' defined in the FBA source config file. The '.env' file contains
    the user's personal API keys. The user must register with this
    API, get the key, and manually add it to "API_Keys.env"
See wiki for how to get an api:
https://github.com/USEPA/flowsa/wiki/Using-FLOWSA#api-keys
:param api_source: str, name of source, like 'BEA' or 'Census'
:return: the users API key as a string
"""
load_dotenv(f'{MODULEPATH}API_Keys.env', verbose=True)
key = os.getenv(api_source)
if key is None:
raise flowsa.exceptions.APIError(api_source=api_source)
return key
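# Minimal usage sketch (assumes a 'BEA' entry has been added to API_Keys.env; the key name is
# only an example taken from the docstring above):
#
#     bea_key = load_api_key('BEA')  # raises flowsa.exceptions.APIError if the key is missing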
def load_crosswalk(crosswalk_name):
"""
Load NAICS crosswalk between the years 2007, 2012, 2017
:return: df, NAICS crosswalk over the years
"""
cw_dict = {'sector_timeseries': 'NAICS_Crosswalk_TimeSeries',
'sector_length': 'NAICS_2012_Crosswalk',
'sector_name': 'NAICS_2012_Names',
'household': 'Household_SectorCodes',
'government': 'Government_SectorCodes',
'BEA': 'NAICS_to_BEA_Crosswalk'
}
fn = cw_dict.get(crosswalk_name)
cw = pd.read_csv(f'{datapath}{fn}.csv', dtype="str")
return cw
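# Illustrative calls (a sketch; they rely on the packaged crosswalk CSVs being present):
#
#     cw_len = load_crosswalk('sector_length')   # NAICS_2012_Crosswalk.csv as a str-typed df
#     cw_names = load_crosswalk('sector_name')   # NAICS_2012_Names.csv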
def load_sector_length_cw_melt():
cw_load = load_crosswalk('sector_length')
cw_melt = cw_load.melt(var_name="SectorLength", value_name='Sector'
).drop_duplicates().reset_index(drop=True)
cw_melt = cw_melt.dropna().reset_index(drop=True)
cw_melt['SectorLength'] = cw_melt['SectorLength'].str.replace(
'NAICS_', "")
cw_melt['SectorLength'] = pd.to_numeric(cw_melt['SectorLength'])
cw_melt = cw_melt[['Sector', 'SectorLength']]
return cw_melt
def return_bea_codes_used_as_naics():
"""
:return: list of BEA codes used as NAICS
"""
cw_list = []
for cw in ['household', 'government']:
df = load_crosswalk(cw)
cw_list.append(df)
# concat data into single dataframe
cw = pd.concat(cw_list, sort=False)
code_list = cw['Code'].drop_duplicates().values.tolist()
return code_list
def load_yaml_dict(filename, flowbytype=None, filepath=None):
"""
Load the information in a yaml file, from source_catalog, or FBA,
or FBS files
:return: dictionary containing all information in yaml
"""
if filename == 'source_catalog':
folder = datapath
else:
# first check if a filepath for the yaml is specified, as is the
# case with FBS method files located outside FLOWSA
if filepath is not None:
log.info(f'Loading {filename} from {filepath}')
folder = filepath
else:
if flowbytype == 'FBA':
folder = sourceconfigpath
elif flowbytype == 'FBS':
folder = flowbysectormethodpath
else:
raise KeyError('Must specify either \'FBA\' or \'FBS\'')
yaml_path = folder + filename + '.yaml'
try:
with open(yaml_path, 'r') as f:
config = flowsa_yaml.load(f, filepath)
except FileNotFoundError:
raise flowsa.exceptions.FlowsaMethodNotFoundError(
method_type=flowbytype, method=filename)
return config
def load_values_from_literature_citations_config():
"""
Load the config file that contains information on where the
values from the literature come from
:return: dictionary of the values from the literature information
"""
sfile = (f'{datapath}bibliographyinfo/'
f'values_from_literature_source_citations.yaml')
with open(sfile, 'r') as f:
config = yaml.safe_load(f)
return config
def load_fbs_methods_additional_fbas_config():
"""
    Load the config file that contains information on the additional FBAs
    used in FBS methods
    :return: dictionary of the additional FBA information for FBS methods
"""
sfile = f'{datapath}bibliographyinfo/fbs_methods_additional_fbas.yaml'
with open(sfile, 'r') as f:
config = yaml.safe_load(f)
return config
def load_functions_loading_fbas_config():
"""
    Load the config file that contains information on the functions
    used to load FBAs
    :return: dictionary of the functions used to load FBAs
"""
sfile = datapath + 'bibliographyinfo/functions_loading_fbas.yaml'
with open(sfile, 'r') as f:
config = yaml.safe_load(f)
return config
def create_fill_na_dict(flow_by_fields):
"""
Dictionary for how to fill nan in different column types
:param flow_by_fields: list of columns
:return: dictionary for how to fill missing values by dtype
"""
fill_na_dict = {}
for k, v in flow_by_fields.items():
if v[0]['dtype'] == 'str':
fill_na_dict[k] = ""
elif v[0]['dtype'] == 'int':
fill_na_dict[k] = 0
elif v[0]['dtype'] == 'float':
fill_na_dict[k] = 0
return fill_na_dict
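# Hedged example of the mapping this produces (the field names below are illustrative, not the
# real flowsa schema): str columns map to "", int and float columns map to 0:
#
#     create_fill_na_dict({'Unit': [{'dtype': 'str'}], 'FlowAmount': [{'dtype': 'float'}]})
#     # -> {'Unit': '', 'FlowAmount': 0}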
def get_flow_by_groupby_cols(flow_by_fields):
"""
Return groupby columns for a type of dataframe
:param flow_by_fields: dictionary
:return: list, column names
"""
groupby_cols = []
for k, v in flow_by_fields.items():
if v[0]['dtype'] == 'str':
groupby_cols.append(k)
elif v[0]['dtype'] == 'int':
groupby_cols.append(k)
if flow_by_fields == flow_by_activity_fields:
# Do not use description for grouping
groupby_cols.remove('Description')
return groupby_cols
fba_activity_fields = [activity_fields['ProducedBy'][0]['flowbyactivity'],
activity_fields['ConsumedBy'][0]['flowbyactivity']]
fbs_activity_fields = [activity_fields['ProducedBy'][1]['flowbysector'],
activity_fields['ConsumedBy'][1]['flowbysector']]
fba_fill_na_dict = create_fill_na_dict(flow_by_activity_fields)
fbs_fill_na_dict = create_fill_na_dict(flow_by_sector_fields)
fbs_collapsed_fill_na_dict = create_fill_na_dict(
flow_by_sector_collapsed_fields)
fba_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_activity_fields)
fba_mapped_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_activity_mapped_fields)
fba_mapped_wsec_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_activity_mapped_wsec_fields)
fbs_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_sector_fields)
fbs_grouping_fields_w_activities = (
fbs_default_grouping_fields + (['ActivityProducedBy',
'ActivityConsumedBy']))
fbs_collapsed_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_sector_collapsed_fields)
fba_wsec_default_grouping_fields = get_flow_by_groupby_cols(
flow_by_activity_wsec_fields)
def clean_str_and_capitalize(s):
"""
Trim whitespace, modify string so first letter capitalized.
:param s: str
:return: str, formatted
"""
if s.__class__ == str:
s = s.strip()
s = s.lower()
s = s.capitalize()
return s
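# Example behaviour (a sketch): whitespace is trimmed and only the first letter ends up
# capitalized, e.g. clean_str_and_capitalize("  natural GAS ") -> "Natural gas";
# non-string inputs are returned unchanged.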
def capitalize_first_letter(string):
"""
Capitalize first letter of words
:param string: str
:return: str, modified
"""
return_string = ""
split_array = string.split(" ")
for s in split_array:
return_string = return_string + " " + s.capitalize()
return return_string.strip()
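# Example behaviour (a sketch): each space-separated word is capitalized, e.g.
# capitalize_first_letter("natural gas liquids") -> "Natural Gas Liquids".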
def get_flowsa_base_name(filedirectory, filename, extension):
"""
If filename does not match filename within flowsa due to added extensions
onto the filename, cycle through
name, dropping strings after each underscore until the name is found
:param filedirectory: string, path to directory
:param filename: string, name of original file searching for
:param extension: string, type of file, such as "yaml" or "py"
:return: string, corrected file path name
"""
# If a file does not exist, modify file name, dropping portion after last
# underscore. Repeat this process until the file name exists or no
# underscores are left.
while '_' in filename:
if os.path.exists(f"{filedirectory}{filename}.{extension}"):
break
filename, _ = filename.rsplit('_', 1)
return filename
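# Hedged example (the file names are hypothetical): if "Example_FBA_2017.yaml" does not exist
# in sourceconfigpath but "Example_FBA.yaml" does, the trailing "_2017" is dropped:
#
#     get_flowsa_base_name(sourceconfigpath, "Example_FBA_2017", "yaml")
#     # -> "Example_FBA"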
def rename_log_file(filename, fb_meta):
"""
Rename the log file saved to local directory using df meta for df
:param filename: str, name of dataset
:param fb_meta: metadata for parquet
:return: modified log file name
"""
# original log file name - all log statements
log_file = f'{logoutputpath}{"flowsa.log"}'
# generate new log name
new_log_name = (f'{logoutputpath}{filename}_v'
f'{fb_meta.tool_version}'
f'{"_" + fb_meta.git_hash if fb_meta.git_hash else ""}'
f'.log')
# create log directory if missing
create_paths_if_missing(logoutputpath)
# rename the standard log file name (os.rename throws error if file
# already exists)
shutil.copy(log_file, new_log_name)
# original log file name - validation
log_file = f'{logoutputpath}{"validation_flowsa.log"}'
# generate new log name
new_log_name = (f'{logoutputpath}{filename}_v'
f'{fb_meta.tool_version}'
f'{"_" + fb_meta.git_hash if fb_meta.git_hash else ""}'
f'_validation.log')
# create log directory if missing
create_paths_if_missing(logoutputpath)
# rename the standard log file name (os.rename throws error if file
# already exists)
shutil.copy(log_file, new_log_name)
def return_true_source_catalog_name(sourcename):
"""
Drop any extensions on source name until find the name in source catalog
"""
while (load_yaml_dict('source_catalog').get(sourcename) is None) & (
'_' in sourcename):
sourcename = sourcename.rsplit("_", 1)[0]
return sourcename
def check_activities_sector_like(df_load, sourcename=None):
"""
    Check if the activities in a df are sector-like;
    if the sourcename cannot be found in the source catalog, drop extensions on the
    source name
    :param df_load: df, df to determine if activities are sector-like
    :param sourcename: str, optional, can identify the sourcename to use
"""
# identify sourcename
if sourcename is not None:
s = sourcename
else:
if 'SourceName' in df_load.columns:
s = pd.unique(df_load['SourceName'])[0]
elif 'MetaSources' in df_load.columns:
s = | pd.unique(df_load['MetaSources']) | pandas.unique |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_empty_col(self):
# GH 13653
s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
class TestGenericArrayFormatter:
def test_1d_array(self):
# GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt.GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt.GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def test_2d_extension_type(self):
# GH 33770
# Define a stub extension type with just enough code to run Series.__repr__()
class DtypeStub(pd.api.extensions.ExtensionDtype):
@property
def type(self):
return np.ndarray
@property
def name(self):
return "DtypeStub"
class ExtTypeStub(pd.api.extensions.ExtensionArray):
def __len__(self):
return 2
def __getitem__(self, ix):
return [ix == 1, ix == 0]
@property
def dtype(self):
return DtypeStub()
series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
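# Note: some interpreter builds (notably older MSVC-based Windows builds of Python) print
# float exponents with three digits, e.g. 1.7e+008 instead of 1.7e+08. The helper below
# detects that so the float-formatting tests in this file can pick the matching expected strings.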
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_display_precision_trailing_zeroes(self):
# Issue #20359: trimming zeros while there is no decimal point
# Happens when display precision is set to zero
with option_context("display.precision", 0):
s = Series([840.0, 4200.0])
expected_output = "0 840\n1 4200\ndtype: float64"
assert str(s) == expected_output
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with option_context("display.precision", 6):
# DataFrame example from issue #9764
d = DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = DataFrame({"x": [12345.6789]})
assert str(df) == " x\n0 12345.6789"
df = DataFrame({"x": [2e6]})
assert str(df) == " x\n0 2000000.0"
df = DataFrame({"x": [12345.6789, 2e6]})
assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="sub_day")
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="long")
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1ns = pd.to_timedelta(1, unit="ns")
drepr = lambda x: x._repr_base(format="all")
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'0 days 00:00:00'"
assert result[1].strip() == "'0 days 00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'0 days 00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT])
result = fmt.Datetime64Formatter(x).get_result()
"""
Pull my Garmin sleep data via json requests.
This script was adapted from: https://github.com/kristjanr/my-quantified-sleep
The aforementioned code required the user to manually define
headers and cookies. It also stored all of the data within Night objects.
My modifications include using selenium to drive a Chrome browser. This avoids
the hassle of getting headers and cookies manually (the cookies would have to be updated
every time the Garmin session expired). It also segments data requests because
Garmin will respond with an error if more than 32 days are requested at once. Lastly,
data is stored as a pandas dataframe and then written to a user-defined directory
as a pickle file.
Data is then processed and merged with older data from my Microsoft smartwatch.
The merged data is also saved as pandas dataframes in pickle files.
Finally, sunrise and sunset data is downloaded for all days in the sleep dataset.
This data is also archived as a pandas dataframe and saved as a pickle file.
The data update process has been broken into steps so that progress can be passed
to the Dash app.
"""
# import base packages
import datetime, json, os, re, sys
from itertools import chain
from os.path import isfile
# import installed packages
import pytz, requests, chardet, brotli
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# input variables
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
else:
# running on heroku server
ENV = "heroku"
if ENV == "local":
proj_path = "C:/Users/adiad/Anaconda3/envs/SleepApp/sleep_app/" # read/write data dir
else:
proj_path = ""
GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google-chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
garmin_results_pkl_fn = "data/garmin_sleep_df.pkl" # name of pickle file archiving Garmin results (new results merged with any previous) for easy updating and subsequent processing
garmin_results_json_fn = "data/new_garmin_sleep.json" # name of json file with only new raw results
garmin_results_csv_fn = "data/garmin_sleep_df.csv" # name of csv file to archive (combining new results with any previous)
all_descr_results_fn = "data/all_sleep_descr_df.pkl" # name of pickle file combining all Garmin & Microsoft sleep session description data
all_event_results_fn = "data/all_sleep_event_df.pkl" # name of pickle file combining all Garmin & Microsoft event data
sun_pkl_fn = "data/sun_df.pkl" # name of pickle file to archive sunrise/sunset data
local_tz = "US/Eastern" # pytz local timezone for sunrise/sunset time conversion
sun_lat = 39.76838 # latitude where sunrise/sunset times are derived from
sun_lon = -86.15804 # longitude where sunrise/sunset times are derived from
run_browser_headless = False # will hide the Chrome browser during execution if True
browser_action_timeout = 60 # max time (seconds) for browser wait operations
start_date = '2017-03-01' # first date to pull sleep data
end_date = str(datetime.date.today() - datetime.timedelta(days=1)) # last date to pull sleep data
user_name = "email address" # Garmin username
password = "password" # Garmin password
signin_url = "https://connect.garmin.com/signin/" # Garmin sign-in webpage
sleep_url_base = "https://connect.garmin.com/modern/sleep/" # Garmin sleep base URL (sans date)
sleep_url_json_req = "https://connect.garmin.com/modern/proxy/wellness-service/wellness/dailySleepsByDate"
def download(start_date, end_date, headers, session_id):
params = (
('startDate', start_date),
('endDate', end_date),
('_', session_id),
)
response = requests.get(sleep_url_json_req, headers=headers, params=params)
if response.status_code != 200:
print("RESPONSE ERROR RECEIVED:")
print('Status code: %d' % response.status_code)
response_dict = json.loads(response.content.decode('UTF-8'))
print('Content: %s' % response_dict["message"])
raise Exception
return response
def download_to_json(start_date, end_date, headers, session_id):
response = download(start_date, end_date, headers, session_id)
# most responses are in ascii (no encoding)
# sporadically a response will have brotli encoding
#print("The response is encoded with:", chardet.detect(response.content))
if chardet.detect(response.content)["encoding"] == 'ascii':
return json.loads(response.content)
else:
return json.loads(brotli.decompress(response.content))
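# Hedged sketch (not part of the original script): the module docstring notes that Garmin
# rejects requests spanning more than 32 days, so downloads have to be segmented. The helper
# below shows one way to wrap download_to_json() for that; the name download_sleep_chunked
# and the 32-day default are illustrative assumptions, and the script's real segmentation
# happens in a later step.
def download_sleep_chunked(first_day, last_day, headers, session_id, max_days=32):
    """Yield parsed sleep json for consecutive windows of at most max_days days."""
    window_start = first_day
    while window_start <= last_day:
        window_end = min(window_start + datetime.timedelta(days=max_days - 1), last_day)
        # str() on a datetime.date gives the YYYY-MM-DD string Garmin expects
        yield download_to_json(str(window_start), str(window_end), headers, session_id)
        window_start = window_end + datetime.timedelta(days=1)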
def converter(data, return_df=True):
# define functions which pass through None value because
# datetime functions don't accept value None
def sleep_timestamp(val):
if val is None:
return None
else:
return datetime.datetime.fromtimestamp(val / 1000, pytz.utc)
def sleep_timedelta(val):
if val is None:
return None
else:
return datetime.timedelta(seconds=val)
# initialize variables
if return_df:
nights = pd.DataFrame(columns=["Prev_Day", "Bed_Time", "Wake_Time",
"Awake_Dur", "Light_Dur", "Deep_Dur",
"Total_Dur", "Nap_Dur", "Window_Conf"])
i = 0
else:
nights = []
for d in data:
bed_time = sleep_timestamp(d['sleepStartTimestampGMT'])
wake_time = sleep_timestamp(d['sleepEndTimestampGMT'])
previous_day = datetime.date(*[int(datepart) for datepart in d['calendarDate'].split('-')]) - datetime.timedelta(days=1)
deep_duration = sleep_timedelta(d['deepSleepSeconds'])
light_duration = sleep_timedelta(d['lightSleepSeconds'])
total_duration = sleep_timedelta(d['sleepTimeSeconds'])
awake_duration = sleep_timedelta(d['awakeSleepSeconds'])
nap_duration = sleep_timedelta(d['napTimeSeconds'])
window_confirmed = d['sleepWindowConfirmed']
if return_df:
nights.loc[i] = [previous_day, bed_time, wake_time, awake_duration,
light_duration, deep_duration, total_duration,
nap_duration, window_confirmed]
i += 1
else:
night = Night(bed_time, wake_time, previous_day, deep_duration,
light_duration, total_duration, awake_duration)
nights.append(night)
return nights
# this function returns a list of all dates in [date1, date2]
def daterange(date1, date2):
date_ls = [date1]
for n in range(int((date2 - date1).days)):
date_ls.append(date_ls[-1] + datetime.timedelta(days=1))
return date_ls
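# Illustrative example (not part of the original script): daterange is inclusive
# of both endpoints, e.g.
#   daterange(datetime.date(2021, 3, 1), datetime.date(2021, 3, 3))
#   -> [date(2021, 3, 1), date(2021, 3, 2), date(2021, 3, 3)]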
# steps to updating sleep data:
# Step 0: determine which dates are missing in the archived Garmin dataset,
# given the input start & end dates
# Step 1: Login to connect.garmin.com, get user setting credentials
# Step 2: Using credentials, download missing data from Garmin in json
# Step 3: process new Garmin data, merge it with archived data
# Step 4: download sunrise/sunset data for new dates and merge with archived data
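# Minimal sketch of how the steps defined below chain together (illustrative
# only; the real orchestration code and step4 are outside this excerpt):
#   msg, nights_df, new_req_dates_ls = step0()
#   if len(new_req_dates_ls) > 0:
#       msg, request = step1()
#       msg, data = step2(request, new_req_dates_ls)
#       step3(nights_df, data, new_req_dates_ls)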
def step0():
# make a list of all dates from first sleep date to last (fills any missing dates)
req_dates_ls = daterange(
datetime.datetime.strptime(start_date, "%Y-%m-%d").date(),
datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
)
# Look for previous results
if isfile(proj_path + garmin_results_pkl_fn):
nights_df = pd.read_pickle(proj_path + garmin_results_pkl_fn)
else:
nights_df = pd.DataFrame()
# if previous results were found, reduce requested dates to those not yet obtained
if len(nights_df) > 0:
# get list of requested dates not yet obtained
archive_dates_ls = list(nights_df["Prev_Day"])
new_req_dates_ls = np.setdiff1d(req_dates_ls, archive_dates_ls)
else:
new_req_dates_ls = req_dates_ls
#print("Archive max: ", max(archive_dates_ls))
#print("Request max: ", max(req_dates_ls))
if len(new_req_dates_ls) == 0:
msg = "Archived data is up to date, no new data is available"
else:
msg = "Current data was checked and " + str(len(new_req_dates_ls)) + " night(s) are needed"
return [msg, nights_df, new_req_dates_ls]
def step1():
opts = webdriver.ChromeOptions()
opts.add_argument('--disable-gpu')
opts.add_argument('--no-sandbox')
opts.add_argument('--disable-dev-shm-usage')
if ENV == "local":
if run_browser_headless:
opts.add_argument("--headless")
assert opts.headless # operating in headless mode
else:
opts.binary_location = GOOGLE_CHROME_PATH
# open Chrome and go to Garmin's sign-in page
print("Opening Chrome browser")
driver = webdriver.Chrome(chrome_options=opts)
driver.get(signin_url)
# wait until sign-in fields are visible
wait = WebDriverWait(driver, browser_action_timeout)
wait.until(ec.frame_to_be_available_and_switch_to_it(("id","gauth-widget-frame-gauth-widget")))
wait.until(ec.presence_of_element_located(("id","username")))
# write login info to fields, then submit
print("Signing in to connect.garmin.com")
element = driver.find_element_by_id("username")
driver.implicitly_wait(5)
element.send_keys(user_name)
element = driver.find_element_by_id("password")
element.send_keys(password)
element.send_keys(Keys.RETURN)
wait.until(ec.url_changes(signin_url)) # wait until landing page is requested
driver.switch_to.default_content() # get out of iframe
# get dummy webpage to obtain all request headers
print("Loading dummy page to obtain headers")
driver.get(sleep_url_base + start_date)
request = driver.wait_for_request(sleep_url_base + start_date,
timeout=browser_action_timeout)
if (request.response.status_code != 200) or (not hasattr(request, "headers")):
print("RESPONSE ERROR RECEIVED:")
if (request.response.status_code != 200):
print("Status code: %d" % request.response.status_code)
#response_dict = json.loads(request.content.decode('UTF-8'))
print("Reason: ", request.response.reason)
if not hasattr(request, "headers"):
print("Request did not have 'headers' attribute")
print("Request attributes: ", dir(request))
print("Request headers: ", request.headers)
#raise Exception
# close the Chrome browser
driver.close()
msg = "Logged in to connect.garmin.com"
return [msg, request]
def step2(request, new_req_dates_ls):
# transfer request headers
headers = {
"cookie": request.headers["Cookie"],
"referer": sleep_url_base + start_date,
"accept-encoding": request.headers["Accept-Encoding"],
"accept-language": "en-US", # request.headers["Accept-Language"],
"user-agent": request.headers["User-Agent"],
#"nk": "NT",
"accept": request.headers["Accept"],
"authority": request.headers["Host"],
#"x-app-ver": "4.25.3.0",
"upgrade-insecure-requests": request.headers["Upgrade-Insecure-Requests"]
}
# get the session id from the headers
re_session_id = re.compile(r"(?<=\$ses_id:)(\d+)")
session_id = re_session_id.search(str(request.headers)).group(0)
# Garmin will throw error if request time span exceeds 32 days
# therefore, request 32 days at a time
max_period_delta = datetime.timedelta(days=31)
data = [] # list of jsons, one per time period
get_dates_ls = new_req_dates_ls
while len(get_dates_ls) > 0:
period_start = min(get_dates_ls)
if (max(get_dates_ls) - period_start) > (max_period_delta - datetime.timedelta(days=1)):
period_end = period_start + max_period_delta
else:
period_end = max(get_dates_ls)
# note, this may request some dates which were already obtained
# since a contiguous period is being requested rather than 32 new dates
# duplicated dates will be dropped later
print("Getting data for period: [%s, %s]" % (period_start, period_end))
data.append(download_to_json(period_start, period_end, headers, session_id))
# trim dates list
get_dates_ls = [d for d, s in zip(get_dates_ls, np.array(get_dates_ls) > period_end) if s]
# combine list of jsons into one large json
data = list(chain.from_iterable(data))
# save raw Garmin json to project folder
with open(proj_path + garmin_results_json_fn, 'w') as fp:
json.dump(data, fp)
msg = "Data has been downloaded from Garmin"
return [msg, data]
def step3(nights_df, data, new_req_dates_ls):
# clean the new garmin data
new_nights_df = converter(data)
new_nights_df["Prev_Day"] = pd.to_datetime(new_nights_df["Prev_Day"])
if pd.to_datetime(new_nights_df["Bed_Time"]).dt.tz is None:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_convert(local_tz)
if pd.to_datetime(new_nights_df["Wake_Time"]).dt.tz is None:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_convert(local_tz)
new_nights_df["Light_Dur"] = pd.to_timedelta(new_nights_df["Light_Dur"], "days")
new_nights_df["Deep_Dur"] = pd.to_timedelta(new_nights_df["Deep_Dur"], "days")
new_nights_df["Total_Dur"] = pd.to_timedelta(new_nights_df["Total_Dur"], "days")
new_nights_df["Nap_Dur"] = pd.to_timedelta(new_nights_df["Nap_Dur"], "days")
# fill df with missing dates so that subsequent updates won't keep
# requesting data which Garmin doesn't have
new_missing_dates_ls = np.setdiff1d(new_req_dates_ls, new_nights_df["Prev_Day"].dt.date)
new_missing_row = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, np.NAN]
for d in new_missing_dates_ls:
new_nights_df.loc[len(new_nights_df)] = [d] + new_missing_row
# drop any nights which were already in the archived pickle file,
# then merge it with archived data
if len(nights_df) > 0:
new_nights_df = new_nights_df[~new_nights_df["Prev_Day"].isin(nights_df["Prev_Day"])]
nights_df = nights_df.append(new_nights_df, sort=True).sort_values("Prev_Day", axis=0)
else:
nights_df = new_nights_df.sort_values("Prev_Day", axis=0)
# trim most recent nights which have NaT durations because they were likely caused
# by the smartwatch not yet having synced with Garmin for those dates
unknown_nights_ls = []
i = 1
while (len(nights_df) >= i) and pd.isnull(nights_df.Total_Dur.iloc[-i]):
unknown_nights_ls.append(nights_df.Prev_Day.iloc[-i])
i += 1
nights_df = nights_df[~nights_df["Prev_Day"].isin(unknown_nights_ls)]
# save merged results
#nights_df.to_csv(proj_path + garmin_results_csv_fn)
nights_df.to_pickle(proj_path + garmin_results_pkl_fn)
# clean garmin data for dashboard
garmin_df = nights_df.drop(["Nap_Dur", "Window_Conf"], axis=1)
# calculate time of day in decimal hours of each event (asleep & wake)
garmin_df["Bed_ToD"] = garmin_df["Bed_Time"].dt.hour + garmin_df["Bed_Time"].dt.minute/60
garmin_df["Bed_ToD"] -= 24*(garmin_df["Bed_ToD"] > 12) # make PM bed times negative
garmin_df["Wake_ToD"] = garmin_df["Wake_Time"].dt.hour + garmin_df["Wake_Time"].dt.minute/60
# read & wrangle old microsoft sleep data
ms2015_df = pd.read_csv(proj_path + "data/Activity_Summary_20150101_20151231.csv")
ms2016_df = pd.read_csv(proj_path + "data/Activity_Summary_20160101_20161231.csv")
ms2017_df = pd.read_csv(proj_path + "data/Activity_Summary_20170101_20171231.csv")
ms_df = ms2015_df.append(ms2016_df).append(ms2017_df, sort=True). \
query("Event_Type == 'Sleep'")
ms2_df = pd.DataFrame()
# create microsoft dataframe which mimics the garmin dataframe
ms2_df["Prev_Day"] = pd.to_datetime(ms_df["Date"])
ms2_df["Bed_Time"] = pd.to_datetime(ms_df["Start_Time"]). \
dt.tz_localize("US/Eastern", ambiguous="NaT")
for i_row in range(len(ms2_df)):
# fell asleep after midnight, adjust Prev_Day back 1 day
if ms2_df.iloc[i_row, 1].hour < 12:
ms2_df.iloc[i_row, 0] -= datetime.timedelta(days=1)
ms2_df["Wake_Time"] = pd.to_datetime(ms_df["Wake_Up_Time"]). \
dt.tz_localize("US/Eastern", ambiguous="NaT")
ms2_df["Light_Dur"] = pd.to_timedelta(ms_df["Seconds_Asleep_Light"], "seconds")
ms2_df["Deep_Dur"] = pd.to_timedelta(ms_df["Seconds_Asleep_Restful"], "seconds")
ms2_df["Total_Dur"] = pd.to_timedelta(ms_df["Seconds_Awake"], "seconds") \
+ ms2_df["Light_Dur"] + ms2_df["Deep_Dur"]
ms2_df["Bed_ToD"] = ms2_df["Bed_Time"].dt.hour \
+ ms2_df["Bed_Time"].dt.minute/60
ms2_df["Bed_ToD"] -= 24*(ms2_df["Bed_ToD"] > 12) # make PM bed times negative
ms2_df["Wake_ToD"] = ms2_df["Wake_Time"].dt.hour \
+ ms2_df["Wake_Time"].dt.minute/60
brief_sleep_bool = ms2_df["Total_Dur"] < pd.Timedelta(4, unit="h")
daytime_asleep_bool = (ms2_df["Bed_ToD"] < -3) | (ms2_df["Bed_ToD"] > 7)
unknown_dur_bool = pd.isnull(ms2_df["Total_Dur"])
"""PyStan utility functions
These functions validate and organize data passed to and from the
classes and functions defined in the file `stan_fit.hpp` and wrapped
by the Cython file `stan_fit.pxd`.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2015, PyStan developers
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
#-----------------------------------------------------------------------------
# REF: rstan/rstan/R/misc.R
from __future__ import unicode_literals, division
from pystan._compat import PY2, string_types
from collections import OrderedDict
if PY2:
from collections import Callable, Iterable, Sequence
else:
from collections.abc import Callable, Iterable, Sequence
import inspect
import io
import itertools
import logging
import math
from numbers import Number
import os
import random
import re
import sys
import shutil
import tempfile
import time
import numpy as np
try:
from scipy.stats.mstats import mquantiles
except ImportError:
from pystan.external.scipy.mstats import mquantiles
import pystan.chains
import pystan._misc
from pystan.constants import (MAX_UINT, sampling_algo_t, optim_algo_t,
variational_algo_t, sampling_metric_t, stan_args_method_t)
logger = logging.getLogger('pystan')
def stansummary(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
"""
Summary statistic table.
Parameters
----------
fit : StanFit4Model object
pars : str or sequence of str, optional
Parameter names. By default use all parameters
probs : sequence of float, optional
Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975)
digits_summary : int, optional
Number of significant digits. By default, 2
Returns
-------
summary : string
Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and Rhat.
Examples
--------
>>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
>>> m = StanModel(model_code=model_code, model_name="example_model")
>>> fit = m.sampling()
>>> print(stansummary(fit))
Inference for Stan model: example_model.
4 chains, each with iter=2000; warmup=1000; thin=1;
post-warmup draws per chain=1000, total post-warmup draws=4000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
y 0.01 0.03 1.0 -2.01 -0.68 0.02 0.72 1.97 1330 1.0
lp__ -0.5 0.02 0.68 -2.44 -0.66 -0.24 -0.05-5.5e-4 1555 1.0
Samples were drawn using NUTS at Thu Aug 17 00:52:25 2017.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
"""
if fit.mode == 1:
return "Stan model '{}' is of mode 'test_grad';\n"\
"sampling is not conducted.".format(fit.model_name)
elif fit.mode == 2:
return "Stan model '{}' does not contain samples.".format(fit.model_name)
n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
header = "Inference for Stan model: {}.\n".format(fit.model_name)
header += "{} chains, each with iter={}; warmup={}; thin={}; \n"
header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'],
fit.sim['thin'], sum(n_kept))
header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n"
header = header.format(n_kept[0], sum(n_kept))
footer = "\n\nSamples were drawn using {} at {}.\n"\
"For each parameter, n_eff is a crude measure of effective sample size,\n"\
"and Rhat is the potential scale reduction factor on split chains (at \n"\
"convergence, Rhat=1)."
sampler = fit.sim['samples'][0]['args']['sampler_t']
date = fit.date.strftime('%c') # %c is locale's representation
footer = footer.format(sampler, date)
s = _summary(fit, pars, probs)
body = _array_to_table(s['summary'], s['summary_rownames'],
s['summary_colnames'], digits_summary)
return header + body + footer
def _print_stanfit(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
# warning added in PyStan 2.17.0
logger.warning('Function `_print_stanfit` is deprecated and will be removed in a future version. '\
'Use `stansummary` instead.', DeprecationWarning)
return stansummary(fit, pars=pars, probs=probs, digits_summary=digits_summary)
def _array_to_table(arr, rownames, colnames, n_digits):
"""Print an array with row and column names
Example:
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
beta[1,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1
beta[1,2] 0.0 0.0 1.0 -2.1 -0.7 0.0 0.7 2.0 4000 1
beta[2,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1
beta[2,2] 0.0 0.0 1.0 -1.9 -0.6 0.0 0.7 2.0 4000 1
lp__ -4.2 0.1 2.1 -9.4 -5.4 -3.8 -2.7 -1.2 317 1
"""
assert arr.shape == (len(rownames), len(colnames))
rownames_maxwidth = max(len(n) for n in rownames)
max_col_width = 7
min_col_width = 5
max_col_header_num_width = [max(max_col_width, max(len(n) + 1, min_col_width)) for n in colnames]
rows = []
for row in arr:
row_nums = []
for j, (num, width) in enumerate(zip(row, max_col_header_num_width)):
if colnames[j] == "n_eff":
num = int(round(num, 0)) if not np.isnan(num) else num
num = _format_number(num, n_digits, max_col_width - 1)
row_nums.append(num)
if len(num) + 1 > max_col_header_num_width[j]:
max_col_header_num_width[j] = len(num) + 1
rows.append(row_nums)
widths = [rownames_maxwidth] + max_col_header_num_width
header = '{:>{width}}'.format('', width=widths[0])
for name, width in zip(colnames, widths[1:]):
header += '{name:>{width}}'.format(name=name, width=width)
lines = [header]
for rowname, row in zip(rownames, rows):
line = '{name:{width}}'.format(name=rowname, width=widths[0])
for j, (num, width) in enumerate(zip(row, widths[1:])):
line += '{num:>{width}}'.format(num=num, width=width)
lines.append(line)
return '\n'.join(lines)
def _number_width(n):
"""Calculate the width in characters required to print a number
For example, -1024 takes 5 characters. -0.034 takes 6 characters.
"""
return len(str(n))
def _format_number_si(num, n_signif_figures):
"""Format a number using scientific notation to given significant figures"""
if math.isnan(num) or math.isinf(num):
return str(num)
leading, exp = '{:E}'.format(num).split('E')
leading = round(float(leading), n_signif_figures - 1)
exp = exp[:1] + exp[2:] if exp[1] == '0' else exp
formatted = '{}e{}'.format(leading, exp.lstrip('+'))
return formatted
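# Illustrative examples (values assumed, not taken from the original source):
#   _format_number_si(12345.678, 3)    -> '1.23e4'
#   _format_number_si(float('nan'), 3) -> 'nan'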
def _format_number(num, n_signif_figures, max_width):
"""Format a number as a string while obeying space constraints.
`n_signif_figures` is the minimum number of significant figures expressed
`max_width` is the maximum width in characters allowed
"""
if max_width < 6:
raise NotImplementedError("Guaranteed formatting in fewer than 6 characters not supported.")
if math.isnan(num) or math.isinf(num):
return str(num)
# add 0.5 to prevent log(0) errors; only affects n_digits calculation for num > 0
n_digits = lambda num: math.floor(math.log10(abs(num) + 0.5)) + 1
if abs(num) > 10**-n_signif_figures and n_digits(num) <= max_width - n_signif_figures:
return str(round(num, n_signif_figures))[:max_width].rstrip('.')
elif _number_width(num) <= max_width:
if n_digits(num) >= n_signif_figures:
# the int() is necessary for consistency between Python 2 and 3
return str(int(round(num)))
else:
return str(num)
else:
return _format_number_si(num, n_signif_figures)
def _summary(fit, pars=None, probs=None, **kwargs):
"""Summarize samples (compute mean, SD, quantiles) in all chains.
REF: stanfit-class.R summary method
Parameters
----------
fit : StanFit4Model object
pars : str or sequence of str, optional
Parameter names. By default use all parameters
probs : sequence of float, optional
Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975)
Returns
-------
summaries : OrderedDict of array
Array indexed by 'summary' has dimensions (num_params, num_statistics).
Parameters are unraveled in *row-major order*. Statistics include: mean,
se_mean, sd, probs_0, ..., probs_n, n_eff, and Rhat. Array indexed by
'c_summary' breaks down the statistics by chain and has dimensions
(num_params, num_statistics_c_summary, num_chains). Statistics for
`c_summary` are the same as for `summary` with the exception that
se_mean, n_eff, and Rhat are absent. Row names and column names are
also included in the OrderedDict.
"""
if fit.mode == 1:
msg = "Stan model {} is of mode 'test_grad'; sampling is not conducted."
msg = msg.format(fit.model_name)
raise ValueError(msg)
elif fit.mode == 2:
msg = "Stan model {} contains no samples.".format(fit.model_name)
raise ValueError(msg)
if fit.sim['n_save'] == fit.sim['warmup2']:
msg = "Stan model {} contains no samples.".format(fit.model_name)
raise ValueError(msg)
# rstan checks for cached summaries here
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
if probs is None:
probs = (0.025, 0.25, 0.5, 0.75, 0.975)
ss = _summary_sim(fit.sim, pars, probs)
# TODO: include sem, ess and rhat: ss['ess'], ss['rhat']
s1 = np.column_stack([ss['msd'][:, 0], ss['sem'], ss['msd'][:, 1], ss['quan'], ss['ess'], ss['rhat']])
s1_rownames = ss['c_msd_names']['parameters']
s1_colnames = ((ss['c_msd_names']['stats'][0],) + ('se_mean',) +
(ss['c_msd_names']['stats'][1],) + ss['c_quan_names']['stats'] +
('n_eff', 'Rhat'))
s2 = _combine_msd_quan(ss['c_msd'], ss['c_quan'])
s2_rownames = ss['c_msd_names']['parameters']
s2_colnames = ss['c_msd_names']['stats'] + ss['c_quan_names']['stats']
return OrderedDict(summary=s1, c_summary=s2,
summary_rownames=s1_rownames,
summary_colnames=s1_colnames,
c_summary_rownames=s2_rownames,
c_summary_colnames=s2_colnames)
def _combine_msd_quan(msd, quan):
"""Combine msd and quantiles in chain summary
Parameters
----------
msd : array of shape (num_params, 2, num_chains)
mean and sd for chains
cquan : array of shape (num_params, num_quan, num_chains)
quantiles for chains
Returns
-------
msdquan : array of shape (num_params, 2 + num_quan, num_chains)
"""
dim1 = msd.shape
n_par, _, n_chains = dim1
ll = []
for i in range(n_chains):
a1 = msd[:, :, i]
a2 = quan[:, :, i]
ll.append(np.column_stack([a1, a2]))
msdquan = np.dstack(ll)
return msdquan
def _summary_sim(sim, pars, probs):
"""Summarize chains together and separately
REF: rstan/rstan/R/misc.R
Parameters are unraveled in *column-major order*.
Parameters
----------
sim : dict
dict from from a stanfit fit object, i.e., fit['sim']
pars : Iterable of str
parameter names
probs : Iterable of probs
desired quantiles
Returns
-------
summaries : OrderedDict of array
This dictionary contains the following arrays indexed by the keys
given below:
- 'msd' : array of shape (num_params, 2) with mean and sd
- 'sem' : array of length num_params with standard error for the mean
- 'c_msd' : array of shape (num_params, 2, num_chains)
- 'quan' : array of shape (num_params, num_quan)
- 'c_quan' : array of shape (num_params, num_quan, num_chains)
- 'ess' : array of shape (num_params, 1)
- 'rhat' : array of shape (num_params, 1)
Note
----
`_summary_sim` has the parameters in *column-major* order whereas `_summary`
gives them in *row-major* order. (This follows RStan.)
"""
# NOTE: this follows RStan rather closely. Some of the calculations here
probs_len = len(probs)
n_chains = len(sim['samples'])
# tidx is a dict with keys that are parameters and values that are their
# indices using column-major ordering
tidx = _pars_total_indexes(sim['pars_oi'], sim['dims_oi'], sim['fnames_oi'], pars)
tidx_colm = [tidx[par] for par in pars]
tidx_colm = list(itertools.chain(*tidx_colm)) # like R's unlist()
tidx_rowm = [tidx[par+'_rowmajor'] for par in pars]
tidx_rowm = list(itertools.chain(*tidx_rowm))
tidx_len = len(tidx_colm)
lmsdq = [_get_par_summary(sim, i, probs) for i in tidx_colm]
msd = np.row_stack([x['msd'] for x in lmsdq])
quan = np.row_stack([x['quan'] for x in lmsdq])
probs_str = tuple(["{:g}%".format(100*p) for p in probs])
msd = msd.reshape(tidx_len, 2, order='F')
quan = quan.reshape(tidx_len, probs_len, order='F')
c_msd = np.row_stack([x['c_msd'] for x in lmsdq])
c_quan = np.row_stack([x['c_quan'] for x in lmsdq])
c_msd = c_msd.reshape(tidx_len, 2, n_chains, order='F')
c_quan = c_quan.reshape(tidx_len, probs_len, n_chains, order='F')
sim_attr_args = sim.get('args', None)
if sim_attr_args is None:
cids = list(range(n_chains))
else:
cids = [x['chain_id'] for x in sim_attr_args]
c_msd_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
stats=("mean", "sd"),
chains=tuple("chain:{}".format(cid) for cid in cids))
c_quan_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
stats=probs_str,
chains=tuple("chain:{}".format(cid) for cid in cids))
ess_and_rhat = np.array([pystan.chains.ess_and_splitrhat(sim, n) for n in tidx_colm])
ess, rhat = [arr.ravel() for arr in np.hsplit(ess_and_rhat, 2)]
return dict(msd=msd, c_msd=c_msd, c_msd_names=c_msd_names, quan=quan,
c_quan=c_quan, c_quan_names=c_quan_names,
sem=msd[:, 1] / np.sqrt(ess), ess=ess, rhat=rhat,
row_major_idx=tidx_rowm, col_major_idx=tidx_colm)
def _get_par_summary(sim, n, probs):
"""Summarize chains merged and individually
Parameters
----------
sim : dict from stanfit object
n : int
parameter index
probs : iterable of int
quantiles
Returns
-------
summary : dict
Dictionary containing summaries
"""
# _get_samples gets chains for nth parameter
ss = _get_samples(n, sim, inc_warmup=False)
msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1))
qfun = lambda chain: mquantiles(chain, probs)
c_msd = np.array([msdfun(s) for s in ss]).flatten()
c_quan = np.array([qfun(s) for s in ss]).flatten()
ass = np.asarray(ss).flatten()
msd = np.asarray(msdfun(ass))
quan = qfun(np.asarray(ass))
return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan)
def _split_data(data):
data_r = {}
data_i = {}
# data_r and data_i are going to be converted into C++ objects of
# type: map<string, pair<vector<double>, vector<size_t>>> and
# map<string, pair<vector<int>, vector<size_t>>> so prepare
# them accordingly.
for k, v in data.items():
if np.issubdtype(np.asarray(v).dtype, np.integer):
data_i.update({k.encode('utf-8'): np.asarray(v, dtype=int)})
elif np.issubdtype(np.asarray(v).dtype, np.floating):
data_r.update({k.encode('utf-8'): np.asarray(v, dtype=float)})
else:
msg = "Variable {} is neither int nor float nor list/array thereof"
raise ValueError(msg.format(k))
return data_r, data_i
def _config_argss(chains, iter, warmup, thin,
init, seed, sample_file, diagnostic_file, algorithm,
control, **kwargs):
# After rstan/rstan/R/misc.R (config_argss)
iter = int(iter)
if iter < 1:
raise ValueError("`iter` should be a positive integer.")
thin = int(thin)
if thin < 1 or thin > iter:
raise ValueError("`thin should be a positive integer "
"less than `iter`.")
warmup = max(0, int(warmup))
if warmup > iter:
raise ValueError("`warmup` should be an integer less than `iter`.")
chains = int(chains)
if chains < 1:
raise ValueError("`chains` should be a positive integer.")
iters = [iter] * chains
thins = [thin] * chains
warmups = [warmup] * chains
# use chain_id argument if specified
if kwargs.get('chain_id') is None:
chain_id = list(range(chains))
else:
chain_id = [int(id) for id in kwargs['chain_id']]
if len(set(chain_id)) != len(chain_id):
raise ValueError("`chain_id` has duplicated elements.")
chain_id_len = len(chain_id)
if chain_id_len >= chains:
chain_id = chain_id
else:
chain_id = chain_id + [max(chain_id) + 1 + i
for i in range(chains - chain_id_len)]
del kwargs['chain_id']
inits_specified = False
# slight difference here from rstan; Python's lists are not typed.
if isinstance(init, Number):
init = str(init)
if isinstance(init, string_types):
if init in ['0', 'random']:
inits = [init] * chains
else:
inits = ["random"] * chains
inits_specified = True
if not inits_specified and isinstance(init, Callable):
## test if function takes argument named "chain_id"
if "chain_id" in inspect.getargspec(init).args:
inits = [init(chain_id=id) for id in chain_id]
else:
inits = [init()] * chains
if not isinstance(inits[0], dict):
raise ValueError("The function specifying initial values must "
"return a dictionary.")
inits_specified = True
if not inits_specified and isinstance(init, Sequence):
if len(init) != chains:
raise ValueError("Length of list of initial values does not "
"match number of chains.")
if not all([isinstance(d, dict) for d in init]):
raise ValueError("Initial value list is not a sequence of "
"dictionaries.")
inits = init
inits_specified = True
if not inits_specified:
raise ValueError("Invalid specification of initial values.")
## only one seed is needed by virtue of the RNG
seed = _check_seed(seed)
kwargs['method'] = "test_grad" if kwargs.get('test_grad') else 'sampling'
all_control = {
"adapt_engaged", "adapt_gamma", "adapt_delta", "adapt_kappa",
"adapt_t0", "adapt_init_buffer", "adapt_term_buffer", "adapt_window",
"stepsize", "stepsize_jitter", "metric", "int_time",
"max_treedepth", "epsilon", "error", "inv_metric"
}
all_metrics = {"unit_e", "diag_e", "dense_e"}
if control is not None:
if not isinstance(control, dict):
raise ValueError("`control` must be a dictionary")
if not all(key in all_control for key in control):
unknown = set(control) - all_control
raise ValueError("`control` contains unknown parameters: {}".format(unknown))
if control.get('metric') and control['metric'] not in all_metrics:
raise ValueError("`metric` must be one of {}".format(all_metrics))
kwargs['control'] = control
argss = [dict() for _ in range(chains)]
for i in range(chains):
argss[i] = dict(chain_id=chain_id[i],
iter=iters[i], thin=thins[i], seed=seed,
warmup=warmups[i], init=inits[i],
algorithm=algorithm)
if sample_file is not None:
sample_file = _writable_sample_file(sample_file)
if chains == 1:
argss[0]['sample_file'] = sample_file
elif chains > 1:
for i in range(chains):
argss[i]['sample_file'] = _append_id(sample_file, i)
if diagnostic_file is not None:
raise NotImplementedError("diagnostic_file not implemented yet.")
if control is not None and "inv_metric" in control:
inv_metric = control.pop("inv_metric")
metric_dir = tempfile.mkdtemp()
if isinstance(inv_metric, dict):
for i in range(chains):
if i not in inv_metric:
msg = "Invalid value for init_inv_metric found (keys={}). " \
"Use either a dictionary with chain_index as keys (0,1,2,...)" \
"or ndarray."
msg = msg.format(list(inv_metric.keys()))
raise ValueError(msg)
mass_values = inv_metric[i]
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
if isinstance(mass_values, str):
if not os.path.exists(mass_values):
raise ValueError("inverse metric file was not found: {}".format(mass_values))
shutil.copy(mass_values, metric_path)
else:
stan_rdump(dict(inv_metric=mass_values), metric_path)
argss[i]['metric_file'] = metric_path
elif isinstance(inv_metric, str):
if not os.path.exists(inv_metric):
raise ValueError("inverse metric file was not found: {}".format(inv_metric))
for i in range(chains):
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
shutil.copy(inv_metric, metric_path)
argss[i]['metric_file'] = metric_path
elif isinstance(inv_metric, Iterable):
metric_filename = "inv_metric_chain_0.Rdata"
metric_path = os.path.join(metric_dir, metric_filename)
stan_rdump(dict(inv_metric=inv_metric), metric_path)
argss[0]['metric_file'] = metric_path
for i in range(1, chains):
metric_filename = "inv_metric_chain_{}.Rdata".format(str(i))
metric_path = os.path.join(metric_dir, metric_filename)
shutil.copy(argss[i-1]['metric_file'], metric_path)
argss[i]['metric_file'] = metric_path
else:
argss[i]['metric_file'] = ""
stepsize_list = None
if "control" in kwargs and "stepsize" in kwargs["control"]:
if isinstance(kwargs["control"]["stepsize"], Sequence):
stepsize_list = kwargs["control"]["stepsize"]
if len(kwargs["control"]["stepsize"]) == 1:
kwargs["control"]["stepsize"] = kwargs["control"]["stepsize"][0]
elif len(kwargs["control"]["stepsize"]) != chains:
raise ValueError("stepsize length needs to equal chain count.")
else:
stepsize_list = kwargs["control"]["stepsize"]
for i in range(chains):
argss[i].update(kwargs)
if stepsize_list is not None:
argss[i]["control"]["stepsize"] = stepsize_list[i]
argss[i] = _get_valid_stan_args(argss[i])
return argss
def _get_valid_stan_args(base_args=None):
"""Fill in default values for arguments not provided in `base_args`.
RStan does this in C++ in stan_args.hpp in the stan_args constructor.
It seems easier to deal with here in Python.
"""
args = base_args.copy() if base_args is not None else {}
# Default arguments, c.f. rstan/rstan/inst/include/rstan/stan_args.hpp
# values in args are going to be converted into C++ objects so
# prepare them accordingly---e.g., unicode -> bytes -> std::string
args['chain_id'] = args.get('chain_id', 1)
args['append_samples'] = args.get('append_samples', False)
if args.get('method') is None or args['method'] == "sampling":
args['method'] = stan_args_method_t.SAMPLING
elif args['method'] == "optim":
args['method'] = stan_args_method_t.OPTIM
elif args['method'] == 'test_grad':
args['method'] = stan_args_method_t.TEST_GRADIENT
elif args['method'] == 'variational':
args['method'] = stan_args_method_t.VARIATIONAL
else:
args['method'] = stan_args_method_t.SAMPLING
args['sample_file_flag'] = True if args.get('sample_file') else False
args['sample_file'] = args.get('sample_file', '').encode('ascii')
args['diagnostic_file_flag'] = True if args.get('diagnostic_file') else False
args['diagnostic_file'] = args.get('diagnostic_file', '').encode('ascii')
# NB: argument named "seed" not "random_seed"
args['random_seed'] = args.get('seed', int(time.time()))
args['metric_file_flag'] = True if args.get('metric_file') else False
args['metric_file'] = args.get('metric_file', '').encode('ascii')
if args['method'] == stan_args_method_t.VARIATIONAL:
# variational does not use a `control` map like sampling
args['ctrl'] = args.get('ctrl', dict(variational=dict()))
args['ctrl']['variational']['iter'] = args.get('iter', 10000)
args['ctrl']['variational']['grad_samples'] = args.get('grad_samples', 1)
args['ctrl']['variational']['elbo_samples'] = args.get('elbo_samples', 100)
args['ctrl']['variational']['eval_elbo'] = args.get('eval_elbo', 100)
args['ctrl']['variational']['output_samples'] = args.get('output_samples', 1000)
args['ctrl']['variational']['adapt_iter'] = args.get('adapt_iter', 50)
args['ctrl']['variational']['eta'] = args.get('eta', 1.0)
args['ctrl']['variational']['adapt_engaged'] = args.get('adapt_engaged', True)
args['ctrl']['variational']['tol_rel_obj'] = args.get('tol_rel_obj', 0.01)
if args.get('algorithm', '').lower() == 'fullrank':
args['ctrl']['variational']['algorithm'] = variational_algo_t.FULLRANK
else:
args['ctrl']['variational']['algorithm'] = variational_algo_t.MEANFIELD
elif args['method'] == stan_args_method_t.SAMPLING:
args['ctrl'] = args.get('ctrl', dict(sampling=dict()))
args['ctrl']['sampling']['iter'] = iter = args.get('iter', 2000)
args['ctrl']['sampling']['warmup'] = warmup = args.get('warmup', iter // 2)
calculated_thin = (iter - warmup) // 1000
if calculated_thin < 1:
calculated_thin = 1
args['ctrl']['sampling']['thin'] = thin = args.get('thin', calculated_thin)
args['ctrl']['sampling']['save_warmup'] = True # always True now
args['ctrl']['sampling']['iter_save_wo_warmup'] = iter_save_wo_warmup = 1 + (iter - warmup - 1) // thin
args['ctrl']['sampling']['iter_save'] = iter_save_wo_warmup + 1 + (warmup - 1) // thin
refresh = iter // 10 if iter >= 20 else 1
args['ctrl']['sampling']['refresh'] = args.get('refresh', refresh)
ctrl_lst = args.get('control', dict())
ctrl_sampling = args['ctrl']['sampling']
# NB: if these defaults change, remember to update docstrings
ctrl_sampling['adapt_engaged'] = ctrl_lst.get("adapt_engaged", True)
ctrl_sampling['adapt_gamma'] = ctrl_lst.get("adapt_gamma", 0.05)
ctrl_sampling['adapt_delta'] = ctrl_lst.get("adapt_delta", 0.8)
ctrl_sampling['adapt_kappa'] = ctrl_lst.get("adapt_kappa", 0.75)
ctrl_sampling['adapt_t0'] = ctrl_lst.get("adapt_t0", 10.0)
ctrl_sampling['adapt_init_buffer'] = ctrl_lst.get("adapt_init_buffer", 75)
ctrl_sampling['adapt_term_buffer'] = ctrl_lst.get("adapt_term_buffer", 50)
ctrl_sampling['adapt_window'] = ctrl_lst.get("adapt_window", 25)
ctrl_sampling['stepsize'] = ctrl_lst.get("stepsize", 1.0)
ctrl_sampling['stepsize_jitter'] = ctrl_lst.get("stepsize_jitter", 0.0)
algorithm = args.get('algorithm', 'NUTS')
if algorithm == 'HMC':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.HMC
elif algorithm == 'Metropolis':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Metropolis
elif algorithm == 'NUTS':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.NUTS
elif algorithm == 'Fixed_param':
args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Fixed_param
# TODO: Setting adapt_engaged to False solves the segfault reported
# in issue #200; find out why this hack is needed. RStan deals with
# the setting elsewhere.
ctrl_sampling['adapt_engaged'] = False
else:
msg = "Invalid value for parameter algorithm (found {}; " \
"require HMC, Metropolis, NUTS, or Fixed_param).".format(algorithm)
raise ValueError(msg)
metric = ctrl_lst.get('metric', 'diag_e')
if metric == "unit_e":
ctrl_sampling['metric'] = sampling_metric_t.UNIT_E
elif metric == "diag_e":
ctrl_sampling['metric'] = sampling_metric_t.DIAG_E
elif metric == "dense_e":
ctrl_sampling['metric'] = sampling_metric_t.DENSE_E
if ctrl_sampling['algorithm'] == sampling_algo_t.NUTS:
ctrl_sampling['max_treedepth'] = ctrl_lst.get("max_treedepth", 10)
elif ctrl_sampling['algorithm'] == sampling_algo_t.HMC:
ctrl_sampling['int_time'] = ctrl_lst.get('int_time', 6.283185307179586476925286766559005768e+00)
elif ctrl_sampling['algorithm'] == sampling_algo_t.Metropolis:
pass
elif ctrl_sampling['algorithm'] == sampling_algo_t.Fixed_param:
pass
elif args['method'] == stan_args_method_t.OPTIM:
args['ctrl'] = args.get('ctrl', dict(optim=dict()))
args['ctrl']['optim']['iter'] = iter = args.get('iter', 2000)
algorithm = args.get('algorithm', 'LBFGS')
if algorithm == "BFGS":
args['ctrl']['optim']['algorithm'] = optim_algo_t.BFGS
elif algorithm == "Newton":
args['ctrl']['optim']['algorithm'] = optim_algo_t.Newton
elif algorithm == "LBFGS":
args['ctrl']['optim']['algorithm'] = optim_algo_t.LBFGS
else:
msg = "Invalid value for parameter algorithm (found {}; " \
"require (L)BFGS or Newton).".format(algorithm)
raise ValueError(msg)
refresh = args['ctrl']['optim']['iter'] // 100
args['ctrl']['optim']['refresh'] = args.get('refresh', refresh)
if args['ctrl']['optim']['refresh'] < 1:
args['ctrl']['optim']['refresh'] = 1
args['ctrl']['optim']['init_alpha'] = args.get("init_alpha", 0.001)
args['ctrl']['optim']['tol_obj'] = args.get("tol_obj", 1e-12)
args['ctrl']['optim']['tol_grad'] = args.get("tol_grad", 1e-8)
args['ctrl']['optim']['tol_param'] = args.get("tol_param", 1e-8)
args['ctrl']['optim']['tol_rel_obj'] = args.get("tol_rel_obj", 1e4)
args['ctrl']['optim']['tol_rel_grad'] = args.get("tol_rel_grad", 1e7)
args['ctrl']['optim']['save_iterations'] = args.get("save_iterations", True)
args['ctrl']['optim']['history_size'] = args.get("history_size", 5)
elif args['method'] == stan_args_method_t.TEST_GRADIENT:
args['ctrl'] = args.get('ctrl', dict(test_grad=dict()))
args['ctrl']['test_grad']['epsilon'] = args.get("epsilon", 1e-6)
args['ctrl']['test_grad']['error'] = args.get("error", 1e-6)
init = args.get('init', "random")
if isinstance(init, string_types):
args['init'] = init.encode('ascii')
elif isinstance(init, dict):
args['init'] = "user".encode('ascii')
# while the name is 'init_list', it is a dict; the name comes from rstan,
# where list elements can have names
args['init_list'] = init
else:
args['init'] = "random".encode('ascii')
args['init_radius'] = args.get('init_r', 2.0)
if (args['init_radius'] <= 0):
args['init'] = b"0"
# 0 initialization requires init_radius = 0
if (args['init'] == b"0" or args['init'] == 0):
args['init_radius'] = 0.0
args['enable_random_init'] = args.get('enable_random_init', True)
# RStan calls validate_args() here
return args
def _check_seed(seed):
"""If possible, convert `seed` into a valid form for Stan (an integer
between 0 and MAX_UINT, inclusive). If not possible, use a random seed
instead and raise a warning if `seed` was not provided as `None`.
"""
if isinstance(seed, (Number, string_types)):
try:
seed = int(seed)
except ValueError:
logger.warning("`seed` must be castable to an integer")
seed = None
else:
if seed < 0:
logger.warning("`seed` may not be negative")
seed = None
elif seed > MAX_UINT:
raise ValueError('`seed` is too large; max is {}'.format(MAX_UINT))
elif isinstance(seed, np.random.RandomState):
seed = seed.randint(0, MAX_UINT)
elif seed is not None:
logger.warning('`seed` has unexpected type')
seed = None
if seed is None:
seed = random.randint(0, MAX_UINT)
return seed
def _organize_inits(inits, pars, dims):
"""Obtain a list of initial values for each chain.
The parameter 'lp__' will be removed from the chains.
Parameters
----------
inits : list
list of initial values for each chain.
pars : list of str
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Returns
-------
inits : list of dict
"""
try:
idx_of_lp = pars.index('lp__')
del pars[idx_of_lp]
del dims[idx_of_lp]
except ValueError:
pass
starts = _calc_starts(dims)
return [_par_vector2dict(init, pars, dims, starts) for init in inits]
def _calc_starts(dims):
"""Calculate starting indexes
Parameters
----------
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Examples
--------
>>> _calc_starts([[8, 2], [5], [6, 2]])
[0, 16, 21]
"""
# NB: Python uses 0-indexing; R uses 1-indexing.
l = len(dims)
s = [np.prod(d) for d in dims]
starts = np.cumsum([0] + s)[0:l].tolist()
# coerce things into ints before returning
return [int(i) for i in starts]
def _par_vector2dict(v, pars, dims, starts=None):
"""Turn a vector of samples into an OrderedDict according to param dims.
Parameters
----------
y : list of int or float
pars : list of str
parameter names
dims : list of list of int
list of dimensions of parameters
Returns
-------
d : dict
Examples
--------
>>> v = list(range(31))
>>> dims = [[5], [5, 5], []]
>>> pars = ['mu', 'Phi', 'eta']
>>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS
OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ...
"""
if starts is None:
starts = _calc_starts(dims)
d = OrderedDict()
for i in range(len(pars)):
l = int(np.prod(dims[i]))
start = starts[i]
end = start + l
y = np.asarray(v[start:end])
if len(dims[i]) > 1:
y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major
d[pars[i]] = y.squeeze() if y.shape == (1,) else y
return d
def _check_pars(allpars, pars):
if len(pars) == 0:
raise ValueError("No parameter specified (`pars` is empty).")
for par in pars:
if par not in allpars:
raise ValueError("No parameter {}".format(par))
def _pars_total_indexes(names, dims, fnames, pars):
"""Obtain all the indexes for parameters `pars` in the sequence of names.
`names` references variables that are in column-major order
Parameters
----------
names : sequence of str
All the parameter names.
dim : sequence of list of int
Dimensions, in same order as `names`.
fnames : sequence of str
All the scalar parameter names
pars : sequence of str
The parameters of interest. It is assumed all elements in `pars` are in
`names`.
Returns
-------
indexes : OrderedDict of list of int
Dictionary uses parameter names as keys. Indexes are column-major order.
For each parameter there is also a key `par`+'_rowmajor' that stores the
row-major indexing.
Note
----
Inside each parameter (vector or array), the sequence uses column-major
ordering. For example, if we have parameters alpha and beta, having
dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence
is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0],
beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short,
like R matrix(..., bycol=TRUE).
Example
-------
>>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__']
>>> dims_oi = [[], [], [8], [8], []]
>>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]',
... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]',
... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]',
... 'theta[8]', 'lp__']
>>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__']
>>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars)
... # doctest: +ELLIPSIS
OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ...
"""
starts = _calc_starts(dims)
def par_total_indexes(par):
# if `par` is a scalar, it will match one of `fnames`
if par in fnames:
p = fnames.index(par)
idx = tuple([p])
return OrderedDict([(par, idx), (par+'_rowmajor', idx)])
else:
p = names.index(par)
idx = starts[p] + np.arange(np.prod(dims[p]))
idx_rowmajor = starts[p] + _idx_col2rowm(dims[p])
return OrderedDict([(par, tuple(idx)), (par+'_rowmajor', tuple(idx_rowmajor))])
indexes = OrderedDict()
for par in pars:
indexes.update(par_total_indexes(par))
return indexes
def _idx_col2rowm(d):
"""Generate indexes to change from col-major to row-major ordering"""
if 0 == len(d):
return 1
if 1 == len(d):
return np.arange(d[0])
# order='F' indicates column-major ordering
idx = np.array(np.arange(np.prod(d))).reshape(d, order='F').T
return idx.flatten(order='F')
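# Worked example (illustrative): for a 2x3 parameter, the column-major positions
# 0..5 are re-ordered into row-major traversal order:
#   _idx_col2rowm([2, 3]) -> array([0, 2, 4, 1, 3, 5])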
def _get_kept_samples(n, sim):
"""Get samples to be kept from the chain(s) for `n`th parameter.
Samples from different chains are merged.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
samples : array
Samples being kept, permuted and in column-major order.
"""
return pystan._misc.get_kept_samples(n, sim)
def _get_samples(n, sim, inc_warmup=True):
# NOTE: this is in stanfit-class.R in RStan (rather than misc.R)
"""Get chains for `n`th parameter.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
chains : list of array
Each chain is an element in the list.
"""
return pystan._misc.get_samples(n, sim, inc_warmup)
def _redirect_stderr():
"""Redirect stderr for subprocesses to /dev/null
Silences copious compilation messages.
Returns
-------
orig_stderr : file descriptor
Copy of original stderr file descriptor
"""
sys.stderr.flush()
stderr_fileno = sys.stderr.fileno()
orig_stderr = os.dup(stderr_fileno)
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, stderr_fileno)
os.close(devnull)
return orig_stderr
def _has_fileno(stream):
"""Returns whether the stream object seems to have a working fileno()
Tells whether _redirect_stderr is likely to work.
Parameters
----------
stream : IO stream object
Returns
-------
has_fileno : bool
True if stream.fileno() exists and doesn't raise OSError or
UnsupportedOperation
"""
try:
stream.fileno()
except (AttributeError, OSError, IOError, io.UnsupportedOperation):
return False
return True
def _append_id(file, id, suffix='.csv'):
fname = os.path.basename(file)
fpath = os.path.dirname(file)
fname2 = re.sub(r'\.csv\s*$', '_{}.csv'.format(id), fname)
if fname2 == fname:
fname2 = '{}_{}.csv'.format(fname, id)
return os.path.join(fpath, fname2)
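# Illustrative example (hypothetical path):
#   _append_id('/tmp/samples.csv', 2) -> '/tmp/samples_2.csv'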
def _writable_sample_file(file, warn=True, wfun=None):
"""Check to see if file is writable, if not use temporary file"""
if wfun is None:
wfun = lambda x, y: '"{}" is not writable; use "{}" instead'.format(x, y)
dir = os.path.dirname(file)
dir = os.getcwd() if dir == '' else dir
if os.access(dir, os.W_OK):
return file
else:
dir2 = tempfile.mkdtemp()
if warn:
logger.warning(wfun(dir, dir2))
return os.path.join(dir2, os.path.basename(file))
def is_legal_stan_vname(name):
stan_kw1 = ('for', 'in', 'while', 'repeat', 'until', 'if', 'then', 'else',
'true', 'false')
stan_kw2 = ('int', 'real', 'vector', 'simplex', 'ordered', 'positive_ordered',
'row_vector', 'matrix', 'corr_matrix', 'cov_matrix', 'lower', 'upper')
stan_kw3 = ('model', 'data', 'parameters', 'quantities', 'transformed', 'generated')
cpp_kw = ("alignas", "alignof", "and", "and_eq", "asm", "auto", "bitand", "bitor", "bool",
"break", "case", "catch", "char", "char16_t", "char32_t", "class", "compl",
"const", "constexpr", "const_cast", "continue", "decltype", "default", "delete",
"do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern",
"false", "float", "for", "friend", "goto", "if", "inline", "int", "long", "mutable",
"namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq",
"private", "protected", "public", "register", "reinterpret_cast", "return",
"short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct",
"switch", "template", "this", "thread_local", "throw", "true", "try", "typedef",
"typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile",
"wchar_t", "while", "xor", "xor_eq")
illegal = stan_kw1 + stan_kw2 + stan_kw3 + cpp_kw
if re.findall(r'(\.|^[0-9]|__$)', name):
return False
return not name in illegal
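# Illustrative examples:
#   is_legal_stan_vname('theta')   -> True
#   is_legal_stan_vname('for')     -> False  (Stan keyword)
#   is_legal_stan_vname('sigma__') -> False  (trailing double underscore)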
def _dict_to_rdump(data):
parts = []
for name, value in data.items():
if isinstance(value, (Sequence, Number, np.number, np.ndarray, int, bool, float)) \
and not isinstance(value, string_types):
value = np.asarray(value)
else:
raise ValueError("Variable {} is not a number and cannot be dumped.".format(name))
if value.dtype == np.bool:
value = value.astype(int)
if value.ndim == 0:
s = '{} <- {}\n'.format(name, str(value))
elif value.ndim == 1:
s = '{} <-\nc({})\n'.format(name, ', '.join(str(v) for v in value))
elif value.ndim > 1:
tmpl = '{} <-\nstructure(c({}), .Dim = c({}))\n'
# transpose value as R uses column-major
# 'F' = Fortran, column-major
s = tmpl.format(name,
', '.join(str(v) for v in value.flatten(order='F')),
', '.join(str(v) for v in value.shape))
parts.append(s)
return ''.join(parts)
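# Illustrative example (hypothetical data): scalars dump as plain assignments,
# 1-d values as c(...):
#   _dict_to_rdump({'N': 3, 'y': [1.0, 2.5, 3.1]})
#   -> 'N <- 3\ny <-\nc(1.0, 2.5, 3.1)\n'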
def stan_rdump(data, filename):
"""
Dump a dictionary with model data into a file using the R dump format that
Stan supports.
Parameters
----------
data : dict
filename : str
"""
for name in data:
if not is_legal_stan_vname(name):
raise ValueError("Variable name {} is not allowed in Stan".format(name))
with open(filename, 'w') as f:
f.write(_dict_to_rdump(data))
def _rdump_value_to_numpy(s):
"""
Convert a R dump formatted value to Numpy equivalent
For example, "c(1, 2)" becomes ``array([1, 2])``
Only supports a few R data structures. Will not work with European decimal format.
"""
if "structure" in s:
vector_str, shape_str = re.findall(r'c\([^\)]+\)', s)
shape = [int(d) for d in shape_str[2:-1].split(',')]
if '.' in vector_str:
arr = np.array([float(v) for v in vector_str[2:-1].split(',')])
else:
arr = np.array([int(v) for v in vector_str[2:-1].split(',')])
# 'F' = Fortran, column-major
arr = arr.reshape(shape, order='F')
elif "c(" in s:
if '.' in s:
arr = np.array([float(v) for v in s[2:-1].split(',')], order='F')
else:
arr = np.array([int(v) for v in s[2:-1].split(',')], order='F')
else:
arr = np.array(float(s) if '.' in s else int(s))
return arr
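# Illustrative example: "structure" values are reshaped column-major, so
#   _rdump_value_to_numpy("structure(c(1, 2, 3, 4), .Dim = c(2, 2))")
#   -> array([[1, 3],
#             [2, 4]])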
def _remove_empty_pars(pars, pars_oi, dims_oi):
"""
Remove parameters that are actually empty. For example, the parameter
y would be removed with the following model code:
transformed data { int n; n <- 0; }
parameters { real y[n]; }
Parameters
----------
pars: iterable of str
pars_oi: list of str
dims_oi: list of list of int
Returns
-------
pars_trimmed: list of str
"""
pars = list(pars)
for par, dim in zip(pars_oi, dims_oi):
if par in pars and np.prod(dim) == 0:
del pars[pars.index(par)]
return pars
def read_rdump(filename):
"""
Read data formatted using the R dump format
Parameters
----------
filename: str
Returns
-------
data : OrderedDict
"""
contents = open(filename).read().strip()
names = [name.strip() for name in re.findall(r'^(\w+) <-', contents, re.MULTILINE)]
values = [value.strip() for value in re.split(r'\w+ +<-', contents) if value]
if len(values) != len(names):
raise ValueError("Unable to read file. Unable to pair variable name with value.")
d = OrderedDict()
for name, value in zip(names, values):
d[name.strip()] = _rdump_value_to_numpy(value.strip())
return d
def to_dataframe(fit, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True):
"""Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("Pandas module not found. You can install pandas with: pip install pandas")
fit._verify_has_samples()
pars_original = pars
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
if pars:
pars = pystan.misc._remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi']
_check_pars(allpars, pars)
if dtypes is None:
dtypes = {}
n_kept = [s if inc_warmup else s-w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
chains = len(fit.sim['samples'])
diagnostic_type = {'divergent__':int,
'energy__':float,
'treedepth__':int,
'accept_stat__':float,
'stepsize__':float,
'n_leapfrog__':int}
header_dict = OrderedDict()
if header:
idx = np.concatenate([np.full(n_kept[chain], chain, dtype=int) for chain in range(chains)])
warmup = [np.zeros(n_kept[chain], dtype=np.int64) for chain in range(chains)]
if inc_warmup:
draw = []
for chain, w in zip(range(chains), fit.sim['warmup2']):
warmup[chain][:w] = 1
draw.append(np.arange(n_kept[chain], dtype=np.int64) - w)
draw = np.concatenate(draw)
else:
draw = np.concatenate([np.arange(n_kept[chain], dtype=np.int64) for chain in range(chains)])
warmup = np.concatenate(warmup)
header_dict = OrderedDict(zip(['chain', 'draw', 'warmup'], [idx, draw, warmup]))
if permuted:
if inc_warmup:
chain_permutation = []
chain_permutation_order = []
permutation = []
permutation_order = []
for chain, p, w in zip(range(chains), fit.sim['permutation'], fit.sim['warmup2']):
chain_permutation.append(list(range(-w, 0)) + p)
chain_permutation_order.append(list(range(-w, 0)) + list(np.argsort(p)))
permutation.append(sum(n_kept[:chain])+chain_permutation[-1]+w)
permutation_order.append(sum(n_kept[:chain])+chain_permutation_order[-1]+w)
chain_permutation = np.concatenate(chain_permutation)
chain_permutation_order = np.concatenate(chain_permutation_order)
permutation = np.concatenate(permutation)
permutation_order = np.concatenate(permutation_order)
else:
chain_permutation = np.concatenate(fit.sim['permutation'])
chain_permutation_order = np.concatenate([np.argsort(item) for item in fit.sim['permutation']])
permutation = np.concatenate([sum(n_kept[:chain])+p for chain, p in enumerate(fit.sim['permutation'])])
permutation_order = np.argsort(permutation)
header_dict["permutation"] = permutation
header_dict["chain_permutation"] = chain_permutation
header_dict["permutation_order"] = permutation_order
header_dict["chain_permutation_order"] = chain_permutation_order
if header:
header_df = pd.DataFrame.from_dict(header_dict)
else:
if permuted:
header_df = pd.DataFrame.from_dict({"permutation_order" : header_dict["permutation_order"]})
else:
header_df = pd.DataFrame()
fnames_set = set(fit.sim['fnames_oi'])
pars_set = set(pars)
if pars_original is None or fnames_set == pars_set:
dfs = [pd.DataFrame.from_dict(pyholder.chains).iloc[-n:] for pyholder, n in zip(fit.sim['samples'], n_kept)]
df = pd.concat(dfs, axis=0, sort=False, ignore_index=True)
if dtypes:
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
elif pars:
par_keys = dict()
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
columns = []
for par in pars:
columns.extend(par_keys.get(par, [par]))
columns = list(np.unique(columns))
df = pd.DataFrame(index=np.arange(sum(n_kept)), columns=columns, dtype=float)
for key in columns:
key_values = []
for chain, (pyholder, n) in enumerate(zip(fit.sim['samples'], n_kept)):
key_values.append(pyholder.chains[key][-n:])
df.loc[:, key] = np.concatenate(key_values)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
else:
df = pd.DataFrame()
if diagnostics:
diagnostics_dfs = []
for idx, (pyholder, permutation, n) in enumerate(zip(fit.sim['samples'], fit.sim['permutation'], n_kept), 1):
diagnostics_df = pd.DataFrame(pyholder['sampler_params'], index=pyholder['sampler_param_names']).T
diagnostics_df = diagnostics_df.iloc[-n:, :]
for key, dtype in diagnostic_type.items():
if key in diagnostics_df:
diagnostics_df.loc[:, key] = diagnostics_df.loc[:, key].astype(dtype)
diagnostics_dfs.append(diagnostics_df)
if diagnostics_dfs:
diagnostics_df = pd.concat(diagnostics_dfs, axis=0, sort=False, ignore_index=True)
else:
diagnostics_df = pd.DataFrame()
else:
diagnostics_df = pd.DataFrame()
import logging
from functools import lru_cache
from itertools import chain
# from linetimer import CodeTimer
import pandas as pd
from statistics import mean, StatisticsError
from elecsim.role.market.latest_market_data import LatestMarketData
from elecsim.market.electricity.bid import Bid
import elecsim.scenario.scenario_data
from random import sample
logger = logging.getLogger(__name__)
"""power_exchange.py: Functionality to run power exchange"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
class PowerExchange:
def __init__(self, model, demand_distribution=None):
"""
Power exchange agent which contains functionality to tender and respond to bids.
:param model: Model in which the agents are contained in.
"""
self.model = model
self.demand_distribution = demand_distribution
self.hold_duration_curve_prices = []
self.price_duration_curve = pd.DataFrame(columns=["year", "segment_hour", "segment_demand", "accepted_price"])
self.stored_bids = {}
self.stored_ordered_bids = {}
def tender_bids(self, segment_hours, segment_demand, predict=False):
"""
Function which iterates through the generator companies, requests their bids, orders them in order of price,
and accepts bids.
:param agents: All agents from simulation model.
:param segment_hours: Value for number of hours particular electricity generation is required.
:param segment_demand: Size of electricity consumption required.
:param predict: Boolean that states whether the bids being tendered are for predicting price duration curve or whether it is for real bids.
:return: None
"""
self.hold_duration_curve_prices = []
agent = self.model.schedule.agents
generator_companies = [x for x in agent if hasattr(x, 'plants')] # Selection of generation company agents
for gen_co in generator_companies:
for plant in gen_co.plants:
plant.capacity_fulfilled = dict.fromkeys(segment_hours, 0)
highest_bid = 0
        # NOTE: the loop variable below deliberately shadows the segment_demand list argument;
        # zip() has already captured the original iterable, so the iteration itself is unaffected.
        for segment_hour, segment_demand in zip(segment_hours, segment_demand):
if self.model.gencos_rl:
eid_bidding = self.model.bidding_client.start_episode()
co2_price = LatestMarketData(self.model)._get_variable_data("co2")[self.model.years_from_start]
gas_price = LatestMarketData(self.model)._get_variable_data("gas")[self.model.years_from_start]
coal_price = LatestMarketData(self.model)._get_variable_data("coal")[self.model.years_from_start]
observation = [segment_hour, segment_demand, self.model.year_number, co2_price, gas_price, coal_price, highest_bid]
# logger.info("observation: {}".format(observation))
actions = self.model.bidding_client.get_action(eid_bidding, observation)
# logger.info("action: {}".format(actions))
bids = []
action_index = 0
for generation_company in generator_companies:
if generation_company.name in self.model.gencos_rl:
number_of_plants = len(generation_company.plants)
actual_bid = generation_company.calculate_bids(segment_hour, predict, actions[action_index:(action_index+number_of_plants)])
action_index += number_of_plants
else:
actual_bid = generation_company.calculate_bids(segment_hour, predict)
bids.append(actual_bid)
sorted_bids = self._sort_bids(bids)
if predict is False:
logger.debug("bids len: {}".format(len(sorted_bids)))
# logger.info("total capacity of bids: {}".format(sum(bid.capacity_bid for bid in sorted_bids)))
accepted_bids = self._respond_to_bids(sorted_bids, segment_hour, segment_demand)
highest_bid = self._accept_bids(accepted_bids)
if self.model.gencos_rl:
try:
average_accepted_price = mean([int(rl_bid.bid_accepted)*rl_bid.price_per_mwh for rl_bid in accepted_bids if rl_bid.rl_bid is True])
except StatisticsError:
average_accepted_price = 0
# logger.info("total_accepted_bids: {}".format(total_accepted_bids))
self.model.bidding_client.log_returns(eid_bidding, average_accepted_price)
self.model.bidding_client.end_episode(eid_bidding, observation)
if self.demand_distribution:
self._create_load_duration_price_curve(segment_hour, segment_demand + sample(self.demand_distribution, 1)[0], highest_bid)
else:
self._create_load_duration_price_curve(segment_hour,
segment_demand,
highest_bid)
self.price_duration_curve = | pd.DataFrame(self.hold_duration_curve_prices) | pandas.DataFrame |
import psycopg2
import psycopg2
import sqlalchemy as salc
import numpy as np
import warnings
import datetime
import pandas as pd
import json
from math import pi
from flask import request, send_file, Response
# import visualization libraries
from bokeh.io import export_png
from bokeh.embed import json_item
from bokeh.plotting import figure
from bokeh.models import Label, LabelSet, ColumnDataSource, Legend
from bokeh.palettes import Colorblind
from bokeh.layouts import gridplot
from bokeh.transform import cumsum
warnings.filterwarnings('ignore')
def create_routes(server):
def quarters(month, year):
if 1 <= month <= 3:
return '01' + '/' + year
elif 4 <= month <= 6:
return '04' + '/' + year
        elif 7 <= month <= 9:
return '07' + '/' + year
elif 10 <= month <= 12:
return '10' + '/' + year
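    # Illustrative mapping (example values, not from the original source):
    #   quarters(2, '2020') -> '01/2020'   (Q1)
    #   quarters(8, '2020') -> '07/2020'   (Q3)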
def new_contributor_data_collection(repo_id, required_contributions):
rank_list = []
for num in range(1, required_contributions + 1):
rank_list.append(num)
rank_tuple = tuple(rank_list)
contributor_query = salc.sql.text(f"""
SELECT * FROM (
SELECT ID AS
cntrb_id,
A.created_at AS created_at,
date_part('month', A.created_at::DATE) AS month,
date_part('year', A.created_at::DATE) AS year,
A.repo_id,
repo_name,
full_name,
login,
ACTION,
rank() OVER (
PARTITION BY id
ORDER BY A.created_at ASC
)
FROM
(
(
SELECT
canonical_id AS ID,
created_at AS created_at,
repo_id,
'issue_opened' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.issues
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issues.reporter_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
repo_id = {repo_id}
AND pull_request IS NULL
GROUP BY
canonical_id,
repo_id,
issues.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
canonical_id AS ID,
TO_TIMESTAMP( cmt_author_date, 'YYYY-MM-DD' ) AS created_at,
repo_id,
'commit' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.commits
LEFT OUTER JOIN augur_data.contributors ON cntrb_email = cmt_author_email
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date, cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
repo_id = {repo_id}
GROUP BY
repo_id,
canonical_email,
canonical_id,
commits.cmt_author_date,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
message.cntrb_id AS ID,
created_at AS created_at,
commits.repo_id,
'commit_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.commit_comment_ref,
augur_data.commits,
augur_data.message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date, cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
commits.cmt_id = commit_comment_ref.cmt_id
AND commits.repo_id = {repo_id}
AND commit_comment_ref.msg_id = message.msg_id
GROUP BY
ID,
commits.repo_id,
commit_comment_ref.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
issue_events.cntrb_id AS ID,
issue_events.created_at AS created_at,
issues.repo_id,
'issue_closed' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.issues,
augur_data.issue_events
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issue_events.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
issues.repo_id = {repo_id}
AND issues.issue_id = issue_events.issue_id
AND issues.pull_request IS NULL
AND issue_events.cntrb_id IS NOT NULL
AND ACTION = 'closed'
GROUP BY
issue_events.cntrb_id,
issues.repo_id,
issue_events.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
pr_augur_contributor_id AS ID,
pr_created_at AS created_at,
pull_requests.repo_id,
'open_pull_request' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.pull_requests
LEFT OUTER JOIN augur_data.contributors ON pull_requests.pr_augur_contributor_id = contributors.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
pull_requests.repo_id = {repo_id}
GROUP BY
pull_requests.pr_augur_contributor_id,
pull_requests.repo_id,
pull_requests.pr_created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
message.cntrb_id AS ID,
msg_timestamp AS created_at,
pull_requests.repo_id as repo_id,
'pull_request_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.pull_requests,
augur_data.pull_request_message_ref,
augur_data.message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
pull_requests.repo_id = {repo_id}
AND pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
AND pull_request_message_ref.msg_id = message.msg_id
GROUP BY
message.cntrb_id,
pull_requests.repo_id,
message.msg_timestamp,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
issues.reporter_id AS ID,
msg_timestamp AS created_at,
issues.repo_id as repo_id,
'issue_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
issues,
issue_message_ref,
message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
issues.repo_id = {repo_id}
AND issue_message_ref.msg_id = message.msg_id
AND issues.issue_id = issue_message_ref.issue_id
                            AND issues.pull_request_id IS NULL
GROUP BY
issues.reporter_id,
issues.repo_id,
message.msg_timestamp,
contributors.cntrb_full_name,
contributors.cntrb_login
)
) A,
repo
WHERE
ID IS NOT NULL
AND A.repo_id = repo.repo_id
GROUP BY
A.ID,
A.repo_id,
A.ACTION,
A.created_at,
repo.repo_name,
A.full_name,
A.login
ORDER BY
cntrb_id
) b
WHERE RANK IN {rank_tuple}
""")
df = pd.read_sql(contributor_query, server.augur_app.database)
df = df.loc[~df['full_name'].str.contains('bot', na=False)]
df = df.loc[~df['login'].str.contains('bot', na=False)]
df = df.loc[~df['cntrb_id'].isin(df[df.duplicated(['cntrb_id', 'created_at', 'repo_id', 'rank'])]['cntrb_id'])]
# add yearmonths to contributor
df[['month', 'year']] = df[['month', 'year']].astype(int).astype(str)
df['yearmonth'] = df['month'] + '/' + df['year']
df['yearmonth'] = pd.to_datetime(df['yearmonth'])
# add column with every value being one, so when the contributor df is concatenated
# with the months df, the filler months won't be counted in the sums
df['new_contributors'] = 1
# add quarters to contributor dataframe
df['month'] = df['month'].astype(int)
df['quarter'] = df.apply(lambda x: quarters(x['month'], x['year']), axis=1, result_type='reduce')
df['quarter'] = pd.to_datetime(df['quarter'])
return df
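    # The returned frame has one row per (contributor, contribution-rank) pair, with helper columns
    # added above: 'yearmonth', 'quarter', and a constant 'new_contributors' = 1 so that filler
    # months can later be concatenated without inflating the counts.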
def months_data_collection(start_date, end_date):
# months_query makes a df of years and months, this is used to fill
# the months with no data in the visualizations
months_query = salc.sql.text(f"""
SELECT *
FROM
(
SELECT
date_part( 'year', created_month :: DATE ) AS year,
date_part( 'month', created_month :: DATE ) AS MONTH
FROM
(SELECT *
FROM (
SELECT created_month :: DATE
FROM generate_series (TIMESTAMP '{start_date}', TIMESTAMP '{end_date}', INTERVAL '1 month' ) created_month ) d ) x
) y
""")
months_df = pd.read_sql(months_query, server.augur_app.database)
# add yearmonths to months_df
months_df[['year', 'month']] = months_df[['year', 'month']].astype(float).astype(int).astype(str)
months_df['yearmonth'] = months_df['month'] + '/' + months_df['year']
months_df['yearmonth'] = pd.to_datetime(months_df['yearmonth'])
# filter months_df with start_date and end_date, the contributor df is filtered in the visualizations
months_df = months_df.set_index(months_df['yearmonth'])
months_df = months_df.loc[start_date: end_date].reset_index(drop=True)
# add quarters to months dataframe
months_df['month'] = months_df['month'].astype(int)
months_df['quarter'] = months_df.apply(lambda x: quarters(x['month'], x['year']), axis=1)
months_df['quarter'] = pd.to_datetime(months_df['quarter'])
return months_df
def get_repo_id_start_date_and_end_date():
now = datetime.datetime.now()
repo_id = int(request.args.get('repo_id'))
start_date = str(request.args.get('start_date', "{}-01-01".format(now.year - 1)))
end_date = str(request.args.get('end_date', "{}-{}-{}".format(now.year, now.month, now.day)))
return repo_id, start_date, end_date
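    # Illustrative request parameters (repo_id is a hypothetical example; dates default to the last year):
    #   /<api_version>/contributor_reports/new_contributors_bar/?repo_id=25430&start_date=2020-01-01&end_date=2021-01-01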
def filter_out_repeats_without_required_contributions_in_required_time(repeat_list, repeats_df, required_time,
first_list):
differences = []
for i in range(0, len(repeat_list)):
time_difference = repeat_list[i] - first_list[i]
total = time_difference.days * 86400 + time_difference.seconds
differences.append(total)
repeats_df['differences'] = differences
        # remove contributors who made enough contributions, but not in a short enough time
repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400]
return repeats_df
def compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions, required_time, start_date):
# create a copy of contributor dataframe
driver_df = input_df.copy()
# remove first time contributors before begin date, along with their second contribution
mask = (driver_df['yearmonth'] < start_date)
driver_df = driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])]
        # determine if a contributor is a drive-by by finding all the cntrb_id's that do not have a second contribution
repeats_df = driver_df.copy()
repeats_df = repeats_df.loc[repeats_df['rank'].isin([1, required_contributions])]
        # removes all the contributors that only have a first contribution
repeats_df = repeats_df[
repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])]
repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist()
first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist()
repeats_df = repeats_df.loc[driver_df['rank'] == 1]
repeats_df['type'] = 'repeat'
repeats_df = filter_out_repeats_without_required_contributions_in_required_time(
repeat_list, repeats_df, required_time, first_list)
repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400]
repeat_cntrb_ids = repeats_df['cntrb_id'].to_list()
drive_by_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)]
drive_by_df = drive_by_df.loc[driver_df['rank'] == 1]
drive_by_df['type'] = 'drive_by'
return drive_by_df, repeats_df
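    # Downstream, rows typed 'drive_by' mark contributors who did not make the required number of
    # contributions within the required time window, while rows typed 'repeat' mark contributors who
    # did; both frames keep only the first-contribution row per contributor.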
def add_caption_to_visualizations(caption, required_contributions, required_time, plot_width):
caption_plot = figure(width=plot_width, height=200, margin=(0, 0, 0, 0))
caption_plot.add_layout(Label(
x=0,
y=160,
x_units='screen',
y_units='screen',
text='{}'.format(caption.format(required_contributions, required_time)),
text_font='times',
text_font_size='15pt',
render_mode='css'
))
caption_plot.outline_line_color = None
return caption_plot
def format_new_cntrb_bar_charts(plot, rank, group_by_format_string):
plot.xgrid.grid_line_color = None
plot.y_range.start = 0
plot.axis.minor_tick_line_color = None
plot.outline_line_color = None
plot.title.align = "center"
plot.title.text_font_size = "18px"
plot.yaxis.axis_label = 'Second Time Contributors' if rank == 2 else 'New Contributors'
plot.xaxis.axis_label = group_by_format_string
plot.xaxis.axis_label_text_font_size = "18px"
plot.yaxis.axis_label_text_font_size = "16px"
plot.xaxis.major_label_text_font_size = "16px"
plot.xaxis.major_label_orientation = 45.0
plot.yaxis.major_label_text_font_size = "16px"
return plot
def add_charts_and_captions_to_correct_positions(chart_plot, caption_plot, rank, contributor_type,
row_1, row_2, row_3, row_4):
if rank == 1 and (contributor_type == 'All' or contributor_type == 'repeat'):
row_1.append(chart_plot)
row_2.append(caption_plot)
elif rank == 2 or contributor_type == 'drive_by':
row_3.append(chart_plot)
row_4.append(caption_plot)
def get_new_cntrb_bar_chart_query_params():
group_by = str(request.args.get('group_by', "quarter"))
required_contributions = int(request.args.get('required_contributions', 4))
required_time = int(request.args.get('required_time', 365))
return group_by, required_contributions, required_time
def remove_rows_before_start_date(df, start_date):
mask = (df['yearmonth'] < start_date)
result_df = df[~df['cntrb_id'].isin(df.loc[mask]['cntrb_id'])]
return result_df
def remove_rows_with_null_values(df, not_null_columns=[]):
"""Remove null data from pandas df
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- list_of_columns
description: columns that are searched for NULL values
type: list
default: [] (means all columns will be checked for NULL values)
IMPORTANT: if an empty list is passed or nothing is passed it will check all columns for NULL values
Return Value
-- Modified Pandas Dataframe
"""
if len(not_null_columns) == 0:
not_null_columns = df.columns.to_list()
total_rows_removed = 0
for col in not_null_columns:
rows_removed = len(df.loc[df[col].isnull() == True])
if rows_removed > 0:
print(f"{rows_removed} rows have been removed because of null values in column {col}")
total_rows_removed += rows_removed
df = df.loc[df[col].isnull() == False]
if total_rows_removed > 0:
print(f"\nTotal rows removed because of null data: {total_rows_removed}");
else:
print("No null data found")
return df
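    # Illustrative call (hypothetical dataframe and columns):
    #   cleaned = remove_rows_with_null_values(df, not_null_columns=['cntrb_id', 'login'])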
def get_needed_columns(df, list_of_columns):
"""Get only a specific list of columns from a Pandas Dataframe
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- list_of_columns
description: columns that will be kept in dataframe
type: list
Return Value
-- Modified Pandas Dataframe
"""
return df[list_of_columns]
def filter_data(df, needed_columns, not_null_columns=[]):
"""Filters out the unneeded rows in the df, and removed NULL data from df
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- needed_columns
description: the columns to keep in the dataframe
-- not_null_columns
description: columns that will be searched for NULL data,
if NULL values are found those rows will be removed
default: [] (means all columns in needed_columns list will be checked for NULL values)
IMPORTANT: if an empty list is passed or nothing is passed it will check
all columns in needed_columns list for NULL values
Return Value
-- Modified Pandas Dataframe
"""
if all(x in needed_columns for x in not_null_columns):
df = get_needed_columns(df, needed_columns)
df = remove_rows_with_null_values(df, not_null_columns)
return df
else:
print("Developer error, not null columns should be a subset of needed columns")
return df
@server.app.route('/{}/contributor_reports/new_contributors_bar/'.format(server.api_version), methods=["GET"])
def new_contributors_bar():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
group_by, required_contributions, required_time = get_new_cntrb_bar_chart_query_params()
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
months_df = months_data_collection(start_date=start_date, end_date=end_date)
# TODO remove full_name from data for all charts since it is not needed in vis generation
not_null_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = remove_rows_with_null_values(input_df, not_null_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
contributor_types = ['All', 'repeat', 'drive_by']
ranks = [1, 2]
row_1, row_2, row_3, row_4 = [], [], [], []
all_df = remove_rows_before_start_date(input_df, start_date)
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
for rank in ranks:
for contributor_type in contributor_types:
# do not display these visualizations since drive-by's do not have second contributions, and the
                # second contribution of a repeat contributor is the same thing as all the second time contributors
if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'):
continue
if contributor_type == 'repeat':
driver_df = repeats_df
caption = """This graph shows repeat contributors in the specified time period. Repeat contributors
are contributors who have made {} or more contributions in {} days and their first contribution is
in the specified time period. New contributors are individuals who make their first contribution
in the specified time period."""
elif contributor_type == 'drive_by':
driver_df = drive_by_df
caption = """This graph shows fly by contributors in the specified time period. Fly by contributors
are contributors who make less than the required {} contributions in {} days. New contributors are
individuals who make their first contribution in the specified time period. Of course, then, “All
fly-by’s are by definition first time contributors”. However, not all first time contributors are
fly-by’s."""
elif contributor_type == 'All':
if rank == 1:
driver_df = all_df
# makes df with all first time contributors
driver_df = driver_df.loc[driver_df['rank'] == 1]
caption = """This graph shows all the first time contributors, whether they contribute once, or
contribute multiple times. New contributors are individuals who make their first contribution
in the specified time period."""
if rank == 2:
driver_df = all_df
# creates df with all second time contributors
driver_df = driver_df.loc[driver_df['rank'] == 2]
caption = """This graph shows the second contribution of all
first time contributors in the specified time period."""
# y_axis_label = 'Second Time Contributors'
# filter by end_date, this is not done with the begin date filtering because a repeat contributor
# will look like drive-by if the second contribution is removed by end_date filtering
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# adds all months to driver_df so the lists of dates will include all months and years
driver_df = pd.concat([driver_df, months_df])
data = pd.DataFrame()
if group_by == 'year':
data['dates'] = driver_df[group_by].unique()
# new contributor counts for y-axis
data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[
'new_contributors']
# used to format x-axis and title
group_by_format_string = "Year"
elif group_by == 'quarter' or group_by == 'month':
# set variables to group the data by quarter or month
if group_by == 'quarter':
date_column = 'quarter'
group_by_format_string = "Quarter"
elif group_by == 'month':
date_column = 'yearmonth'
group_by_format_string = "Month"
# modifies the driver_df[date_column] to be a string with year and month,
# then finds all the unique values
data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit='M'))
# new contributor counts for y-axis
data['new_contributor_counts'] = driver_df.groupby([date_column]).sum().reset_index()[
'new_contributors']
# if the data set is large enough it will dynamically assign the width, if the data set is
# too small it will by default set to 870 pixel so the title fits
if len(data['new_contributor_counts']) >= 15:
plot_width = 46 * len(data['new_contributor_counts'])
else:
plot_width = 870
                # create a dict to convert an integer number into a word
# used to turn the rank into a word, so it is nicely displayed in the title
numbers = ['Zero', 'First', 'Second']
num_conversion_dict = {}
for i in range(1, len(numbers)):
num_conversion_dict[i] = numbers[i]
number = '{}'.format(num_conversion_dict[rank])
# define pot for bar chart
p = figure(x_range=data['dates'], plot_height=400, plot_width=plot_width,
title="{}: {} {} Time Contributors Per {}".format(repo_dict[repo_id],
contributor_type.capitalize(), number,
group_by_format_string),
y_range=(0, max(data['new_contributor_counts']) * 1.15), margin=(0, 0, 10, 0))
p.vbar(x=data['dates'], top=data['new_contributor_counts'], width=0.8)
source = ColumnDataSource(
data=dict(dates=data['dates'], new_contributor_counts=data['new_contributor_counts']))
# add contributor_count labels to chart
p.add_layout(LabelSet(x='dates', y='new_contributor_counts', text='new_contributor_counts', y_offset=4,
text_font_size="13pt", text_color="black",
source=source, text_align='center'))
plot = format_new_cntrb_bar_charts(p, rank, group_by_format_string)
caption_plot = add_caption_to_visualizations(caption, required_contributions, required_time, plot_width)
add_charts_and_captions_to_correct_positions(plot, caption_plot, rank, contributor_type, row_1,
row_2, row_3, row_4)
# puts plots together into a grid
grid = gridplot([row_1, row_2, row_3, row_4])
filename = export_png(grid)
return send_file(filename)
@server.app.route('/{}/contributor_reports/new_contributors_stacked_bar/'.format(server.api_version),
methods=["GET"])
def new_contributors_stacked_bar():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
group_by, required_contributions, required_time = get_new_cntrb_bar_chart_query_params()
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
months_df = months_data_collection(start_date=start_date, end_date=end_date)
needed_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = filter_data(input_df, needed_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
contributor_types = ['All', 'repeat', 'drive_by']
ranks = [1, 2]
row_1, row_2, row_3, row_4 = [], [], [], []
all_df = remove_rows_before_start_date(input_df, start_date)
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
for rank in ranks:
for contributor_type in contributor_types:
# do not display these visualizations since drive-by's do not have second contributions,
                # and the second contribution of a repeat contributor is the same thing as all the
# second time contributors
if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'):
continue
if contributor_type == 'repeat':
driver_df = repeats_df
caption = """This graph shows repeat contributors in the specified time period. Repeat contributors
are contributors who have made {} or more contributions in {} days and their first contribution is
in the specified time period. New contributors are individuals who make their first contribution in
the specified time period."""
elif contributor_type == 'drive_by':
driver_df = drive_by_df
caption = """This graph shows fly by contributors in the specified time period. Fly by contributors
are contributors who make less than the required {} contributions in {} days. New contributors are
individuals who make their first contribution in the specified time period. Of course, then, “All
fly-by’s are by definition first time contributors”. However, not all first time contributors are
fly-by’s."""
elif contributor_type == 'All':
if rank == 1:
driver_df = all_df
# makes df with all first time contributors
driver_df = driver_df.loc[driver_df['rank'] == 1]
caption = """This graph shows all the first time contributors, whether they contribute once, or
contribute multiple times. New contributors are individuals who make their first contribution in
the specified time period."""
if rank == 2:
driver_df = all_df
# creates df with all second time contributor
driver_df = driver_df.loc[driver_df['rank'] == 2]
caption = """This graph shows the second contribution of all first time
contributors in the specified time period."""
# y_axis_label = 'Second Time Contributors'
# filter by end_date, this is not done with the begin date filtering because a repeat contributor will
# look like drive-by if the second contribution is removed by end_date filtering
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# adds all months to driver_df so the lists of dates will include all months and years
driver_df = pd.concat([driver_df, months_df])
actions = ['open_pull_request', 'pull_request_comment', 'commit', 'issue_closed', 'issue_opened',
'issue_comment']
data = pd.DataFrame()
if group_by == 'year':
# x-axis dates
data['dates'] = driver_df[group_by].unique()
for contribution_type in actions:
data[contribution_type] = \
pd.concat([driver_df.loc[driver_df['action'] == contribution_type], months_df]).groupby(
group_by).sum().reset_index()['new_contributors']
# new contributor counts for all actions
data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[
'new_contributors']
# used to format x-axis and graph title
group_by_format_string = "Year"
elif group_by == 'quarter' or group_by == 'month':
# set variables to group the data by quarter or month
if group_by == 'quarter':
date_column = 'quarter'
group_by_format_string = "Quarter"
elif group_by == 'month':
date_column = 'yearmonth'
group_by_format_string = "Month"
# modifies the driver_df[date_column] to be a string with year and month,
# then finds all the unique values
data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit='M'))
# new_contributor counts for each type of action
for contribution_type in actions:
data[contribution_type] = \
pd.concat([driver_df.loc[driver_df['action'] == contribution_type], months_df]).groupby(
date_column).sum().reset_index()['new_contributors']
print(data.to_string())
# new contributor counts for all actions
data['new_contributor_counts'] = driver_df.groupby([date_column]).sum().reset_index()[
'new_contributors']
# if the data set is large enough it will dynamically assign the width, if the data set is too small it
# will by default set to 870 pixel so the title fits
if len(data['new_contributor_counts']) >= 15:
plot_width = 46 * len(data['new_contributor_counts']) + 200
else:
plot_width = 870
# create list of values for data source dict
actions_df_references = []
for action in actions:
actions_df_references.append(data[action])
# created dict with the actions as the keys, and the values as the values from the df
data_source = {actions[i]: actions_df_references[i] for i in range(len(actions))}
data_source.update({'dates': data['dates'], 'New Contributor Counts': data['new_contributor_counts']})
colors = Colorblind[len(actions)]
source = ColumnDataSource(data=data_source)
                # create a dict to convert an integer number into a word
# used to turn the rank into a word, so it is nicely displayed in the title
numbers = ['Zero', 'First', 'Second']
num_conversion_dict = {}
for i in range(1, len(numbers)):
num_conversion_dict[i] = numbers[i]
number = '{}'.format(num_conversion_dict[rank])
# y_max = 20
# creates plot to hold chart
p = figure(x_range=data['dates'], plot_height=400, plot_width=plot_width,
title='{}: {} {} Time Contributors Per {}'.format(repo_dict[repo_id],
contributor_type.capitalize(), number,
group_by_format_string),
toolbar_location=None, y_range=(0, max(data['new_contributor_counts']) * 1.15))
# max(data['new_contributor_counts'])* 1.15), margin = (0, 0, 0, 0))
vbar = p.vbar_stack(actions, x='dates', width=0.8, color=colors, source=source)
# add total count labels
p.add_layout(LabelSet(x='dates', y='New Contributor Counts', text='New Contributor Counts', y_offset=4,
text_font_size="14pt",
text_color="black", source=source, text_align='center'))
# add legend
                legend = Legend(items=[(action, [renderer]) for (action, renderer) in zip(actions, vbar)], location=(0, 120),
label_text_font_size="16px")
p.add_layout(legend, 'right')
plot = format_new_cntrb_bar_charts(p, rank, group_by_format_string)
caption_plot = add_caption_to_visualizations(caption, required_contributions, required_time, plot_width)
add_charts_and_captions_to_correct_positions(plot, caption_plot, rank, contributor_type, row_1,
row_2, row_3, row_4)
# puts plots together into a grid
grid = gridplot([row_1, row_2, row_3, row_4])
filename = export_png(grid)
return send_file(filename)
@server.app.route('/{}/contributor_reports/returning_contributors_pie_chart/'.format(server.api_version),
methods=["GET"])
def returning_contributor_pie_chart():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
required_contributions = int(request.args.get('required_contributions', 4))
required_time = int(request.args.get('required_time', 365))
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
needed_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = filter_data(input_df, needed_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
print(repeats_df.to_string())
driver_df = pd.concat([drive_by_df, repeats_df])
# filter df by end date
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# first and second time contributor counts
drive_by_contributors = driver_df.loc[driver_df['type'] == 'drive_by'].count()['new_contributors']
repeat_contributors = driver_df.loc[driver_df['type'] == 'repeat'].count()['new_contributors']
# create a dict with the # of drive-by and repeat contributors
x = {'Drive_By': drive_by_contributors,
'Repeat': repeat_contributors}
# turn dict 'x' into a dataframe with columns 'contributor_type', and 'counts'
data = pd.Series(x).reset_index(name='counts').rename(columns={'index': 'contributor_type'})
data['angle'] = data['counts'] / data['counts'].sum() * 2 * pi
data['color'] = ('#0072B2', '#E69F00')
data['percentage'] = ((data['angle'] / (2 * pi)) * 100).round(2)
# format title
title = "{}: Number of Returning " \
"Contributors out of {} from {} to {}" \
.format(repo_dict[repo_id], drive_by_contributors + repeat_contributors, start_date, end_date)
title_text_font_size = 18
plot_width = 850
# sets plot_width to width of title if title is wider than 850 pixels
if len(title) * title_text_font_size / 2 > plot_width:
plot_width = int(len(title) * title_text_font_size / 2)
# creates plot for chart
p = figure(plot_height=450, plot_width=plot_width, title=title,
toolbar_location=None, x_range=(-0.5, 1.3), tools='hover', tooltips="@contributor_type",
margin=(0, 0, 0, 0))
p.wedge(x=0.87, y=1, radius=0.4, start_angle=cumsum('angle', include_zero=True),
end_angle=cumsum('angle'), line_color=None, fill_color='color',
legend_field='contributor_type', source=data)
start_point = 0.88
for i in range(0, len(data['percentage'])):
# percentages
p.add_layout(Label(x=-0.17, y=start_point + 0.13 * (len(data['percentage']) - 1 - i),
text='{}%'.format(data.iloc[i]['percentage']),
render_mode='css', text_font_size='15px', text_font_style='bold'))
# contributors
p.add_layout(Label(x=0.12, y=start_point + 0.13 * (len(data['percentage']) - 1 - i),
text='{}'.format(data.iloc[i]['counts']),
render_mode='css', text_font_size='15px', text_font_style='bold'))
# percentages header
p.add_layout(
Label(x=-0.22, y=start_point + 0.13 * (len(data['percentage'])), text='Percentages', render_mode='css',
text_font_size='15px', text_font_style='bold'))
# legend header
p.add_layout(
Label(x=-0.43, y=start_point + 0.13 * (len(data['percentage'])), text='Category', render_mode='css',
text_font_size='15px', text_font_style='bold'))
# contributors header
p.add_layout(
Label(x=0, y=start_point + 0.13 * (len(data['percentage'])), text='# Contributors', render_mode='css',
text_font_size='15px', text_font_style='bold'))
p.axis.axis_label = None
p.axis.visible = False
p.grid.grid_line_color = None
p.title.align = "center"
p.title.text_font_size = "{}px".format(title_text_font_size)
p.legend.location = "center_left"
p.legend.border_line_color = None
p.legend.label_text_font_style = 'bold'
p.legend.label_text_font_size = "15px"
plot = p
caption = """This pie chart shows the percentage of new contributors who were fly-by or repeat contributors.
Fly by contributors are contributors who make less than the required {0} contributions in {1} days.
New contributors are individuals who make their first contribution in the specified time period.
Repeat contributors are contributors who have made {0} or more contributions in {1} days and their
first contribution is in the specified time period."""
caption_plot = add_caption_to_visualizations(caption, required_contributions, required_time, plot_width)
# put graph and caption plot together into one grid
grid = gridplot([[plot], [caption_plot]])
filename = export_png(grid)
return send_file(filename)
@server.app.route('/{}/contributor_reports/returning_contributors_stacked_bar/'.format(server.api_version),
methods=["GET"])
def returning_contributor_stacked_bar():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
group_by = str(request.args.get('group_by', "quarter"))
required_contributions = int(request.args.get('required_contributions', 4))
required_time = int(request.args.get('required_time', 365))
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
months_df = months_data_collection(start_date=start_date, end_date=end_date)
needed_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = filter_data(input_df, needed_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
driver_df = pd.concat([drive_by_df, repeats_df, months_df])
# filter by end_date
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# create df to hold data needed for chart
data = | pd.DataFrame() | pandas.DataFrame |
from cbs import cbs
import pandas as pd
import pytest
#Get the CBS RB dataframe
@pytest.fixture(scope="module")
def RB():
return cbs.Cbs().parser('RB')
def test_cbs_rb_columns(RB):
assert RB.columns.tolist() == ['Name', 'pass_att', 'pass_cmp', 'pass_yds', 'pass_td', 'intercept', 'rate', 'rush_att',
'rush_yds', 'rush_avg', 'rush_td', 'rec_tgt', 'recept', 'rec_yds', 'rec_avg', 'rec_td', '2pt',
'fum_lost']
#Test top and bottom names
def test_cbs_rb_projection1(RB):
assert type(RB.iloc[0].Name) == str
def test_cbs_rb_projection2(RB):
assert type(RB.iloc[1].Name) == str
def test_cbs_rb_projection3(RB):
assert type(RB.iloc[120].Name) == str
def test_cbs_rb_projection4(RB):
assert type(RB.iloc[121].Name) == str
#Test top and bottom Stats
def test_cbs_rb_projection5(RB):
assert | pd.to_numeric(RB.iloc[0].rush_td, errors='ignore') | pandas.to_numeric |
#!/usr/bin/env python
"""
I use this script to determine the ratio of measurements of fluxes compared to
the number of temperature measurements for FLUXNET and LaThuille sites.
This is done for latent heat, sensible heat and NEE. I focus on
extreme temperatures (lower and upper 2.2% of the temperature distribution
of each site)
"""
__author__ = "<NAME>"
__version__ = "1.0 (25.10.2018)"
__email__ = "<EMAIL>"
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pandas as pd
import glob
import xarray as xr
import os
def main(files_met, files_flux, ofname1, ofname2, ofname3, ofname4, plot_dir):
# empty lists to add data in for the table/figures
results_LH = []
results_SH = []
results_NEE = []
lons = []
lats = []
# creating dataframes for barplots of global temperature distribution
df_temp = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import urllib.request
import numpy as np
import shapefile
from datetime import datetime
from zipfile import ZipFile
import pandasql as ps
import requests
import json
import pkg_resources
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
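# Illustrative behaviour (approximate values; note the branch above rescales by max(x) before
# exponentiating whenever max(x) > 1):
#   softmax(np.array([1.0, 2.0, 3.0])) -> array([~0.23, ~0.32, ~0.45]), which sums to 1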
## getProvinceBoundaryBox function is to get the cordinate details from Mapbox API for ITALY
## Parameter Needed - Province Name
def getProvinceBoundaryBox(provinceName):
Place_Details = requests.get(
'http://api.mapbox.com/geocoding/v5/mapbox.places/' + provinceName + '%20province%20Italy.json?access_token=<KEY>').json()[
'features']
for eachPlace in Place_Details:
try:
if eachPlace['context'][0]['text'] == 'Italy' or eachPlace['context'][1]['text'] == 'Italy':
getBbox = eachPlace['bbox']
except:
continue
return getBbox
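# Illustrative call (the province name is a hypothetical example):
#   bbox = getProvinceBoundaryBox('Bergamo')  # -> [min_lon, min_lat, max_lon, max_lat]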
## The function below automatically downloads USA COVID patient data from the Harvard Dataverse COVID database and creates a county-level time-series patient file (with county population) along with a USA county file
## Parameter Needed - Target Directory to save the File
def fetch_us_patientdata(tgtdir):
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/7NWUDK'
urllib.request.urlretrieve(url,tgtdir+'/us_county_confirmed_cases.tab')
latest_data = pd.read_csv(tgtdir+'/us_county_confirmed_cases.tab',sep='\t')
allcols = list(latest_data.columns)
datecols = allcols[allcols.index('HHD10')+1:]
latest_data = latest_data[['COUNTY', 'NAME']+datecols]
datecolsmod=[datetime.strptime(i,'%m/%d/%Y').strftime('%Y%m%d') for i in datecols]
latest_data.columns = ['cfips', 'county']+datecolsmod
latest_data = latest_data.melt(id_vars=['cfips', 'county'], var_name='data_date', value_name='no_pat')
latest_data['county']=latest_data['county'].apply(lambda x : x.split(' County')[0])
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/OFVFPY'
urllib.request.urlretrieve(url,tgtdir+'/COUNTY_MAP.zip')
zip = ZipFile(tgtdir+'/COUNTY_MAP.zip')
zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/CO_CARTO")
shape_df = | pd.DataFrame() | pandas.DataFrame |
import concurrent.futures
import pandas as pd
from equities import static as STATIC
from solaris.api import Client as SolarisClient
from pytrends.request import TrendReq as GoogleTrendClient
import yfinance as YahooFinanceClient
__version__ = STATIC.__version__
__author__ = STATIC.__author__
class Client(object):
""" equities composed clients """
def __init__(self,verbose=False):
try:
# sets verbosity level and prints begin init text
self.verbose = verbose; STATIC.initialize(self.verbose)
# connects clients
self._sol = SolarisClient(verbose=self.verbose)
self._pytrends = GoogleTrendClient(hl='en-US', tz=360)
# sets stock identification variables
self.tickers = list(set(self._sol.cik_to_ticker.values()))
self.names = list(set(self._sol.cik_to_name.values()))
self.ciks = list(set(self._sol.cik_to_name.keys()))
# prints end init text
messages = self._sol._fetch_equities_messages()
STATIC.initialized(self.verbose,messages,len(self.ciks))
except Exception as e:
STATIC.failed(e)
def __len__(self):
return len(self.ciks)
def __str__(self):
return str(self._sol.name_to_cik)
def _query_y_finance(self,cik_or_ticker):
""" returns a yfinance Ticker object by quering YahooFinance
for a given cik or ticker
"""
try:
cik = self._convert_to_cik(cik_or_ticker)
ticker = self._sol.cik_to_ticker[cik]
except:
ticker = cik_or_ticker
return YahooFinanceClient.Ticker(ticker)
def _convert_to_cik(self,cik_or_ticker):
""" returns a cik from a query/ticker into the corresponding
cik number with cleaning.
"""
symbol = cik_or_ticker.lower().replace(' ','')
try:
if symbol in self.tickers:
return self._sol.ticker_to_cik[symbol]
elif str(int(cik_or_ticker)) in self.ciks:
return str(int(cik_or_ticker))
else:
print('Error Could not Convert: %s'%cik_or_ticker)
return cik_or_ticker
except:
return cik_or_ticker
def _set_verbose(self,verbose):
"""sets universes' stdout level of verbosity"""
self.verbose = verbose
self._sol.verbose = verbose
def _invert_dict(self,to_invert):
"""inverts an arbitrary python dictionary"""
return {v:k for k,v in to_invert.items()}
def cik_to_name(self):
"""returns a dict mapping ciks to company names"""
return dict(zip(
self._sol.cik_to_name.keys(),self._sol.cik_to_name.values()
))
def cik_to_ticker(self):
"""returns a dict mapping ciks to tickers"""
return dict(zip(
self._sol.cik_to_ticker.keys(),self._sol.cik_to_ticker.values()
))
    def ticker_to_cik(self):
        """returns a dict mapping tickers to ciks"""
        return self._invert_dict(self.cik_to_ticker())
    def name_to_cik(self):
        """returns a dict mapping names to ciks"""
        return self._invert_dict(self.cik_to_name())
def prices(self,cik_or_ticker,period='max'):
"""returns a price dataframe for the given cik or tickers and period.
period must be a string contained in the following list:
['1d','5d','1mo','3mo','6mo','1y','2y','5y','10y','ytd','max'].
"""
try:
return self._query_y_finance(cik_or_ticker).history(period=period)
except Exception as e:
return pd.DataFrame()
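    # Illustrative usage (the ticker is a hypothetical example):
    #   Client().prices('aapl', period='1y') returns a DataFrame of daily price history,
    #   or an empty DataFrame if the lookup fails.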
def actions(self,cik_or_ticker):
"""returns a corporate actions dataframe for the given cik or tickers"""
try:
return self._query_y_finance(cik_or_ticker).actions
except Exception as e:
return pd.DataFrame()
def dividends(self,cik_or_ticker):
"""returns a dividends dataframe for the given cik or tickers"""
try:
return self._query_y_finance(cik_or_ticker).dividends
except Exception as e:
return pd.DataFrame()
def splits(self,cik_or_ticker):
"""returns a splits dataframe for the given cik or tickers"""
try:
return self._query_y_finance(cik_or_ticker).splits
except Exception as e:
return pd.DataFrame()
def major_holders(self,cik_or_ticker):
"""returns a dataframe of major holders for the given cik or ticker"""
try:
return self._query_y_finance(cik_or_ticker).major_holders
except Exception as e:
return pd.DataFrame()
def institutional_holders(self,cik_or_ticker):
"""returns a dataframe of instiutional holders for the given cik or ticker"""
try:
return self._query_y_finance(cik_or_ticker).institutional_holders
except Exception as e:
return pd.DataFrame()
def events(self,cik_or_ticker):
"""returns a dataframe of earnings events for the given cik or ticker"""
try:
return self._query_y_finance(cik_or_ticker).calendar
except Exception as e:
return pd.DataFrame()
def recommendations(self,cik_or_ticker):
"""returns a dataframe buy/sell side recommendations for the given cik or ticker"""
try:
return self._query_y_finance(cik_or_ticker).recommendations
except Exception as e:
return pd.DataFrame()
def esg(self,cik_or_ticker):
"""returns a dataframe esg metrics for the given cik or ticker"""
try:
return self._query_y_finance(cik_or_ticker).sustainability
except Exception as e:
return pd.DataFrame()
def financial_statement(self,cik_or_ticker,kind):
"""returns a financial statement dataframe for the given cik or ticker
and kind of statement. kind must be a string contained in the following
list: ['income','balance','cash','equity'].
"""
try:
cik = self._convert_to_cik(cik_or_ticker)
return self._sol.financial_statement(cik,kind,df=True)
except Exception as e:
#print(e)
return | pd.DataFrame() | pandas.DataFrame |
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC,LinearSVC
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.preprocessing import Imputer,Normalizer,scale
from sklearn.model_selection import train_test_split,StratifiedKFold
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
import numpy as np
import pandas as pd
train=pd.read_csv('train.csv');
test=pd.read_csv('test.csv')
full=train.append(test,ignore_index=True)
titanic=full[:891]
del train,test
print('Datasets:','full',full.shape,'titanic:',titanic.shape)
print(titanic.head())
print(titanic.describe())
sex=pd.Series(np.where(full['Sex']=='male',1,0),name='Sex')
embarked=pd.get_dummies(full.Embarked,prefix='Embarked')
print(embarked.head())
pclass=pd.get_dummies(full.Pclass,prefix='Pclass')
print(pclass.head())
#fill in the missing values
imputed=pd.DataFrame()
imputed['Age']=full.Age.fillna(full.Age.mean())
imputed['Fare']=full.Fare.fillna(full.Fare.mean())
print(imputed.head())
title=pd.DataFrame()
title['Title']=full['Name'].map(lambda name:name.split(',')[1].split('.')[0].strip())
title= | pd.get_dummies(title.Title) | pandas.get_dummies |
import pandas as pd
import numpy as np
import pytest
from .arcsine import main
def test_numeric():
assert main(data=0.0)["result"] == pytest.approx(0.0, rel=1e-5)
def test_series():
pd.testing.assert_series_equal(
main(
data=pd.Series(
{
"2019-08-01T15:20:12": 0.0,
"2019-08-01T15:44:12": 1.0,
"2019-08-03T16:20:15": 0.0,
"2019-08-05T12:00:34": -1.0,
"2019-08-05T12:00:55": 0.0,
}
)
)["result"],
pd.Series(
{
"2019-08-01T15:20:12": 0.0,
"2019-08-01T15:44:12": np.pi / 2,
"2019-08-03T16:20:15": 0.0,
"2019-08-05T12:00:34": -np.pi / 2,
"2019-08-05T12:00:55": 0.0,
}
),
)
def test_none():
pd.testing.assert_series_equal(
main(
data=pd.Series(
{
"2019-08-01T15:20:12": 0.0,
"2019-08-01T15:44:12": None,
"2019-08-03T16:20:15": np.nan,
}
)
)["result"],
pd.Series(
{
"2019-08-01T15:20:12": 0.0,
"2019-08-01T15:44:12": np.nan,
"2019-08-03T16:20:15": np.nan,
}
),
)
def test_df():
pd.testing.assert_frame_equal(
main(
data=pd.DataFrame(
{"a": [0.0, 1.0, 0.0, -1.0], "b": [0.0, 0.5, 0.0, -0.5]},
index=[
"2019-08-01T15:20:12",
"2019-08-01T15:44:12",
"2019-08-03T16:20:15",
"2019-08-05T12:00:34",
],
)
)["result"],
pd.DataFrame(
{
"a": [0.0, np.pi / 2, 0.0, -np.pi / 2],
"b": [0.0, np.pi / 6, 0.0, -np.pi / 6],
},
index=[
"2019-08-01T15:20:12",
"2019-08-01T15:44:12",
"2019-08-03T16:20:15",
"2019-08-05T12:00:34",
],
),
)
def test_empty_series():
assert main(data=pd.Series(dtype=float))["result"].empty
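# Additional sketch test (an assumption: scalar inputs map through numpy.arcsin exactly as in
# test_numeric and test_series above; not part of the original test suite).
def test_numeric_bounds():
    assert main(data=1.0)["result"] == pytest.approx(np.pi / 2, rel=1e-5)
    assert main(data=-1.0)["result"] == pytest.approx(-np.pi / 2, rel=1e-5)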
def test_empty_df():
assert main(data= | pd.DataFrame(dtype=float) | pandas.DataFrame |
import anndata as ad
import logging
import numpy as np
import os
import time
import pandas as pd
import yaml
from pathlib import Path
from collections import namedtuple
from const import PATH, OUT_PATH
#logging.basicConfig(level=logging.INFO)
try:
import git
except:
pass
def get_tasks(phase):
assert phase in ['phase1v2','phase2']
tasks = [
"GEX2ADT",
"ADT2GEX",
"GEX2ATAC",
"ATAC2GEX"
]
task2name = {
"ADT2GEX":f"openproblems_bmmc_cite_{phase}_mod2",
"GEX2ADT":f"openproblems_bmmc_cite_{phase}_rna",
"ATAC2GEX":f"openproblems_bmmc_multiome_{phase}_mod2",
"GEX2ATAC":f"openproblems_bmmc_multiome_{phase}_rna"
}
return tasks, task2name
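# Illustrative call:
#   tasks, task2name = get_tasks('phase2')
#   task2name['GEX2ADT'] -> 'openproblems_bmmc_cite_phase2_rna'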
def get_y_dim(data_path):
if '_cite_' in data_path:
if 'mod2' in data_path:
return 13953,"ADT2GEX"
elif 'rna' in data_path:
return 134,"GEX2ADT"
else:
assert 0
elif '_multiome_' in data_path:
if 'mod2' in data_path:
return 13431,"ATAC2GEX"
elif 'rna' in data_path:
return 10000,"GEX2ATAC"
else:
assert 0
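# Illustrative lookups (paths are hypothetical fragments):
#   get_y_dim('.../openproblems_bmmc_cite_phase2_rna/...')      -> (134, "GEX2ADT")
#   get_y_dim('.../openproblems_bmmc_multiome_phase2_mod2/...') -> (13431, "ATAC2GEX")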
def get_par(path,phase):
par = {
"input_solution" : f"{path}/datasets_{phase}/predict_modality",
"input_prediction" : f"{path}/predictions/predict_modality",
}
return par
def get_train_test_paths(name,phase,path = "./output"):
par = get_par(path,phase)
train_mod1 = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_train_mod1.h5ad"
train_mod2 = train_mod1.replace('mod1','mod2')
test_mod1 = train_mod1.replace('train','test')
test_mod2 = test_mod1.replace('mod1','mod2')
assert os.path.exists(train_mod1) and os.path.exists(train_mod2)
if phase == 'phase1v2':
assert os.path.exists(test_mod1) and os.path.exists(test_mod2)
return train_mod1,train_mod2,test_mod1,test_mod2
def get_data_paths(task,phase,data_type='train_test',path='./output'):
assert data_type in ['train_test','gt_pred']
tasks, task2name = get_tasks(phase)
name = task2name[task]
if data_type == 'train_test':
return get_train_test_paths(name,phase,path)
else:
return get_gt_pred_paths(name,path)
def get_gt_pred_paths(name,path = "./output"):
par = get_par(path,'phase1v2')
gt = f"{par['input_solution']}/{name}/{name}.censor_dataset.output_test_mod2.h5ad"
pred = f"{par['input_prediction']}/{name}/{name}.method.output.h5ad"
print(gt)
print(pred)
assert os.path.exists(gt) and os.path.exists(pred)
return gt, pred
def eval_one_file(name):
gt, pred = get_gt_pred_paths(name)
logging.info("Reading solution file")
ad_sol = ad.read_h5ad(gt)
logging.info("Reading prediction file")
ad_pred = ad.read_h5ad(pred)
logging.info("Check prediction format")
if ad_sol.uns["dataset_id"] != ad_pred.uns["dataset_id"]:
raise ValueError("Prediction and solution have differing dataset_ids")
if ad_sol.shape != ad_pred.shape:
raise ValueError("Dataset and prediction anndata objects should have the same shape / dimensions.")
logging.info("Computing MSE metrics")
tmp = ad_sol.X - ad_pred.X
rmse = np.sqrt(tmp.power(2).mean())
    mae = np.abs(tmp).mean()  # computed for reference only; the score returned below is rmse
return rmse
def eval_all():
start = time.time()
tasks, task2name = get_tasks(phase='phase1v2')
s = 0
res = {}
for task in tasks:
name = task2name[task]
score = eval_one_file(name)
s += score
res[task] = score
res['overall'] = s/len(tasks)
print_res(res)
duration = time.time() - start
logging.critical(f" Total time: {duration:.1f} seconds")
def print_res(res):
for i,j in res.items():
logging.critical(f" {i} {j:.4f}")
def check_column_mean_var_all(path='./output',phase='phase2'):
tasks, task2name = get_tasks(phase=phase)
if phase == 'phase2':
names = ['train_mod1', 'train_mod2']
else:
names = ['train_mod1', 'train_mod2', 'test_mod1', 'test_mod2']
logging.info("[min, max, mean]")
res = []
ms = []
ns = []
for task in tasks:
data_names = get_data_paths(task,phase=phase,path=path)
logging.info(f"task:{task}")
for d,n in zip(data_names, names):
logging.info(n)
data = ad.read_h5ad(d)
msg,dd = check_column_mean_var(data)
logging.info('\n'+msg)
res.append(dd)
ms.append(task)
ns.append(n)
dg = pd.DataFrame({'task':ms,'type':ns})
res = np.array(res)
c1 = ['mu','var']
c2 = ['min','max','mean']
df = pd.DataFrame(res,columns = [f'{i}_{j}' for i in c1 for j in c2]+['rows','cols'])
df = | pd.concat([dg,df],axis=1) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import duckdb
import pandas as pd
import numpy
# Join from pandas not matching identical strings #1767
class TestIssue1767(object):
def test_unicode_join_pandas(self, duckdb_cursor):
A = pd.DataFrame({"key": ["a", "п"]})
B = pd.DataFrame({"key": ["a", "п"]})
con = duckdb.connect(":memory:")
arrow = con.register("A", A).register("B", B)
q = arrow.query("""SELECT key FROM "A" FULL JOIN "B" USING ("key") ORDER BY key""")
result = q.df()
d = {'key': ["a", "п"]}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 4 16:15:31 2017
This creates the College Programs dashboard. It calls the CareerBridge scraper class and matches each program to a SOC code based on the listed occupation, industry, keywords and lookup tables.
This requires Selenium.
@author: carrie
"""
from selenium import webdriver
import pandas as pd
import requests, sqlalchemy, datetime
import numpy as np
from pyvirtualdisplay import Display
import os
key = 'fill in'
from web_scrapers.CareerBridgeClass import CareerBridge
from DatabaseConnection import DBConnection
from Email import SendEmail
class MatchConstructionSOCtoCollegePrograms:
'''This will call the Career Bridge web scraper and match the items to SOC Codes'''
#Todays Date
now = datetime.datetime.now()
formatTime = now.strftime("%Y-%m-%d %H:%M")
formatDate = now.strftime("%Y-%m-%d")
formatHourMin = now.strftime("%H:%M")
def callBrowser(self):
ubuntu = False
browser = ""
if ubuntu:
display = Display(visible=0, size=(800, 800))
display.start()
path_to_chromedriver = "/usr/bin/chromedriver"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
browser = webdriver.Chrome(path_to_chromedriver, chrome_options=chrome_options)
urlTechCollege = 'http://www.careerbridge.wa.gov/Search_Program.aspx?cmd=txt&adv=true&txt='
browser.get(urlTechCollege)
browser.implicitly_wait(10)
else:
#If windows use the following
path_to_chromedriver = r"\chromedriver.exe"
browser = webdriver.Chrome(executable_path = path_to_chromedriver )
urlTechCollege = 'http://www.careerbridge.wa.gov/Search_Program.aspx?cmd=txt&adv=true&txt='
browser.get(urlTechCollege)
return browser
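    # --- Hedged sketch (not from the original; assumes Selenium 4.x) ---
    # Newer Selenium releases drop the positional executable_path/chrome_options
    # arguments used above and can run Chrome headless without pyvirtualdisplay.
    # An equivalent setup might look like this static helper; the method name and
    # URL parameter are ours, not the author's.
    @staticmethod
    def call_browser_headless_sketch(url):
        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options

        options = Options()
        options.add_argument('--headless=new')  # headless mode on recent Chrome
        options.add_argument('--no-sandbox')
        driver = webdriver.Chrome(options=options)  # Selenium Manager resolves the driver
        driver.get(url)
        driver.implicitly_wait(10)
        return driver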
def callCareerBridge(self):
#Open the initial website with Selenium and Chrome; you will need to update this and get the appropriate Linux versions for EC2
browserCB = self.callBrowser()
#Call the HTML Scraper for College Programs
technicalCollegeResult = CareerBridge()
techCollegePrograms = pd.DataFrame()
occupationsPerProgam = pd.DataFrame()
#Try to call the Career Bridge scraper
try:
technicalCollegeResult.type = "technicalCollege"
techResults = technicalCollegeResult.href_per_school(browserCB)
cleaned_pages, occupation_pages = technicalCollegeResult.download_tech_college_programs(techResults, browserCB)
#Concatenate dataframes into final exports for csv
dir_path = os.path.dirname(os.path.realpath(__file__))
careerBridge_CollegePrograms = os.path.join(os.path.sep, dir_path, 'backups_hardcoded','CareerBridge_CollegePrograms.csv')
careerBridge_CollegePrograms_Occupations = os.path.join(os.path.sep, dir_path, 'backups_hardcoded','CareerBridge_CollegePrograms_Occupations.csv')
technicalCollegeResult.merge_export_to_csv(techResults, cleaned_pages, careerBridge_CollegePrograms)
technicalCollegeResult.export_career_data_to_csv(occupation_pages, careerBridge_CollegePrograms_Occupations)
#Hardcoded export
techCollegePrograms = pd.read_csv(careerBridge_CollegePrograms, encoding = "ISO-8859-1")
occupationsPerProgam = pd.read_csv(careerBridge_CollegePrograms_Occupations, encoding = "ISO-8859-1")
#Log success
df = pd.DataFrame([['CareerBridge', 'College Programs Dashboard', '1 of 1', 'Career Bridge Web Scraper College data', self.formatDate, self.formatHourMin, 'Successful Download']])
df.to_csv('Succesful_Download_Log.csv', mode='a', header=False, index=False)
#If the scraper cannot be called, log the issue
except:
log = open("Error_Data.txt","a")
log.write("Error calling CareerBridge webscrapper. Location:CollegePrograms.py Date: " + self.formatTime + "\n")
df = pd.DataFrame([['CareerBridge', 'College Programs Dashboard', '1 of 1', 'Career Bridge Web Scraper College data', 'Will use last download', 0, 'FAILED Download']])
df.to_csv('Succesful_Download_Log.csv', mode='a', header=False, index=False)
browserCB.close()
final = self.identify_construction_programs(techCollegePrograms, occupationsPerProgam)
return final
#Check to see if any of the addresses are already geocoded
def checkAddressLookup(self,results):
#Remove anything in parentheses at the end of the address
results['address'] = results['address'].str.replace(r"\(.*\)", "", regex=True).str.strip()
#Pull in previously geocoded addresses
dir_path = os.path.dirname(os.path.realpath(__file__))
geoCodedAddressesLookup = os.path.join(os.path.sep, dir_path, 'LookupTables','GeoCodedAddressesLookup.csv')
previousGeocoded = pd.read_csv(geoCodedAddressesLookup, encoding = "ISO-8859-1").dropna(subset=['address'])
prevAddressesLat = previousGeocoded.set_index('address')['latitude'].to_dict()
prevAddressesLong = previousGeocoded.set_index('address')['longitude'].to_dict()
prevAddressesCity = previousGeocoded.set_index('address')['city'].to_dict()
results['latitude'] = results['address'].map(prevAddressesLat)
results['longitude'] = results['address'].map(prevAddressesLong)
results['city'] = results['address'].map(prevAddressesCity)
#Find those We Still need to Geocode
notGeocoded = results.loc[ (results['longitude'].isnull()) & ( results['address'].notnull() ) , ]
allreadyGeocoded = results.loc[ (results['longitude'].notnull()) | ( results['address'].isnull() ) , ]
#Geocode those that have an address but are not yet in the lookup table
newGeocoded = self.geocodeDataFrame(notGeocoded)
#Now update the results with all the addresses you found
newGeocodedLat = newGeocoded.set_index('address')['latitude'].to_dict()
newGeocodedLong = newGeocoded.set_index('address')['longitude'].to_dict()
newGeocodedCity = newGeocoded.set_index('address')['city'].to_dict()
notGeocoded['latitude'] = notGeocoded['address'].map(newGeocodedLat)
notGeocoded['longitude'] = notGeocoded['address'].map(newGeocodedLong)
notGeocoded['city'] = notGeocoded['address'].map(newGeocodedCity)
allResultsGeocoded = pd.concat([notGeocoded, allreadyGeocoded])
#Update the Geocode LookupTable, Make sure to only pass Unique Addresses
allGeocoded = pd.concat([newGeocoded, previousGeocoded])
saveNewGeocodeLookup = allGeocoded.loc[ :, ['address', 'latitude', 'longitude', 'city']]
saveNewGeocodeLookup.to_csv(geoCodedAddressesLookup)
#If the geocoded point is far outside Washington, zero out the coordinates
allResultsGeocoded.loc[allResultsGeocoded['latitude'] <= 45.0, 'latitude'] = 0
allResultsGeocoded.loc[allResultsGeocoded['longitude'] >= -116.0, 'longitude'] = 0
allResultsGeocoded.loc[(allResultsGeocoded['longitude'] >= -116.0) & (allResultsGeocoded['address'] == 'S. 16th Ave. & Nob Hill Blvd, Yakima, WA 98902'), 'city'] = 'Yakima'
return allResultsGeocoded
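    # --- Hedged sketch (illustration only; the helper below is ours, not the author's) ---
    # checkAddressLookup() above is a cache-aside pattern: map addresses through a
    # saved lookup, geocode only the misses, then write the union back to the cache.
    # The same idea reduced to plain dictionaries:
    @staticmethod
    def cache_aside_sketch(addresses, cache, geocode_fn):
        """addresses: iterable of str; cache: dict address -> (lat, lng);
        geocode_fn: callable address -> (lat, lng). Returns coords for all addresses."""
        for addr in addresses:
            if addr not in cache:
                cache[addr] = geocode_fn(addr)  # fill the cache for next time
        return {addr: cache[addr] for addr in addresses}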
#Find those that are not geocoded, drop duplicates, call the address geocoder, then update the csv geocode lookup for the next time
def geocodeDataFrame(self, notGeocoded):
#Drop Dupe Addresses
notGeocoded = notGeocoded.drop_duplicates(['address'], keep='first')
if notGeocoded.empty:
print("Nothing new to geocode")
else:
#Geocode and put results in a list
notGeocoded['place'] = notGeocoded['address'].apply(self.geocodeAddress)
newAddresses = pd.DataFrame(notGeocoded.place.values.tolist(), index=notGeocoded.index)
#Parse the returned place info list into the appropriate columns
notGeocoded[['latitude','longitude', 'city', 'place_id', 'addressfound']] = newAddresses
return notGeocoded
def geocodeAddress(self, address):
google_url = "https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}".format(address, key)
response_geocode = requests.get(google_url).json()
lat,lng,place_id, city = 0,0,"None", ""
#if results were returned take the first one
if len(response_geocode['results']) > 0:
r = response_geocode['results'][0]
findcity = r[u'address_components']
for f in findcity:
if 'locality' in f[u'types']:
city = f[u'long_name']
lat = r[u'geometry'][u'location'][u'lat']
lng = r[u'geometry'][u'location'][u'lng']
place_id = r[u'place_id']
#print([lat, lng, city, place_id, address])
return [lat, lng, city, place_id, address]
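    # --- Hedged sketch (offline; the payload below is fabricated for illustration
    # and only mimics the Google Geocoding API shape parsed by geocodeAddress) ---
    @staticmethod
    def parse_geocode_response_sketch():
        response_geocode = {
            'results': [{
                'address_components': [
                    {'long_name': 'Seattle', 'types': ['locality', 'political']}
                ],
                'geometry': {'location': {'lat': 47.6, 'lng': -122.3}},
                'place_id': 'PLACEHOLDER_ID',
            }]
        }
        r = response_geocode['results'][0]
        city = ''
        for component in r['address_components']:
            if 'locality' in component['types']:
                city = component['long_name']
        # Same fields geocodeAddress() extracts: lat, lng, city, place_id
        return r['geometry']['location']['lat'], r['geometry']['location']['lng'], city, r['place_id']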
#Identify Construction in College Programs
def identify_construction_programs(self, df_cleaned, occ_df):
df = df_cleaned.dropna(how='all')
df['match_construction_description'] = 'No'
df['match_construction_name'] = 'No'
#print(df.head(5))
#First Tag
#If construction in description or name add field identifying a match to construction occupations
#df['description'] = df['description'].fillna("No")
df.loc[df['program'].str.contains('Construction', na=False), 'match_construction_name'] = "Yes"
df.loc[df['description'].str.contains('construction', na=False), 'match_construction_description'] = "Yes"
#Second Tag
#Pull in SOC Lookup Table to Match the SOC to Occupation List per Program
dir_path = os.path.dirname(os.path.realpath(__file__))
sOCCodeLookup = os.path.join(os.path.sep, dir_path, 'LookupTables','SOCCodeLookup.csv')
socLookUp = pd.read_csv(sOCCodeLookup, encoding="utf-8", index_col=0)
#soc = socLookUp.set_index('SOCName')['SOCID'].to_dict()
soc = socLookUp.iloc[:, 0].to_dict()  # .ix was removed from pandas; use positional .iloc instead
#construction = socLookUp.set_index('SOCName')['Construction'].to_dict()
construction = socLookUp.iloc[:, 1].to_dict()
occ_df['soc_codes_by_occupation'] = occ_df['occupation'].map(soc)
occ_df['match_construction_occ'] = occ_df['occupation'].map(construction)
occ_df['match_construction_occ'] = occ_df['match_construction_occ'].fillna("No")
#Remove programs matched through the description when the program name contains keywords we do not want
dir_path = os.path.dirname(os.path.realpath(__file__))
remove_NonConstruction_KeyWords = os.path.join(os.path.sep, dir_path, 'LookupTables','Remove_NonConstruction_KeyWords.csv')
removeLookup = pd.read_csv(remove_NonConstruction_KeyWords)
df['remove_keyword'] = df['program_name'].apply(lambda x: [key for key in removeLookup['remove'] if key in str(x) ])
df['remove_keyword'] = df['remove_keyword'].str[0]
df['remove_keyword'] = df['remove_keyword'].fillna("None")
df['match_construction_final'] = "None"
df.loc[ df['remove_keyword'] != "None" , 'match_construction_final'] = "No"
#Pull in the keyword lookup to associate programs with SOCs, keeping the first best match to the program name
cAIConstructionKeywordLookup = os.path.join(os.path.sep, dir_path, 'LookupTables','CAIConstructionKeywordLookup.csv')
keywordsDetailed = pd.read_csv(cAIConstructionKeywordLookup)
df['program_keyword_join'] = df['program'].str.lower()
#df['match_keyword'] = df['program_keyword_join'].apply(lambda x: difflib.get_close_matches(x, keywordsDetailed['keyword1'], 1))
df['match_keyword'] = df['program_keyword_join'].apply(lambda x: [key for key in keywordsDetailed['keyword_if_matched'] if key in str(x) ])
df['match_keyword'] = df['match_keyword'].str[0]
#If there is a keyword match but no mention of construction in the name or description remove keyword
df['match_keyword'] = df['match_keyword'].fillna("None")
df.loc[ (df['match_keyword'] != "None") & ((df['match_construction_description'] == "No") & (df['match_construction_name'] == "No")), 'match_keyword'] = "None"
#Add Soc Codes based on Key Words
soc_keyword = keywordsDetailed.set_index('keyword_if_matched')['soc_codes'].to_dict()
df['soc_codes_by_keyword'] = df['match_keyword'].map(soc_keyword)
df['soc_codes_by_keyword'] = df['soc_codes_by_keyword'].fillna("None")
#occ_df.to_csv("FINAL_CollegePrograms_Occupations.csv")
#Filter one: keep only the occupations flagged as construction
occ_construction = occ_df.loc[(occ_df.match_construction_occ == "Yes")]
occ_construction = occ_construction[['program_id', 'soc_codes_by_occupation','match_construction_occ']]
#Set the index to the program id and drop duplicate occupations, keeping the first match to the SOCs we want
occ_construction_i = occ_construction.set_index('program_id')
df = df.set_index('program_id')
occupation_con = occ_construction_i[~occ_construction_i.index.duplicated(keep='first')]
#print(occupation_con)
#Pull in the occupation SOCs that do not match construction, to use as a filter later
occ_df = occ_df.loc[(occ_df.match_construction_occ == "No")]
occ_df['soc_codes_by_occupation_not_construction'] = occ_df['soc_codes_by_occupation']
occ_df = occ_df[['program_id', 'soc_codes_by_occupation_not_construction']]
occ_df = occ_df.set_index('program_id')
occupation_not_matched = occ_df[~occ_df.index.duplicated(keep='first')]
occupation_full = | pd.concat([ occupation_con,occupation_not_matched], axis=1) | pandas.concat |
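# --- Hedged sketch (standalone; mirrors the keyword-tagging pattern used in
# identify_construction_programs above, with made-up program names and
# illustrative SOC codes) ---
import pandas as pd

def tag_programs_sketch():
    df = pd.DataFrame({'program': ['Construction Management', 'Carpentry', 'Nursing']})
    keyword_to_soc = {'construction': '11-9021', 'carpentry': '47-2031'}  # illustrative mapping
    # Flag programs whose name mentions construction, as the original does with .str.contains
    df['match_construction_name'] = df['program'].str.contains('Construction', na=False).map({True: 'Yes', False: 'No'})
    # Map lower-cased names to SOC codes through the keyword lookup, 'None' if no keyword hits
    lowered = df['program'].str.lower()
    df['soc_codes_by_keyword'] = lowered.map(
        lambda name: next((soc for kw, soc in keyword_to_soc.items() if kw in name), 'None')
    )
    return df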
from backlight.trades import trades as module
import pytest
import pandas as pd
@pytest.fixture
def symbol():
return "usdjpy"
@pytest.fixture
def trades(symbol):
data = [1.0, -2.0, 1.0, 2.0, -4.0, 2.0, 1.0, 0.0, 1.0, 0.0]
index = pd.date_range(start="2018-06-06", freq="1min", periods=len(data))
trades = []
for i in range(0, len(data), 2):
trade = pd.Series(index=index[i : i + 2], data=data[i : i + 2], name="amount")
trades.append(trade)
trades = module.make_trades(symbol, trades)
return trades
def test_trades_ids(trades):
expected = [0, 1, 2, 3, 4]
assert trades.ids == expected
def test_trades_amount(trades):
data = [1.0, -2.0, 1.0, 2.0, -4.0, 2.0, 1.0, 0.0, 1.0, 0.0]
index = pd.date_range(start="2018-06-06", freq="1min", periods=len(data))
expected = pd.Series(data=data, index=index, name="amount")
pd.testing.assert_series_equal(trades.amount, expected)
def test_trades_get_any(trades):
data = [1.0, -2.0, -4.0, 2.0]
index = [
pd.Timestamp("2018-06-06 00:00:00"),
pd.Timestamp("2018-06-06 00:01:00"),
pd.Timestamp("2018-06-06 00:04:00"),
pd.Timestamp("2018-06-06 00:05:00"),
]
expected = pd.Series(data=data, index=index, name="amount")
result = trades.get_any(trades.index.minute.isin([0, 4, 5]))
pd.testing.assert_series_equal(result.amount, expected)
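# --- Hedged sketch (standalone; not part of the backlight test suite) ---
# get_any()/get_all() above are driven by boolean masks built from the
# DatetimeIndex, e.g. trades.index.minute.isin([...]). The masking step in isolation:
def _minute_mask_example():
    import pandas as pd
    s = pd.Series(
        range(6),
        index=pd.date_range("2018-06-06", freq="1min", periods=6),
        name="amount",
    )
    return s[s.index.minute.isin([0, 4, 5])]  # keeps 00:00, 00:04 and 00:05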
def test_trades_get_all(trades):
data = [-4.0, 2.0]
index = [pd.Timestamp("2018-06-06 00:04:00"), pd.Timestamp("2018-06-06 00:05:00")]
expected = pd.Series(data=data, index=index, name="amount")
result = trades.get_all(trades.index.minute.isin([0, 4, 5]))
pd.testing.assert_series_equal(result.amount, expected)
def test_trades_get_trade(trades):
data = [1.0, -2.0]
index = pd.date_range(start="2018-06-06", freq="1min", periods=len(data))
expected = pd.Series(data=data, index=index, name="amount")
pd.testing.assert_series_equal(trades.get_trade(0), expected)
def test_make_trade():
periods = 2
dates = pd.date_range(start="2018-12-01", periods=periods)
amounts = range(periods)
t00 = module.Transaction(timestamp=dates[0], amount=amounts[0])
t11 = module.Transaction(timestamp=dates[1], amount=amounts[1])
t01 = module.Transaction(timestamp=dates[0], amount=amounts[1])
trade = module.make_trade([t00, t11])
expected = pd.Series(index=dates, data=amounts[:2], name="amount")
pd.testing.assert_series_equal(trade, expected)
trade = module.make_trade([t00, t01])
expected = pd.Series(
index=[dates[0]], data=[amounts[0] + amounts[1]], name="amount"
)
pd.testing.assert_series_equal(trade, expected)
trade = module.make_trade([t11, t01, t00])
expected = pd.Series(
index=dates, data=[amounts[0] + amounts[1], amounts[1]], name="amount"
)
| pd.testing.assert_series_equal(trade, expected) | pandas.testing.assert_series_equal |
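# --- Hedged sketch (standalone; illustrates the behaviour test_make_trade checks,
# using plain pandas rather than backlight's make_trade) ---
# Transactions that share a timestamp are summed into a single amount.
import pandas as pd

def sum_transactions_example():
    dates = pd.to_datetime(["2018-12-01", "2018-12-01", "2018-12-02"])
    amounts = pd.Series([1.0, 2.0, 5.0], index=dates, name="amount")
    return amounts.groupby(level=0).sum()  # 2018-12-01 -> 3.0, 2018-12-02 -> 5.0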
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_selection.correlation_filter import CorrelationFilter
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
max_corr = 0.8
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = X[["B", "D", "F"]].copy()
obj = CorrelationFilter(max_corr=max_corr).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_ks():
max_corr = 0.8
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = X[["B", "D", "F"]].to_pandas().copy()
obj = CorrelationFilter(max_corr=max_corr).fit(X)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
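# --- Hedged sketch (standalone; shows the idea behind a correlation filter with
# plain pandas, not gators' actual implementation) ---
# Columns whose absolute pairwise correlation exceeds max_corr are dropped,
# keeping the first column of each correlated pair.
import numpy as np
import pandas as pd

def drop_correlated_columns(X, max_corr=0.8):
    corr = X.corr().abs()
    # Keep only the upper triangle so each pair is inspected once
    upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
    to_drop = [col for col in upper.columns if (upper[col] > max_corr).any()]
    return X.drop(columns=to_drop)

# Example: with two nearly identical columns 'a' and 'b', 'b' is dropped.
# drop_correlated_columns(pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.1, 2.0, 3.2, 3.9], "c": [4, 1, 3, 2]}))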