prompt | completion | api
---|---|---|
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
import os
import requests
from tqdm import tqdm
import pandas as pd
from ts_datasets.base import BaseDataset
logger = logging.getLogger(__name__)
class M4(BaseDataset):
"""
The M4 Competition data is an extended and diverse set of time series used to
identify the most accurate forecasting method(s) across different domains,
including business, financial and economic forecasting, and different
granularities: Yearly (23,000 sequences), Quarterly (24,000 sequences),
Monthly (48,000 sequences), Weekly (359 sequences), Daily (4,227 sequences)
and Hourly (414 sequences) data.
- source: https://github.com/Mcompetitions/M4-methods/tree/master/Dataset
- timeseries sequences: 100,000
"""
valid_subsets = ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"]
url = "https://github.com/Mcompetitions/M4-methods/raw/master/Dataset/{}.csv"
def __init__(self, subset="Hourly", rootdir=None):
super().__init__()
self.subset = subset
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "M4")
# download dataset if it is not found in root dir
if not os.path.isdir(rootdir):
logger.info(
f"M4 {subset} dataset cannot be found from {rootdir}.\n"
f"M4 {subset} dataset will be downloaded from {self.url}.\n"
)
download(rootdir, self.url, "M4-info")
# extract starting date from meta-information of dataset
info_dataset = pd.read_csv(os.path.join(rootdir, "M4-info.csv"), delimiter=",").set_index("M4id")
if subset == "Yearly":
logger.warning(
"the max length of yearly data is 841 which is too big to convert to "
"timestamps, we fallback to quarterly frequency"
)
freq = "13W"
elif subset == "Quarterly":
freq = "13W"
elif subset == "Monthly":
freq = "30D"
else:
freq = subset[0]
train_csv = os.path.join(rootdir, f"train/{subset}-train.csv")
if not os.path.isfile(train_csv):
download(os.path.join(rootdir, "train"), self.url, f"{subset}-train", "Train")
test_csv = os.path.join(rootdir, f"test/{subset}-test.csv")
if not os.path.isfile(test_csv):
download(os.path.join(rootdir, "test"), self.url, f"{subset}-test", "Test")
train_set = pd.read_csv(train_csv).set_index("V1")
test_set = pd.read_csv(test_csv).set_index("V1")
for i in tqdm(range(train_set.shape[0])):
ntrain = train_set.iloc[i, :].dropna().shape[0]
sequence = pd.concat((train_set.iloc[i, :].dropna(), test_set.iloc[i, :].dropna()))
# the raw data does not follow a consistent timestamp format
sequence.index = pd.date_range(start=0, periods=sequence.shape[0], freq=freq)
sequence = sequence.to_frame()
metadata = | pd.DataFrame({"trainval": sequence.index < sequence.index[ntrain]}, index=sequence.index) | pandas.DataFrame |
"""
Fractional differentiation is a technique to make a time series stationary while
retaining as much memory as possible. This is done by differencing by a positive real
number. Fractionally differenced series can be used as features in a machine learning
process.
"""
import numpy as np
import pandas as pd
class FractionalDifferentiation:
""" FractionalDifferentiation class encapsulates the functions that can
be used to compute fractionally differentiated series.
"""
@staticmethod
def get_weights(diff_amt, size):
"""
Source: Chapter 5, AFML (section 5.4.2, page 79)
This helper function generates the weights used to compute fractionally
differentiated series. The weights form a non-terminating series that
approaches zero asymptotically. The side effect of this function is that
it leads to negative drift "caused by an expanding window's added weights"
(see page 83 AFML).
When diff_amt is a real (non-integer) positive number, memory is preserved.
The book does not discuss what should be expected if d is a negative real
number. Conceptually (from set theory) a negative d leads to a set with a
negative number of elements, which translates into a set whose elements can be
selected more than once or as many times as one chooses (multisets with
unbounded multiplicity) - see http://faculty.uml.edu/jpropp/msri-up12.pdf.
:param diff_amt: (float) differencing amount
:param size: (int) length of the series
:return: (ndarray) weight vector
"""
# The algorithm below executes the iterative estimation (section 5.4.2, page 78)
weights = [1.] # create an empty list and initialize the first element with 1.
for k in range(1, size):
weights_ = -weights[-1] * (diff_amt - k + 1) / k # compute the next weight
weights.append(weights_)
# Now, reverse the list, convert into a numpy column vector
weights = np.array(weights[::-1]).reshape(-1, 1)
return weights
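# Worked example (illustrative, not part of the original source): for
# diff_amt=0.5 and size=4 the recursion gives w_1=-0.5, w_2=-0.125, w_3=-0.0625,
# so FractionalDifferentiation.get_weights(0.5, 4) returns the column vector
# [[-0.0625], [-0.125], [-0.5], [1.0]] (largest lag first, most recent sample last).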
@staticmethod
def frac_diff(series, diff_amt, thresh=0.01):
"""
Source: Chapter 5, AFML (section 5.5, page 82);
References:
https://www.wiley.com/en-us/Advances+in+Financial+Machine+Learning-p-9781119482086
https://wwwf.imperial.ac.uk/~ejm/M3S8/Problems/hosking81.pdf
https://en.wikipedia.org/wiki/Fractional_calculus
The steps are as follows:
- Compute weights (this is a one-time exercise)
- Iteratively apply the weights to the price series and generate output points
This is the expanding window variant of the fracDiff algorithm
Note 1: For thresh=1, nothing is skipped
Note 2: diff_amt can be any positive fractional, not necessarily bounded to [0, 1]
:param series: (pd.Series) a time series that needs to be differenced
:param diff_amt: (float) Differencing amount
:param thresh: (float) threshold or epsilon
:return: (pd.DataFrame) data frame of differenced series
"""
# 1. Compute weights for the longest series
weights = get_weights(diff_amt, series.shape[0])
# 2. Determine initial calculations to be skipped based on weight-loss threshold
weights_ = np.cumsum(abs(weights))
weights_ /= weights_[-1]
skip = weights_[weights_ > thresh].shape[0]
# 3. Apply weights to values
output_df = {}
for name in series.columns:
series_f = series[[name]].fillna(method='ffill').dropna()
output_df_ = pd.Series(index=series.index)
for iloc in range(skip, series_f.shape[0]):
loc = series_f.index[iloc]
# At this point all entries are non-NAs so no need for the following check
# if np.isfinite(series.loc[loc, name]):
output_df_[loc] = np.dot(weights[-(iloc + 1):, :].T, series_f.loc[:loc])[0, 0]
output_df[name] = output_df_.copy(deep=True)
output_df = pd.concat(output_df, axis=1)
return output_df
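# Example call (illustrative; the column name is hypothetical). Note that despite
# the ``pd.Series`` type hint, frac_diff iterates over ``series.columns`` and so
# expects a DataFrame:
#   fd = FractionalDifferentiation.frac_diff(pd.DataFrame({"close": prices}), diff_amt=0.5, thresh=0.01)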
@staticmethod
def get_weights_ffd(diff_amt, thresh, lim):
"""
Source: Chapter 5, AFML (section 5.4.2, page 83)
This helper function generates the weights used to compute fractionally
differentiated series. The window is of fixed width, so the same
weights (generated by this function) can be used when creating each
fractionally differentiated series.
This makes the process more efficient. But the side-effect is that the
fractionally differentiated series is skewed and has excess kurtosis. In
other words, it is not Gaussian any more.
The discussion of positive and negative d is similar to that in get_weights
(see the function get_weights)
:param diff_amt: (float) differencing amount
:param thresh: (float) threshold for minimum weight
:param lim: (int) maximum length of the weight vector
:return: (ndarray) weight vector
"""
weights = [1.]
k = 1
# The algorithm below executes the iterative estimation (section 5.4.2, page 78)
# The output weights array is of the indicated length (specified by lim)
ctr = 0
while True:
# compute the next weight
weights_ = -weights[-1] * (diff_amt - k + 1) / k
if abs(weights_) < thresh:
break
weights.append(weights_)
k += 1
ctr += 1
if ctr == lim - 1: # if we have reached the size limit, exit the loop
break
# Now, reverse the list, convert into a numpy column vector
weights = np.array(weights[::-1]).reshape(-1, 1)
return weights
@staticmethod
def frac_diff_ffd(series, diff_amt, thresh=1e-5):
"""
Source: Chapter 5, AFML (section 5.5, page 83);
References:
https://www.wiley.com/en-us/Advances+in+Financial+Machine+Learning-p-9781119482086
https://wwwf.imperial.ac.uk/~ejm/M3S8/Problems/hosking81.pdf
https://en.wikipedia.org/wiki/Fractional_calculus
The steps are as follows:
- Compute weights (this is a one-time exercise)
- Iteratively apply the weights to the price series and generate output points
Constant width window (new solution)
Note 1: thresh determines the cut-off weight for the window
Note 2: diff_amt can be any positive fractional, not necessarily bounded to [0, 1].
:param series: (pd.Series) a time series that needs to be differenced
:param diff_amt: (float) differencing amount
:param thresh: (float) threshold for minimum weight
:return: (pd.DataFrame) a data frame of differenced series
"""
# 1) Compute weights for the longest series
weights = get_weights_ffd(diff_amt, thresh, series.shape[0])
width = len(weights) - 1
# 2) Apply weights to values
# 2.1) Start by creating a dictionary to hold all the fractionally differenced series
output_df = {}
# 2.2) compute fractionally differenced series for each stock
for name in series.columns:
series_f = series[[name]].fillna(method='ffill').dropna()
temp_df_ = | pd.Series(index=series.index) | pandas.Series |
import numpy
import matplotlib.pyplot as plt
import tellurium as te
from rrplugins import Plugin
auto = Plugin("tel_auto2000")
from te_bifurcation import model2te, run_bf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
sf = ScalarFormatter()
sf.set_scientific(False)
import re
import seaborn as sns
import os
from pickle import dump, load
from sympy import *
import lhsmdu
import sobol_seq
import pickle
# Define symbolic variables for symbolic Jacobian
R, r, C1, C2, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, sR, a1, a2, b1, b2, A = symbols('R r C1 C2 mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k s_R a1 a2 b1 b2 A', positive=True, real=True)
c1A, c1B, c2, rev, koff, kR, sR0, sR, g, s, C = symbols('c1A c1B c2 rev koff kR sR0 sR g s C', positive=True, real=True)
R, r, C, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, kR, A = \
symbols('R r C mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k k_R A', positive=True, real=True)
# Samples of parameter values
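# (Illustrative note: the Sobol draws below are mapped onto log-uniform ranges:
# a1 and b1 span 2**-3 to 2**4, K spans 7 to 7e4, and g spans 0.02 to 2.)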
n = int(1E2) # Production run 1E5
ss = sobol_seq.i4_sobol_generate(4, n)
l = np.power(2, -3 + (4+3)*ss[:,:2])
a1sp, b1sp = l[:,0], l[:,1]
Ksp = 10**(ss[:,-2]*(np.log10(70000)-np.log10(7)) + np.log10(7))
gsp = 10**(ss[:,-1]*(np.log10(2)-np.log10(0.02)) + np.log10(0.02))
# Model definition
model_mmi1_full = {
'pars': {
'sR': 0.0,
'a1' : 1,
'b1' : 1,
'sR0': 0.0,
'g': 1.0,
'K' : 10000,
'koff': 100,
},
'vars': {
'r': '1 - koff*K*R*r + koff*C - g*r + a1*C',
'R': 'sR0 + sR - koff*K*R*r + koff*C - R + b1*g*C',
'C': 'koff*K*R*r - koff*C - a1*C - b1*g*C',
},
'fns': {}, 'aux': [], 'name': 'mmi1_full'}
ics_1_mmi1_full = {'r': 0.9, 'R': 0.0, 'C': 0.0}
# Symbolic Jacobian
eqnD = {}
for k, v in model_mmi1_full['vars'].items():
eqnD[k] = parsing.sympy_parser.parse_expr(v, locals())
JnD = Matrix([eqnD['R'], eqnD['r'], eqnD['C']]).jacobian(Matrix([R, r, C]))
fJnD = lambdify((K, R, r, C, a1, b1, g, koff), JnD, 'numpy')
# Tellurium object
r = model2te(model_mmi1_full, ics=ics_1_mmi1_full)
uplim = 120
if 1:
# A new run
hb_cts, hbi, hbnds = 0, [], []
data_all = []
inuerr = []
for i in range(n):
print(i)
for j, p in enumerate(['a1', 'b1']):
r[p] = l[i,j]
r['g'], r['K'] = gsp[i], Ksp[i]
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.1)
if data.r.iloc[-1] < -1:
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.01)
data_all.append(data)
if len(boundsh) > 0:
print('HB point found')
hb_cts += 1
hbi.append(i)
hbnds.append(boundsh)
if 1: # Save the output
fn = './te_data/bf_data_MMI1.tebf'
specs = {'model':model_mmi1_full, 'n':n, 'uplim':uplim, 'Ksp':Ksp,
'gsp':gsp,
'a1sp':a1sp, 'b1sp':b1sp }
with open(fn, 'wb') as f:
pickle.dump({'data_all': data_all, 'specs': specs}, f)
print('Sets with HB: ', hb_cts)
print('Numerical errors', len(inuerr))
else:
# Reading a single file
fn = './te_data/bf_data_MMI1.tebf'
print('Reading', fn)
with open(fn, 'rb') as f:
f_cont = pickle.load(f)
data_all, specs = f_cont['data_all'], f_cont['specs']
n, uplim, Ksp, gsp = specs['n'], specs['uplim'], specs['Ksp'], specs['gsp']
a1sp, b1sp = specs['a1sp'], specs['b1sp']
print('Curves: '+str(n)+'\t','uplim: '+str(uplim))
for sp in ['Ksp', 'gsp', 'a1sp', 'b1sp']:
print(sp + ' is between %.4f and %.4f'%(specs[sp].min(), specs[sp].max()))
print('\n')
# More detailed analysis of the continuation output
oui = [] # Spiral sinks
hbi = [] # Hopf
mxi = [] # Hopf and SN
inuerr = []
binned_Rs = []
binned_Rts = []
binned_cons = []
hist_imag = np.zeros(60)
nR = 62
do_pars = []
for i, data in enumerate(data_all[:]):
if ((i+1) % 10000) == 0:
print(i+1)
if len(data) == 0:
inuerr.append(i)
continue
if data.PAR.iloc[-1] < (uplim-1) or data.PAR.iloc[-1] > (uplim+1):
mxi.append(i)
if (data.TY == 3).sum()>0:
hbi.append(i)
Rsp, rsp, Csp = data.R.values, data.r.values, data.C.values
JnDsp = fJnD(Ksp[i], Rsp, rsp, Csp, a1sp[i], b1sp[i], gsp[i], 100.0)
Jsp = np.zeros((JnDsp.shape[0], JnDsp.shape[0], Rsp.shape[0]))
for p in range(JnDsp.shape[0]):
for q in range(JnDsp.shape[1]):
Jsp[p,q,:] = JnDsp[p,q]
Jsp = np.swapaxes(np.swapaxes(Jsp, 0, 2), 1,2)
w, v = np.linalg.eig(Jsp)
#print(w)
if_imag = np.imag(w) != 0
imags = ((if_imag).sum(axis=1)>0) & (Rsp>-10) & (rsp>-10)
igt = np.where(Rsp>0.01)[0]
if (len(igt) > 0):
sRthr = data.PAR[igt[0]]
std_sigs = np.linspace(sRthr*0.0, sRthr*3.1, nR)
ids = np.searchsorted(data.PAR, std_sigs)
binned_R, binned_Rt = np.empty(nR), np.empty(nR)
binned_R[:], binned_Rt[:] = np.NaN, np.NaN
R_data = Rsp[[x for x in ids if x < Rsp.size]]
Rt_data = R_data + Csp[[x for x in ids if x < Rsp.size]]
binned_R[:R_data.size] = R_data
binned_Rt[:R_data.size] = Rt_data
binned_Rs.append(binned_R)
binned_Rts.append(binned_Rt)
binned_cons.append(std_sigs)
if imags.sum() > 0:
if (a1sp[i]>1 and b1sp[i]>1) or (a1sp[i]<1 and b1sp[i]<1):
continue
rmax, imax = np.real(w).min(axis=1), np.imag(w).max(axis=1)
oui.append(i)
imagi = np.where(imags>0)[0]
if len(igt) > 0:
hs, bins = np.histogram(data.PAR[imagi], bins=np.linspace(sRthr*0.0, sRthr*3.0, hist_imag.size+1))
hist_imag = hist_imag + ((hs>0)+0)
fig, ax = plt.subplots(figsize=(3,3))
fig.subplots_adjust(bottom=0.2, right=0.78, left=0.15)
ax2 = ax.twinx()
ax2.bar(range(hist_imag.size), hist_imag/n, color='y', zorder=-10, width=1.0, alpha=0.5)
dfl = pd.DataFrame(binned_Rs).melt()
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=ax, ci=99.9, palette="flare")
ax.set_ylabel(r'Steady state $\it{R}$ (A.U.)')
ax.set_xlabel(r'$\sigma_R$')
ax.set_xticks([0, 20, 40, 60])
ltr = r'$\hat{\it{\sigma_R}}$'
ax.set_xticklabels([0, ltr, r'2$\times$'+ltr, r'3$\times$'+ltr])
ax.set_xlim(0, 40)
ax.spines['right'].set_color('y')
ax2.spines['right'].set_color('y')
ax2.yaxis.label.set_color('y')
ax2.tick_params(axis='y', colors='y')
ax2.set_ylabel(r'Frequency (spiral sink)')
plt.show()
figc, axc = plt.subplots(figsize=(4,3))
figc.subplots_adjust(bottom=0.2, right=0.90, left=0.25)
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}$')
dft = | pd.DataFrame(binned_Rts) | pandas.DataFrame |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._
#
# ---
# # Assignment 4
# In[3]:
import networkx as nx
import pandas as pd
import numpy as np
import pickle
# ---
#
# ## Part 1 - Random Graph Identification
#
# For the first part of this assignment you will analyze randomly generated graphs and determine which algorithm created them.
# In[4]:
P1_Graphs = pickle.load(open('A4_graphs','rb'))
P1_Graphs
# <br>
# `P1_Graphs` is a list containing 5 networkx graphs. Each of these graphs were generated by one of three possible algorithms:
# * Preferential Attachment (`'PA'`)
# * Small World with low probability of rewiring (`'SW_L'`)
# * Small World with high probability of rewiring (`'SW_H'`)
#
# Analyze each of the 5 graphs and determine which of the three algorithms generated the graph.
#
# *The `graph_identification` function should return a list of length 5 where each element in the list is either `'PA'`, `'SW_L'`, or `'SW_H'`.*
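# (Heuristic used below, stated for clarity: preferential-attachment graphs have a
# heavy-tailed, power-law-like degree distribution with many distinct degree values,
# while small-world graphs have a narrow degree distribution, with high average
# clustering when the rewiring probability is low and low clustering when it is high.)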
# In[5]:
def degree_distribution(G):
degrees = G.degree()
degree_values = sorted(set(degrees.values()))
histogram = [list(degrees.values()).count(i) / float(nx.number_of_nodes(G))
for i in degree_values]
return histogram
# In[6]:
degree_distribution(P1_Graphs[2])
# In[7]:
nx.average_clustering(P1_Graphs[1])
# In[9]:
nx.average_shortest_path_length(P1_Graphs[1])
# In[10]:
for G in P1_Graphs:
print(nx.average_clustering(G), nx.average_shortest_path_length(G),
len(degree_distribution(G)))
# In[11]:
def graph_identification():
methods = []
for G in P1_Graphs:
clustering = nx.average_clustering(G)
shortest_path = nx.average_shortest_path_length(G)
degree_hist = degree_distribution(G)
if len(degree_hist) > 10:
methods.append('PA')
elif clustering < 0.1:
methods.append('SW_H')
else:
methods.append('SW_L')
return methods
# In[12]:
graph_identification()
# ---
#
# ## Part 2 - Company Emails
#
# For the second part of this assignment you will be working with a company's email network where each node corresponds to a person at the company, and each edge indicates that at least one email has been sent between two people.
#
# The network also contains the node attributes `Department` and `ManagementSalary`.
#
# `Department` indicates the department in the company which the person belongs to, and `ManagementSalary` indicates whether that person is receiving a management position salary.
# In[13]:
G = nx.read_gpickle('email_prediction.txt')
print(nx.info(G))
# In[14]:
G.nodes(data = True)[:10]
# ### Part 2A - Salary Prediction
#
# Using network `G`, identify the people in the network with missing values for the node attribute `ManagementSalary` and predict whether or not these individuals are receiving a management position salary.
#
# To accomplish this, you will need to create a matrix of node features using networkx, train a sklearn classifier on nodes that have `ManagementSalary` data, and predict a probability of the node receiving a management salary for nodes where `ManagementSalary` is missing.
#
#
#
# Your predictions will need to be given as the probability that the corresponding employee is receiving a management position salary.
#
# The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
#
# Your grade will be based on the AUC score computed for your classifier. A model which with an AUC of 0.88 or higher will receive full points, and with an AUC of 0.82 or higher will pass (get 80% of the full points).
#
# Using your trained classifier, return a series of length 252 with the data being the probability of receiving management salary, and the index being the node id.
#
# Example:
#
# 1 1.0
# 2 0.0
# 5 0.8
# 8 1.0
# ...
# 996 0.7
# 1000 0.5
# 1001 0.0
# Length: 252, dtype: float64
# In[15]:
G.nodes(data = True)[0][1]
# In[16]:
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
def salary_predictions():
def is_management(node):
managementSalary = node[1]['ManagementSalary']
if managementSalary == 0:
return 0
elif managementSalary == 1:
return 1
else:
return None
df = pd.DataFrame(index=G.nodes())
df['clustering'] = pd.Series(nx.clustering(G))
df['degree'] = pd.Series(G.degree())
df['degree_centrality'] = pd.Series(nx.degree_centrality(G))
df['closeness'] = pd.Series(nx.closeness_centrality(G, normalized=True))
df['betweeness'] = pd.Series(nx.betweenness_centrality(G, normalized=True))
df['pr'] = pd.Series(nx.pagerank(G))
df['is_management'] = pd.Series([is_management(node) for node in G.nodes(data=True)])
df_train = df[~pd.isnull(df['is_management'])]
df_test = df[pd.isnull(df['is_management'])]
features = ['clustering', 'degree', 'degree_centrality', 'closeness', 'betweeness', 'pr']
X_train = df_train[features]
Y_train = df_train['is_management']
X_test = df_test[features]
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = MLPClassifier(hidden_layer_sizes = [10, 5], alpha = 5,
random_state = 0, solver='lbfgs', verbose=0)
clf.fit(X_train_scaled, Y_train)
test_proba = clf.predict_proba(X_test_scaled)[:, 1]
return pd.Series(test_proba,X_test.index)
# prediction = salary_predictions()
# In[17]:
salary_predictions()
# ### Part 2B - New Connections Prediction
#
# For the last part of this assignment, you will predict future connections between employees of the network. The future connections information has been loaded into the variable `future_connections`. The index is a tuple indicating a pair of nodes that currently do not have a connection, and the `Future Connection` column indicates if an edge between those two nodes will exist in the future, where a value of 1.0 indicates a future connection.
# In[18]:
future_connections = pd.read_csv('Future_Connections.csv', index_col=0, converters={0: eval})
future_connections.head(10)
# Using network `G` and `future_connections`, identify the edges in `future_connections` with missing values and predict whether or not these edges will have a future connection.
#
# To accomplish this, you will need to create a matrix of features for the edges found in `future_connections` using networkx, train a sklearn classifier on those edges in `future_connections` that have `Future Connection` data, and predict a probability of the edge being a future connection for those edges in `future_connections` where `Future Connection` is missing.
#
#
#
# Your predictions will need to be given as the probability of the corresponding edge being a future connection.
#
# The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
#
# Your grade will be based on the AUC score computed for your classifier. A model which with an AUC of 0.88 or higher will receive full points, and with an AUC of 0.82 or higher will pass (get 80% of the full points).
#
# Using your trained classifier, return a series of length 122112 with the data being the probability of the edge being a future connection, and the index being the edge as represented by a tuple of nodes.
#
# Example:
#
# (107, 348) 0.35
# (542, 751) 0.40
# (20, 426) 0.55
# (50, 989) 0.35
# ...
# (939, 940) 0.15
# (555, 905) 0.35
# (75, 101) 0.65
# Length: 122112, dtype: float64
# In[19]:
future_connections.head()
# In[20]:
G.node[1]
# In[22]:
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
def new_connections_predictions():
for node in G.nodes():
G.node[node]['community'] = G.node[node]['Department']
preferential_attachment = list(nx.preferential_attachment(G))
df = pd.DataFrame(index=[(x[0], x[1]) for x in preferential_attachment])
df['preferential_attachment'] = [x[2] for x in preferential_attachment]
cn_soundarajan_hopcroft = list(nx.cn_soundarajan_hopcroft(G))
df_cn_soundarajan_hopcroft = pd.DataFrame(index=[(x[0], x[1]) for x in cn_soundarajan_hopcroft])
df_cn_soundarajan_hopcroft['cn_soundarajan_hopcroft'] = [x[2] for x in cn_soundarajan_hopcroft]
df = df.join(df_cn_soundarajan_hopcroft,how='outer')
df['cn_soundarajan_hopcroft'] = df['cn_soundarajan_hopcroft'].fillna(value=0)
df['resource_allocation_index'] = [x[2] for x in list(nx.resource_allocation_index(G))]
df['jaccard_coefficient'] = [x[2] for x in list(nx.jaccard_coefficient(G))]
df = future_connections.join(df,how='outer')
df_train = df[~ | pd.isnull(df['Future Connection']) | pandas.isnull |
"""
XGBoost regressor for construction machine price prediction
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.model_selection import train_test_split
from pandas_profiling import ProfileReport
pd.set_option('display.max_rows', 500)
plt.style.use('ggplot')
def load_data(path: str) -> pd.DataFrame:
"""loads data
Args:
path (str): path to data
Returns:
pd.DataFrame: DataFrame of data
"""
data_df = pd.read_csv(
path,
parse_dates=["saledate"],
low_memory=False
)
return data_df
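# Example call (illustrative; the CSV path is hypothetical):
#   df = load_data("data/TrainAndValid.csv")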
def analyze_data(data_df: pd.DataFrame, profile_title='') -> None:
"""analyze data and make Pandas Profile
Args:
data_df (pd.DataFrame): Data
profile_title (str, optional): Profile title. Defaults to ''.
"""
print(data_df.describe())
data_df.dtypes.value_counts()
if profile_title != '':
profile = ProfileReport(
data_df, title=profile_title, explorative=True)
profile.to_file(f"figures/{profile_title.split(' ', 1)[0]}_report.html")
def transform_data(raw_df: pd.DataFrame) -> pd.DataFrame:
"""Transform data to ML-ready format
Args:
raw_df (pd.DataFrame): Raw data
Returns:
pd.DataFrame: Transformed data
"""
raw_df["saleYear"] = raw_df.saledate.dt.year
raw_df["saleMonth"] = raw_df.saledate.dt.month
raw_df["saleDay"] = raw_df.saledate.dt.day
raw_df["saleDayOfWeek"] = raw_df.saledate.dt.dayofweek
raw_df["saleDayOfYear"] = raw_df.saledate.dt.dayofyear
raw_df.drop("saledate", axis=1, inplace=True)
for label, content in raw_df.items():
if | pd.api.types.is_string_dtype(content) | pandas.api.types.is_string_dtype |
import requests
import pandas as pd
import numpy as np
import pathlib
import zipline as zl
import logbook
import datetime
import os
from azul import price_manager_registry, BasePriceManager
log = logbook.Logger('PolygonPriceManager')
@price_manager_registry.register('polygon')
class PolygonPriceManager(BasePriceManager):
def __init__(self):
super().__init__()
api_key = os.getenv('AZUL_POLYGON_API_KEY')
if not api_key or not isinstance(api_key, str):
raise ValueError('The Polygon API key must be provided '
'through the environment variable '
'AZUL_POLYGON_API_KEY')
self._api_key = api_key
def _url(self, path):
return 'https://api.polygon.io' + path
# def get_stock_symbols(self, start_page=1, otc=False):
#
# symbols = []
# still_getting_pages = True
# page = start_page
# isOTC = 'true' if otc else 'false'
#
# while still_getting_pages:
#
# params = {
# 'apikey': self._api_key,
# 'type': 'cs',
# 'perpage': 50,
# 'page': page,
# 'isOTC': isOTC
# }
# url = self._url('/v1/meta/symbols')
# response = requests.get(url, params=params)
#
# if response.status_code in[401, 404, 409]:
# log.error('Error getting symbols. Response code: {}'.format(response.status_code))
# still_getting_pages = False
# continue
# elif response.status_code != 200:
# still_getting_pages = False
# continue
#
# try:
# json_dict = response.json()
# symbol_object_list = json_dict['symbols']
# except Exception as e:
# log.info('Could not read symbols.')
# log.info('Exception: {}', e)
# log.info('Status Code: {}'.format(response.status_code))
#
# if not symbol_object_list:
# still_getting_pages = False
# continue
#
# for symbol_object in symbol_object_list:
# symbols.append(symbol_object['symbol'])
#
# log.info('Getting symbols page: {}'.format(page))
# page += 1
#
# return symbols
# def _list_date(self, ticker):
#
# list_date = None
#
# params = {
# 'apikey': self._api_key,
# }
#
# url = self._url('/v1/meta/symbols/{}/company'.format(ticker))
# response = requests.get(url, params=params)
#
# if response.status_code != 200:
# log.info('Error getting company information for {}.'.format(ticker))
# log.info('Response status code: {}'.format(response.status_code))
# return list_date
#
# try:
# json_dict = response.json()
# list_date = json_dict['listdate']
# except Exception as e:
# log.info('Did not get company list date information for: {}'.format(ticker))
#
# if list_date is None:
# log.info('No list date provided for {}'.format(ticker))
# else:
# log.info('The list date for {} was {}'.format(ticker, list_date))
#
# return list_date
def _minute_dataframe_for_date(self, ticker: str, start_timestamp: pd.Timestamp) -> pd.DataFrame:
# https://api.polygon.io/v1/historic/agg/minute/{symbol}?from=2000-01-03&to=2000-01-04&apikey=xxx
end_timestamp = start_timestamp.replace(hour=23, minute=59)
df = | pd.DataFrame() | pandas.DataFrame |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
dataset = | pd.read_csv(path) | pandas.read_csv |
#======================================================
# Model Utility Functions
#======================================================
'''
Info: Utility functions for model building.
Version: 2.0
Author: <NAME>
Created: Saturday, 13 April 2019
'''
# Import modules
import os
import uuid
import copy
import time
import random
import numpy as np
import pandas as pd
from subprocess import call
import itertools
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from sklearn.utils import resample
from sklearn.tree import export_graphviz
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score as roc
from sklearn.metrics import f1_score
#------------------------------
# Utility Functions
#------------------------------
# Set section
def set_section(string):
# Check if string is too long
string_size = len(string)
max_length = 100
if string_size > max_length:
print('TITLE TOO LONG')
else:
full_buffer_len = string_size
print('\n')
print(full_buffer_len * '-')
print(string)
print(full_buffer_len * '-'+'\n')
def downsample_df(df, labels_df, random_seed):
num_of_yt = sum(labels_df)
random.seed(random_seed+1)
downsample_bad_ix = random.sample(np.where(labels_df == 0)[0], num_of_yt)
good_ix = np.where(labels_df == 1)[0]
downsampled_full_ix = np.append(downsample_bad_ix, good_ix)
df_ds = pd.concat([df.iloc[[index]] for index in downsampled_full_ix])
return df_ds
def upsample(df, groupby_cols, random_seed, max_sample_ratio=1.5):
max_sample_size = df.groupby(groupby_cols).agg('count').max().max()
dfs = []
for i, df_ in df.groupby(groupby_cols):
dfs.append(resample(df_, replace=True, n_samples=int(max_sample_size * max_sample_ratio), random_state=random_seed))
upsampled_df = | pd.concat(dfs, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 14:47:20 2018
@author: Greydon
"""
import os
import re
import numpy as np
import pandas as pd
from scipy.signal import welch, hanning, butter, lfilter, resample
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as mticker
import pywt
import tables
import subprocess
import scipy.io as spio
import h5py
import json
##############################################################################
# HELPER FUNCTIONS #
##############################################################################
def sorted_nicely(data, reverse = False):
"""
Sorts the given iterable in natural (human) order.
Parameters
----------
data: array-like
The iterable to be sorted.
Returns
-------
The sorted list.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(data, key = alphanum_key, reverse=reverse)
def downsample(data, oldFS, newFS):
"""
Resample data from oldFS to newFS using the scipy 'resample' function.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
oldFS: int
The sampling frequency of the data.
newFS: int
The new sampling frequency.
Returns
-------
newData: array-like
The downsampled dataset.
"""
newNumSamples = int((len(data) / oldFS) * newFS)
newData = np.array(resample(data, newNumSamples))
return newData
##############################################################################
# FILTERS #
##############################################################################
def butter_bandpass(lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butterBandpass(d, lowcut, highcut, fs, order):
b, a = butter_bandpass(lowcut, highcut, fs, order)
y = lfilter(b, a, d)
return y
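# Example (illustrative): keep the 500-3000 Hz band of a 24 kHz recording:
#   filtered = butterBandpass(raw_signal, lowcut=500, highcut=3000, fs=24000, order=4)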
##############################################################################
# TIME DOMAIN FEATURES #
##############################################################################
def MAV(data):
"""
Mean absolute value: the average of the absolute value of the signal.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MAVData: 1D numpy array containing average absolute value
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
MAVData = sum(abs(data))/len(data)
return MAVData
def MAVS(data1, data2):
"""
Mean Absolute Value Slope: the difference between MAVs in adjacent
segments.
Parameters
----------
data1: array-like
2D matrix of shape (time, data)
data2: array-like
2D matrix of shape (time, data) of subsequent segment to x1
Returns
-------
MAVSlope: 1D numpy array containing MAV for adjacent signals
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
MAV1Data = sum(abs(data1))/len(data1)
MAV2Data = sum(abs(data2))/len(data2)
MAVSlope = MAV2Data - MAV1Data
return MAVSlope
def MMAV1(data):
"""
Modified Mean Absolute Value 1: an extension of MAV using a weighting
window function on data below 25% and above 75%.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MMAV1Data: 1D numpy array containing modified MAV for given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
w1 = 0.5
segment = int(len(data)*0.25)
start = abs(data[0:segment,])*w1
middle = abs(data[segment:(len(data)-segment),])
end = abs(data[(len(data)-segment):,])*w1
combined = np.concatenate((start, middle, end))
MMAV1Data = sum(abs(combined))/len(combined)
return MMAV1Data
def MMAV2(data):
"""
Modified Mean Absolute Value 2: the smooth window is improved by using
a continuous weighting window function on data below 25% and above 75%.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MMAV2Data: 1D numpy array containg modified MAV for signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
segment = int(len(data)*0.25)
a = []
b = []
for i in range(segment):
endIdx = (len(data)-segment)+i
a.append((4*i)/len(data))
b.append((4*(len(data)-endIdx))/len(data))
start = abs(data[0:segment,])*a
middle = abs(data[segment:(len(data)-segment),])
end = abs(data[(len(data)-segment):,])*b
combined = np.concatenate((start,middle,end))
MMAV2Data = sum(abs(combined))/len(combined)
return MMAV2Data
def RMS(data):
"""
Root mean square: the root mean square of a given recording.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
RMSData: 1D numpy array containing root mean square of the signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
RMSData = (sum(data*data)/len(data))**0.5
return RMSData
def VAR(data):
"""
Variance: the mean squared deviation of the signal from its mean.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
varianceData: 1D numpy array containg the signal variance
Reference
---------
<NAME>., & <NAME>. (2000). DSP-based controller for a
multi-degree prosthetic hand. Robotics and Automation, 2000. …,
2(April), 1378–1383.
"""
meanData = sum(data)/len(data)
varianceData = sum((data-meanData)*(data-meanData))/len(data)
return varianceData
def curveLen(data):
"""
Curve length: the cumulative length of the waveform over the time segment.
This feature is related to the waveform amplitude, frequency and time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
curveLenData: 1D numpy array containing the average curve length for
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
data1 = data[1:]
data2 = data[:-1]
curveLenData = sum(abs(data2-data1))/(len(data)-1)
return curveLenData
def zeroCross(data, threshold):
"""
Zero crossings: Calculates the number of times the signal amplitude
crosses the zero y-axis.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
zeroCrossData: zero-crossing measure for the given signal, normalized by
its length (each crossing contributes 2/len(data))
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
sign = lambda z: (1, -1)[z < 0]
i = abs(np.array([sign(x) for x in data[1:]]) - np.array([sign(x) for x in data[:-1]]))
zeroCrossData = sum(i)/(len(data))
return zeroCrossData
def slopeSign(data):
"""
Slope Sign Change: the number of changes between positive and negative
slope among three consecutive samples, computed
with a threshold function.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
slopeSignData: 1D numpy array containing the total slope sign changes
for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
i = (data[1:-1]-data[:-2])
j = (data[1:-1]-data[2:])
slopeSignData = len(np.where((i*j) > 10)[0])
return slopeSignData
def threshold(data):
"""
Threshold: a measure of how scattered the signal is (deviation).
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
thresholdData: 1D numpy array containing the total threshold value for a
given signal
Reference
---------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011).
Characterization of subcortical structures during deep brain stimulation utilizing
support vector machines. Conference Proceedings: Annual International Conference of
the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine
and Biology Society. Annual Conference, 2011, 7949–7952.
"""
i = data-(sum(data)/len(data))
j = sum(i*i)
thresholdData = (3*(j**(1/2)))/(len(data)-1)
return thresholdData
def WAMP(data, threshold):
"""
Willison Amplitude: the number of times that the difference in signal
amplitude between two adjacent samples exceeds a predefined
threshold (used to reduce noise effects).
Parameters
----------
data: array-like
2D matrix of shape (time, data)
threshold: int
threshold level in uV (generally use 10 microvolts)
Returns
-------
WAMPData: 1D numpy array containing total number of times derivative
was above threshold in a given signal
Reference
---------
<NAME>., & <NAME>. (2000). DSP-based controller for a
multi-degree prosthetic hand. Robotics and Automation, 2000. …,
2(April), 1378–1383.
"""
i = abs(data[:-1]-data[1:])
j = i[i > threshold]
WAMPData = len(j)
return WAMPData
def SSI(data):
"""
Simple Square Integral: uses the energy of the signal as a feature.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
SSIData: 1D numpy array containing the summed absolute square of the
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
SSIData = sum(abs(data*data))
return SSIData
def powerAVG(data):
"""
Average power: the amount of work done, or energy transferred, per
unit time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
powerAvgData: 1D numpy array containing average power in a given signal
"""
powerAvgData = sum(data*data)/len(data)
return powerAvgData
def peaksNegPos(data):
"""
Peaks: the number of positive peaks in the data window per unit time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
peaksNegPosData: 1D numpy array containing total number of peaks in given
signal
"""
sign = lambda z: (1, -1)[z < 0]
i = [sign(z) for z in (data[2:]-data[1:-1])]
j = [sign(z) for z in (data[1:-1]-data[:-2])]
k = [a_i - b_i for a_i, b_i in zip(j, i)]
peaksNegPosData = [max([0,z]) for z in k]
peaksNegPosData = sum(peaksNegPosData)/(len(data)-2)
return peaksNegPosData
def peaksPos(data):
"""
Peak Density: calculates the density of peaks within the current locality.
A peak is defined as a point higher in amplitude than the two points
to its left and right side.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
peaksPosData: 1D numpy array containing the average number of peaks
in a given signal
"""
data1 = data[1:-1]
data2 = data[0:-2]
data3 = data[2:]
data4 = data1 - data2
data5 = data1 - data3
peakcount = 0
for i in range(len(data)-2):
if data4[i] > 0 and data5[i]>0:
peakcount += 1
peaksPosData = peakcount/(len(data)-2)
return peaksPosData
def tkeoTwo(data):
"""
Teager-Kaiser Energy Operator: is analogous to the total
(kinetic and potential) energy of a signal. This variation uses
the second derivative.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
tkeoTwoData: 1D numpy array containing total teager energy of a given
signal using two samples
Reference
---------
1. <NAME>. (1990). On a simple algorithm to calculate the
“energy” of a signal. In International Conference on Acoustics,
Speech, and Signal Processing (Vol. 2, pp. 381–384). IEEE.
2. <NAME>., <NAME>., & <NAME>. (2007). Teager-Kaiser energy
operation of surface EMG improves muscle activity onset detection.
Annals of Biomedical Engineering, 35(9), 1532–8.
"""
i = data[1:-1]*data[1:-1]
j = data[2:]*data[:-2]
tkeoTwoData = sum(i-j)/(len(data)-2)
return tkeoTwoData
def tkeoFour(data):
"""
Teager-Kaiser Energy Operator: is analogous to the total
(kinetic and potential) energy of a signal. This variation uses
the 4th order derivative.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
tkeoFourData: 1D numpy array containing total teager energy of a given
signal using 4 samples
Reference
---------
1. <NAME>. (1990). On a simple algorithm to calculate the
“energy” of a signal. In International Conference on Acoustics,
Speech, and Signal Processing (Vol. 2, pp. 381–384). IEEE.
2. <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., … <NAME>. (2008). Automated
neonatal seizure detection mimicking a human observer reading EEG.
Clinical Neurophysiology : Official Journal of the International
Federation of Clinical Neurophysiology, 119(11), 2447–54.
"""
l = 1
p = 2
q = 0
s = 3
tkeoFourData = sum(data[l:-p]*data[p:-l]-data[q:-s]*data[s:])/(len(data)-3)
return tkeoFourData
def KUR(data):
"""
Kurtosis: calculates the degree to which the signal has 'tails'. Heavy-tail
would mean many outliers. A normal distribution kurtosis value is 3.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
kurtosisData: 1D numpy array containing the total kurtosis for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2000). Flexible
Independent Component Analysis. Journal of VLSI Signal Processing
Systems for Signal, Image and Video Technology, 26(1), 25–38.
"""
meanX = sum(data)/len(data)
diff = [z - meanX for z in data]
sq_differences = [d**2 for d in diff]
var = sum(sq_differences)/len(data)
stdData = var**0.5
i = sum((data-meanX)**4)
j = (len(data)-1)*(stdData)**4
kurtosisData = i/j
return kurtosisData
def SKW(data):
"""
Skewness: measures the symmetry of the signal; the data is symmetric if it
looks the same to the left and right of the center point. A skewness
of 0 would indicate absolutely no skew.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
skewnessData: 1D numpy array containing the total skewness for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
meanX = sum(data)/len(data)
diff = [z - meanX for z in data]
sq_differences = [d**2 for d in diff]
var = sum(sq_differences)/len(data)
stdX = var**0.5
i = sum((data-meanX)**3)
j = (len(data)-1)*(stdX)**3
skewnessData = i/j
return skewnessData
def crestF(data):
"""
Crest factor: the relation between the peak amplitude and the RMS of the
signal.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
crestFactorData: 1D numpy array containing the total crest factor for a given
signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
DC_remove = data - (sum(data)/len(data))
peakAmp = max(abs(DC_remove))
RMS = (sum(DC_remove*DC_remove)/len(DC_remove))**0.5
crestFactorData = peakAmp/RMS
return crestFactorData
def entropy(data):
"""
Entropy: an indicator of disorder or unpredictability. The entropy is
smaller inside the STN region because of its more rhythmic firing compared
to the mostly noisy background activity in adjacent regions.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
entropyData: 1D numpy array containing the total entropy for a given
signal
Reference
---------
<NAME>., & <NAME>. (2004). Entropy And Entropy-based
Features In Signal Processing. Laboratory of Intelligent Communication
Systems, Dept. of Computer Science and Engineering, University of West
Bohemia, Plzen, Czech Republic, 1–2.
"""
ent = 0
m = np.mean(data)
for i in range(len(data)):
quo = abs(data[i] - m)
ent = ent + (quo* np.log10(quo))
entropyData = -ent
return entropyData
def shapeFactor(data):
"""
Shape Factor: a value affected by the signal's shape but independent of its
dimensions.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
shapeFactorData: 1D numpy array containing shape factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
RMS = (sum(data*data)/len(data))**0.5
shapeFactorData = RMS/(sum(abs(data))/len(data))
return shapeFactorData
def impulseFactor(data):
"""
Impulse Factor:
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
impulseFactorData: 1D numpy array containing impulse factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
impulseFactorData = max(abs(data))/(sum(abs(data))/len(data))
return impulseFactorData
def clearanceFactor(data):
"""
Clearance Factor:
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
clearanceFactorData: 1D numpy array containing impulse factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
clearanceFactorData = max(abs(data))/((sum(abs(data)**0.5)/len(data))**2)
return clearanceFactorData
##############################################################################
# FREQUENCY DOMAIN #
##############################################################################
def computeFFT(data, Fs, normalize=False):
"""
Compute the FFT of `data` and return it, along with the frequency axis in Hz
for plotting.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling frequency in Hz.
Returns
-------
fAx: array-like
Axis in Hz to plot the FFT.
fftData: array-like
Value of the fft.
"""
N = data.shape[0]
fAx = np.arange(N/2) * Fs/N
if normalize:
Y = np.fft.fft(data)/int(len(data))
fftData = abs(Y[range(int(len(data)/2))])
else:
Y = np.abs(np.fft.fft(data))
fftData = 2.0/N * np.abs(Y[0:N//2])
return fAx, fftData
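# Example (illustrative): fAx, fftData = computeFFT(segment, Fs=24000) returns a
# one-sided amplitude spectrum; fAx[k] is the frequency in Hz of bin fftData[k].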
def wrcoef(data, coef_type, coeffs, wavename, level):
N = np.array(data).size
a, ds = coeffs[0], list(reversed(coeffs[1:]))
if coef_type =='a':
return pywt.upcoef('a', a, wavename, level=level)[:N]
elif coef_type == 'd':
return pywt.upcoef('d', ds[level-1], wavename, level=level)[:N]
else:
raise ValueError("Invalid coefficient type: {}".format(coef_type))
def wavlet(data, nLevels, waveletName, timewindow, windowSize, Fs):
"""
Wavelet Transform: captures both frequency and time information.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
nLevels: int
Number of levels for the wavlet convolution
waveletName: str
Name of the wavelet to be used
timewindow: boolean
Option to split the given signal into discrete time bins
windowSize: int
If timewindow is TRUE then provide the size of the time
window
Fs: int
If timewindow is TRUE then provide the sampling rate of the given
signal
Returns
-------
waveletData: 1D numpy array containing the standard deviation of the
wavelet convolution for a given signal
"""
if timewindow == True:
windowsize = windowSize*Fs
n = int(len(data))
windown=int(np.floor(n/windowsize))
waveletData=[]
for i in range(windown-1):
xSeg = data[windowsize*i:windowsize*(i+1)]
coeffs = pywt.wavedec(xSeg, waveletName, level=nLevels)
waveletData.append(np.std(wrcoef(xSeg, 'd', coeffs, waveletName, nLevels)))
else:
coeffs = pywt.wavedec(data, waveletName, level=nLevels)
waveletData = np.std(wrcoef(data, 'd', coeffs, waveletName, nLevels))
return waveletData
def computeAvgDFFT(data, Fs, windowLength = 256, windowOverlapPrcnt = 50, Low=500, High=5000):
"""
Average band power: estimates the power spectral density with Welch's method
and averages it over the defined passband.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Low: int
The highpass frequency cutoff
High: int
The lowpass frequency cutoff
Returns
-------
averagePxxWelch: average power in defined passband
"""
# Defining hanning window
win = hanning(windowLength, True)
welchNoverlap = int(windowLength*windowOverlapPrcnt/100.0)
f, Pxxf = welch(data, Fs, window=win, noverlap=welchNoverlap, nfft=windowLength, return_onesided=True)
indexLow = np.where(f == min(f, key=lambda x:abs(x-Low)))[0][0]
indexHigh = np.where(f == min(f, key=lambda x:abs(x-High)))[0][0]
averagePxxWelch = np.mean(Pxxf[indexLow:indexHigh])
return averagePxxWelch
def meanFrq(data, Fs):
"""
Mean Frequency: calculated as the sum of the product of the spectrogram
intensity (in dB) and the frequency, divided by the total sum of
spectrogram intensity.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Returns
-------
meanFrqData: 1D numpy array containing the mean frequency of a given
signal
Reference
---------
<NAME>., & <NAME>. (2006). GA-based Feature Subset
Selection for Myoelectric Classification. In 2006 IEEE International
Conference on Robotics and Biomimetics (pp. 1465–1470). IEEE.
"""
win = 4 * Fs
freqs, psd = welch(data, Fs, nperseg=win, scaling='density')
meanFrqData = sum(freqs*psd)/sum(psd)
return meanFrqData
def freqRatio(data, Fs):
"""
Frequency Ratio: ratio between power in lower frequencies and power in
higher frequencies
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Returns
-------
freqRatioData:
Reference
---------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., &
<NAME>. (2000). New EMG pattern recognition based on soft computing
techniques and its application to control of a rehabilitation robotic
arm. Proc. of 6th International Conference on Soft Computing
(IIZUKA2000), 890–897.
"""
win = 4 * Fs
freqs, psd = welch(data, Fs, nperseg=win, scaling='density')
freqRatioData = abs(psd[:int(len(freqs)/2)])/abs(psd[int(len(freqs)/2):-1])
return freqRatioData
def meanAmpFreq(data, windowSize, Fs):
"""
Mean Frequency Amplitude:
Parameters
----------
data: array-like
2D matrix of shape (time, data)
windowSize: int
Size of the window
Fs: int
Sampling rate of the given signal
Returns
-------
meanAmpFreqData: 1D numpy array containing
"""
window = windowSize*Fs
n = int(len(data))
windown=int(np.floor(n/window))
meanAmpFreqData=[]
for i in range(windown-1):
xSeg = data[window*i:window*(i+1)]
meanAmpFreqData.append(np.median(abs(np.fft.fft(xSeg))))
return meanAmpFreqData
##############################################################################
# VISUALIZATION #
##############################################################################
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
class MathTextSciFormatter(mticker.Formatter):
def __init__(self, fmt="%1.2e"):
self.fmt = fmt
def __call__(self, x, pos=None):
s = self.fmt % x
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
significand = tup[0].rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
s = r'\bf %s{\times}%s' % (significand, exponent)
else:
s = r'\bf %s%s' % (significand, exponent)
return "${}$".format(s)
def axFormat(a):
a.yaxis.set_major_formatter(MathTextSciFormatter("%1.2e"))
a.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
for tick in a.xaxis.get_major_ticks():
tick.label1.set_fontweight('bold')
# for tick in a.yaxis.get_major_ticks():
# tick.label1.set_fontweight('bold')
def axFormaty(a):
a.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))
a.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
for tick in a.yaxis.get_major_ticks():
tick.label1.set_fontweight('bold')
def plotting(x, showOnly, timeWindow, processedFolder):
featureLabels = pd.DataFrame([{'mav': 'Mean Absolute Value',
'mavSlope': 'Mean Absolute Value Slope',
'variance': 'Variance',
'mmav1': 'Mean Absolute Value 1',
'mmav2': 'Mean Absolute Value 2',
'rms': 'Root Mean Square',
'curveLength': 'Curve Length',
'zeroCross': 'Zero Crossings',
'slopeSign': 'Slope Sign',
'threshold': 'Threshold',
'wamp': 'Willison Amplitude',
'ssi': 'Simple Square Integral',
'power': 'Power',
'peaksNegPos': 'Peaks - Negative and Positive',
'peaksPos': 'Peaks - Positive',
'tkeoTwo': 'Teager-Kaiser Energy Operator - Two Samples',
'tkeoFour': 'Teager-Kaiser Energy Operator - Four Samples',
'kurtosis': 'Kurtosis',
'skew': 'Skewness',
'crestF': 'Crest Factor',
'meanF': 'Mean Frequency',
'binData': 'Raw Data',
'AvgPowerMU': 'Bandpass Power (500-1000Hz)',
'AvgPowerSU': 'Bandpass Power (1000-3000Hz)',
'entropy': 'Signal Entropy',
'waveletStd': 'STD of Wavlet Convolution',
'spikeISI': 'Inter-Spike Interval',
'meanISI': 'Mean of ISI',
'stdISI': 'STD of ISI',
'burstIndex': 'Burst Index',
'pauseIndex': 'Pause Index',
'pauseRatio': 'Pause Ratio',
'spikeDensity': 'Spike Density'}])
subList = np.unique(x['subject'])
for isub in range(len(subList)):
if timeWindow==True:
outputDir = processedFolder + '/sub-' + str(subList[isub]) + '/timeWindow/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
else:
outputDir = processedFolder + '/sub-' + str(subList[isub]) + '/depthWindow/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
for iside in range(len(numSides)):
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
numFeatures = list(x.drop(['subject','side','channel','depth','labels', 'chanChosen'], axis=1))
if np.isnan(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen']).any():
chanSel = np.nan
else:
chanSel = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen'])
for ifeatures in range(len(numFeatures)):
if 'binData' in numFeatures[ifeatures]:
fileName = 'sub-' + str(subList[isub]) + '_side-' + numSides[iside] + '_' + featureLabels[numFeatures[ifeatures]].values[0].replace(" ", "")
plotRaw(x,subList[isub],numSides[iside], numChans, chanSel, fileName, outputDir, 24000)
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'feature:', featureLabels[numFeatures[ifeatures]].values[0])
                elif 'spikeISI' in numFeatures[ifeatures]:
                    continue
                elif numFeatures[ifeatures] in {'PositiveSpikes','PositiveTimes','NegativeSpikes','NegativeTimes'}:
                    continue
else:
fig, axs = plt.subplots(len(numChans),1, sharex=True, sharey=False)
fig.subplots_adjust(hspace=0.1, wspace=0)
titleLab = 'Sub-' + str(subList[isub]) + ' ' + numSides[iside] + ' Side: ' + featureLabels[numFeatures[ifeatures]].values[0]
fileName = 'sub-' + str(subList[isub]) + '_side-' + numSides[iside] + '_' + featureLabels[numFeatures[ifeatures]].values[0].replace(" ", "")
for ichan in range(len(numChans)):
feature = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])][numFeatures[ifeatures]])
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
channel = channelLabels.get(numChans[ichan])
muA = np.mean(feature)
                        if timeWindow==False:
                            if len(numChans) ==1:
                                axs.plot(depths, feature)
                                axs.set_xlim(depths[0], depths[-1])
                            else:
                                axs[ichan].plot(depths, feature)
                                axs[ichan].set_xlim(depths[0], depths[-1])
                        else:
                            # Plot against the window index; `feature` is a 1-D array here,
                            # so its length is used for the x-axis (assumed fix for the
                            # original x.shape[1]/feature.shape[1] indexing, which fails on 1-D data).
                            if len(numChans) ==1:
                                axs.plot(np.arange(0, len(feature), 1), feature)
                                axs.set_xlim(0, len(feature))
                            else:
                                axs[ichan].plot(np.arange(0, len(feature), 1), feature)
                                axs[ichan].set_xlim(0, len(feature))
if len(numChans) ==1:
axs.plot(axs.get_xlim(), [muA,muA], ls= 'dashed', c='black')
if ~np.isnan(chanSel):
if numChans[ichan] == chanSel:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold', color='red')
else:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
else:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
if timeWindow==False:
xticlabs = np.arange(depths[0],depths[-1],1)
axs.xaxis.set_ticks(xticlabs)
axs.xaxis.set_ticklabels(xticlabs, rotation = 45)
else:
xticlabs = np.arange(0,len(feature),5)
axs.xaxis.set_ticks(xticlabs)
axs.xaxis.set_ticklabels((xticlabs*2).astype(int), rotation = 45)
axFormat(axs)
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
axs.axvspan(inDepth, outDepth, color='purple', alpha=0.2)
for xc in depths:
axs.axvline(x=xc, color='k', linestyle='--', alpha=0.2)
else:
axs[ichan].plot(axs[ichan].get_xlim(), [muA,muA], ls= 'dashed', c='black')
if ~np.isnan(chanSel):
if numChans[ichan] == chanSel:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold', color='red')
else:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
else:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
if timeWindow==False:
xticlabs = np.arange(depths[0],depths[-1],1)
axs[ichan].xaxis.set_ticks(xticlabs)
axs[ichan].xaxis.set_ticklabels(xticlabs, rotation = 45)
else:
xticlabs = np.arange(0,len(feature),5)
axs[ichan].xaxis.set_ticks(xticlabs)
axs[ichan].xaxis.set_ticklabels((xticlabs*2).astype(int), rotation = 45)
axFormat(axs[ichan])
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
axs[ichan].axvspan(inDepth, outDepth, color='purple', alpha=0.2)
for xc in depths:
axs[ichan].axvline(x=xc, color='k', linestyle='--', alpha=0.2)
plt.suptitle(titleLab, y=0.96,x=0.51, size=16, fontweight='bold')
fig.text(0.51, 0.03, 'Depth (mm)', ha='center', size=14, fontweight='bold')
fig.text(0.035, 0.5, featureLabels[numFeatures[ifeatures]].values[0], va='center', rotation='vertical', size=14, fontweight='bold')
if showOnly == True:
plt.show()
else:
figure = plt.gcf() # get current figure
figure.set_size_inches(12, 8)
                        filepath = outputDir + fileName + '.png'
plt.savefig(filepath, dpi=100) # save the figure to file
plt.close('all')
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'feature:', featureLabels[numFeatures[ifeatures]].values[0])
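
# Usage sketch for plotting() with a tiny synthetic feature table. Assumptions:
# the column names below ('subject', 'side', 'channel', 'depth', 'labels',
# 'chanChosen' plus one feature column) mirror what plotting() expects, and the
# values are made up for illustration only.
def _demo_plotting(processedFolder='/tmp/mer_plots'):
    demo = pd.DataFrame({'subject': 1, 'side': 'left', 'channel': 1,
                         'depth': np.linspace(-10.0, 5.0, 30),
                         'labels': 0, 'chanChosen': np.nan,
                         'rms': np.random.rand(30)})
    plotting(demo, showOnly=True, timeWindow=False, processedFolder=processedFolder)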
def extract_raw_nwbFile(file_name, trimData, FilterData):
patientDF = | pd.DataFrame([]) | pandas.DataFrame |
import mongomanager
import intriniowrapper
import logging
import inspect
import copy
import pandas as pd
import commonqueries
from datetime import datetime
from dateutil.relativedelta import relativedelta
import os
import configwrapper
class IntrinioUpdater():
def __init__(self,config_file,proxies=None,timeout=300,max_retries=50,error_codes=[500,503],internal_error_codes=[401,403,404,429]):
self.config = configwrapper.ConfigWrapper(config_file=config_file)
collections=self.build_collections()
self.collections=collections
auth=(self.config.get_string('INTRINIO','username'),self.config.get_string('INTRINIO','password'))
if auth is None:
auth=()
if proxies is None:
proxies={}
if collections is None:
collections={}
self.mongo = mongomanager.MongoManager(port=self.config.get_int('FINANCIALDATA_MONGO','port'),host=self.config.get_string('FINANCIALDATA_MONGO','host'), username=self.config.get_string('FINANCIALDATA_MONGO','username'), password=self.config.get_string('FINANCIALDATA_MONGO','password'), dbname=self.config.get_string('FINANCIALDATA_MONGO','dbname'))
self.cq=commonqueries.CommonQueries(port=self.config.get_int('FINANCIALDATA_MONGO','port'),host=self.config.get_string('FINANCIALDATA_MONGO','host'), username=self.config.get_string('FINANCIALDATA_MONGO','username'), password=self.config.get_string('FINANCIALDATA_MONGO','password'), dbname=self.config.get_string('FINANCIALDATA_MONGO','dbname'),collections=collections)
self.intrinio=intriniowrapper.IntrinioWrapper(auth,proxies,timeout,max_retries,error_codes,internal_error_codes)
self.last_trade_day=pd.to_datetime(self.cq.get_last_complete_market_day())
return
def build_collections(self):
collections={}
for option in self.config.get_options('FINANCIALDATA_COLLECTIONS'):
collections[option]=self.config.get_string('FINANCIALDATA_COLLECTIONS',option)
return collections
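
    # Usage sketch (assumption: an INI-style config read by configwrapper with at
    # least the sections/keys referenced in __init__ above):
    #   [INTRINIO]                  username, password
    #   [FINANCIALDATA_MONGO]       host, port, username, password, dbname
    #   [FINANCIALDATA_COLLECTIONS] intrinio_companies, intrinio_filings, ...
    #
    #   updater = IntrinioUpdater('config.ini')
    #   updater.update_companies()
    #   updater.update_all_company_filings()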
def update_company(self,value):
company_details=self.intrinio.get_companies(identifier=value)
if company_details is None or 'cik' not in company_details or company_details['cik'] is None or 'latest_filing_date' not in company_details or company_details['latest_filing_date'] is None:
logging.error('company details is none')
return False
self.mongo.db[self.collections['intrinio_companies']].update({'cik':company_details['cik']},company_details,upsert=True)
logging.info('company update:'+value)
return True
def update_companies(self,full_update=False):
#will update the intrinio_companies collection also includs details in the same collection
self.mongo.create_index(self.collections['intrinio_companies'],'latest_filing_date')
self.mongo.create_index(self.collections['intrinio_companies'],'cik',unique=True)
self.mongo.create_index(self.collections['intrinio_companies'],'ticker')
max_date=self.mongo.db[self.collections['intrinio_companies']].find_one(sort=[("latest_filing_date",-1)])
logging.info('max_company:'+str(max_date))
if max_date==None or full_update==True:
max_date=None
else:
max_date=max_date['latest_filing_date']
companies=self.intrinio.get_companies(latest_filing_date=max_date)
if companies is None or len(companies)==0:
logging.info('companies len is 0 or companies is none')
return False
companies=pd.DataFrame(companies)
companies=companies[pd.notnull(companies['cik'])]
companies=companies[pd.notnull(companies['latest_filing_date'])]
companies=companies[pd.notnull(companies['ticker'])]
companies=companies.sort_values('latest_filing_date') #so we always process the oldest one first, this way if we ever need to do it again, we have the correct date
for index,company in companies.iterrows():
self.update_company(company['cik'])
        logging.info(str(len(companies))+':companies updated successfully')
return True
def update_all_company_filings(self):
logging.info('Now in func:update_all_company_filings')
self.mongo.create_index(self.collections['intrinio_filings'],'filing_date')
self.mongo.create_index(self.collections['intrinio_filings'],'cik')
self.mongo.create_index(self.collections['intrinio_filings'],'accno',unique=True)
self.mongo.create_index(self.collections['intrinio_filings'],'report_type')
self.mongo.create_index(self.collections['intrinio_filings'],'period_ended')
self.mongo.create_index(self.collections['intrinio_pull_times'],'collection')
self.mongo.create_index(self.collections['intrinio_pull_times'],'date')
self.mongo.create_index(self.collections['intrinio_pull_times'],'cik')
last_trade_day=self.last_trade_day
logging.info('last_trade_day:'+str(last_trade_day))
max_filing_date=self.mongo.db[self.collections['intrinio_filings']].find_one({},sort=[("filing_date",-1)])
logging.info('max_filing_date:'+str(max_filing_date))
if max_filing_date is not None and pd.to_datetime(max_filing_date['filing_date'])>=last_trade_day:
logging.info('we are already up to date on filings, no need to get things, this can wait until tomorrow')
pass
if max_filing_date is None or pd.to_datetime(max_filing_date['filing_date'])<=last_trade_day-relativedelta(days=15):
logging.error('waited too long (15 days) to update filings, need to download all filings for each company, this will take a lot of calls')
companies=self.cq.get_companies()
ciks=companies['cik'].unique()
            self.update_filings(ciks=list(ciks))  # pass the ciks themselves, not a nested list
        filings=self.intrinio.get_filings() #gets all filings in the past 30 days from intrinio
if filings is None or len(filings)==0:
logging.error('no filings found when getting the last 30 days of filings for all companies')
return
filings=pd.DataFrame(filings)
filings=filings.sort_values('accepted_date')
min_filings_date=pd.to_datetime(filings['filing_date']).min()
logging.info('min_filings_date:'+str(min_filings_date))
ciks=list(filings['cik'].unique())
for cik in ciks:
logging.info('updating filing for cik:'+cik)
cik_filings=filings[filings['cik']==cik].copy(deep=True)
last_filing_pull=self.mongo.db[self.collections['intrinio_pull_times']].find_one({"cik":cik,'collection':self.collections['intrinio_filings']})
last_already_pulled_cik_filing=self.mongo.db[self.collections['intrinio_filings']].find_one({"cik":cik},sort=[("filing_date",-1)])
if last_filing_pull is None and last_already_pulled_cik_filing is not None:
last_filing_pull=pd.to_datetime(last_already_pulled_cik_filing['filing_date']).date()
elif last_filing_pull is not None:
last_filing_pull=pd.to_datetime(last_filing_pull['date']).date()
else:
last_filing_pull=None
logging.info('last_filing_pull:'+str(last_filing_pull))
if last_filing_pull is None:
logging.info('doing a full update of filings for cik:'+cik)
self.update_filings(ciks=[cik],full_update=True)
elif last_filing_pull is not None and last_filing_pull < min_filings_date.date():
                logging.info('doing a partial update of filings for cik:'+cik)
self.update_filings(ciks=[cik])
elif last_filing_pull is not None and last_filing_pull >= min_filings_date.date():
logging.info('only updating what we need to, the last pull we have is greater than the min in the last 30 days')
cik_filings['cik']=cik
cik_filings['_id']=cik_filings['accno']
cik_filings=cik_filings.sort_values('accepted_date')
for index,filing in cik_filings.iterrows():
filing_data=filing.to_dict()
self.mongo.db[self.collections['intrinio_filings']].update({'accno':filing_data['accno']},filing_data,upsert=True)
logging.info(str(len(cik_filings))+': filings updated for:'+cik)
logging.info('update filings pull date')
else:
logging.error(str(last_filing_pull))
logging.error(min_filings_date.date())
logging.error('we should never get here')
exit()
pull_data={'cik':cik,'collection':self.collections['intrinio_filings'],'date':last_trade_day.strftime('%Y-%m-%d')}
self.mongo.db[self.collections['intrinio_pull_times']].update({"cik":cik,'collection':self.collections['intrinio_filings']},pull_data,upsert=True)
return
def update_filings(self,full_update=False,ciks=None):
last_trade_day=self.last_trade_day
self.mongo.create_index(self.collections['intrinio_filings'],'filing_date')
self.mongo.create_index(self.collections['intrinio_filings'],'cik')
self.mongo.create_index(self.collections['intrinio_filings'],'accno',unique=True)
self.mongo.create_index(self.collections['intrinio_filings'],'report_type')
self.mongo.create_index(self.collections['intrinio_filings'],'period_ended')
self.mongo.create_index(self.collections['intrinio_pull_times'],'collection')
self.mongo.create_index(self.collections['intrinio_pull_times'],'date')
self.mongo.create_index(self.collections['intrinio_pull_times'],'cik')
if ciks is None: #pass in a list of ciks that we want to update
companies=self.cq.get_companies()
ciks=companies['cik'].unique()
for cik in ciks:
company=self.cq.get_company(cik)
if company is None or len(company)>1 or len(company)==0 or 'latest_filing_date' not in company:
logging.error('cik:'+cik+' has no matching company')
continue
last_filing_pull=self.mongo.db[self.collections['intrinio_pull_times']].find_one({"cik":cik,'collection':self.collections['intrinio_filings']})
if full_update is not True and last_filing_pull is not None and pd.to_datetime(last_filing_pull['date'])>=last_trade_day:
logging.info('last_filing_pull:'+str(last_filing_pull['date']))
                logging.info("we are already up to date on this company's filings, continuing:"+cik)
continue
max_filing_date=self.mongo.db[self.collections['intrinio_filings']].find_one({"cik":cik},sort=[("filing_date",-1)])
if max_filing_date!=None and full_update is not True:
max_filing_date=(pd.to_datetime(max_filing_date['filing_date'])).date().strftime('%Y-%m-%d')
else:
max_filing_date=None
logging.info('max_filing_date:'+str(max_filing_date))
filings=self.intrinio.get_company_filings(start_date=max_filing_date,identifier=cik)
if filings is None or len(filings)==0:
logging.info('filings is none or filings are empty')
else:
filings=pd.DataFrame(filings)
filings['cik']=cik
filings['_id']=filings['accno']
filings=filings.sort_values('accepted_date')
for index,filing in filings.iterrows():
filing_data=filing.to_dict()
self.mongo.db[self.collections['intrinio_filings']].update({'accno':filing_data['accno']},filing_data,upsert=True)
logging.info(str(len(filings))+': filings updated for:'+cik)
logging.info('update filings pull date for cik:'+cik+' to:'+str(last_trade_day))
pull_data={'cik':cik,'collection':self.collections['intrinio_filings'],'date':last_trade_day.strftime('%Y-%m-%d')}
self.mongo.db[self.collections['intrinio_pull_times']].update({"cik":cik,'collection':self.collections['intrinio_filings']},pull_data,upsert=True)
return True
def update_standardized_fundamentals(self,full_update=False,ciks=None):
last_trade_day=self.last_trade_day
self.mongo.create_index(self.collections['intrinio_standardized_fundamentals'],'cik')
self.mongo.create_index(self.collections['intrinio_standardized_fundamentals'],'fiscal_year')
self.mongo.create_index(self.collections['intrinio_standardized_fundamentals'],'fiscal_period')
self.mongo.create_index(self.collections['intrinio_standardized_fundamentals'],'statement_type')
self.mongo.create_index(self.collections['intrinio_standardized_fundamentals'],'filing_date')
self.mongo.create_index(self.collections['intrinio_pull_times'],'collection')
self.mongo.create_index(self.collections['intrinio_pull_times'],'date')
self.mongo.create_index(self.collections['intrinio_pull_times'],'cik')
if ciks is None: #pass in a list of ciks that we want to update
ciks=self.cq.existing_ciks()
for cik in ciks:
company=self.cq.get_company(cik)
if company is None or len(company)==0 or 'latest_filing_date' not in company.columns:
continue
company_latest_filing_date=(pd.to_datetime(company['latest_filing_date'].iloc[0])).date()
max_fundamental_date=self.mongo.db[self.collections['intrinio_standardized_fundamentals']].find_one({"cik":cik},sort=[("filing_date",-1)])
last_fundamental_pull=self.mongo.db[self.collections['intrinio_pull_times']].find_one({"cik":cik,'collection':self.collections['intrinio_standardized_fundamentals']})
if last_fundamental_pull==None:
if max_fundamental_date==None:
last_fundamental_pull=datetime(1900,1,1).date()
else:
last_fundamental_pull=(pd.to_datetime(max_fundamental_date['filing_date'])).date()
else:
last_fundamental_pull=(pd.to_datetime(last_fundamental_pull['date'])).date()
if full_update==True: #then we need to re-download everything
last_fundamental_pull=datetime(1900,1,1).date()
if last_fundamental_pull>company_latest_filing_date:
logging.info('fundamentals up to date for, not updating:'+cik)
logging.info('last_fundamental_pull:'+str(last_fundamental_pull))
logging.info('company_latest_filing_date:'+str(company_latest_filing_date))
continue
filings=pd.DataFrame(list(self.mongo.db[self.collections['intrinio_filings']].find({'cik':cik})))
if len(filings)==0:
                logging.info('no filings found, continuing')
continue
filings['filing_date']=pd.to_datetime(filings['filing_date']).dt.date
filings=filings[filings['filing_date']>=last_fundamental_pull]
filings=filings[filings['report_type'].isin(['10-K','10-K/A','10-Q','10-Q/A'])]
filings=filings.sort_values('accepted_date')
if len(filings)==0:
logging.info('empty filings after statement type and date filter, no need to update, continuing')
continue
statements_df= | pd.DataFrame() | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from ..analysis import determine_analysis_type
from ..analysis.exc import NoDataError
from ..data import Vector, Categorical
class MyTestCase(unittest.TestCase):
def test_small_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 10000)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 10000).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float32')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float32').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30).astype('float32'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float16')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float16').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30).astype('float16'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 1)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 1).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 1))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_vector(self):
np.random.seed(123456789)
input_array = Vector(st.norm.rvs(0, 1, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_vector(self):
np.random.seed(123456789)
input_array = Vector(st.norm.rvs(0, 1, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_array_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_list_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(input_array.tolist()), Vector)
def test_small_series_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(pd.Series(input_array)), Vector)
def test_none(self):
input_array = None
self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))
def test_empty_list(self):
input_array = list()
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_empty_array(self):
input_array = np.array([])
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_empty_vector(self):
input_array = Vector([])
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_float_scalar(self):
input_array = 3.14159256
self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))
def test_small_int_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 10000)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 10000).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int32')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int32').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int32'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int16')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int16').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int16'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int8')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int8').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int8'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_int_scalar(self):
input_array = 3
self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))
def test_small_cat_list(self):
np.random.seed(123456789)
input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_small_cat_array(self):
np.random.seed(123456789)
input_array = np.array(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)])
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_small_cat_series(self):
np.random.seed(123456789)
input_array = pd.Series(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)])
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_large_cat_list(self):
np.random.seed(123456789)
input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)]
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_large_cat_array(self):
np.random.seed(123456789)
input_array = np.array(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)])
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_large_cat_series(self):
np.random.seed(123456789)
input_array = pd.Series(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)])
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_single_cat_list(self):
input_array = ['a']
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_single_cat_array(self):
input_array = np.array(['a'])
self.assertIsInstance(determine_analysis_type(input_array), Categorical)
self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_single_cat_series(self):
input_array = | pd.Series(['a']) | pandas.Series |
# GetData.py
# <NAME>
# 29 November 2019
#
# This program takes as input an output filename and a starting date and
# returns song/artist names to the specified file.
import csv
import sys
import time
import pandas as pd
from selenium import webdriver
from datetime import datetime, timedelta
# get the list of songs/artists from a specified csv file
def getOldSongs(fname):
try: #if there is data in the file already
data = | pd.read_csv(fname) | pandas.read_csv |
#!/usr/bin/env python
"""Script for generating figures of catalog statistics. Run `QCreport.py -h`
for command line usage.
"""
import os
import sys
import errno
import argparse
from datetime import date, datetime
from math import sqrt, radians, cos
import markdown
import numpy as np
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Polygon
from obspy.geodetics.base import gps2dist_azimuth
# Python 2
try:
from urllib2 import urlopen, HTTPError
# Python 3
except ImportError:
from urllib.request import urlopen, HTTPError
import QCutils as qcu
from decorators import retry, printstatus
###############################################################################
###############################################################################
###############################################################################
@printstatus('Creating basic catalog summary')
def basic_cat_sum(catalog, dirname, dup1, dup2, timewindow, distwindow):
"""Gather basic catalog summary statistics."""
lines = []
lines.append('Catalog name: %s\n\n' % dirname[:-9].upper())
lines.append('First date in catalog: %s\n' % catalog['time'].min())
lines.append('Last date in catalog: %s\n\n' % catalog['time'].max())
lines.append('Total number of events: %s\n\n' % len(catalog))
lines.append('Minimum latitude: %s\n' % catalog['latitude'].min())
lines.append('Maximum latitude: %s\n' % catalog['latitude'].max())
lines.append('Minimum longitude: %s\n' % catalog['longitude'].min())
lines.append('Maximum longitude: %s\n\n' % catalog['longitude'].max())
lines.append('Minimum depth: %s\n' % catalog['depth'].min())
lines.append('Maximum depth: %s\n' % catalog['depth'].max())
lines.append('Number of 0 km depth events: %s\n'
% len(catalog[catalog['depth'] == 0]))
lines.append('Number of NaN depth events: %s\n\n'
% len(catalog[pd.isnull(catalog['depth'])]))
lines.append('Minimum magnitude: %s\n' % catalog['mag'].min())
lines.append('Maximum magnitude: %s\n' % catalog['mag'].max())
lines.append('Number of 0 magnitude events: %s\n'
% len(catalog[catalog['mag'] == 0]))
lines.append('Number of NaN magnitude events: %s\n\n'
% len(catalog[pd.isnull(catalog['mag'])]))
lines.append('Number of possible duplicates (%ss and %skm threshold): %d\n'
% (timewindow, distwindow, dup1))
lines.append('Number of possible duplicates (16s and 100km threshold): %d'
% dup2)
with open('%s_catalogsummary.txt' % dirname, 'w') as sumfile:
for line in lines:
sumfile.write(line)
def largest_ten(catalog, dirname):
"""Make a list of the 10 events with largest magnitude."""
catalog = catalog.sort_values(by='mag', ascending=False)
topten = catalog.head(n=10)
topten = topten[['time', 'id', 'latitude', 'longitude', 'depth', 'mag']]
with open('%s_largestten.txt' % dirname, 'w') as magfile:
for event in topten.itertuples():
line = ' '.join([str(x) for x in event[1:]]) + '\n'
magfile.write(line)
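
def _demo_largest_ten(outdir='.'):
    """Usage sketch with a tiny synthetic catalog. The column names mirror those
    used by the functions above; the event values are made up for illustration."""
    catalog = pd.DataFrame({
        'time': ['2020-01-01T00:00:00', '2020-01-02T00:00:00'],
        'id': ['ev1', 'ev2'],
        'latitude': [35.0, 36.0],
        'longitude': [-118.0, -117.5],
        'depth': [5.0, 10.0],
        'mag': [3.2, 4.1]})
    largest_ten(catalog, os.path.join(outdir, 'demo_catalog'))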
@printstatus('Finding possible duplicates')
def list_duplicates(catalog, dirname, timewindow=2, distwindow=15,
magwindow=None, minmag=-5, locfilter=None):
"""Make a list of possible duplicate events."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T'))
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
catalog = catalog[catalog['mag'] >= minmag]
if locfilter:
catalog = catalog[catalog['place'].str.contains(locfilter, na=False)]
cat = catalog[['time', 'convtime', 'id', 'latitude', 'longitude', 'depth',
'mag']].copy()
cat.loc[:, 'time'] = [qcu.to_epoch(x) for x in cat['time']]
duplines1 = [('Possible duplicates using %ss time threshold and %skm '
'distance threshold\n') % (timewindow, distwindow),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
duplines2 = [('\n\nPossible duplicates using 16s time threshold and 100km '
'distance threshold\n'),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
sep = '-----------------------\n'
thresh1dupes, thresh2dupes = 0, 0
for event in cat.itertuples():
trimdf = cat[cat['convtime'].between(event.convtime, event.convtime
+ | pd.Timedelta(seconds=16) | pandas.Timedelta |
import os
try:
    import fool
except ImportError:
    print("The 'fool' word segmentation package is missing")
import math
import pandas as pd
import numpy as np
import random
import tensorflow as tf
import re
np.random.seed(1)
def add2vocab(path,word):
vocab_data=pd.read_csv(path)
idx_to_chars=list(vocab_data['vocabulary'])+[word]
df_data = pd.DataFrame(idx_to_chars, columns=['vocabulary'])
df_data.to_csv(path,index=0)
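
def _demo_add2vocab(path='vocab_demo.csv'):
    # Usage sketch (hypothetical file name): write a tiny vocabulary file and
    # append one new token to it with add2vocab.
    pd.DataFrame(['[cls]', '[sep]', '[mask]'], columns=['vocabulary']).to_csv(path, index=0)
    add2vocab(path, '[pad]')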
def get_corpus_indices(data,chars_to_idx,mlm=False,nsp=False):
"""
    Convert the input text into vocabulary indices.
"""
corpus_indices=[]
keys=chars_to_idx.keys()
#print(data)
for d in data:
if nsp==True:
corpus_chars=d
corpus_chars_idx=[]
if len(d)>0 and len(d[0])==1:
corpus_chars=['[cls]']+corpus_chars
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
                        corpus_chars[index]='[mask]'  # replace words missing from the vocabulary with [mask]
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
find_end=np.where(np.asarray(corpus_chars_idx)==chars_to_idx['。'])
for i in range(find_end[0].shape[0]):
corpus_chars_idx.insert(find_end[0][i]+i+1,chars_to_idx['[sep]'])
else:
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
elif mlm==True:
d=d.replace('\n','').replace('\r','').replace(' ','').replace('\u3000','')
corpus_chars=list(d)
corpus_chars_idx=[]
#print(2)
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
                    corpus_chars[index]='[mask]'  # replace words missing from the vocabulary with [mask]
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
else:
corpus_chars=d
if isinstance(corpus_chars,(list)):#corpus_chars必须是列表list
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
                        corpus_chars[index]='[mask]'  # replace words missing from the vocabulary with [mask]
else:
                corpus_chars=[corpus_chars]  # wrap the single sample in a list
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
        corpus_indices.append(corpus_chars_idx)  # corpus indices: the input text mapped to ids via chars_to_idx
return corpus_indices
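
def _demo_get_corpus_indices():
    # Usage sketch: a tiny vocabulary; characters missing from it are replaced
    # by '[mask]' before being mapped to ids (mlm=True path).
    chars_to_idx = {'好': 0, '。': 1, '[mask]': 2, '[cls]': 3, '[sep]': 4}
    return get_corpus_indices(['好坏。'], chars_to_idx, mlm=True)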
def data_format(data,labels):
'''
    Format a batch: pad every sequence (with -1) to the length of the longest sequence in the batch.
'''
def format_inner(inputs,max_size):
new_data=[]
for x_t in inputs:
if(abs(len(x_t)-max_size)!=0):
for i in range(abs(len(x_t)-max_size)):
x_t.extend([-1])
new_data.append(tf.reshape(x_t,[1,-1]))
return new_data
max_size=0
new_data=[]
mask=[]
masks=[]
new_labels = []
    # find the maximum sequence length
for x in data:
if(max_size<len(x)):
max_size=len(x)
    # build the 0/1 masks
for d in data:
for i in range(max_size):
if(i<len(d)):
mask.append(1.0)
else:
mask.append(0.0)
masks.append(tf.reshape(mask,[1,-1]))
mask=[]
#print(masks,"max_size")
if data is not None:
        new_data=format_inner(data,max_size)  # pad the data
if labels is not None:
        new_labels=format_inner(labels,max_size)  # pad the labels
#print(new_labels)
#print(new_data)
return new_data,new_labels,masks
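
def _demo_data_format():
    # Usage sketch: two index sequences of different lengths are padded with -1
    # and a 0/1 mask of the same shape is returned (uses tensorflow, imported above).
    new_data, new_labels, masks = data_format([[1, 2, 3], [4, 5]], [[1, 2, 3], [4, 5]])
    return new_data, new_labels, masks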
def get_data(data,labels,chars_to_idx,label_chars_to_idx,batch_size,char2idx=True,mlm=False,nsp=False):
'''
    function:
        Yield the data one batch at a time.
    parameters:
        data: the samples that need to be batched
        labels: the sentiment labels corresponding to data
        chars_to_idx: mapping from vocabulary tokens to indices
        label_chars_to_idx: mapping from label tokens to indices
        batch_size: batch size
'''
num_example=math.ceil(len(data)/batch_size)
example_indices=list(range(num_example))
random.shuffle(example_indices)
#print(data,"get_data")
for i in example_indices:
start=i*batch_size
if start >(len(data)-1):
start=(len(data)-1)
end=i*batch_size+batch_size
if end >(len(data)-1):
end=(len(data)-1)+1
X=data[start:end]
Y=labels[start:end]
if char2idx==True:
#print("进")
X=get_corpus_indices(X,chars_to_idx,mlm=mlm,nsp=nsp)
if mlm==True:
Y=X
else:
Y=get_corpus_indices(Y,label_chars_to_idx,mlm=mlm,nsp=nsp)
#print(X,"XXXXXX")
        yield X,Y  # index-encoded text only; the sequences still have varying lengths
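
def _demo_get_data():
    # Usage sketch with char2idx=False, i.e. the inputs are already index lists
    # and are only shuffled and partitioned into batches.
    data = [[1, 2, 3], [4, 5], [6], [7, 8]]
    labels = [[0], [1], [0], [1]]
    for X, Y in get_data(data, labels, None, None, batch_size=2, char2idx=False):
        print(X, Y)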
def nsp_vocab(folder,name):
path=folder+"\\"+name
df = pd.read_csv(path)
data = list(df["evaluation"])
#print(len(data))
datas=[]
labels=[]
for i in range(len(data)):
if data[i].find("。")==-1:
continue
#print(data[i])
x,y=build_sample_nsp(data[i])
if x==-1:
continue
datas.extend(x)
labels.extend(y)
#print(datas[-1])
#print(labels[-1])
datas=[list(d) for d in datas]
df_data = | pd.DataFrame(datas) | pandas.DataFrame |
"""
Functions and classes for performing WoE transformations
"""
import math
import warnings
import numpy as np
import pandas as pd
import sklearn as sk
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
class _GroupedPredictor(pd.DataFrame):
"""
    Helper class for convenient access to parts of the grouped data
"""
def get_predictor(self, x):
"""
        Select a subset of the data by predictor name(s)
        Parameters
        ---------------
        x : str/int/list-like
            A predictor or a list of predictors
        Returns:
        -----------
        self : pd.DataFrame
            The matching part of the dataframe (itself)
"""
if isinstance(x, (list, set, tuple)):
return self[self["predictor"].isin(x)]
else:
return self[self["predictor"] == x]
def append(self, other):
return _GroupedPredictor(super().append(other))
class WoeTransformer(TransformerMixin, BaseEstimator):
"""Класс для построения и применения WOE группировки к датасету
Parameters
----------
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
save_data : bool, default False
Параметр, определяющий, нужно ли сохранить данные для обучения
трансформера внутри экземпляра класса
join_bad_categories : bool, default False
Определяет, должени ли трансформер предпринять попытку для объединения
катогориальных групп в более крупные
Warning
-------
join_bad_categories - Экспериментальная функция.
Способ группировки категорий нестабилен
Attributes
----------
stats : pandas.DataFrame
Результаты WOE-группировки по всем предикторам
predictors : list
Список предикторов, на которых была построена группировка
cat_values : dict[str, list]
Словарь со списками категорий по предикторам, переданный при обучении
alpha_values : dict[str, float]
Словарь со значениями alpha для регуляризации групп
possible groups : pandas.DataFrame
Данные о значениях предиктора, которые могли бы стать
отдельными категориями
bad_groups : pandas.DataFrame
Данные о группах, которые не удовлетворяют условиям
"""
def __repr__(self):
return "WoeTransformer(min_sample_rate={!r}, min_count={!r}, n_fitted_predictors={!r})".format(
self.min_sample_rate,
self.min_count,
len(self.predictors),
)
def __init__(
self,
min_sample_rate: float = 0.05,
min_count: int = 3,
save_data: bool = False,
join_bad_categories: bool = False,
):
"""
        Initialize the class instance
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alpha_values = {}
self.save_data = save_data
self.join_bad_categories = join_bad_categories
# -------------------------
    # Public interface of the class
# -------------------------
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
        Fit the transformer and compute all intermediate data
        Parameters
        ---------------
        X : pd.DataFrame
            Dataframe with the predictors to be grouped
        y : pd.Series
            Target variable
        cat_values : dict[str, list[str]], optional
            Dictionary of lists with special values that should be
            turned into separate categories
            By default all string and missing values are put into
            separate categories
        alpha_values : dict[str, float], optional
            Dictionary of alpha values used to regularize the WOE groups
        Returns
        -------
        self : WoeTransformer
"""
        # Reset the transformer's current state
        self._reset_state()
        # Store the categorical values
        self.cat_values = cat_values
        # Validate and reshape the data
        if hasattr(self, "_validate_data"):
            X, y = self._validate_and_convert_data(X, y)
        if self.save_data:
            self.data = X
            self.target = y
        # Initialize the coefficients used to regularize the groups
        self.alpha_values = {i: 0 for i in X.columns}
        self.alpha_values.update(alpha_values)
        # Aggregate the predictor values
        self._grouping(X, y)
        # Compute WOE and IV
        self._fit_numeric(X, y)
        # Search for potential groups
        # Search for "bad" groups
self._get_bad_groups()
return self
def transform(self, X, y=None):
"""
        Apply the fitted transformer to new data
        Parameters
        ---------------
        X : pandas.DataFrame
            Dataframe to be transformed
            Predictors that were not grouped during fitting are
            ignored and a message is printed
        y : pandas.Series
            Ignored
        Returns
        -----------
        transformed : pandas.DataFrame
            Transformed dataset
"""
transformed = pd.DataFrame()
if hasattr(self, "_validate_data"):
try:
X, y = self._validate_and_convert_data(X, y)
except AttributeError:
pass
for i in X:
if i in self.predictors:
try:
transformed[i] = self._transform_single(X[i])
except Exception as e:
print(f"Transform failed on predictor: {i}", e)
else:
print(f"Column is not in fitted predictors list: {i}")
return transformed
def fit_transform(self, X, y, cat_values={}, alpha_values={}):
"""
        Fit the transformer, compute all intermediate data and then
        apply the resulting grouping to the same data
        Parameters
        ---------------
        X : pandas.DataFrame
            Dataframe with the predictors to be grouped
        y : pandas.Series
            Target variable
        cat_values : dict[str, list[str]], optional
            Dictionary of lists with special values that should be
            turned into separate categories
            By default all string and missing values are put into
            separate categories
        alpha_values : dict[str, float], optional
            Dictionary of alpha values used to regularize the WOE groups
        Returns
        -----------
        transformed : pd.DataFrame
            Transformed dataset
"""
self.fit(X, y, cat_values=cat_values, alpha_values=alpha_values)
return self.transform(X)
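
    # Usage sketch (illustrative only; the variable and column names below are
    # hypothetical and not part of this module):
    #
    #   woe = WoeTransformer(min_sample_rate=0.05, min_count=3)
    #   train_woe = woe.fit_transform(X_train, y_train,
    #                                 cat_values={"region": ["MSK", "SPB"]})
    #   test_woe = woe.transform(X_test)
    #   print(woe.get_iv(sort=True))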
def plot_woe(self, predictors=None):
"""
        Draw one or several grouping plots
        Parameters
        ---------------
        predictors : str or array, default None
            Predictor(s) to plot
            -- if str, a single plot is drawn
            -- if array, plots are drawn for the listed predictors
            -- if None, all grouped predictors are plotted
        Warning
        -------
        Calling the method without arguments can take a long time when
        there are many predictors
"""
if predictors is None:
predictors = self.predictors
elif isinstance(predictors, str):
predictors = [predictors]
elif isinstance(predictors, (list, tuple, set)):
predictors = predictors
_, axes = plt.subplots(figsize=(10, len(predictors) * 5), nrows=len(predictors))
try:
for i, col in enumerate(predictors):
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes[i])
except TypeError:
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes)
# return fig
def get_iv(self, sort=False):
"""Получение списка значений IV по предикторам
Parameters
----------
sort : bool, default False
Включает сортировку результата по убыванию IV
Returns
-------
pandas.Series
"""
try:
res = self.stats.groupby("predictor")["IV"].sum()
if sort:
res = res.sort_values(ascending=False)
res = dict(res)
except AttributeError as e:
print(f"Transformer was not fitted yet. {e}")
res = {}
return res
# -------------------------
    # Internal functions that operate on the whole dataset
# -------------------------
def _validate_and_convert_data(self, X, y):
"""Проверяеn входные данные, трансформирует в объекты pandas
Использует метод _validate_data из sklearn/base.py
"""
if hasattr(X, "columns"):
predictors = X.columns
else:
predictors = ["X" + str(i + 1) for i in range(X.shape[1])]
if y is None:
X_valid = self._validate_data(X, y, dtype=None, force_all_finite=False)
X_valid = pd.DataFrame(X, columns=predictors)
y_valid = None
else:
X_valid, y_valid = self._validate_data(
X, y, dtype=None, force_all_finite=False
)
y_valid = pd.Series(y, name="target")
X_valid = pd.DataFrame(X, columns=predictors)
return X_valid, y_valid
def _grouping(self, X, y):
"""
        Apply the grouping to all predictors
"""
df = X.copy()
df = df.fillna("пусто")
df["target"] = y.copy()
        # Group the values and compute the statistics
for col in df.columns[:-1]:
grouped_temp = self._group_single(df[col], y)
num_mask = self._get_nums_mask(grouped_temp["value"])
cat_val_mask = grouped_temp["value"].isin(self.cat_values.get(col, []))
is_all_categorical = all(~num_mask | cat_val_mask)
if self.join_bad_categories and is_all_categorical:
repl = self._get_cat_values_for_join(grouped_temp)
grouped_temp = self._group_single(df[col].replace(repl), y)
self.grouped = self.grouped.append(grouped_temp)
        # Replace the "пусто" placeholder back with np.nan OR convert to a numeric type
try:
self.grouped["value"] = self.grouped["value"].replace({"пусто": np.nan})
except TypeError:
self.grouped["value"] = pd.to_numeric(
self.grouped["value"], downcast="signed"
)
def _fit_numeric(self, X, y):
"""
        Compute WOE and IV
        Parameters:
        ---------------
        X : pd.DataFrame
            Dataframe with the predictors to be grouped
        y : pd.Series
            Target variable
Returns
-------
None
"""
res = pd.DataFrame()
for i in X:
res_i = self._fit_single(X[i], y)
res = res.append(res_i)
self.predictors.append(i)
self.stats = self.stats.append(res)
# -------------------------
    # Internal functions that operate on individual columns
# -------------------------
def _group_single(self, x, y):
"""
        Aggregate the data by predictor value.
        Computes the number of observations, the number of target events,
        the group's share of the total number of observations and the
        target rate within each group
        Parameters:
        ---------------
        X : pandas.DataFrame
            Data table to aggregate
        y : pandas.Series
            Target variable
"""
col = x.name
df = pd.DataFrame({col: x.values, "target": y.values})
grouped_temp = df.groupby(col)["target"].agg(["count", "sum"]).reset_index()
grouped_temp.columns = ["value", "sample_count", "target_count"]
grouped_temp["sample_rate"] = (
grouped_temp["sample_count"] / grouped_temp["sample_count"].sum()
)
grouped_temp["target_rate"] = (
grouped_temp["target_count"] / grouped_temp["sample_count"]
)
grouped_temp.insert(0, "predictor", col)
return _GroupedPredictor(grouped_temp)
def _fit_single(self, x, y, gr_subset=None, cat_vals=None):
"""
        Compute WOE and IV
        Parameters:
        ---------------
        X : pd.DataFrame
            Dataframe with the predictors to be grouped
        y : pd.Series
            Target variable
        gr_subset : _GroupedPredictor
            Pre-grouped data for the predictor
"""
gr_subset_num = pd.DataFrame()
gr_subset_cat = pd.DataFrame()
col = x.name
if gr_subset is None:
gr_subset = self.grouped.get_predictor(col)
if cat_vals is None:
cat_vals = self.cat_values.get(col, [])
nan_mask = x.isna()
num_mask = self._get_nums_mask(x) & (~x.isin(cat_vals)) & (~nan_mask)
num_vals = x.loc[num_mask].unique()
try:
            # Fit trend coefficients on the numeric values of the predictor
if num_mask.sum() > 0:
try:
poly_coefs = np.polyfit(
x.loc[num_mask].astype(float), y.loc[num_mask], deg=1
)
except np.linalg.LinAlgError as e:
print(f"Error in np.polyfit on predictor: '{col}'.\nError MSG: {e}")
print("Linear Least Squares coefficients were set to [1, 0]")
poly_coefs = np.array([1, 0])
self.trend_coefs.update({col: poly_coefs})
            # Compute monotonic borders
gr_subset_num = gr_subset[gr_subset["value"].isin(num_vals)].copy()
gr_subset_num["value"] = pd.to_numeric(gr_subset_num["value"])
gr_subset_num = gr_subset_num.sort_values("value")
borders = self._monotonic_borders(gr_subset_num, self.trend_coefs[col])
self.borders.update({col: borders})
            # Apply the borders to the grouped data
gr_subset_num["groups"] = pd.cut(gr_subset_num["value"], borders)
gr_subset_num["type"] = "num"
except ValueError as e:
print(f"ValueError on predictor {col}.\nError MSG: {e}")
        # Process the categorical values of the predictor
if (~num_mask).sum() > 0:
gr_subset_cat = gr_subset[~gr_subset["value"].isin(num_vals)].copy()
gr_subset_cat["groups"] = gr_subset_cat["value"].fillna("пусто")
gr_subset_cat["type"] = "cat"
        # Combine the numeric and categorical values
gr_subset = pd.concat([gr_subset_num, gr_subset_cat], axis=0, ignore_index=True)
        # Compute WOE and IV
alpha = self.alpha_values.get(col, 0)
res_i = self._statistic(gr_subset, alpha=alpha)
is_empty_exists = any(res_i["groups"].astype(str).str.contains("пусто"))
if is_empty_exists:
res_i["groups"].replace({"пусто": np.nan}, inplace=True)
return res_i
def _transform_single(self, x, stats=None):
"""
        Apply the grouping and the WoE transformation
        Parameters
        ---------------
        x : pandas.Series
            Predictor values
        Returns
        ---------------
        X_woe : pandas.DataFrame
            WoE-transformed predictor values
            WoE = 0 if the group was not seen in the training sample
"""
orig_index = x.index
X_woe = x.copy()
if stats is None:
stats = self.stats.get_predictor(X_woe.name)
        # Mappings used to replace groups with their corresponding WOE values
num_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "num"
}
cat_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "cat"
}
        # Categorical groups
        cat_bounds = stats.loc[stats["type"] == "cat", "groups"]
        # predict for the numeric values
        DF_num = stats.loc[stats["type"] == "num"]
        if DF_num.shape[0] > 0:
            # Right borders of the intervals used to bin the numeric values
num_bounds = [-np.inf] + list(
pd.IntervalIndex(stats.loc[stats["type"] == "num", "groups"]).right
)
            # Keep only the numeric values of the predictor
            # (number-like values that are not explicitly marked as categorical)
            X_woe_num = pd.to_numeric(
                X_woe[(self._get_nums_mask(X_woe)) & (~X_woe.isin(cat_bounds))]
            )
            # Bin the values into intervals according to the grouping
            X_woe_num = pd.cut(X_woe_num, num_bounds)
            # Replace the groups with their WOE values
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
        # predict for the categorical values (may override numeric ones)
        DF_cat = stats.loc[stats["type"] == "cat"]
        if DF_cat.shape[0] > 0:
            # Keep string values and those explicitly marked as categorical
            X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
            # Replace the groups with their WOE values
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
        # predict for new categories (non-numeric values not seen during grouping)
        # Collect the indices of the categorical and numeric values
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(x):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
X_woe = | pd.concat([X_woe_num, X_woe_cat, X_woe_oth]) | pandas.concat |
import unittest
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype, is_string_dtype
from pandas.util.testing import assert_frame_equal
from shift_detector.precalculations.store import InsufficientDataError, Store
from shift_detector.utils.column_management import ColumnType
class TestStore(unittest.TestCase):
def test_init_custom_column_types(self):
sales = {'brand': ["Jones LLC", "Alpha Co", "Blue Inc", "Blue Inc", "Alpha Co",
"Jones LLC", "Alpha Co", "Blue Inc", "Blue Inc", "Alpha Co",
"Jones LLC"] * 10,
'payment': [150, 200, 50, 10, 5, 150, 200, 50, 10, 5, 1] * 10,
'description': ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"] * 10}
df1 = df2 = pd.DataFrame.from_dict(sales)
with self.subTest("Successful initialisation"):
Store(df1, df2, custom_column_types={'description': ColumnType.categorical})
with self.subTest("Exception when no dict is passed as custom_column_types"):
self.assertRaises(TypeError, lambda: Store(df1, df2, custom_column_types='no_dict'))
with self.subTest("Exception when key of custom_column_types is not a string"):
self.assertRaises(TypeError, lambda: Store(df1, df2, custom_column_types={0: ColumnType.numerical}))
with self.subTest("Exception when value of custom_column_types is not a ColumnType"):
self.assertRaises(TypeError, lambda: Store(df1, df2, custom_column_types={'brand': 0}))
def test_min_data_size_is_enforced(self):
df1 = pd.DataFrame(list(range(10)))
df2 = pd.DataFrame(list(range(10)))
store = Store(df1=df1, df2=df2)
assert_frame_equal(df1.astype(float), store[ColumnType.numerical][0])
assert_frame_equal(df2.astype(float), store[ColumnType.numerical][1])
self.assertRaises(InsufficientDataError, Store, df1=pd.DataFrame(), df2=pd.DataFrame([0]))
self.assertRaises(InsufficientDataError, Store,
df1=pd.DataFrame(list(range(9))),
df2=pd.DataFrame(list(range(20))))
def test_apply_custom_column_types(self):
data = {'to_numerical': ['150', '200', '50', '10', '5', '150', '200', '50', '10', '5', '1'] * 10,
'to_text': ['150', '200', '50', '10', '5', '150', '200', '50', '10', '5', '1'] * 10,
'to_categorical': [150, 200, 50, 10, 5, 150, 200, 50, 10, 5, 1] * 10,
'stay_categorical': ['150', '200', '50', '10', '5', '150', '200', '50', '10', '5', '1'] * 10}
df1 = df2 = pd.DataFrame.from_dict(data)
custom_column_types = {
'to_numerical': ColumnType.numerical,
'to_text': ColumnType.text,
'to_categorical': ColumnType.categorical
}
store = Store(df1, df2, custom_column_types=custom_column_types)
with self.subTest("Apply custom_column_types"):
self.assertEqual(['to_categorical', 'stay_categorical'], store.type_to_columns[ColumnType.categorical])
self.assertEqual(['to_text'], store.type_to_columns[ColumnType.text])
self.assertEqual(['to_numerical'], store.type_to_columns[ColumnType.numerical])
with self.subTest("Apply numerical conversion for custom_column_types to dataframes"):
self.assertTrue(is_numeric_dtype(store.df1['to_numerical']))
self.assertTrue(store.df1['to_numerical'].equals(pd.Series([150.0, 200.0, 50.0, 10.0, 5.0,
150.0, 200.0, 50.0, 10.0, 5.0, 1.0] * 10)))
with self.subTest("Apply categorical conversion for custom_column_types to dataframes"):
self.assertTrue(is_string_dtype(store.df1['to_categorical']))
self.assertTrue(store.df1['to_categorical'].equals(pd.Series(['150', '200', '50', '10', '5',
'150', '200', '50', '10', '5', '1'] * 10)))
with self.subTest("Apply textual conversion for custom_column_types to dataframes"):
self.assertTrue(is_string_dtype(store.df1['to_text']))
self.assertTrue(store.df1['to_text'].equals(pd.Series(['150', '200', '50', '10', '5',
'150', '200', '50', '10', '5', '1'] * 10)))
def test_change_column_type(self):
data = {'to_numerical': ['a', '200', '50', '10', '5', '150', '200', '50', '10', '5', '1'] * 10}
df1 = df2 = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
met_df = pd.read_csv('../datasets/cleaned_dataset.csv', index_col=0)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
le = LabelEncoder()
ohe = OneHotEncoder(sparse=False)
val = le.fit_transform(met_df['rest_type']).reshape(-1,1)  # integer-encode the categories
res = ohe.fit_transform(val)                               # then one-hot encode them
res = pd.DataFrame(res)
new = pd.concat([met_df,res], axis=1)
new.dropna(inplace=True)
new.drop('rest_type', axis=1, inplace=True)
X = new.drop('cost_for_two', axis=1)
y = new['cost_for_two']
X_train = X[:round(0.7*len(X))]
X_valid = X[round(0.7*len(X))+1 : -round(0.15*len(X))]
X_test = X[-round(0.15*len(X)):]
#y
y_train = y[:round(0.7*len(y))]
y_valid = y[round(0.7*len(y))+1 : -round(0.15*len(y))]
y_test = y[-round(0.15*len(y)):]
from sklearn.ensemble import RandomForestRegressor
print("modelling.....")
rf_100 = RandomForestRegressor(n_estimators=50, criterion='mae')
rf_100.fit(X_train, y_train)
print("modelling done.....")
pickle.dump(rf_100, open('model.pkl','wb'))
pickle.dump(le, open('label.pkl','wb'))
pickle.dump(ohe, open('hot.pkl','wb'))
model = pickle.load(open('model.pkl','rb'))
le = pickle.load(open('label.pkl','rb'))
ohe = pickle.load(open('hot.pkl','rb'))
node = [[1,1,340,240,10,1, 'Casual Dining', 3.8, 4.0, 4.3]]
node = pd.DataFrame(node)
node.columns = met_df.drop('cost_for_two',axis=1).columns
val = le.transform(node['rest_type']).reshape(-1,1)
res = node['rest_type'].values
res = ohe.transform(val)
res = pd.DataFrame(res)
new = | pd.concat([node,res], axis=1) | pandas.concat |
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
def test_nunique_preserves_column_level_names():
# GH 23222
test = pd.DataFrame([1, 2, 2],
columns=pd.Index(['A'], name="level_0"))
result = test.groupby([0, 0, 0]).nunique()
expected = pd.DataFrame([2], columns=test.columns)
tm.assert_frame_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
def test_count_object():
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
def test_size(df):
grouped = df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
tm.assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame(columns=['A', 'B'])
out = Series(dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(df.groupby('A').size(), out)
def test_size_groupby_all_null():
# GH23050
# Assert no 'Value Error : Length of passed values is 2, index implies 0'
df = DataFrame({'A': [None, None]}) # all-null groups
result = df.groupby('A').size()
expected = Series(dtype='int64', index=Index([], name='A'))
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""Interface for flopy's implementation for MODFLOW."""
__all__ = ["MfSfrNetwork"]
import pickle
from itertools import combinations, zip_longest
from textwrap import dedent
import geopandas
import numpy as np
import pandas as pd
from shapely import wkt
from shapely.geometry import LineString, Point, Polygon, box
from shapely.ops import linemerge
from swn.core import SurfaceWaterNetwork
from swn.spatial import compare_crs, get_sindex
from swn.util import abbr_str
try:
import matplotlib
except ImportError:
matplotlib = False
class MfSfrNetwork:
"""MODFLOW SFR network class.
Attributes
----------
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model
segments : geopandas.GeoDataFrame
Copied from swn.segments, but with additional columns added
segment_data : pandas.DataFrame
Similar to structure in model.sfr.segment_data, but for one stress
period. Transient data (where applicable) will show summary statistics.
The index is 'nseg', ordered and starting from 1. An additional column
'segnum' is used to identify segments, and if defined,
abstraction/diversion identifiers, where iupseg != 0.
reaches : geopandas.GeoDataFrame
Similar to structure in model.sfr.reach_data with index 'reachID',
ordered and starting from 1. Contains geometry and other columns
not used by flopy. Use get_reach_data() for use with flopy.
diversions : geopandas.GeoDataFrame, pd.DataFrame or None
Copied from swn.diversions, if set/defined.
logger : logging.Logger
Logger to show messages.
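
    Examples
    --------
    Illustrative sketch only: ``swn`` is assumed to be an existing
    SurfaceWaterNetwork and ``m`` a flopy Modflow model with DIS and
    BAS6 packages.

    >>> nm = MfSfrNetwork.from_swn_flopy(swn, m)  # doctest: +SKIP
    >>> nm.reaches[["segnum", "row", "col", "iseg", "ireach"]].head()  # doctest: +SKIP
    >>> nm.segment_data[["icalc", "outseg", "elevup", "elevdn"]].head()  # doctest: +SKIP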
"""
def __init__(self, logger=None):
"""Initialise MfSfrNetwork.
Parameters
----------
logger : logging.Logger, optional
Logger to show messages.
"""
from swn.logger import get_logger, logging
from importlib.util import find_spec
if logger is None:
self.logger = get_logger(self.__class__.__name__)
elif isinstance(logger, logging.Logger):
self.logger = logger
else:
raise ValueError(
"expected 'logger' to be Logger; found " + str(type(logger)))
self.logger.warning(
"using legacy MfSfrNetwork; consider using SwnModflow")
self.logger.info('creating new %s object', self.__class__.__name__)
if not find_spec('flopy'):
raise ImportError(self.__class__.__name__ + ' requires flopy')
self.segments = None
self.segment_data = None
self.reaches = None
self.diversions = None
# all other properties added afterwards
@classmethod
def from_swn_flopy(
cls, swn, model, ibound_action='freeze',
reach_include_fraction=0.2, min_slope=1./1000,
hyd_cond1=1., hyd_cond_out=None, thickness1=1., thickness_out=None,
width1=10., width_out=None, roughch=0.024,
abstraction={}, inflow={}, flow={}, runoff={}, etsw={}, pptsw={}):
"""Create a MODFLOW SFR structure from a surface water network.
Parameters
----------
swn : swn.SurfaceWaterNetwork
Instance of a SurfaceWaterNetwork.
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model with DIS and BAS6 packages.
ibound_action : str, optional
Action to handle IBOUND:
- ``freeze`` : Freeze IBOUND, but clip streams to fit bounds.
- ``modify`` : Modify IBOUND to fit streams, where possible.
reach_include_fraction : float or pandas.Series, optional
Fraction of cell size used as a threshold distance to determine if
            reaches outside the active grid should be included in a cell.
            Based on the furthest distance between the line and cell geometries.
Default 0.2 (e.g. for a 100 m grid cell, this is 20 m).
min_slope : float or pandas.Series, optional
Minimum downwards slope imposed on segments. If float, then this is
a global value, otherwise it is per-segment with a Series.
Default 1./1000 (or 0.001).
hyd_cond1 : float or pandas.Series, optional
Hydraulic conductivity of the streambed, as a global or per top of
each segment. Used for either STRHC1 or HCOND1/HCOND2 outputs.
Default 1.
hyd_cond_out : None, float or pandas.Series, optional
            Similar to hyd_cond1, but for the hydraulic conductivity of each
segment outlet. If None (default), the same hyd_cond1 value for the
top of the outlet segment is used for the bottom.
thickness1 : float or pandas.Series, optional
Thickness of the streambed, as a global or per top of each segment.
Used for either STRTHICK or THICKM1/THICKM2 outputs. Default 1.
thickness_out : None, float or pandas.Series, optional
Similar to thickness1, but for the bottom of each segment outlet.
If None (default), the same thickness1 value for the top of the
outlet segment is used for the bottom.
width1 : float or pandas.Series, optional
Channel width, as a global or per top of each segment. Used for
WIDTH1/WIDTH2 outputs. Default 10.
width_out : None, float or pandas.Series, optional
Similar to width1, but for the bottom of each segment outlet.
If None (default), the same width1 value for the top of the
outlet segment is used for the bottom.
roughch : float or pandas.Series, optional
Manning's roughness coefficient for the channel. If float, then
this is a global value, otherwise it is per-segment with a Series.
Default 0.024.
abstraction : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no abstraction from diversions).
inflow : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no outside inflow added to flow term).
flow : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
runoff : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
etsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
pptsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
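
        Examples
        --------
        Illustrative sketch only; ``swn`` and ``m`` are assumed to already
        exist and the keyword values below are arbitrary, not recommendations.

        >>> nm = MfSfrNetwork.from_swn_flopy(
        ...     swn, m, min_slope=0.002, hyd_cond1=2.0, width1=5.0)  # doctest: +SKIP
        >>> nm.reaches.head()  # doctest: +SKIP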
"""
obj = cls()
import flopy
if not isinstance(swn, SurfaceWaterNetwork):
raise ValueError('swn must be a SurfaceWaterNetwork object')
elif ibound_action not in ('freeze', 'modify'):
raise ValueError('ibound_action must be one of freeze or modify')
obj.model = model
obj.segments = swn.segments.copy()
# Make sure model CRS and segments CRS are the same (if defined)
crs = None
segments_crs = getattr(obj.segments.geometry, 'crs', None)
modelgrid_crs = None
modelgrid = obj.model.modelgrid
epsg = modelgrid.epsg
proj4_str = modelgrid.proj4
if epsg is not None:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs, epsg)
else:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs,
proj4_str)
if (segments_crs is not None and modelgrid_crs is not None and
not same):
obj.logger.warning(
'CRS for segments and modelgrid are different: {0} vs. {1}'
.format(segments_crs, modelgrid_crs))
crs = segments_crs or modelgrid_crs
# Make sure their extents overlap
minx, maxx, miny, maxy = modelgrid.extent
model_bbox = box(minx, miny, maxx, maxy)
rstats = obj.segments.bounds.describe()
segments_bbox = box(
rstats.loc['min', 'minx'], rstats.loc['min', 'miny'],
rstats.loc['max', 'maxx'], rstats.loc['max', 'maxy'])
if model_bbox.disjoint(segments_bbox):
raise ValueError('modelgrid extent does not cover segments extent')
# More careful check of overlap of lines with grid polygons
obj.logger.debug('building model grid cell geometries')
dis = obj.model.dis
cols, rows = np.meshgrid(np.arange(dis.ncol), np.arange(dis.nrow))
ibound = obj.model.bas6.ibound[0].array.copy()
ibound_modified = 0
grid_df = pd.DataFrame({'row': rows.flatten(), 'col': cols.flatten()})
grid_df.set_index(['row', 'col'], inplace=True)
grid_df['ibound'] = ibound.flatten()
if ibound_action == 'freeze' and (ibound == 0).any():
# Remove any inactive grid cells from analysis
grid_df = grid_df.loc[grid_df['ibound'] != 0]
# Determine grid cell size
col_size = np.median(dis.delr.array)
if dis.delr.array.min() != dis.delr.array.max():
obj.logger.warning(
'assuming constant column spacing %s', col_size)
row_size = np.median(dis.delc.array)
if dis.delc.array.min() != dis.delc.array.max():
obj.logger.warning(
'assuming constant row spacing %s', row_size)
cell_size = (row_size + col_size) / 2.0
# Note: modelgrid.get_cell_vertices(row, col) is slow!
xv = modelgrid.xvertices
yv = modelgrid.yvertices
r, c = [np.array(s[1])
for s in grid_df.reset_index()[['row', 'col']].iteritems()]
cell_verts = zip(
zip(xv[r, c], yv[r, c]),
zip(xv[r, c + 1], yv[r, c + 1]),
zip(xv[r + 1, c + 1], yv[r + 1, c + 1]),
zip(xv[r + 1, c], yv[r + 1, c])
)
obj.grid_cells = grid_cells = geopandas.GeoDataFrame(
grid_df, geometry=[Polygon(r) for r in cell_verts], crs=crs)
obj.logger.debug('evaluating reach data on model grid')
grid_sindex = get_sindex(grid_cells)
reach_include = swn.segments_series(reach_include_fraction) * cell_size
# Make an empty DataFrame for reaches
obj.reaches = pd.DataFrame(columns=['geometry'])
obj.reaches.insert(1, column='row', value=pd.Series(dtype=int))
obj.reaches.insert(2, column='col', value=pd.Series(dtype=int))
empty_reach_df = obj.reaches.copy() # take this before more added
obj.reaches.insert(
1, column='segnum',
value=pd.Series(dtype=obj.segments.index.dtype))
obj.reaches.insert(2, column='dist', value=pd.Series(dtype=float))
empty_reach_df.insert(3, column='length', value=pd.Series(dtype=float))
empty_reach_df.insert(4, column='moved', value=pd.Series(dtype=bool))
# recursive helper function
def append_reach_df(df, row, col, reach_geom, moved=False):
if reach_geom.geom_type == 'LineString':
df.loc[len(df.index)] = {
'geometry': reach_geom,
'row': row,
'col': col,
'length': reach_geom.length,
'moved': moved,
}
elif reach_geom.geom_type.startswith('Multi'):
for sub_reach_geom in reach_geom.geoms: # recurse
append_reach_df(df, row, col, sub_reach_geom, moved)
else:
raise NotImplementedError(reach_geom.geom_type)
# helper function that returns early, if necessary
def assign_short_reach(reach_df, idx, segnum):
reach = reach_df.loc[idx]
reach_geom = reach['geometry']
threshold = reach_include[segnum]
if reach_geom.length > threshold:
return
cell_lengths = reach_df.groupby(['row', 'col'])['length'].sum()
this_row_col = reach['row'], reach['col']
this_cell_length = cell_lengths[this_row_col]
if this_cell_length > threshold:
return
grid_geom = grid_cells.at[(reach['row'], reach['col']), 'geometry']
# determine if it is crossing the grid once or twice
grid_points = reach_geom.intersection(grid_geom.exterior)
split_short = (
grid_points.geom_type == 'Point' or
(grid_points.geom_type == 'MultiPoint' and
len(grid_points) == 2))
if not split_short:
return
matches = []
# sequence scan on reach_df
for oidx, orch in reach_df.iterrows():
if oidx == idx or orch['moved']:
continue
other_row_col = orch['row'], orch['col']
other_cell_length = cell_lengths[other_row_col]
if (orch['geometry'].distance(reach_geom) < 1e-6 and
this_cell_length < other_cell_length):
matches.append((oidx, orch['geometry']))
if len(matches) == 0:
# don't merge, e.g. reach does not connect to adjacent cell
pass
elif len(matches) == 1:
# short segment is in one other cell only
# update new row and col values, keep geometry as it is
row_col1 = tuple(reach_df.loc[matches[0][0], ['row', 'col']])
reach_df.loc[idx, ['row', 'col', 'moved']] = row_col1 + (True,)
# self.logger.debug(
# 'moved short segment of %s from %s to %s',
# segnum, this_row_col, row_col1)
elif len(matches) == 2:
assert grid_points.geom_type == 'MultiPoint', grid_points.wkt
if len(grid_points) != 2:
obj.logger.critical(
'expected 2 points, found %s', len(grid_points))
# Build a tiny DataFrame of coordinates for this reach
reach_c = pd.DataFrame({
'pt': [Point(c) for c in reach_geom.coords[:]]
})
if len(reach_c) == 2:
# If this is a simple line with two coords, split it
reach_c.index = [0, 2]
reach_c.loc[1] = {
'pt': reach_geom.interpolate(0.5, normalized=True)}
reach_c.sort_index(inplace=True)
reach_geom = LineString(list(reach_c['pt'])) # rebuild
# first match assumed to be touching the start of the line
if reach_c.at[0, 'pt'].distance(matches[1][1]) < 1e-6:
matches.reverse()
reach_c['d1'] = reach_c['pt'].apply(
lambda p: p.distance(matches[0][1]))
reach_c['d2'] = reach_c['pt'].apply(
lambda p: p.distance(matches[1][1]))
reach_c['dm'] = reach_c[['d1', 'd2']].min(1)
# try a simple split where distances switch
ds = reach_c['d1'] < reach_c['d2']
cidx = ds[ds].index[-1]
# ensure it's not the index of either end
if cidx == 0:
cidx = 1
elif cidx == len(reach_c) - 1:
cidx = len(reach_c) - 2
row1, col1 = list(reach_df.loc[matches[0][0], ['row', 'col']])
reach_geom1 = LineString(reach_geom.coords[:(cidx + 1)])
row2, col2 = list(reach_df.loc[matches[1][0], ['row', 'col']])
reach_geom2 = LineString(reach_geom.coords[cidx:])
# update the first, append the second
reach_df.loc[idx, ['row', 'col', 'length', 'moved']] = \
(row1, col1, reach_geom1.length, True)
reach_df.at[idx, 'geometry'] = reach_geom1
append_reach_df(reach_df, row2, col2, reach_geom2, moved=True)
# self.logger.debug(
# 'split and moved short segment of %s from %s to %s and %s',
# segnum, this_row_col, (row1, col1), (row2, col2))
else:
obj.logger.critical(
'unhandled assign_short_reach case with %d matches: %s\n'
'%s\n%s', len(matches), matches, reach, grid_points.wkt)
def assign_remaining_reach(reach_df, segnum, rem):
if rem.geom_type == 'LineString':
threshold = cell_size * 2.0
if rem.length > threshold:
obj.logger.debug(
'remaining line segment from %s too long to merge '
'(%.1f > %.1f)', segnum, rem.length, threshold)
return
# search full grid for other cells that could match
if grid_sindex:
bbox_match = sorted(grid_sindex.intersection(rem.bounds))
sub = grid_cells.geometry.iloc[bbox_match]
else: # slow scan of all cells
sub = grid_cells.geometry
assert len(sub) > 0, len(sub)
matches = []
for (row, col), grid_geom in sub.iteritems():
if grid_geom.touches(rem):
matches.append((row, col, grid_geom))
if len(matches) == 0:
return
threshold = reach_include[segnum]
# Build a tiny DataFrame for just the remaining coordinates
rem_c = pd.DataFrame({
'pt': [Point(c) for c in rem.coords[:]]
})
if len(matches) == 1: # merge it with adjacent cell
row, col, grid_geom = matches[0]
mdist = rem_c['pt'].apply(
lambda p: grid_geom.distance(p)).max()
if mdist > threshold:
obj.logger.debug(
'remaining line segment from %s too far away to '
'merge (%.1f > %.1f)', segnum, mdist, threshold)
return
append_reach_df(reach_df, row, col, rem, moved=True)
elif len(matches) == 2: # complex: need to split it
if len(rem_c) == 2:
# If this is a simple line with two coords, split it
rem_c.index = [0, 2]
rem_c.loc[1] = {
'pt': rem.interpolate(0.5, normalized=True)}
rem_c.sort_index(inplace=True)
rem = LineString(list(rem_c['pt'])) # rebuild
# first match assumed to be touching the start of the line
if rem_c.at[0, 'pt'].touches(matches[1][2]):
matches.reverse()
rem_c['d1'] = rem_c['pt'].apply(
lambda p: p.distance(matches[0][2]))
rem_c['d2'] = rem_c['pt'].apply(
lambda p: p.distance(matches[1][2]))
rem_c['dm'] = rem_c[['d1', 'd2']].min(1)
mdist = rem_c['dm'].max()
if mdist > threshold:
obj.logger.debug(
'remaining line segment from %s too far away to '
'merge (%.1f > %.1f)', segnum, mdist, threshold)
return
# try a simple split where distances switch
ds = rem_c['d1'] < rem_c['d2']
cidx = ds[ds].index[-1]
# ensure it's not the index of either end
if cidx == 0:
cidx = 1
elif cidx == len(rem_c) - 1:
cidx = len(rem_c) - 2
row, col = matches[0][0:2]
rem1 = LineString(rem.coords[:(cidx + 1)])
append_reach_df(reach_df, row, col, rem1, moved=True)
row, col = matches[1][0:2]
rem2 = LineString(rem.coords[cidx:])
append_reach_df(reach_df, row, col, rem2, moved=True)
else:
obj.logger.critical(
'how does this happen? Segments from %d touching %d '
'grid cells', segnum, len(matches))
elif rem.geom_type.startswith('Multi'):
for sub_rem_geom in rem.geoms: # recurse
assign_remaining_reach(reach_df, segnum, sub_rem_geom)
else:
raise NotImplementedError(rem.geom_type)
for segnum, line in obj.segments.geometry.iteritems():
remaining_line = line
if grid_sindex:
bbox_match = sorted(grid_sindex.intersection(line.bounds))
if not bbox_match:
continue
sub = grid_cells.geometry.iloc[bbox_match]
else: # slow scan of all cells
sub = grid_cells.geometry
# Find all intersections between segment and grid cells
reach_df = empty_reach_df.copy()
for (row, col), grid_geom in sub.iteritems():
reach_geom = grid_geom.intersection(line)
if reach_geom.is_empty or reach_geom.geom_type == 'Point':
continue
remaining_line = remaining_line.difference(grid_geom)
append_reach_df(reach_df, row, col, reach_geom)
# Determine if any remaining portions of the line can be used
if line is not remaining_line and remaining_line.length > 0:
assign_remaining_reach(reach_df, segnum, remaining_line)
# Reassign short reaches to two or more adjacent grid cells
# starting with the shortest reach
reach_lengths = reach_df['length'].loc[
reach_df['length'] < reach_include[segnum]]
for idx in list(reach_lengths.sort_values().index):
assign_short_reach(reach_df, idx, segnum)
# Potentially merge a few reaches for each row/col of this segnum
drop_reach_ids = []
gb = reach_df.groupby(['row', 'col'])['geometry'].apply(list)
for (row, col), geoms in gb.copy().iteritems():
row_col = row, col
if len(geoms) > 1:
geom = linemerge(geoms)
if geom.geom_type == 'MultiLineString':
# workaround for odd floating point issue
geom = linemerge([wkt.loads(g.wkt) for g in geoms])
if geom.geom_type == 'LineString':
sel = ((reach_df['row'] == row) &
(reach_df['col'] == col))
drop_reach_ids += list(sel.index[sel])
obj.logger.debug(
'merging %d reaches for segnum %s at %s',
sel.sum(), segnum, row_col)
append_reach_df(reach_df, row, col, geom)
elif any(a.distance(b) < 1e-6
for a, b in combinations(geoms, 2)):
obj.logger.warning(
'failed to merge segnum %s at %s: %s',
segnum, row_col, geom.wkt)
# else: this is probably a meandering MultiLineString
if drop_reach_ids:
reach_df.drop(drop_reach_ids, axis=0, inplace=True)
# TODO: Some reaches match multiple cells if they share a border
# Add all reaches for this segment
for _, reach in reach_df.iterrows():
row, col, reach_geom = reach.loc[['row', 'col', 'geometry']]
if line.has_z:
# intersection(line) does not preserve Z coords,
# but line.interpolate(d) works as expected
reach_geom = LineString(line.interpolate(
line.project(Point(c))) for c in reach_geom.coords)
# Get a point from the middle of the reach_geom
reach_mid_pt = reach_geom.interpolate(0.5, normalized=True)
reach_record = {
'geometry': reach_geom,
'segnum': segnum,
'dist': line.project(reach_mid_pt, normalized=True),
'row': row,
'col': col,
}
obj.reaches.loc[len(obj.reaches.index)] = reach_record
if ibound_action == 'modify' and ibound[row, col] == 0:
ibound_modified += 1
ibound[row, col] = 1
if ibound_action == 'modify':
if ibound_modified:
obj.logger.debug(
'updating %d cells from IBOUND array for top layer',
ibound_modified)
obj.model.bas6.ibound[0] = ibound
obj.reaches = obj.reaches.merge(
grid_df[['ibound']],
left_on=['row', 'col'], right_index=True)
obj.reaches.rename(
columns={'ibound': 'prev_ibound'}, inplace=True)
else:
obj.reaches['prev_ibound'] = 1
# Now convert from DataFrame to GeoDataFrame
obj.reaches = geopandas.GeoDataFrame(
obj.reaches, geometry='geometry', crs=crs)
# Assign segment data
obj.segments['min_slope'] = swn.segments_series(min_slope)
if (obj.segments['min_slope'] < 0.0).any():
raise ValueError('min_slope must be greater than zero')
# Column names common to segments and segment_data
segment_cols = [
'roughch',
'hcond1', 'thickm1', 'elevup', 'width1',
'hcond2', 'thickm2', 'elevdn', 'width2']
# Tidy any previous attempts
for col in segment_cols:
if col in obj.segments.columns:
del obj.segments[col]
# Combine pairs of series for each segment
more_segment_columns = pd.concat([
swn.pair_segments_frame(hyd_cond1, hyd_cond_out, 'hcond'),
swn.pair_segments_frame(thickness1, thickness_out, 'thickm'),
swn.pair_segments_frame(width1, width_out, name='width',
method="constant")
], axis=1, copy=False)
for name, series in more_segment_columns.iteritems():
obj.segments[name] = series
obj.segments['roughch'] = swn.segments_series(roughch)
# Mark segments that are not used
obj.segments['in_model'] = True
outside_model = \
set(swn.segments.index).difference(obj.reaches['segnum'])
obj.segments.loc[list(outside_model), 'in_model'] = False
# Add information from segments
obj.reaches = obj.reaches.merge(
obj.segments[['sequence', 'min_slope']], 'left',
left_on='segnum', right_index=True)
obj.reaches.sort_values(['sequence', 'dist'], inplace=True)
# Interpolate segment properties to each reach
obj.reaches['strthick'] = 0.0
obj.reaches['strhc1'] = 0.0
for segnum, seg in obj.segments.iterrows():
sel = obj.reaches['segnum'] == segnum
if seg['thickm1'] == seg['thickm2']:
val = seg['thickm1']
else: # linear interpolate to mid points
tk1 = seg['thickm1']
tk2 = seg['thickm2']
dtk = tk2 - tk1
val = dtk * obj.reaches.loc[sel, 'dist'] + tk1
obj.reaches.loc[sel, 'strthick'] = val
if seg['hcond1'] == seg['hcond2']:
val = seg['hcond1']
else: # linear interpolate to mid points in log-10 space
lhc1 = np.log10(seg['hcond1'])
lhc2 = np.log10(seg['hcond2'])
dlhc = lhc2 - lhc1
val = 10 ** (dlhc * obj.reaches.loc[sel, 'dist'] + lhc1)
obj.reaches.loc[sel, 'strhc1'] = val
del obj.reaches['sequence']
del obj.reaches['dist']
# Use MODFLOW SFR dataset 2 terms ISEG and IREACH, counting from 1
obj.reaches['iseg'] = 0
obj.reaches['ireach'] = 0
iseg = ireach = 0
prev_segnum = None
for idx, segnum in obj.reaches['segnum'].iteritems():
if segnum != prev_segnum:
iseg += 1
ireach = 0
ireach += 1
obj.reaches.at[idx, 'iseg'] = iseg
obj.reaches.at[idx, 'ireach'] = ireach
prev_segnum = segnum
obj.reaches.reset_index(inplace=True, drop=True)
obj.reaches.index += 1 # flopy series starts at one
obj.reaches.index.name = 'reachID'
obj.reaches['rchlen'] = obj.reaches.geometry.length
obj.reaches['strtop'] = 0.0
obj.reaches['slope'] = 0.0
if swn.has_z:
for reachID, item in obj.reaches.iterrows():
geom = item.geometry
# Get Z from each end
z0 = geom.coords[0][2]
z1 = geom.coords[-1][2]
dz = z0 - z1
dx = geom.length
slope = dz / dx
obj.reaches.at[reachID, 'slope'] = slope
# Get strtop from LineString mid-point Z
zm = geom.interpolate(0.5, normalized=True).z
obj.reaches.at[reachID, 'strtop'] = zm
else:
r = obj.reaches['row'].values
c = obj.reaches['col'].values
# Estimate slope from top and grid spacing
px, py = np.gradient(dis.top.array, col_size, row_size)
grid_slope = np.sqrt(px ** 2 + py ** 2)
obj.reaches['slope'] = grid_slope[r, c]
# Get stream values from top of model
obj.reaches['strtop'] = dis.top.array[r, c]
# Enforce min_slope
sel = obj.reaches['slope'] < obj.reaches['min_slope']
if sel.any():
obj.logger.warning(
'enforcing min_slope for %d reaches (%.2f%%)',
sel.sum(), 100.0 * sel.sum() / len(sel))
obj.reaches.loc[sel, 'slope'] = obj.reaches.loc[sel, 'min_slope']
if not hasattr(obj.reaches.geometry, 'geom_type'):
# workaround needed for reaches.to_file()
obj.reaches.geometry.geom_type = obj.reaches.geom_type
# Build segment_data for Data Set 6
obj.segment_data = obj.reaches[['iseg', 'segnum']]\
.drop_duplicates().rename(columns={'iseg': 'nseg'})
# index changes from 'reachID', to 'segnum', to finally 'nseg'
segnum2nseg_d = obj.segment_data.set_index('segnum')['nseg'].to_dict()
obj.segment_data['icalc'] = 1 # assumption for all streams
obj.segment_data['outseg'] = obj.segment_data['segnum'].map(
lambda x: segnum2nseg_d.get(obj.segments.loc[x, 'to_segnum'], 0))
obj.segment_data['iupseg'] = 0 # handle diversions next
obj.segment_data['iprior'] = 0
obj.segment_data['flow'] = 0.0
obj.segment_data['runoff'] = 0.0
obj.segment_data['etsw'] = 0.0
obj.segment_data['pptsw'] = 0.0
# upper elevation from the first and last reachID items from reaches
obj.segment_data['elevup'] = \
obj.reaches.loc[obj.segment_data.index, 'strtop']
obj.segment_data['elevdn'] = obj.reaches.loc[
obj.reaches.groupby(['iseg']).ireach.idxmax().values,
'strtop'].values
obj.segment_data.set_index('segnum', drop=False, inplace=True)
# copy several columns over (except 'elevup' and 'elevdn', for now)
segment_cols.remove('elevup')
segment_cols.remove('elevdn')
obj.segment_data[segment_cols] = obj.segments[segment_cols]
# now use nseg as primary index, not reachID or segnum
obj.segment_data.set_index('nseg', inplace=True)
obj.segment_data.sort_index(inplace=True)
# Add diversions (i.e. SW takes)
if swn.diversions is not None:
obj.diversions = swn.diversions.copy()
# Mark diversions that are not used / outside model
obj.diversions['in_model'] = True
outside_model = []
# Add columns for ICALC=0
obj.segment_data['depth1'] = 0.0
obj.segment_data['depth2'] = 0.0
# workaround for coercion issue
obj.segment_data['foo'] = ''
is_spatial = (
isinstance(obj.diversions, geopandas.GeoDataFrame) and
'geometry' in obj.diversions.columns and
(~obj.diversions.is_empty).all())
if swn.has_z:
empty_geom = wkt.loads('linestring z empty')
else:
empty_geom = wkt.loads('linestring empty')
for divid, divn in obj.diversions.iterrows():
if divn.from_segnum not in segnum2nseg_d:
# segnum does not exist -- segment is outside model
outside_model.append(divid)
continue
iupseg = segnum2nseg_d[divn.from_segnum]
assert iupseg != 0, iupseg
nseg = len(obj.segment_data) + 1
rchlen = 1.0 # length required
thickm = 1.0 # thickness required
hcond = 0.0 # don't allow GW exchange
seg_d = dict(obj.segment_data.loc[iupseg])
seg_d.update({ # index is nseg
'segnum': divid,
'icalc': 0, # stream depth is specified
'outseg': 0,
'iupseg': iupseg,
'iprior': 0, # normal behaviour for SW takes
'flow': 0.0, # abstraction assigned later
'runoff': 0.0,
'etsw': 0.0,
'pptsw': 0.0,
'roughch': 0.0, # not used
'hcond1': hcond, 'hcond2': hcond,
'thickm1': thickm, 'thickm2': thickm,
'width1': 0.0, 'width2': 0.0, # not used
})
# Use the last reach as a template to modify for new reach
reach_d = dict(obj.reaches.loc[
obj.reaches.iseg == iupseg].iloc[-1])
reach_d.update({
'segnum': divid,
'iseg': nseg,
'ireach': 1,
'rchlen': rchlen,
'min_slope': 0.0,
'slope': 0.0,
'strthick': thickm,
'strhc1': hcond,
})
# Assign one reach at grid cell
if is_spatial:
# Find grid cell nearest to diversion
if grid_sindex:
bbox_match = sorted(
grid_sindex.nearest(divn.geometry.bounds))
# more than one nearest can exist! just take one...
num_found = len(bbox_match)
grid_cell = grid_cells.iloc[bbox_match[0]]
else: # slow scan of all cells
sel = grid_cells.intersects(divn.geometry)
num_found = sel.sum()
grid_cell = grid_cells.loc[sel].iloc[0]
if num_found > 1:
obj.logger.warning(
'%d grid cells are nearest to diversion %r, '
'but only taking the first %s',
num_found, divid, grid_cell)
row, col = grid_cell.name
strtop = dis.top[row, col]
reach_d.update({
'geometry': empty_geom, # divn.geometry,
'row': row,
'col': col,
'strtop': strtop,
})
else:
strtop = dis.top[reach_d['row'], reach_d['col']]
reach_d['strtop'] = strtop
seg_d.update({
'geometry': empty_geom,
'elevup': strtop,
'elevdn': strtop,
})
depth = strtop + thickm
seg_d.update({'depth1': depth, 'depth2': depth})
obj.reaches.loc[len(obj.reaches) + 1] = reach_d
obj.segment_data.loc[nseg] = seg_d
if outside_model:
obj.diversions.loc[list(outside_model), 'in_model'] = False
obj.logger.debug(
'added %d diversions, ignoring %d that did not connect to '
'existing segments',
obj.diversions['in_model'].sum(), len(outside_model))
else:
obj.logger.debug(
'added all %d diversions', len(obj.diversions))
# end of coercion workaround
obj.segment_data.drop('foo', axis=1, inplace=True)
else:
obj.diversions = None
# Finally, add/rename a few columns to align with reach_data
obj.reaches.insert(2, column='k', value=0)
    obj.reaches.insert(3, column='outreach', value=pd.Series(dtype=int))
import numpy as np
import random
import pandas as pd
import korbinian
import sys
##########parameters#############
seq_len = 10000
number_seq = 50
number_mutations = 2000
subset_num = 12
ident = 100 * (seq_len - number_mutations) / seq_len
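# Illustrative check of the identity calculation above, using the default values set here:
# ident = 100 * (10000 - 2000) / 10000 = 80.0, i.e. the mutated sequences keep ~80 % identity.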
List_rand_TM = r"D:\Databases\summaries\01\List01_rand\List01_rand_TM.csv"
#List_rand_TM = r"D:\Databases\summaries\03\List03_rand\List03_rand_TM.csv"
aa_prop_ser = pd.Series.from_csv(List_rand_TM, sep="\t")
"""Legacy feature computation from depart."""
import itertools
import re
import numpy as np
import pandas as pd
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.SeqUtils.ProtParamData import kd
from pyteomics import parser
from sklearn.preprocessing import PolynomialFeatures
from xirt import sequences
def create_simple_features(df, seq_column="Sequence"):
"""
Create a simple feature matrix using the complete sequence (not position specific).
Parameters:
df: dataframe,
containing a "Sequence" column
Returns:
df, feature dataframe
"""
df[seq_column] = df[seq_column].apply(simply_alphabet).values
    ff_df = pd.DataFrame()
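    # Hedged sketch of the kind of whole-sequence feature this helper typically collects
    # (illustrative only; the rest of this function body is not shown in this snippet):
    #   ff_df["length"] = df[seq_column].str.len()
    #   ff_df["gravy"] = df[seq_column].apply(lambda s: ProteinAnalysis(s).gravy())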
import unittest
from yauber_algo.errors import *
class IIFTestCase(unittest.TestCase):
def test_iif(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import iif
#
# Function settings
#
algo = 'iif'
func = iif
setattr(sys.modules[func.__module__], 'IS_WARN_FUTREF', False)
setattr(sys.modules[func.__module__], 'IS_RAISE_FUTREF', False)
cond = array([0, 1, 0, 1, 0, 1], dtype=np.float)
cond_nan = array([0, 1, 0, 1, nan, 1], dtype=np.float)
cond_non_bin = array([0, 1, 0, 2, 0, 1], dtype=np.float)
cond_bool = array([False, True, False, True, False, True], dtype=np.bool)
cond_object = array([False, True, False, True, False, nan], dtype=np.object)
cond_nan_inf = array([0, inf, 0, 1, nan, 1], dtype=np.float)
cond_int32 = array([0, 1, 0, 1, 0, 1], dtype=np.int32)
arr_true = array([1, 2, 3, 4, 5, 6], dtype=np.float)
arr_false = array([-1, -2, -3, -4, -5, -6], dtype=np.float)
arr_true_int32 = array([1, 2, 3, 4, 5, 6], dtype=np.int32)
arr_false_int32 = array([-1, -2, -3, -4, -5, -6], dtype=np.int32)
arr_true_bool = array([True, True, True, True, True, True], dtype=np.bool)
arr_false_bool = array([False, False, False, False, False, False], dtype=np.bool)
arr_true_nan_inf = array([1, 2, 3, 4, 5, inf], dtype=np.float)
arr_false_nan_inf = array([inf, -2, -3, -4, -5, -6], dtype=np.float)
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([-1, 2, -3, 4, -5, 6], dtype=np.float),
func, (cond, arr_true, arr_false),
suffix='cond'
)
s.check_regular(
array([-1, 2, -3, 4, nan, 6], dtype=np.float),
func, (cond_nan, arr_true, arr_false),
suffix='cond_nan'
)
s.check_regular(
array([-1, 2, -3, 4, nan, 6], dtype=np.float),
func, (cond_non_bin, arr_true, arr_false),
suffix='cond_non_bin_exception',
exception=YaUberAlgoInternalError,
)
s.check_regular(
array([-1, 2, -3, 4, -5, 6], dtype=np.float),
func, (cond_bool, arr_true, arr_false),
suffix='cond_bool',
)
s.check_regular(
array([-1, 2, -3, 4, -5, 6], dtype=np.float),
func, (cond_object, arr_true, arr_false),
suffix='cond_object',
exception=YaUberAlgoDtypeNotSupportedError,
)
s.check_regular(
array([-1, 2, -3, 4, -5, 6], dtype=np.float),
func, (cond_bool[:2], arr_true, arr_false),
suffix='cond_diff_length1',
exception=YaUberAlgoArgumentError,
)
s.check_regular(
array([-1, 2, -3, 4, -5, 6], dtype=np.float),
func, (cond_bool, arr_true, arr_false[:2]),
suffix='cond_diff_length2',
exception=YaUberAlgoArgumentError,
)
s.check_naninf(
array([nan, nan, -3, 4, nan, nan], dtype=np.float),
func, (
array([0, inf, 0, 1, nan, 1], dtype=np.float),
arr_true_nan_inf,
arr_false_nan_inf
),
)
s.check_regular(
array([-1, 1, -3, 1, -5, 1], dtype=np.float),
func, (cond, 1, arr_false),
suffix='cond_true_is_number'
)
s.check_regular(
array([-2, 1, -2, 1, -2, 1], dtype=np.float),
func, (cond, 1, -2),
suffix='cond_false_is_number'
)
s.check_regular(
array([-2, 1, -2, 1, -2, 1], dtype=np.float),
func, (cond, 1, pd.Series(arr_false)),
suffix='different_types',
exception=YaUberAlgoArgumentError
)
s.check_regular(
array([-2, 1, -2, 1, -2, 1], dtype=np.float),
func, ([1, 2, 3], 1, pd.Series(arr_false)),
suffix='cond_different_types',
exception=YaUberAlgoArgumentError
)
s.check_series(
pd.Series(array([-1, 2, -3, 4, -5, 6], dtype=np.float)),
            func, (pd.Series(cond), pd.Series(arr_true), pd.Series(arr_false)),
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
#
# Read in data frame
#
df = pd.read_csv("mcs_interestrate_change.csv", skiprows=1)
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
parser = argparse.ArgumentParser(description="TF_diversity_rw")
parser.add_argument(
"file_names",
type=str,
help="Name of folder and filenames for the promoters extracted",
)
parser.add_argument(
"Czechowski_gene_categories",
type=str,
help="Input location of Czechowski gene categories text file",
)
parser.add_argument(
"window_bed", type=str, help="Input location of window bed file"
)
parser.add_argument(
"TF_diversity_bed",
type=str,
help="Input location of TF diversity bed file",
)
parser.add_argument(
"EPD_TSS_bed",
type=str,
help="Input location of eukaryotic promoter database transcription start site bed file",
)
parser.add_argument(
"promoter_bed", type=str, help="Input location of promoter bed file"
)
parser.add_argument(
"promoter_no_5UTR",
type=str,
help="Input location of promoter no 5UTR bed file",
)
parser.add_argument(
"foldername_prefix", type=str, help="Output folder name prefix to use"
)
parser.add_argument(
"root_chrom_bp_covered",
type=str,
help="Input location of root chromatin bed file",
)
parser.add_argument(
"shoot_chrom_bp_covered",
type=str,
help="Input location of shoot chromatin bed file",
)
parser.add_argument(
"rootshootintersect_chrom_bp_covered",
type=str,
help="Input location of rootshootintersect chromatin bed file",
)
parser.add_argument(
"palette",
type=str,
help="Optional replacement colour palette for plots",
default=None,
nargs="?",
)
parser.add_argument(
"author_name",
type=str,
help="Optional author name to add to output file names",
default="Czechowski",
nargs="?",
)
parser.add_argument(
"variable1_name",
type=str,
help="Optional replacement name for 2nd variable eg. non-specific",
default="constitutive",
nargs="?",
)
parser.add_argument(
"variable2_name",
type=str,
help="Optional variable 2 name eg. tissue_specific",
default="variable",
nargs="?",
)
args = parser.parse_args()
# make directory for the plots to be exported to
dirName = f"../../data/output/{args.file_names}/rolling_window/{args.foldername_prefix}/"
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " created")
except FileExistsError:
print("Directory ", dirName, " already exists")
# make directory for the plots to be exported to
dirName = f"../../data/output/{args.file_names}/rolling_window/{args.foldername_prefix}/plots"
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " created")
except FileExistsError:
print("Directory ", dirName, " already exists")
def process_input_files(TF_diversity_bed, window_bed):
"""process and merge the input files into a df"""
# Read in TF_diversity_bed
TF_diversity = pd.read_table(TF_diversity_bed, sep="\t", header=0)
# Read in windows_bed
windows_df = pd.read_table(window_bed, sep="\t", header=None)
cols = ["chr", "start", "stop", "name"]
windows_df.columns = cols
# merge windows bed with TF_diversity
TF_diversity = pd.merge(windows_df, TF_diversity, how="left", on="name")
# turn NaN into 0s
TF_diversity[
[
"Shannon_diversity_TF",
"Shannon_diversity_TF_family",
"unique_TF_count",
"total_TF_count",
"TF_family_count",
]
] = TF_diversity[
[
"Shannon_diversity_TF",
"Shannon_diversity_TF_family",
"unique_TF_count",
"total_TF_count",
"TF_family_count",
]
].fillna(
0
)
# Make AGI column
TF_diversity["AGI"] = TF_diversity.name.str.split("_", expand=True)[0]
# make window number column
TF_diversity["window_number"] = TF_diversity.name.str.split(
"_", expand=True
)[1]
    # make df columns integers
TF_diversity = TF_diversity.astype(
{
"stop": "int",
"start": "int",
"chr": "int",
"Shannon_diversity_TF": "int",
"Shannon_diversity_TF_family": "int",
"unique_TF_count": "int",
"total_TF_count": "int",
"TF_family_count": "int",
}
)
# add window length column
TF_diversity = TF_diversity.assign(
window_length=TF_diversity.stop - TF_diversity.start
)
# drop name column
TF_diversity.drop(["name"], axis=1, inplace=True)
# rename df
all_combined = TF_diversity
# set default seaborn settings for plotting
# allow colour codes in seaborn
sns.set(color_codes=True)
sns.set_style("ticks")
sns.set_palette(args.palette)
# remove windows with fewer than 100 promoters extending to that location
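    # (df[col].map(df[col].value_counts()) attaches, to every row, the number of rows that
    # share its window_number, so the boolean below keeps only well-populated windows)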
all_combined = all_combined[
all_combined["window_number"].map(
all_combined["window_number"].value_counts()
)
> 99
]
return all_combined
def add_coverage(df, coverage_bed, suffix):
"""add % bp covered data from a bed file to the df. Prefix is a name added to any new columns"""
# read in bed file
coverage_df = pd.read_table(coverage_bed, sep="\t", header=None)
cols = [
"chr",
"start",
"stop",
"name",
"no._of_overlaps",
"no._of_bases_covered",
"window_length",
"fraction_bases_covered",
]
coverage_df.columns = cols
# add % bases covered column
coverage_df["percentage_bases_covered"] = (
coverage_df["fraction_bases_covered"] * 100
)
# filter columns
coverage_df = coverage_df[
["chr", "start", "stop", "name", "percentage_bases_covered"]
]
    # make df columns integers
df = df.astype({"stop": "int", "start": "int", "chr": "int"})
coverage_df = coverage_df.astype(
{"stop": "int", "start": "int", "chr": "int"}
)
# merge the dfs
merged = pd.merge(
df,
coverage_df,
how="left",
on=["chr", "start", "stop"],
suffixes=("", f"_{suffix}"),
)
# remove NaN
# merged = merged[merged['name'].notnull()]
return merged
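# Hedged usage sketch for add_coverage (argument and suffix names are illustrative; the suffix
# must match the column names expected later, e.g. 'rootshootintersect_chrom'):
#   all_combined = add_coverage(all_combined, args.rootshootintersect_chrom_bp_covered,
#                               "rootshootintersect_chrom")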
def rep_sample(df, col, n, random_state):
"""function to return a df with equal sample sizes
taken from here: https://stackoverflow.com/questions/39457762/python-pandas-conditionally-select-a-uniform-sample-from-a-dataframe"""
# identify number of categories
nu = df[col].nunique()
# find number of rows
# m = len(df)
# integar divide total sample size by number of categories
mpb = n // nu
# multiply this by the number of categories and subtract from the number of samples to find the remainder
mku = n - mpb * nu
    # make an array filled with zeros, one entry per category
fills = np.zeros(nu)
# make values in the array 1s up until the remainder
fills[:mku] = 1
# calculate sample sizes for each category
sample_sizes = (np.ones(nu) * mpb + fills).astype(int)
# group the df by categories
gb = df.groupby(col)
# define sample size function
def sample(sub_df, i):
return sub_df.sample(sample_sizes[i], random_state=random_state)
# sample = lambda sub_df, i: sub_df.sample(
# sample_sizes[i], random_state=random_state
# )
# run sample size function on each category
subs = [sample(sub_df, i) for i, (_, sub_df) in enumerate(gb)]
# return concatenated sub dfs
return pd.concat(subs)
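# Hedged usage sketch for rep_sample (toy values, not part of the original script):
#   toy = pd.DataFrame({"gene_type": ["constitutive"] * 8 + ["variable"] * 4,
#                       "AGI": range(12)})
#   balanced = rep_sample(toy, "gene_type", 8, random_state=1)
#   balanced.gene_type.value_counts()   # -> 4 rows of each category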
def windows_coords(
output_prefix,
all_combined_df,
variable_of_interest1,
variable_of_interest1_name,
variable_of_interest2,
variable_of_interest2_name,
promoter_bed,
promoter_no_5UTR,
window_offset,
EPD_TSS_bed,
includeEPDTSS=False,
chromatin_tissue_variable="percentage_bases_covered_rootshootintersect_chrom",
chromatin_tissue_variable_name="% open chromatin root and shoot intersect",
x_range=False,
estimator="median",
ci=95,
n_boot=10000,
genetype=False,
genetype2=False,
genetype3=False,
):
"""function to add the centre of each window corresponding to each window no. and return a lineplot."""
# read in bed file
promoter_df = pd.read_table(promoter_bed, sep="\t", header=None)
col = [
"chr",
"start",
"stop",
"AGI",
"dot1",
"strand",
"source",
"type",
"dot2",
"attributes",
]
promoter_df.columns = col
# add promoter length column
promoter_df["length"] = promoter_df.stop - promoter_df.start
# merge promoter_bed with all_combined_df on AGI
merged = pd.merge(
all_combined_df,
promoter_df,
on="AGI",
how="left",
suffixes=("", "_wholeprom"),
)
# remove NaN for all variables of interest
merged = merged[merged[variable_of_interest1].notnull()]
merged = merged[merged[variable_of_interest2].notnull()]
# merged = merged[merged[variable_of_interest3].notnull()]
    # make columns integers
merged = merged.astype(
{
"stop_wholeprom": "int",
"start_wholeprom": "int",
"start": "int",
"stop": "int",
}
)
# split merged into 2 dfs by strand
pos = merged[merged.strand == "+"].copy()
neg = merged[merged.strand == "-"].copy()
# add variable of interest position column where position is the middle of the window, with whole length of the longest promoter
pos["position"] = (pos.stop_wholeprom) - (
pos.start + 0.5 * (pos.stop - pos.start)
)
neg["position"] = (
neg.start + 0.5 * (neg.stop - neg.start)
) - neg.start_wholeprom
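    # 'position' is the distance from the window centre to the ATG end of the promoter,
    # measured the same way for both strands (hence the strand-specific arithmetic above)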
merged2 = pd.merge(pos, neg, how="outer")
merged2 = merged2.astype({"position": "int64"})
# get longest transcript TSS distribution (if Araport 11 definition used)
promoter_no_5UTR_df = pd.read_table(
promoter_no_5UTR, sep="\t", header=None
)
col = [
"chr",
"source",
"type",
"start",
"stop",
"dot1",
"strand",
"dot2",
"attributes",
]
promoter_no_5UTR_df.columns = col
# add AGI column
promoter_no_5UTR_df_agi = promoter_no_5UTR_df.assign(
AGI=promoter_no_5UTR_df.attributes.str.extract(r"ID=gene:(.*?)\;")
)
# merged with windows
merged2 = pd.merge(
merged2,
promoter_no_5UTR_df_agi,
on="AGI",
how="left",
suffixes=("", "_no_UTR"),
)
# remove NaN (promoters in promoters.gff but not in promoters_5UTR)
merged2 = merged2[merged2.length.notnull()]
# remove NaN (promoters in promoters_5UTR but not in promoters.gff - ie. only 5'UTRs)
merged2 = merged2[merged2.chr_no_UTR.notnull()]
# Get most common transcript TSS location from eukaryotic promoter database (last modified on EPD 06/06/2018)
# Note - not all promoters have a TSS on EPD
if includeEPDTSS is True:
EPD_TSS_df = pd.read_table(
EPD_TSS_bed, delim_whitespace=True, header=None, skiprows=4
)
cols = [
"chr",
"start",
"stop",
"transcript_EPD",
"score_EPD",
"strand_EPD",
"thickstart_EPD",
"thickend_EPD",
]
EPD_TSS_df.columns = cols
# add AGI column
EPD_TSS_df["AGI"] = EPD_TSS_df.transcript_EPD.str.split(
"_", expand=True
)[0]
# add TSS location column
EPD_TSS_df.loc[
EPD_TSS_df.strand_EPD == "+", "TSS_EPD"
] = EPD_TSS_df.loc[EPD_TSS_df.strand_EPD == "+", "thickstart_EPD"]
EPD_TSS_df.loc[EPD_TSS_df.strand_EPD == "-", "TSS_EPD"] = (
EPD_TSS_df.loc[EPD_TSS_df.strand_EPD == "-", "thickend_EPD"] - 1
)
# merged with windows
merged2 = pd.merge(
merged2, EPD_TSS_df, on="AGI", how="left", suffixes=("", "_EPD")
)
# remove NaN (promoters in EPD but not in promoters_5UTR)
merged2 = merged2[merged2.length.notnull()]
# transfrom EPD TSS column in the same way as the position column
merged2.loc[merged2.strand == "-", "TSS_transformed_EPD"] = (
merged2.loc[merged2.strand == "-", "TSS_EPD"]
- merged2.loc[merged2.strand == "-", "start_wholeprom"]
)
merged2.loc[merged2.strand == "+", "TSS_transformed_EPD"] = (
merged2.loc[merged2.strand == "+", "stop_wholeprom"]
- merged2.loc[merged2.strand == "+", "TSS_EPD"]
)
        # make integers
merged2 = merged2.astype({"TSS_transformed_EPD": "float64"})
# calculate longest promoter length based on window cutoff
number_of_windows = len(all_combined_df.window_number.unique())
window_length = all_combined_df.window_length.max()
length_of_longest_promoter = number_of_windows * (
window_length - window_offset
)
# make 0 start like in bed files
merged2.start_no_UTR = merged2.start_no_UTR - 1
# add Araport TSS location column
# merged2['TSS'] = int()
merged2.loc[merged2.strand == "+", "TSS"] = merged2.loc[
merged2.strand == "+", "stop_no_UTR"
]
merged2.loc[merged2.strand == "-", "TSS"] = (
merged2.loc[merged2.strand == "-", "start_no_UTR"] - 1
)
# transform TSS location in the same way as the position column
merged2.loc[merged2.strand == "-", "TSS_transformed_Araport11"] = (
merged2.loc[merged2.strand == "-", "TSS"]
- merged2.loc[merged2.strand == "-", "start_wholeprom"]
)
merged2.loc[merged2.strand == "+", "TSS_transformed_Araport11"] = (
merged2.loc[merged2.strand == "+", "stop_wholeprom"]
- merged2.loc[merged2.strand == "+", "TSS"]
)
    # make integers
merged2 = merged2.astype(
{
"start_no_UTR": "float64",
"stop_no_UTR": "float64",
"TSS": "float64",
"TSS_transformed_Araport11": "float64",
f"{variable_of_interest1}": "float64",
f"{variable_of_interest2}": "float64",
f"{chromatin_tissue_variable}": "float64",
}
)
# return merged2[['AGI','strand','start','stop','start_wholeprom','stop_wholeprom','start_no_UTR','stop_no_UTR','TSS','TSS_transformed','position','chr_no_UTR','window_number']]
# change estimator
if estimator == "mean":
new_estimator = estimator
if estimator == "median":
new_estimator = np.median
# set number of subplots so can easily change all output possibilities, where subplotA is the top
subplots = 2
# make subplots
if includeEPDTSS is True:
subplots = subplots + 1
f, axes = plt.subplots(subplots, figsize=(10, 10))
# OpenChromplot = axes[subplots-subplots]
# Araport11TSSplot = axes[subplots-(subplots-1)]
EPDTSSplot = axes[subplots - (subplots)]
# promlengthsplot = axes[subplots-(subplots-3)]
variableofinterest1plot = axes[subplots - (subplots - 1)]
variableofinterest2plot = axes[subplots - (subplots - 2)]
else:
f, axes = plt.subplots(subplots, figsize=(10, 8))
# OpenChromplot = axes[subplots-subplots]
# Araport11TSSplot = axes[subplots-(subplots-1)]
# promlengthsplot = axes[subplots-(subplots-2)]
variableofinterest1plot = axes[subplots - (subplots)]
variableofinterest2plot = axes[subplots - (subplots - 1)]
# check the plot axes variables are there. If they are not, assign None to them
# try:
# OpenChromplot
# except NameError:
# OpenChromplot = None
# try:
# Araport11TSSplot
# except NameError:
# Araport11TSSplot = None
try:
EPDTSSplot
except NameError:
EPDTSSplot = None
# try:
# promlengthsplot
# except NameError:
# promlengthsplot = None
try:
variableofinterest1plot
except NameError:
variableofinterest1plot = None
try:
variableofinterest2plot
except NameError:
variableofinterest2plot = None
# try:
# variableofinterest3plot
# except NameError:
# variableofinterest3plot = None
# If EPD TSS plot is present, filter promoters which aren't in EPD to remove NaNs
if EPDTSSplot is not None:
# remove NaN (promoters in promoters_5UTR but not in promoters.gff - ie. only 5'UTRs)
merged2 = merged2[merged2.TSS_transformed_EPD.notnull()]
if genetype is not False:
# filter so only genetype subset present
merged2 = merged2[merged2.gene_type.notnull()]
# remove windows with fewer than 50 promoters extending to that location if looking at specific genetypes
merged2 = merged2[
merged2["window_number"].map(
merged2["window_number"].value_counts()
)
> 49
]
# redefine longest promoter length based on window cutoff
number_of_windows = len(merged2.window_number.unique())
window_length = merged2.window_length.max()
length_of_longest_promoter = number_of_windows * (
window_length - window_offset
)
# make all values of interest negative as upstream from ATG
# merged_positive = merged2.copy()
if includeEPDTSS is True:
merged2[
[
"length",
"TSS_transformed_Araport11",
"position",
"TSS_transformed_EPD",
]
] = -merged2[
[
"length",
"TSS_transformed_Araport11",
"position",
"TSS_transformed_EPD",
]
]
else:
merged2[
["length", "TSS_transformed_Araport11", "position"]
] = -merged2[["length", "TSS_transformed_Araport11", "position"]]
if genetype is False:
# length_of_longest_promoter = merged_positive.length.max()
# if openchromplot variable present, add that plot
# next plot letter name
nextletter = "A"
# if variableofinterest1plot variable present, add that plot
if variableofinterest1plot is not None:
# variable of interest lineplot
sns.lineplot(
y=merged2[variable_of_interest1],
x=merged2.position,
ax=variableofinterest1plot,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles and axes labels
variableofinterest1plot.set_title(
f"{nextletter}: All promoters sliding windows {variable_of_interest1_name}",
weight="bold",
)
variableofinterest1plot.set_ylabel(
f"{estimator} {variable_of_interest1_name}"
)
variableofinterest1plot.set_xlabel("")
variableofinterest1plot.set_xticklabels([])
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest2plot variable present, add that plot
if variableofinterest2plot is not None:
# variable of interest lineplot
sns.lineplot(
y=merged2[variable_of_interest2],
x=merged2.position,
ax=variableofinterest2plot,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles and axes labels
variableofinterest2plot.set_title(
f"{nextletter}: All promoters sliding windows {variable_of_interest2_name}",
weight="bold",
)
variableofinterest2plot.set_ylabel(
f"{estimator} {variable_of_interest2_name}"
)
variableofinterest2plot.set_xlabel("position upstream of ATG")
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# #if variableofinterestplot variable present, add that plot
# if variableofinterest3plot !=None:
# #variable of interest lineplot
# sns.lineplot(y=merged2[variable_of_interest3], x=merged2.position, ax=variableofinterest3plot,estimator=new_estimator,ci=ci, n_boot=n_boot)
# #set titles and axes labels
# variableofinterest3plot.set_title(f'{nextletter}: All promoters sliding windows {variable_of_interest3_name}', weight='bold')
# variableofinterest3plot.set_ylabel(f'{estimator} {variable_of_interest3_name}')
# variableofinterest3plot.set_xlabel('position upstream of ATG')
elif genetype2 is False:
# filter so only genetype subset present
merged2 = merged2[merged2.gene_type.notnull()]
# next plot letter name
nextletter = "A"
# if openchromplot variable present, add that plot
# if variableofinterest1plot variable present, add that plot
if variableofinterest1plot is not None:
# variable of interest lineplot
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest1plot,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles and axes labels
variableofinterest1plot.set_title(
f"{nextletter}: {genetype} {variable_of_interest1_name}",
weight="bold",
)
variableofinterest1plot.set_ylabel(
f"{estimator} {variable_of_interest1_name}"
)
variableofinterest1plot.set_xlabel("")
variableofinterest1plot.set_xticklabels([])
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest2plot variable present, add that plot
if variableofinterest2plot is not None:
# variable of interest lineplot
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest2plot,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles and axes labels
variableofinterest2plot.set_title(
f"{nextletter}: {genetype} {variable_of_interest2_name}",
weight="bold",
)
variableofinterest2plot.set_ylabel(
f"{estimator} {variable_of_interest2_name}"
)
variableofinterest2plot.set_xlabel("position upstream of ATG")
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest3plot variable present, add that plot
# if variableofinterest3plot is not None:
# # variable of interest lineplot
# sns.lineplot(
# y=merged2[merged2.gene_type == genetype][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype].position,
# ax=variableofinterest3plot,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# # set titles and axes labels
# variableofinterest3plot.set_title(
# f"{nextletter}: {genetype} {variable_of_interest3_name}",
# weight="bold",
# )
# variableofinterest3plot.set_ylabel(
# f"{estimator} {variable_of_interest3_name}"
# )
# variableofinterest3plot.set_xlabel("position upstream of ATG")
# set y axis as maximum mean window % bp covered of all genetype subset
# variableofinterestplot.set_ylim([0,merged2.groupby('window_number')[variable_of_interest].median().max()+20])
# set x axis range if specified
# if x_range==False:
# pass
# else:
# length_of_longest_promoter = x_range
# #for all subplots:
# for n in axes:
# #remove grids
# n.grid(False)
# n.set_xlim([-length_of_longest_promoter,0])
# f.tight_layout()
elif genetype3 is False:
# filter so only genetype subset present
merged2 = merged2[merged2.gene_type.notnull()]
# make a subselection of categories so all sample sizes are equal
# first select only the relevant genetypes
merged2 = merged2[merged2.gene_type.isin([genetype, genetype2])]
# make each promoter unique
merged2_unique = merged2.drop_duplicates("AGI")
# identify sample size of the minimum category
minimum_sample_size = merged2_unique.gene_type.value_counts().min()
# print this
print(f"sample size in each category = {minimum_sample_size}")
# save sample size as file
with open(
f"../../data/output/{args.file_names}/rolling_window/{args.foldername_prefix}/plots/number_of_genes_in_each_category.txt",
"w",
) as file:
file.write(
"number_of_genes_in_each_category=" + str(minimum_sample_size)
)
# multiply this by the number of categories
total_sample_size = minimum_sample_size * len(
merged2_unique.gene_type.unique()
)
# select equal sample sizes of each category with a random state of 1 so it's reproducible
equal_samplesizes = rep_sample(
merged2_unique, "gene_type", total_sample_size, random_state=1
)
# now filter out genes which were not selected using the minimum sample size
to_remove = merged2_unique[
~merged2_unique.AGI.isin(equal_samplesizes.AGI)
]
merged2 = merged2[~merged2.AGI.isin(to_remove.AGI)]
# if openchromplot variable present, add that plot
# add plot letter name
nextletter = "A"
# if variableofinterest1plot variable present, add that plot
if variableofinterest1plot is not None:
# lineplot variable of interest
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest1plot,
label=genetype,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype2][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype2].position,
ax=variableofinterest1plot,
label=genetype2,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles & axes names
variableofinterest1plot.set_title(
f"{nextletter}: {variable_of_interest1_name}", weight="bold"
)
variableofinterest1plot.set_ylabel(
f"{estimator} {variable_of_interest1_name}"
)
variableofinterest1plot.set_xlabel("")
variableofinterest1plot.set_xticklabels([])
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest2plot variable present, add that plot
if variableofinterest2plot is not None:
# lineplot variable of interest
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest2plot,
label=genetype,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype2][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype2].position,
ax=variableofinterest2plot,
label=genetype2,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles & axes names
variableofinterest2plot.set_title(
f"{nextletter}: {variable_of_interest2_name}", weight="bold"
)
variableofinterest2plot.set_ylabel(
f"{estimator} {variable_of_interest2_name}"
)
variableofinterest2plot.set_xlabel("position upstream of ATG")
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest3plot variable present, add that plot
# if variableofinterest3plot is not None:
# # lineplot variable of interest
# l1 = sns.lineplot(
# y=merged2[merged2.gene_type == genetype][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype].position,
# ax=variableofinterest3plot,
# label=genetype,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# l2 = sns.lineplot(
# y=merged2[merged2.gene_type == genetype2][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype2].position,
# ax=variableofinterest3plot,
# label=genetype2,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# # set titles & axes names
# variableofinterest3plot.set_title(
# f"{nextletter}: {variable_of_interest3_name}", weight="bold"
# )
# variableofinterest3plot.set_ylabel(
# f"{estimator} {variable_of_interest3_name}"
# )
# variableofinterest3plot.set_xlabel("position upstream of ATG")
# set y axis as maximum mean window % bp covered of all genetype subset
# axes[2].set_ylim([0,merged2.groupby('window_number').percentage_bases_covered.median().max()+20])
# gene_type labels
# gene_type_labels = [genetype, genetype2]
# Create the legend
axes[0].legend()
# if x_range==False:
# pass
# else:
# length_of_longest_promoter = x_range
# #for all subplots:
# for n in axes:
# #remove grids
# n.grid(False)
# n.set_xlim([-length_of_longest_promoter,0])
# f.tight_layout()
else:
# filter so only genetype subset present
merged2 = merged2[merged2.gene_type.notnull()]
# make a subselection of categories so all sample sizes are equal
# make each promoter unique
merged2_unique = merged2.drop_duplicates("AGI")
# identify sample size of the minimum category
minimum_sample_size = merged2_unique.gene_type.value_counts().min()
# print this
print(f"sample size in each category = {minimum_sample_size}")
# save sample size as file
with open(
f"../../data/output/{args.file_names}/rolling_window/{args.foldername_prefix}/plots/number_of_genes_in_each_category.txt",
"w",
) as file:
file.write(
"number_of_genes_in_each_category=" + str(minimum_sample_size)
)
# multiply this by the number of categories
total_sample_size = minimum_sample_size * len(
merged2_unique.gene_type.unique()
)
# select equal sample sizes of each category with a random state of 1 so it's reproducible
equal_samplesizes = rep_sample(
merged2_unique, "gene_type", total_sample_size, random_state=1
)
# now filter out genes which were not selected using the minimum sample size
to_remove = merged2_unique[
~merged2_unique.AGI.isin(equal_samplesizes.AGI)
]
merged2 = merged2[~merged2.AGI.isin(to_remove.AGI)]
# add plot letter name
nextletter = "A"
# if openchromplot variable present, add that plot
# if variableofinterest1plot variable present, add that plot
if variableofinterest1plot is not None:
# lineplot
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest1plot,
label=genetype,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype2][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype2].position,
ax=variableofinterest1plot,
label=genetype2,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype3][
variable_of_interest1
],
x=merged2[merged2.gene_type == genetype3].position,
ax=variableofinterest1plot,
label=genetype3,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles & axes names
variableofinterest1plot.set_title(
f"{variable_of_interest1_name}", weight="bold"
)
variableofinterest1plot.set_ylabel(
f"{estimator} {variable_of_interest1_name}"
)
variableofinterest1plot.set_xlabel("")
variableofinterest1plot.set_xticklabels([])
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest2plot variable present, add that plot
if variableofinterest2plot is not None:
# lineplot
sns.lineplot(
y=merged2[merged2.gene_type == genetype][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype].position,
ax=variableofinterest2plot,
label=genetype,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype2][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype2].position,
ax=variableofinterest2plot,
label=genetype2,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
sns.lineplot(
y=merged2[merged2.gene_type == genetype3][
variable_of_interest2
],
x=merged2[merged2.gene_type == genetype3].position,
ax=variableofinterest2plot,
label=genetype3,
estimator=new_estimator,
ci=ci,
n_boot=n_boot,
)
# set titles & axes names
variableofinterest2plot.set_title(
f"{variable_of_interest2_name}", weight="bold"
)
variableofinterest2plot.set_ylabel(
f"{estimator} {variable_of_interest2_name}"
)
variableofinterest2plot.set_xlabel("position upstream of ATG")
# change to next letter
nextletter = chr(ord(nextletter) + 1)
# if variableofinterest3plot variable present, add that plot
# if variableofinterest3plot != None:
# # lineplot
# l1 = sns.lineplot(
# y=merged2[merged2.gene_type == genetype][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype].position,
# ax=variableofinterest3plot,
# label=genetype,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# l2 = sns.lineplot(
# y=merged2[merged2.gene_type == genetype2][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype2].position,
# ax=variableofinterest3plot,
# label=genetype2,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# l3 = sns.lineplot(
# y=merged2[merged2.gene_type == genetype3][
# variable_of_interest3
# ],
# x=merged2[merged2.gene_type == genetype3].position,
# ax=variableofinterest3plot,
# label=genetype3,
# estimator=new_estimator,
# ci=ci,
# n_boot=n_boot,
# )
# # set titles & axes names
# variableofinterest3plot.set_title(
# f"{variable_of_interest3_name}", weight="bold"
# )
# variableofinterest3plot.set_ylabel(
# f"{estimator} {variable_of_interest3_name}"
# )
# variableofinterest3plot.set_xlabel("position upstream of ATG")
# set y axis as maximum mean window % bp covered of all genetype subset
# axes[2].set_ylim([0,merged2.groupby('window_number').percentage_bases_covered.median().max()+20])
# gene_type labels
# gene_type_labels = [genetype, genetype2, genetype3]
# Create the legend
axes[0].legend()
# set x axis length
# if x_range==False:
# pass
# else:
# length_of_longest_promoter = x_range
# #for all subplots:
# for n in axes:
# #remove grids
# n.grid(False)
# n.set_xlim([-length_of_longest_promoter,0])
# leg = n.legend()
# f.tight_layout()
# set x axis range if specified
if x_range is False:
pass
else:
length_of_longest_promoter = x_range
# for all subplots:
for n in axes:
# remove grids
n.grid(False)
n.set_xlim([(-length_of_longest_promoter - 50), 0])
# set a tight layout
f.tight_layout()
# save figure
plt.savefig(
f"../../data/output/{args.file_names}/rolling_window/{args.foldername_prefix}/plots/{output_prefix}_TF_diversity_{estimator}_sliding_window.pdf",
format="pdf",
)
# remove plot
plt.clf()
return merged2
def add_genetype(df, gene_categories):
"""function to add gene type to the df, and remove random genes"""
select_genes = pd.read_table(gene_categories, sep="\t", header=None)
cols = ["AGI", "gene_type"]
select_genes.columns = cols
merged = | pd.merge(select_genes, df, on="AGI", how="left") | pandas.merge |
"""Script of my solution to DrivenData Modeling Women's Health Care Decisions
Use this script in the following way:
python solution.py <name-of-submission>
The argument is optional; the script will assign a default name.
"""
from __future__ import division
import sys
import pdb
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn import multiclass
from XGBoostClassifier import XGBoostClassifier
np.random.seed(17411)
def multiclass_log_loss(y_true, y_prob, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
https://www.kaggle.com/wiki/MultiClassLogLoss
Parameters
----------
y_true : array, shape = [n_samples, n_classes]
y_prob : array, shape = [n_samples, n_classes]
Returns
-------
loss : float
"""
predictions = np.clip(y_prob, eps, 1 - eps)
rows = y_prob.shape[0]
cols = y_prob.shape[1]
vsota = np.sum(y_true * np.log(predictions) + (1-y_true) * np.log(1-predictions))
vsota = vsota / cols
return -1.0 / rows * vsota
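# Hedged sanity check for multiclass_log_loss (toy values, not part of the original script):
#   y_true = np.array([[1, 0], [0, 1]])
#   y_prob = np.array([[0.9, 0.1], [0.2, 0.8]])
#   multiclass_log_loss(y_true, y_prob)   # comes out around 0.164 for these values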
def load_train_data(path=None, train_size=0.8):
    train_values = pd.read_csv('data/processed_train.csv')
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
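    # Hedged usage sketch (illustrative; mirrors how the multithread tests drive this helper):
    #   df = self.construct_dataframe(num_rows=10000)
    #   with tm.ensure_clean('__mt__.csv') as path:
    #       df.to_csv(path)
    #       result = self.generate_multithread_dataframe(path, 10000, num_tasks=4)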
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
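# an unterminated quoted field should raise; appending the closing quote makes the data parseable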
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format the string '-999.0' won't match
# exactly, so float matching is needed
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
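# values passed via na_values (as a list or a scalar) should be treated as NaN alongside the defaults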
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
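# empty header fields should be assigned 'Unnamed: N' column labels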
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
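# empty fields in string columns should come back as NaN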
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
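# with header=None the columns default to integers, or to the given names/prefix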
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
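# float-formatted values stay float64 while plain integers stay int64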
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
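# data rows with one more field than the header imply the first column is the index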
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
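# TextParser should accept a pre-split list of rows and match read_csv output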
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
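# header=2 should use the third line as the header, ignoring the lines above it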
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
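# converters keyed by column name or by position should be applied to the parsed values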
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 should drop the first four lines (comments included), and
# header=1 then picks the second remaining line ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
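# 'day_first' is a deliberately wrong keyword here, so the read should raise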
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
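# identical to convert_days; the test only checks that both passes agree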
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
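# sep=None should make the python engine sniff the '|' delimiter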
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
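# a regex separator such as '\s+' should give the same result as converting runs of spaces to commas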
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
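# fixed-width parsing via colspecs, widths, or a filler delimiter should all match the CSV-parsed expectation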
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
        df = self.read_csv(StringIO(data), skip_blank_lines=False)
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 17:28:40 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
from datetime import date
start = '20040101'
today = date.today().strftime('%Y%m%d')
firstday = '19991230'
###################################################Germany
swap_spread_de = con.bdh( 'EUSS1 CMPN Curncy', 'PX LAST' , firstday , today)
#eu : EUSS1 CMPN Curncy , swap-bond spread
swap_spread_de.names = 'DE'
######################################################UK
swap_spread_uk = con.bdh( 'BPSP1 CMPN Curncy', 'PX LAST' , firstday , today)
swap_spread_uk.names = 'UK'
######################################################Japan:
swap_spread_jp = con.bdh( 'JYSS1 CMPN Curncy', 'PX LAST' , firstday , today)
swap_spread_jp.names = 'JP'
#####################################################China:
#swap_interest_rate_ch = con.bdh( 'CCUSWO1 BGN Curncy', 'PX LAST' , start , today)
#govt_ch = con.bdh( 'GCNY10YR Index', 'PX LAST' , start , today)
rates_ch = con.bdh(['GCNY10YR Index','CCUSWO1 BGN Curncy'], 'PX LAST' , firstday , today)
#rates_ch_w = rates_ch.groupby(pd.Grouper(freq='W')).last()
#rates_ch = rates_ch[rates_ch.index>start]
swap_spread_ch = rates_ch.iloc[:,0] - rates_ch.iloc[:,1].values
swap_spread_ch.names = 'CH'
#CCUSWO1 BGN Curncy , interest rate swap (we pulled this) , GOV BOND: GCNY10YR Index
####################################################Turkey
rates_tr = con.bdh( ['TYUSSW1 BGN Curncy','IECM1Y Index'], 'PX LAST' , firstday , today)
#rates_tr = rates_tr[rates_tr.index>start]
#rates_tr_w = rates_tr.groupby(pd.Grouper(freq='W')).last()
swap_spread_tr = rates_tr.iloc[:,0] - rates_tr.iloc[:,1].values
swap_spread_tr.names = 'TR'
#govt_tr = con.bdh( 'IECM1Y Index', 'PX LAST' , start , today)
#govt_tr_int = govt_ch.interpolate(method= 'linear')
#govt_tr_int_w = govt_tr_int.groupby(pd.Grouper(freq='W')).last()
#govt_tr_int_w = govt_tr_int_w[govt_tr_int_w.index>start]
#TYUSSW1 BGN Curncy, interest rate swap , IECM1Y Index
###################################################Mexico
#swap_spread_mx = con.bdh( 'MPSP1 BGN Curncy', 'PX LAST' , start , today)
#swap_spread_mx.names = 'MX'
#mxn : MPSP1 BGN Curncy , NO DATA
##################################################Brazil
rates_br = con.bdh( ['BCSCN1Y CMPN Curncy','GTBRL1Y Govt'], 'PX LAST' , firstday , today)
#rates_br_w = rates_br[rates_br.index>start]
#rates_br_w = rates_br.groupby(pd.Grouper(freq='W')).last()
swap_spread_br = rates_br.iloc[:,0] - rates_br.iloc[:,1].values
swap_spread_br.names = 'BR'
#Brazil : BCSCN1Y CMPN Curncy, interest rate swap , GTBRL1Y Govt
#################################################Russia
swap_spread_ru = con.bdh( 'RRUSSW1 BGN Curncy', 'PX LAST' , firstday , today)
#russia: RRUSSW1 BGN Curncy, spread
swap_spread_ru.names = 'RU'
################################################South Africa
rates_sa = con.bdh( ['SASW1 BGN Curncy','GTZAR10Y Govt'], 'PX LAST' , firstday , today)
#rates_sa_w = rates_sa[rates_sa.index>start]
#rates_sa_w = rates_sa.groupby(pd.Grouper(freq='W')).last()
swap_spread_sa = rates_sa.iloc[:,0] - rates_sa.iloc[:,1].values
swap_spread_sa.names = 'SA'
#South Africa : SASW1 BGN Curncy, interest rate swap (we pulled this), GTZAR10Y Govt
swap_spread = pd.concat([swap_spread_de, swap_spread_uk, swap_spread_jp, swap_spread_ch, swap_spread_tr, swap_spread_br, swap_spread_ru, swap_spread_sa], axis=1)
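# --- Illustrative sketch (not part of the original script) -------------------
# matplotlib/seaborn are imported above but no plotting step is visible here.
# One plausible way to visualise the combined 1Y swap spreads is sketched
# below; the column labels are an assumption that simply mirrors the concat
# order above.
sns.set_style("whitegrid")
swap_spread.columns = ['DE', 'UK', 'JP', 'CN', 'TR', 'BR', 'RU', 'ZA']
swap_spread.plot(figsize=(12, 6), title='1Y swap spreads by country')
plt.tight_layout()
plt.show()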
import pandas as pd
def full_describe(series: pd.Series, verbose=True):
"""
    Calculates pandas ``describe`` statistics for a Series, plus counts of unique and NaN values.
    :param series: pandas Series to summarise
    :param verbose: if True, print some additional information
    :return: DataFrame with the statistics as columns
"""
    stats_df = pd.DataFrame()
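    # --- Illustrative continuation: the rest of this helper is not shown
    # above. One plausible way to finish it, matching the docstring
    # ("describe plus unique/NaN counts, stats as cols"), is sketched below;
    # the exact column names are assumptions.
    stats_df['nunique'] = [series.nunique()]
    stats_df['n_nan'] = [series.isna().sum()]
    described = series.describe().to_frame().T.reset_index(drop=True)
    stats_df = pd.concat([described, stats_df], axis=1)
    if verbose:
        print(f"dtype: {series.dtype}, length: {len(series)}, "
              f"nunique: {series.nunique()}, NaN: {series.isna().sum()}")
    return stats_df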
# Pre-Process Text Data
# Import Modules
import os
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
import nltk
import string
import re
import datetime
from tqdm import tqdm
from textblob import TextBlob
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import *
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import collections
from gensim import corpora, models, similarities
import gensim
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import logging
import pickle
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
analyser = SentimentIntensityAnalyzer()
def get_date(created):
return datetime.datetime.fromtimestamp(created)
def main():
print('Pre-Processing Reddit Data')
print('First Pre-Process the Posts themselves')
# Read in CSV as pandas dataframe
data = pd.read_csv('ClimateSkepticsAllPosts.csv')
### Clean Text ###
# 1. Remove URLs from posts and make text lowercase
def remove_url(input_txt):
url_temp = re.findall(r"http\S*", input_txt)
        url_links.append(url_temp) # Append URLs found in the post to an array for further network analysis
input_txt = re.sub(r"http\S+", "", input_txt)
input_txt = input_txt.lower()
return input_txt
url_links = [] # Set up empty array to count URLs
data['Clean_Post'] = data['title'].apply(lambda post: remove_url(post))
# 2. Remove Punctuation, Numbers, and Special Characters
data['Clean_Post'] = data['Clean_Post'].str.replace("[^a-zA-Z#]", " ")
# 3. Remove Short Words (3 Letters or Less)
data['Clean_Post'] = data['Clean_Post'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# 4. Tokenisation
data['Tokenised_Post'] = data['Clean_Post'].apply(lambda x: x.split())
# 5. Remove Stopwords
stopword = nltk.corpus.stopwords.words('english')
def remove_stopwords(text):
text = [word for word in text if word not in stopword]
return text
data['Post_Nonstop'] = data['Tokenised_Post'].apply(lambda x: remove_stopwords(x))
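    # For illustration (not in the original script), a post such as
    #   "Check https://example.com because CO2 levels are rising!!"
    # comes out of the steps above as ['check', 'levels', 'rising']:
    # the URL is stripped and the text lower-cased (1), digits/punctuation are
    # replaced by spaces (2), words of 3 letters or fewer ('co', 'are') are
    # dropped (3), the text is tokenised (4), and 'because' is removed as an
    # NLTK stopword (5).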
print('Saving to CSV')
data.to_csv('Reddit_Post_DF_PP.csv')
print('Read and Pre-Process Top-Level Comments and Subcomments')
subreddit_links = []
# Define Functions outside of loop
def remove_deleted_comments(input_txt):
input_txt = re.sub(r"deleted", "", input_txt)
return input_txt
def remove_user_subreddit(input_txt):
input_txt = re.sub(r"/u/[\w]*", '', input_txt)
sr_temp = re.findall(r"/r/[\w]*", input_txt)
subreddit_links.append(sr_temp) # Append links to other subreddits to array for further network analysis
input_txt = re.sub(r"/r/[\w]*", '', input_txt)
return input_txt
# Read in CSV as pandas dataframe
toplevelcomments = pd.DataFrame(columns=['comment','timestamp','Score'])
for i in list(range(0,len(data['id']))):
if os.path.isfile('Comments/%s.csv' %(data['id'][i])):
try:
data2 = pd.read_csv('Comments/' + '%s.csv' %(data['id'][i]))
row = next(data2.iterrows())
tempdf = pd.DataFrame(row[1])
tempdf['timestamp'] = pd.to_numeric(data2.iloc[2])
tempdf['Score'] = data2.iloc[1]
tempdf.columns = ['comment','timestamp','Score']
toplevelcomments = pd.concat([toplevelcomments,tempdf])
# Subcomments
subcom_list = []
scores = []
subcom_ts = []
for j in list(range(0,data2.shape[1])):
try:
subc = eval(data2.loc[3,:][j]) # Read text representation of dictionary as dictionary
except:
print(j)
print(data2.loc[3,:][j])
for key in subc:
try:
val = subc.get(key)
score = val[0]
comment = val[1]
timestamp = int(val[2])
blob = TextBlob(comment)
sentences = blob.sentences
temp = []
for sentence in sentences:
string = str(sentence)
if string.startswith(">"): # Remove quotes of previous comments and other sources
pass
else:
temp.append(str(sentence))
new_comment = ' '.join(temp)
subcom_list.append(new_comment)
scores.append(score)
subcom_ts.append(timestamp)
except:
pass
if len(subcom_list)>0:
try:
tempsc = pd.DataFrame(subcom_list)
tempsc.columns = ['subcomment']
tempsc['timestamp'] = subcom_ts
tempsc['Score'] = scores
tempsc = tempsc[tempsc['subcomment'].map(lambda d: len(d)) > 0]
subcomments = pd.concat([subcomments,tempsc])
except NameError:
subcomments = pd.DataFrame(subcom_list)
subcomments.columns = ['subcomment']
subcomments['timestamp'] = subcom_ts
subcomments['Score'] = scores
subcomments = subcomments[subcomments['subcomment'].map(lambda d: len(d)) > 0]
except:
pass # Pass if no comments for post
# Convert timestamps to Dates for comments and subcomments (posts done so in the get data phase)
_timestamp = toplevelcomments["timestamp"].apply(get_date)
toplevelcomments = toplevelcomments.assign(timestamp = _timestamp)
_timestamp = subcomments["timestamp"].apply(get_date)
subcomments = subcomments.assign(timestamp = _timestamp)
print('Top Level Comments: ' + str(len(toplevelcomments)))
print('Subcomments: ' + str(len(subcomments)))
print('Total Comments: ' + str(len(toplevelcomments) + len(subcomments)))
print('Pre-Processing Comments')
# 0. Remove NaNs
toplevelcomments.dropna(axis = 0, subset=['comment'], inplace=True)
# 1. Remove URLs from comments and make text lowercase. Also remove deleted comments, usernames and subreddit names.
toplevelcomments['Clean_Comment'] = toplevelcomments['comment'].apply(lambda x: remove_url(x))
toplevelcomments['Clean_Comment'] = toplevelcomments['Clean_Comment'].apply(lambda x: remove_deleted_comments(x))
toplevelcomments['Clean_Comment'] = toplevelcomments['Clean_Comment'].apply(lambda x: remove_user_subreddit(x))
#toplevelcomments['Clean_Comment'] = toplevelcomments['Clean_Comment'].apply(lambda x: remove_climate_words(x))
# 2. Remove Punctuation, Numbers, and Special Characters
toplevelcomments['Clean_Comment'] = toplevelcomments['Clean_Comment'].str.replace("[^a-zA-Z#]", " ")
# 3. Remove Short Words (3 Letters or Less)
toplevelcomments['Clean_Comment'] = toplevelcomments['Clean_Comment'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# 4. Tokenisation
toplevelcomments['Tokenised_Comment'] = toplevelcomments['Clean_Comment'].apply(lambda x: x.split())
# 5. Remove Stopwords
toplevelcomments['Comment_Nonstop'] = toplevelcomments['Tokenised_Comment'].apply(lambda x: remove_stopwords(x))
# 6. Remove Blank Rows from Dataframe
toplevelcomments = toplevelcomments[toplevelcomments['Comment_Nonstop'].map(lambda d: len(d)) > 0] # Only keep rows where tokenised comments has at least 1 element
path=os.getcwd()
dirname = path + '/Comments_PP'
if not os.path.exists(dirname):
os.mkdir(dirname)
toplevelcomments.to_csv('Comments_PP/' + 'All_TLC_PP.csv')
# Clean Subcomments
# 0. Remove NaNs
subcomments.dropna(axis = 0, subset=['subcomment'], inplace=True)
# 1. Remove URLs from comments and make text lowercase. Also remove deleted comments, usernames and subreddit names.
subcomments['Clean_Comment'] = subcomments['subcomment'].apply(lambda x: remove_url(x))
subcomments['Clean_Comment'] = subcomments['Clean_Comment'].apply(lambda x: remove_deleted_comments(x))
subcomments['Clean_Comment'] = subcomments['Clean_Comment'].apply(lambda x: remove_user_subreddit(x))
#subcomments['Clean_Comment'] = subcomments['Clean_Comment'].apply(lambda x: remove_climate_words(x))
# 2. Remove Punctuation, Numbers, and Special Characters
subcomments['Clean_Comment'] = subcomments['Clean_Comment'].str.replace("[^a-zA-Z#]", " ")
# 3. Remove Short Words (3 Letters or Less)
subcomments['Clean_Comment'] = subcomments['Clean_Comment'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# 4. Tokenisation
subcomments['Tokenised_Comment'] = subcomments['Clean_Comment'].apply(lambda x: x.split())
# 5. Remove Stopwords
subcomments['Comment_Nonstop'] = subcomments['Tokenised_Comment'].apply(lambda x: remove_stopwords(x))
# 6. Remove Blank Rows from Dataframe
subcomments = subcomments[subcomments['Comment_Nonstop'].map(lambda d: len(d)) > 0] # Only keep rows where tokenised comments has at least 1 element
path=os.getcwd()
dirname = path + '/Comments_PP'
if not os.path.exists(dirname):
os.mkdir(dirname)
subcomments.to_csv('Comments_PP/' + 'All_SC_PP.csv')
## Combine Dataframes
# Extract the key columns from main post dataframe
posts = data[['title','timestamp','score','Clean_Post','Tokenised_Post','Post_Nonstop']]
# Make sure column names are the same
posts.columns = ['Comment','timestamp','Score','Clean_Comment','Tokenised_Comment','Comment_Nonstop']
toplevelcomments.columns = ['Comment','timestamp','Score','Clean_Comment','Tokenised_Comment','Comment_Nonstop']
subcomments.columns = ['Comment','timestamp','Score','Clean_Comment','Tokenised_Comment','Comment_Nonstop']
    full_df = pd.concat([posts, toplevelcomments, subcomments])
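    # --- Illustrative sketch (not part of the code shown above) --------------
    # The VADER `analyser` instantiated near the top of this script is never
    # used in the visible portion; a plausible next step would be to score the
    # cleaned text and save the combined frame (the output filename below is
    # made up for the example).
    full_df['vader_compound'] = full_df['Clean_Comment'].apply(
        lambda txt: analyser.polarity_scores(txt)['compound'])
    full_df.to_csv('Reddit_All_Text_PP.csv')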
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
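        # platforms whose C runtime prints 3-digit exponents (e.g. older Windows builds)
        # format the same numbers differently, hence the two expected variants below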
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with | option_context("display.precision", 5) | pandas.option_context |
import pytrec_eval
from repro_eval.Evaluator import RplEvaluator
from repro_eval.util import trim
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
sns.set_style('whitegrid')
palette = sns.color_palette("GnBu_d")
sns.set_palette(palette)
colors = sns.color_palette()
ORIG_B = './data/runs/orig/input.WCrobust04'
ORIG_A = './data/runs/orig/input.WCrobust0405'
QREL = 'data/qrels/core17.txt'
QREL_RPL = 'data/qrels/core18.txt'
runs_rpl = {
'rpl_wcr04_tf_1':
{'path': './data/runs/rpl/45/irc_task2_WCrobust04_001'},
'rpl_wcr0405_tf_1':
{'path': './data/runs/rpl/45/irc_task2_WCrobust0405_001'},
'rpl_wcr04_tf_2':
{'path': './data/runs/rpl/46/irc_task2_WCrobust04_001'},
'rpl_wcr0405_tf_2':
{'path': './data/runs/rpl/46/irc_task2_WCrobust0405_001'},
'rpl_wcr04_tf_3':
{'path': './data/runs/rpl/47/irc_task2_WCrobust04_001'},
'rpl_wcr0405_tf_3':
{'path': './data/runs/rpl/47/irc_task2_WCrobust0405_001'},
'rpl_wcr04_tf_4':
{'path': './data/runs/rpl/48/irc_task2_WCrobust04_001'},
'rpl_wcr0405_tf_4':
{'path': './data/runs/rpl/48/irc_task2_WCrobust0405_001'},
'rpl_wcr04_tf_5':
{'path': './data/runs/rpl/49/irc_task2_WCrobust04_001'},
'rpl_wcr0405_tf_5':
{'path': './data/runs/rpl/49/irc_task2_WCrobust0405_001'}
}
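# five replicated versions of the baseline (WCrobust04) and advanced (WCrobust0405) runs;
# they are evaluated against the Core18 qrels configured above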
def main():
rpl_eval = RplEvaluator(qrel_orig_path=QREL,
run_b_orig_path=ORIG_B,
run_a_orig_path=ORIG_A,
run_b_rep_path=None,
run_a_rep_path=None,
qrel_rpd_path=QREL_RPL)
rpl_eval.trim()
rpl_eval.evaluate()
for run_name, info in runs_rpl.items():
with open(info.get('path')) as run_file:
info['run'] = pytrec_eval.parse_run(run_file)
trim(info['run'])
info['scores'] = rpl_eval.evaluate(info['run'])
pairs = [('rpl_wcr04_tf_1', 'rpl_wcr0405_tf_1'),
('rpl_wcr04_tf_2', 'rpl_wcr0405_tf_2'),
('rpl_wcr04_tf_3', 'rpl_wcr0405_tf_3'),
('rpl_wcr04_tf_4', 'rpl_wcr0405_tf_4'),
('rpl_wcr04_tf_5', 'rpl_wcr0405_tf_5')]
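    # er() gives the Effect Ratio per measure: the improvement of the advanced run over the
    # baseline in the replicated pair relative to the original pair (values near 1 are good)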
df_content = {
'P_10': [rpl_eval.er(run_b_score=runs_rpl[pair[0]]['scores'], run_a_score=runs_rpl[pair[1]]['scores'])['P_10'] for pair in pairs],
'ndcg': [rpl_eval.er(run_b_score=runs_rpl[pair[0]]['scores'], run_a_score=runs_rpl[pair[1]]['scores'])['ndcg'] for pair in pairs],
'map': [rpl_eval.er(run_b_score=runs_rpl[pair[0]]['scores'], run_a_score=runs_rpl[pair[1]]['scores'])['map'] for pair in pairs],
}
df = | pd.DataFrame(df_content, index=['tf_1', 'tf_2', 'tf_3', 'tf_4', 'tf_5']) | pandas.DataFrame |
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
return pd.read_json(result['data'], orient='records'), result['metadata']
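# store one document per symbol, with the whole DataFrame serialized to JSON under 'data'
# and an optional metadata dict alongside it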
def writeToCollectionExtend(collection, symbol, df, metadata=None):
jsonStrings = {"_id":symbol, "symbol":symbol, "data":df.to_json(orient='records'), "metadata":metadata}
#bsonStrings = json_util.loads(jsonStrings)
collection.save(jsonStrings)
def writeToCSV(csv_dir, CollectionKey, df):
if os.path.exists(csv_dir) == False:
os.makedirs(csv_dir)
filename = csv_dir + CollectionKey + '.csv'
df.to_csv(filename)
def queryStockList(root_path, database, sheet):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False: df = setStockList(df)
return df
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=0)
if df.empty == False: df = setStockList(df)
return df
return pd.DataFrame()
except Exception as e:
print("queryStockList Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeStockList(root_path, database, sheet, df, symbol = None):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
if symbol is not None:
df = df[df.index == symbol].reset_index()
writeToCollection(collection, df, ['symbol'])
# try:
# index_info = collection.index_information()
# print("index info", index_info)
# except Exception as e:
# print(e)
# writeToCollection(collection, df)
# #collection.create_index('symbol', unique=True, drop_dups=True)
# else:
# writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storeStockList Exception", e)
def queryStockPublishDay(root_path, database, sheet, symbol):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename) == False: return ''
df = pd.read_csv(filename, index_col=["index"])
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
except Exception as e:
print("queryStockPublishDay Exception", e)
return ''
return ''
def storePublishDay(root_path, database, sheet, symbol, date):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=["index"])
publishDate = df[df['symbol'] == symbol]
if publishDate.empty:
df.loc[len(df)] = [symbol, date]
else:
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storePublishDay Exception", e)
def queryStock(root_path, database, sheet_1, sheet_2, symbol, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
stockList = getStockList(root_path, database, sheet_1)
lastUpdateTime = pd.Timestamp(stockList.loc[symbol][update_key])
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
df.set_index('date', inplace=True)
if 'index' in df:
del df['index']
return df, lastUpdateTime
if storeType == 2:
            csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
filename = csv_dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename, index_col=["date"])
return df, lastUpdateTime
except Exception as e:
print("queryStock Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeStock(root_path, database, sheet_1, sheet_2, symbol, df, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, sheet_1)
if (stockList[stockList.index == symbol][update_key][0] != now_date):
stockList.set_value(symbol, update_key, now_date)
storeStockList(root_path, database, sheet_1, stockList, symbol)
# df.set_index('date')
# df.index = df.index.astype(str)
# df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = df.reset_index()
if 'date' in df: df.date = df.date.astype(str)
writeToCollectionExtend(collection, symbol, df, {})
if storeType == 2:
            csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeStock Exception", e)
def queryNews(root_path, database, sheet, symbol):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp(getStockList(root_path, database, 'SHEET_US_DAILY').loc[symbol]['news_update'])
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df = readFromCollection(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
#df.set_index('date', inplace=True)
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename)
return df, lastUpdateTime
except Exception as e:
print("queryNews Exception", e)
return pd.DataFrame(), lastUpdateTime
return | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 18:37:20 2020
@author: Blackr
The following is a script I wrote to automatically extract the Tafel data from
a .csv file with the appropriate I, Ewe, and time columns
time = s
I = mA
E = V
This is mostly used as a coding exercise for post-processing. Re-evaluate when it is time to write more post-processing scripts
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
df = pd.read_csv('March5_2020_Cell4.csv')
#Drop columns that have all NaN (.csv problem?)
df.dropna(axis =1, how ='all', inplace = True)
#Determine what Ewe data is "stable" compared to transition noise and drop the "noise" data
df['Ewe'].value_counts()
#Explanation of how the code below works is here:
#https://stackoverflow.com/questions/34913546/remove-low-counts-from-pandas-data-frame-column-on-condition
s = df['Ewe'].value_counts()
dfclean = df[df.isin(s.index[s >= 100]).values]
#Determine the median value of the I vs. t curve at each potential
#Create an array of unique Ewe values
unique = dfclean['Ewe'].unique()
#For loop to create an array of median values corresponding to the median aggregate of each unique Ewe
median_values = []
for x in unique:
df2 = df[df['Ewe'] == x]
median_value = df2['I'].median()
median_values.append(median_value)
#Convert unique and median_value variables into Series following by a dataframe
median_values = pd.Series(median_values)
unique = | pd.Series(unique) | pandas.Series |
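# Sketch (not in the original script): `stats` is imported above but never used, which
# suggests a Tafel fit was intended. Assuming the usual Tafel form E = a + b*log10(|I|)
# with Ewe in V and I in mA, the slope could be estimated from the medians above:
tafel_I = np.abs(np.asarray(median_values, dtype=float))
tafel_E = np.asarray(unique, dtype=float)
tafel_mask = tafel_I > 0  # log10 is undefined at zero current
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(tafel_I[tafel_mask]), tafel_E[tafel_mask])
print('Tafel slope: %.1f mV/decade (R^2 = %.3f)' % (slope * 1000, r_value ** 2))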
# pylint: disable=redefined-outer-name
import filecmp
from io import StringIO
from pathlib import Path
from tempfile import TemporaryDirectory
import pandas as pd
import pytest
from courier.config import get_config
from courier.elements import CourierIssue, IssueStatistics, export_articles
from courier.elements.export_articles import extract_percentage, save_overlap, save_statistics, save_statistics_by_case
CONFIG = get_config()
def test_export_articles_generates_expected_output():
with TemporaryDirectory() as output_dir:
errors = export_articles('012656', output_dir)
assert len(sorted(Path(output_dir).glob('*.txt'))) == 5
assert not filecmp.dircmp(output_dir, CONFIG.test_files_dir / 'expected/export_articles').diff_files
assert len(filecmp.dircmp(output_dir, CONFIG.test_files_dir / 'not_expected').diff_files) == 1
assert len(errors) == 0
def test_export_articles_with_errors_generates_expected_output():
courier_id = '063436'
issue = CourierIssue(courier_id)
with TemporaryDirectory() as output_dir:
errors = export_articles('063436', output_dir)
assert len(sorted(Path(output_dir).glob('*.txt'))) == IssueStatistics(issue).number_of_articles
assert len(errors) != 0
def test_stats():
courier_ids = ['066943', '014255', '016653']
stats = []
with TemporaryDirectory() as output_dir:
for courier_id in courier_ids:
stats += export_articles(courier_id, output_dir)
statistics = (
| pd.DataFrame(stats) | pandas.DataFrame |
"""
To test the quality of the estimators, we generate data
both from a semilinear Choo and Siow model
and from a semilinear nested logit model.
We use both the Poisson estimator and the minimum-distance estimator
on the former model, and only the minimum-distance estimator on the latter.
"""
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import SeedSequence
import pandas as pd
import seaborn as sns
from .utils import nprepeat_col, nprepeat_row, print_stars
from .choo_siow import entropy_choo_siow
from .entropy import EntropyFunctions
from .min_distance_utils import MDEResults
from .min_distance import estimate_semilinear_mde
from .model_classes import ChooSiowPrimitives, NestedLogitPrimitives
from .nested_logit import setup_standard_nested_logit
from .poisson_glm import PoissonGLMResults, choo_siow_poisson_glm
def choo_siow_simul(
phi_bases: np.ndarray,
n: np.ndarray,
m: np.ndarray,
true_coeffs: np.ndarray,
n_households: int,
n_simuls: int,
seed: int = None,
) -> Tuple[List[MDEResults], List[PoissonGLMResults]]:
"""
Monte Carlo simulation of the minimum distance and Poisson estimators
for the Choo and Siow model
Args:
phi_bases: an (X,Y,K) array of bases
n: an X-vector, margins for men
m: an Y-vector, margins for women
true_coeffs: a K-vector, the true values of
the coefficients of the bases
n_households: the number of households
n_simuls: the number of samples for the simulation
seed: an integer seed for the random number generator
Returns:
the lists of results for the min distance estimator
and the Poisson GLM estimator
"""
Phi = phi_bases @ true_coeffs
choo_siow_instance = ChooSiowPrimitives(Phi, n, m)
ss = SeedSequence(seed)
child_seeds = ss.spawn(n_simuls)
min_distance_results = []
poisson_results = []
for s in range(n_simuls):
mus_sim = choo_siow_instance.simulate(n_households, child_seeds[s])
mde_results = estimate_semilinear_mde(mus_sim, phi_bases, entropy_choo_siow)
min_distance_results.append(mde_results)
poisson_glm_results = choo_siow_poisson_glm(mus_sim, phi_bases, verbose=1)
poisson_results.append(poisson_glm_results)
print(f"\nChoo-Siow: estimates for sample {s}:")
print(" MDE:")
print(mde_results.estimated_coefficients)
print(" Poisson:")
print(poisson_glm_results.estimated_beta)
return min_distance_results, poisson_results
def nested_logit_simul(
phi_bases: np.ndarray,
n: np.ndarray,
m: np.ndarray,
entropy_model: EntropyFunctions,
true_alphas: np.ndarray,
true_betas: np.ndarray,
n_households: int,
n_simuls: int,
seed: int = None,
) -> List[MDEResults]:
"""
Monte Carlo simulation of the minimum distance estimator
for the nested logit
Args:
phi_bases: an (X,Y,K) array of bases
n: an X-vector, margins for men
m: an Y-vector, margins for women
entropy_model: the nested logit specification
true_alphas: an (n_rhos+n_deltas)-vector,
the true values of the nests parameters
true_betas: a K-vector,
the true values of the coefficients of the bases
n_households: the number of households
n_simuls: the number of samples for the simulation
seed: an integer seed for the random number generator
Returns:
the list of results for the min distance estimator
"""
Phi = phi_bases @ true_betas
nests_for_each_x, nests_for_each_y = entropy_model.more_params
nested_logit_instance = NestedLogitPrimitives(
Phi, n, m, nests_for_each_x, nests_for_each_y, true_alphas
)
ss = SeedSequence(seed)
child_seeds = ss.spawn(n_simuls)
min_distance_results = []
for s in range(n_simuls):
mus_sim = nested_logit_instance.simulate(n_households, child_seeds[s])
mde_result = estimate_semilinear_mde(
mus_sim,
phi_bases,
entropy_model,
more_params=entropy_model.more_params,
)
min_distance_results.append(mde_result)
print(f"\nNested logit: MDE estimates for sample {s}:")
print(mde_result.estimated_coefficients)
return min_distance_results
if __name__ == "__main__":
"""we draw n_simuls samples of n_households households"""
n_households = 100_000
n_simuls = 1000
run_choo_siow = True
run_nested_logit = False
plot_choo_siow = True
plot_nested_logit = False
# integer to select a variant; None to do the central scenario
do_variant = None
X, Y, K = 20, 20, 8
# set of 8 basis functions
phi_bases = np.zeros((X, Y, K))
phi_bases[:, :, 0] = 1.0
vec_x = np.arange(X)
vec_y = np.arange(Y)
phi_bases[:, :, 1] = nprepeat_col(vec_x, Y)
phi_bases[:, :, 2] = nprepeat_row(vec_y, X)
phi_bases[:, :, 3] = phi_bases[:, :, 1] * phi_bases[:, :, 1]
phi_bases[:, :, 4] = phi_bases[:, :, 1] * phi_bases[:, :, 2]
phi_bases[:, :, 5] = phi_bases[:, :, 2] * phi_bases[:, :, 2]
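    # bases 6 and 7 live on the upper triangle (y >= x): an indicator of x <= y and the signed gap x - y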
for i in range(X):
for j in range(i, Y):
phi_bases[i, j, 6] = 1
phi_bases[i, j, 7] = i - j
true_betas = np.array([1.0, 0.0, 0.0, -0.01, 0.02, -0.01, 0.5, 0.0])
str_variant = ""
if do_variant is not None:
if do_variant == 1:
X, Y, K = 10, 10, 4
phi_bases = phi_bases[:X, :Y, :K]
true_betas = true_betas[:K]
elif do_variant == 2:
X, Y, K = 4, 5, 6
phi_bases = phi_bases[:X, :Y, :K]
true_betas = true_betas[:K]
elif do_variant == 3:
X, Y, K = 20, 20, 2
phi_bases = phi_bases[:X, :Y, :K]
true_betas = true_betas[:K]
str_variant = f"_v{do_variant}"
t = 0.2
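    # geometrically decaying margins (common ratio 1 - t) across the X types of men and Y types of women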
n = np.logspace(start=0, base=1 - t, stop=X - 1, num=X)
m = np.logspace(start=0, base=1 - t, stop=Y - 1, num=Y)
beta_names = [f"beta[{i}]" for i in range(1, K + 1)]
seed = 5456456
if run_choo_siow:
choo_siow_results_file = (
"choo_siow_simul_results"
+ f"{str_variant}_N{n_households}_seed{seed}"
+ ".csv"
)
min_distance_results, poisson_results = choo_siow_simul(
phi_bases, n, m, true_betas, n_households, n_simuls, seed
)
mde_estimated_beta = np.zeros((n_simuls, K))
poisson_estimated_beta = np.zeros((n_simuls, K))
mde_stderrs_beta = np.zeros((n_simuls, K))
poisson_stderrs_beta = np.zeros((n_simuls, K))
K2 = 2 * K
n_rows = K2 * n_simuls
simulation = np.zeros(n_rows, dtype=int)
estimate = np.zeros(n_rows)
stderrs = np.zeros(n_rows)
estimator = []
coefficient = []
beg_s = 0
for s in range(n_simuls):
mde_resus_s = min_distance_results[s]
poisson_resus_s = poisson_results[s]
mde_estimated_beta[s, :] = mde_resus_s.estimated_coefficients
poisson_estimated_beta[s, :] = poisson_resus_s.estimated_beta
mde_stderrs_beta[s, :] = mde_resus_s.stderrs_coefficients
poisson_stderrs_beta[s, :] = poisson_resus_s.stderrs_beta
slice_K2 = slice(beg_s, beg_s + K2)
simulation[slice_K2] = s
estimator.extend(["Minimum distance"] * K)
estimator.extend(["Poisson"] * K)
coefficient.extend(beta_names)
coefficient.extend(beta_names)
slice_K = slice(beg_s, beg_s + K)
estimate[slice_K] = mde_estimated_beta[s, :]
stderrs[slice_K] = mde_stderrs_beta[s, :]
slice_K_K2 = slice(beg_s + K, beg_s + K2)
estimate[slice_K_K2] = poisson_estimated_beta[s, :]
stderrs[slice_K_K2] = poisson_stderrs_beta[s, :]
beg_s += K2
true_values = np.tile(true_betas, 2 * n_simuls)
choo_siow_results = pd.DataFrame(
{
"Simulation": simulation,
"Estimator": estimator,
"Parameter": coefficient,
"Estimate": estimate,
"Standard Error": stderrs,
"True value": true_values,
}
)
choo_siow_results.to_csv(choo_siow_results_file)
if plot_choo_siow:
choo_siow_results_file = (
"choo_siow_simul_results"
+ f"{str_variant}_N{n_households}_seed{seed}"
+ ".csv"
)
choo_siow_results = | pd.read_csv(choo_siow_results_file) | pandas.read_csv |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 qizai <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
This script will search for all lp{0, 1, ..., N}_{right, left, middle}_100_region.csv
files. Each line in the csv file is a complex with at least two fragments intersecting the
loop region. 'complex_direction' indicates the side of the motif with which this complex has
at least one overlapping fragment.
NOTE [TODO]:
    The annotation file needs to contain 'convergence' info. The original ChIA-PET annotation has
    an extended annotated file which contains it.
    pseudo_annot needs to be changed accordingly.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import ipdb
import os
import functools
import tqdm
import argparse
def mainfn(args):
p2loop_annot = args.p2loop_annot
p2loop_tag = args.p2loop_tag
p2root = args.p2intersected_complex_folder
p2res_root = args.p2binning_results_saved_folder
expr_name = args.expr_name
nbins = args.nbins
pseudo_loop = args.pseudo
annot_col_name = ['left_chr', 'left_start', 'left_end',
'right_chr', 'right_start', 'right_end',
'PET count', 'left_max_intensity', 'right_max_intensity',
'left_max_index', 'right_max_index', 'loop_ID',
'left_motif_chr', 'left_motif_start', 'left_motif_end',
'left_motif_strand', 'left_distance',
'right_motif_chr', 'right_motif_start', 'right_motif_end',
'right_motif_strand', 'right_distance']
if pseudo_loop:
df_loop_annot = pd.read_csv(p2loop_annot, sep='\t',
names = annot_col_name)
df_loop_annot = df_loop_annot.assign(convergence = 'convergence')
else:
# annot_col_name = ['bias', 'convergence', 'NULL motif']
df_loop_annot_raw = pd.read_csv(p2loop_annot, sep='\t',
names = annot_col_name)
df_loop_tag = pd.read_csv(p2loop_tag, sep = '\t', index_col = 0)
loop_conv_tag = df_loop_tag.loc[df_loop_annot_raw.index]['convergence'].values
df_loop_annot = df_loop_annot_raw.assign(convergence = loop_conv_tag)
p2region = os.path.join(p2root, expr_name)
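    # collect the loop IDs for which complexes on the left, right and middle of the loop are all present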
left_region_set = set([x.split('_')[0]
for x in os.listdir(p2region) if ('right' in x)])
right_region_set = set([x.split('_')[0]
for x in os.listdir(p2region) if ('left' in x)])
middle_region_set = set([x.split('_')[0]
for x in os.listdir(p2region) if ('middle' in x)])
common_regions = list(right_region_set.intersection(left_region_set).intersection(middle_region_set))
    # ---- keep only convergent loops in the common set that span more than 200 kb.
com_idxs = df_loop_annot['loop_ID'].isin(common_regions)
df_coms = df_loop_annot[com_idxs]
com_convergence_idxs = (df_coms['convergence'] == 'convergence')
df_coms_conv = df_coms[com_convergence_idxs]
coms_conv_2kb_idx = df_coms_conv.apply(lambda x: (x.right_end - x.left_start) > 2e5,
axis=1)
df_coms_conv_2kb = df_coms_conv[coms_conv_2kb_idx]
# ----- main loop ----
df_combine_mid_right_bin_vectors = df_combine_mid_left_bin_vectors = | pd.DataFrame() | pandas.DataFrame |
import io
import textwrap
from collections import namedtuple
import numpy as np
import pandas as pd
import statsmodels.api as sm
from estimagic.config import EXAMPLE_DIR
from estimagic.visualization.estimation_table import _convert_model_to_series
from estimagic.visualization.estimation_table import _create_statistics_sr
from estimagic.visualization.estimation_table import _process_body_df
from estimagic.visualization.estimation_table import _process_model
from estimagic.visualization.estimation_table import estimation_table
from pandas.testing import assert_frame_equal as afe
from pandas.testing import assert_series_equal as ase
# test process_model for different model types
NamedTup = namedtuple("NamedTup", "params info")
fix_path = EXAMPLE_DIR / "diabetes.csv"
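# fit an OLS of the diabetes target on the first four features; the tests below reuse this model as a fixture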
df_ = pd.read_csv(fix_path, index_col=0)
est = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:4]])).fit()
def test_estimation_table():
models = [est]
return_type = "python"
res = estimation_table(models, return_type, append_notes=False)
exp = {}
body_str = """
index,{(1)}
const,152.13$^{*** }$
,(2.85)
Age,37.24$^{ }$
,(64.12)
Sex,-106.58$^{* }$
,(62.13)
BMI,787.18$^{*** }$
,(65.42)
ABP,416.67$^{*** }$
,(69.49)
"""
exp["body_df"] = _read_csv_string(body_str).fillna("")
exp["body_df"].set_index("index", inplace=True)
footer_str = """
,{(1)}
Observations,442.0
R$^2$,0.4
Adj. R$^2$,0.39
Residual Std. Error,59.98
F Statistic,72.91$^{***}$
"""
exp["footer_df"] = _read_csv_string(footer_str).fillna("")
exp["footer_df"].set_index(" ", inplace=True)
exp["footer_df"].index.names = [None]
exp["footer_df"].index = pd.MultiIndex.from_arrays([exp["footer_df"].index])
exp["notes_tex"] = "\\midrule\n"
exp[
"notes_html"
] = """<tr><td colspan="2" style="border-bottom: 1px solid black">
</td></tr>"""
afe(exp["footer_df"], res["footer_df"])
afe(exp["body_df"], res["body_df"], check_index_type=False)
ase(pd.Series(exp["notes_html"]), pd.Series(res["notes_html"]))
ase(pd.Series(exp["notes_tex"]), pd.Series(res["notes_tex"]))
def test_process_model_namedtuple():
# checks that process_model doesn't alter values
df = pd.DataFrame(columns=["value", "p_value", "ci_lower", "ci_upper"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["ci_lower"] = np.arange(10)
df["ci_upper"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
model = NamedTup(params=df, info=info)
res = _process_model(model)
afe(res.params, df)
ase(pd.Series(res.info), pd.Series(info))
def test_process_model_stats_model():
par_df = pd.DataFrame(
columns=["value", "p_value", "standard_error", "ci_lower", "ci_upper"],
index=["const", "Age", "Sex", "BMI", "ABP"],
)
par_df["value"] = [152.133484, 37.241211, -106.577520, 787.179313, 416.673772]
par_df["p_value"] = [
2.048808e-193,
5.616557e-01,
8.695658e-02,
5.345260e-29,
4.245663e-09,
]
par_df["standard_error"] = [2.852749, 64.117433, 62.125062, 65.424126, 69.494666]
par_df["ci_lower"] = [146.526671, -88.775663, -228.678572, 658.594255, 280.088446]
par_df["ci_upper"] = [157.740298, 163.258084, 15.523532, 915.764371, 553.259097]
info_dict = {}
info_dict["rsquared"] = 0.40026108237714
info_dict["rsquared_adj"] = 0.39477148130050055
info_dict["fvalue"] = 72.91259907398705
info_dict["f_pvalue"] = 2.700722880950139e-47
info_dict["df_model"] = 4.0
info_dict["df_resid"] = 437.0
info_dict["dependent_variable"] = "target"
info_dict["resid_std_err"] = 59.97560860753488
info_dict["n_obs"] = 442.0
res = _process_model(est)
afe(res.params, par_df)
ase(pd.Series(res.info), pd.Series(info_dict))
def test_process_model_dict():
df = pd.DataFrame(columns=["value", "p_value", "standard_error"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["standard_error"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
mod = {}
mod["params"] = df
mod["info"] = info
res = _process_model(mod)
afe(res.params, mod["params"])
ase(pd.Series(res.info), pd.Series(mod["info"]))
# test convert_model_to_series for different arguments
def test_convert_model_to_series_conf_int():
df = pd.DataFrame(
np.array(
[[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3], [1.2, 3.3, 4.3]]
).T,
columns=["value", "p_value", "ci_lower", "ci_upper"],
index=["a", "b", "c"],
)
si_lev = [0.1, 0.05, 0.01]
si_dig = 2
ci = True
si = True
ss = True
res = _convert_model_to_series(df, si_lev, si_dig, si, ci, ss)
exp = pd.Series(
[
"0.6$^{ }$",
r"{(0.6\,;\,1.2)}",
"2.3$^{** }$",
r"{(2.3\,;\,3.3)}",
"3.3$^{*** }$",
r"{(3.3\,;\,4.3)}",
],
index=["a", "", "b", "", "c", ""],
name="",
)
exp.index.name = "index"
| ase(exp, res) | pandas.testing.assert_series_equal |
# Streamlit live coding script
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import plotly.express as px
import plotly.graph_objects as go
df = pd.read_csv('src/data/marketing_campaign_cleaned.csv', index_col=[0])
st.title("Customer personality analysis")
st.markdown('## 1. Descriptive statistics')
st.subheader('Education')
fig = px.histogram(df, x='Education', color='Education')
st.plotly_chart(fig)
st.subheader('Income distribution')
fig = px.histogram(df, x='Income')
st.plotly_chart(fig)
st.subheader('Martial status')
fig = px.histogram(df, x='Living_With', color='Living_With')
st.plotly_chart(fig)
st.subheader('Education by martial status')
sunburst_df = df[['Education', 'Living_With']]
fig = px.sunburst(sunburst_df, path=['Living_With', 'Education'])
st.plotly_chart(fig)
st.subheader('Success of advertising campaigns')
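# reshape the per-campaign acceptance flags into long format and keep only accepted offers (success == 1)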
campaigns_df = pd.melt(df, value_vars=['AcceptedCmp1','AcceptedCmp2','AcceptedCmp3','AcceptedCmp4','AcceptedCmp5', 'Response'], var_name='campaign', value_name='success', ignore_index=True)
success_df = campaigns_df[campaigns_df.success == 1]
fig = px.histogram(success_df, x='success', y='campaign', color='campaign')
st.plotly_chart(fig)
st.subheader('Amount spent by age group')
category = pd.cut(df.Age,bins=[25,40,65,81],labels=['Adults','Seniors', 'Elderly'])
df.insert(2,'Age_Groups',category)
mean_spent = df.groupby(['Age_Groups']).Spent.mean().reset_index()
fig = px.bar(mean_spent, x='Age_Groups', y='Spent', color='Age_Groups')
fig.update_layout(
xaxis=dict(
title={'text': ''},
tickvals=[0, 1, 2],
ticktext=['25-40', '40-65', '65+']
),
yaxis={'title': {'text': 'Mean spendings'}}
)
st.plotly_chart(fig)
st.subheader('Number of customers per spent group')
spend_5_500 = df["Spent"][(df["Spent"] >=5) & (df["Spent"] <= 500)]
spend_501_1000 = df["Spent"][(df["Spent"] >=501) & (df["Spent"] <= 1000)]
spend_1001_1500 = df["Spent"][(df["Spent"] >=1001) & (df["Spent"] <= 1500)]
spend_1501_2000 = df["Spent"][(df["Spent"] >=1501) & (df["Spent"] <= 2000 )]
spend_2001_2525 = df["Spent"][(df["Spent"] >=2001) & (df["Spent"] <= 2525)]
spend_x = ['5-500', '501-1000', '1001-1500', '1501-2000', '2001-2525']
spend_y = [len(spend_5_500.values), len(spend_501_1000.values), len(spend_1001_1500.values), len(spend_1501_2000.values), len(spend_2001_2525.values)]
d = {'spent_group': spend_x, 'count': spend_y}
spent_df = pd.DataFrame(d)
fig = px.bar(spent_df, x='spent_group', y='count', color='spent_group')
fig.update_layout(
xaxis={'title': {'text': 'Spent group'}},
yaxis={'title': {'text': 'Number of customers'}}
)
st.plotly_chart(fig)
st.subheader('Number of customers per income group')
income_1_30k = df["Income"][(df["Income"] >=1000) & (df["Income"] <= 30000)]
income_30_60k = df["Income"][(df["Income"] >=30001) & (df["Income"] <= 60000)]
income_60_90k = df["Income"][(df["Income"] >=60001) & (df["Income"] <= 90000)]
income_90_120k = df["Income"][(df["Income"] >=90001) & (df["Income"] <= 120000 )]
income_120_170k = df["Income"][(df["Income"] >=120001) & (df["Income"] <= 170000)]
income_x = ['1k - 30k', '30k - 60k', '60k - 90k', '90k - 120k', '120k - 170k']
income_y = [len(income_1_30k.values), len(income_30_60k.values), len(income_60_90k.values), len(income_90_120k.values), len(income_120_170k.values)]
d = {'income_group': income_x, 'count': income_y}
income_df = | pd.DataFrame(d) | pandas.DataFrame |
import os
import pandas as pd
import datetime
import numpy as np
from talib import abstract
from .crawler import check_monthly_revenue
class Data():
def __init__(self):
self.date = datetime.datetime.now().date()
self.warrning = False
self.col2table = {}
tnames = os.listdir(os.path.join('history', 'items'))
for tname in tnames:
path = os.path.join('history', 'items', tname)
if not os.path.isdir(path):
continue
items = [f[:-4] for f in os.listdir(path)]
for item in items:
if item not in self.col2table:
self.col2table[item] = []
self.col2table[item].append(tname)
def get(self, name, amount=0, table=None, convert_to_numeric=True):
if table is None:
candidates = self.col2table[name]
if len(candidates) > 1 and self.warrning:
                print('** WARNING: multiple tables contain the same item', name, ':', candidates)
                print('** taking', candidates[0])
print('** please specify the table name as an argument if you need the file from another table')
for c in candidates:
print('** data.get(', name, ',',amount, ', table=', c, ')')
table = candidates[0]
df = pd.read_pickle(os.path.join('history', 'items', table, name + '.pkl'))
return df.loc[:self.date.strftime("%Y-%m-%d")].iloc[-amount:]
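    # look up a TA-Lib indicator by name via talib.abstract and apply it to every symbol using the
    # OHLCV frames loaded with get(); symbols that fail are filled with NaN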
def talib(self, func_name, amount=0, **args):
func = getattr(abstract, func_name)
isSeries = True if len(func.output_names) == 1 else False
names = func.output_names
if isSeries:
dic = {}
else:
dics = {n:{} for n in names}
close = self.get('收盤價', amount)
open_ = self.get('開盤價', amount)
high = self.get('最高價', amount)
low = self.get('最低價', amount)
volume= self.get('成交股數', amount)
for key in close.columns:
try:
s = func({'open':open_[key].ffill(),
'high':high[key].ffill(),
'low':low[key].ffill(),
'close':close[key].ffill(),
'volume':volume[key].ffill()}, **args)
except Exception as e:
if "inputs are all NaN" != str(e):
print('Warrning occur during calculating stock '+key+':',e)
print('The indicator values are set to NaN.')
if isSeries:
s = pd.Series(index=close[key].index)
else:
s = pd.DataFrame(index=close[key].index, columns=dics.keys())
if isSeries:
dic[key] = s
else:
for colname, si in zip(names, s):
dics[colname][key] = si
if isSeries:
ret = pd.DataFrame(dic, index=close.index)
ret = ret.apply(lambda s:pd.to_numeric(s, errors='coerce'))
else:
newdic = {}
for key, dic in dics.items():
newdic[key] = pd.DataFrame(dic, close.index).loc[:self.date]
ret = [newdic[n] for n in names]#pd.Panel(newdic)
ret = [d.apply(lambda s: | pd.to_numeric(s, errors='coerce') | pandas.to_numeric |
from bs4 import BeautifulSoup as BS
from selenium import webdriver
from functools import reduce
import pandas as pd
import time
import xport
def render_page(url):
driver = webdriver.Chrome('/Users/cp/Downloads/chromedriver')
driver.get(url)
time.sleep(3)
r = driver.page_source
driver.quit()
return r
def scraper2(page, dates):
output = pd.DataFrame()
for d in dates:
url = str(str(page) + str(d))
r = render_page(url)
soup = BS(r, "html.parser")
container = soup.find('lib-city-history-observation')
check = container.find('tbody')
data = []
for c in check.find_all('tr', class_='ng-star-inserted'):
for i in c.find_all('td', class_='ng-star-inserted'):
trial = i.text
trial = trial.strip(' ')
data.append(trial)
if len(data)%2 == 0:
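            # the parsed history table yields 10 cells per observation row, so slice with a
            # stride of 10 to recover each field (cells 4 and 6 are not used here)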
hour = pd.DataFrame(data[0::10], columns = ['hour'])
temp = pd.DataFrame(data[1::10], columns = ['temp'])
dew = pd.DataFrame(data[2::10], columns = ['dew'])
humidity = pd.DataFrame(data[3::10], columns = ['humidity'])
wind_speed = pd.DataFrame(data[5::10], columns = ['wind_speed'])
pressure = pd.DataFrame(data[7::10], columns = ['pressure'])
precip = pd.DataFrame(data[8::10], columns = ['precip'])
cloud = pd.DataFrame(data[9::10], columns = ['cloud'])
dfs = [hour, temp,dew,humidity, wind_speed, pressure, precip, cloud]
df_final = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), dfs)
df_final['Date'] = str(d) + "-" + df_final.iloc[:, :1].astype(str)
output = output.append(df_final)
print('Scraper done!')
output = output[['hour', 'temp', 'dew', 'humidity', 'wind_speed', 'pressure',
'precip', 'cloud']]
return output
def jan_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2021-1-"+i))
return lst
january = jan_dates()
def feb_dates():
lst = []
for i in range(1, 30):
i = str(i)
lst.append(str("2020-2-"+i))
return lst
feb = feb_dates()
def march_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-3-"+i))
return lst
mar = march_dates()
def april_dates():
lst = []
for i in range(1, 31):
i = str(i)
lst.append(str("2020-4-"+i))
return lst
april = april_dates()
def may_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-5-"+i))
return lst
may = may_dates()
def june_dates():
lst = []
for i in range(1, 31):
i = str(i)
lst.append(str("2020-6-"+i))
return lst
june = june_dates()
def july_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-7-"+i))
return lst
july = july_dates()
def august_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-8-"+i))
return lst
august = august_dates()
def september_dates():
lst = []
for i in range(1, 31):
i = str(i)
lst.append(str("2020-9-"+i))
return lst
september = september_dates()
def october_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-10-"+i))
return lst
october = october_dates()
def november_dates():
lst = []
for i in range(1, 8):
i = str(i)
lst.append(str("2020-11-"+i))
return lst
november_to7 = november_dates()
def november_dates_end():
lst = []
for i in range(9, 31):
i = str(i)
lst.append(str("2020-11-"+i))
return lst
november_end = november_dates_end()
def december_dates():
lst = []
for i in range(1, 32):
i = str(i)
lst.append(str("2020-12-"+i))
return lst
december = december_dates()
def scraper3(page, dates):
output = pd.DataFrame()
for d in dates:
url = str(str(page) + str(d))
r = render_page(url)
soup = BS(r, "html.parser")
container = soup.find('lib-city-history-observation')
check = container.find('tbody')
data = []
for c in check.find_all('tr', class_='ng-star-inserted'):
for i in c.find_all('td', class_='ng-star-inserted'):
trial = i.text
trial = trial.strip(' ')
data.append(trial)
if len(data)%2 == 0:
hour = pd.DataFrame(data[0::10], columns = ['hour'])
temp = pd.DataFrame(data[1::10], columns = ['temp'])
dew = pd.DataFrame(data[2::10], columns = ['dew'])
humidity = pd.DataFrame(data[3::10], columns = ['humidity'])
wind_speed = pd.DataFrame(data[5::10], columns = ['wind_speed'])
pressure = pd.DataFrame(data[7::10], columns = ['pressure'])
precip = pd.DataFrame(data[8::10], columns = ['precip'])
cloud = pd.DataFrame(data[9::10], columns = ['cloud'])
# date2 = pd.DataFrame(str(str(d)), columns = ['date2'])
dfs = [hour, temp,dew,humidity, wind_speed, pressure, precip, cloud]
df_final = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), dfs)
df_final['Date'] = str(d)
output = output.append(df_final)
print('Scraper done!')
output = output[['hour', 'temp', 'dew', 'humidity', 'wind_speed', 'pressure',
'precip', 'cloud', 'Date']]
# output.to_csv (f"r'/Users/cp/Desktop/capstone2/{dates[0]}_scraped_temps.csv'", index = False, header=True)
return output
# return data
def weather_scrape(lst2):
months = []
for i in lst2:
month1 = scraper3(page, i)
months.append(month1)
year = pd.concat(months)
return year
# year_2020 = weather_scrape(lst)
# year_2020.to_csv (r'/Users/cp/Desktop/capstone2/DALLAS_YEAR_SCRAPE.csv', index = False, header=True)
if __name__ =='__main__':
#page = 'https://www.wunderground.com/history/daily/us/tx/dallas/KDAL/date/'
def scraper(page, dates):
output = pd.DataFrame()
for d in dates:
url = str(str(page) + str(d))
r = render_page(url)
soup = BS(r, "html.parser")
container = soup.find('lib-city-history-observation')
check = container.find('tbody')
data = []
for c in check.find_all('tr', class_='ng-star-inserted'):
for i in c.find_all('td', class_='ng-star-inserted'):
trial = i.text
trial = trial.strip(' ')
data.append(trial)
if round(len(data) / 10) == 23:
hour = pd.DataFrame(data[0::10], columns = ['hour'])
temp = pd.DataFrame(data[1::10], columns = ['temp'])
dew = pd.DataFrame(data[2::10], columns = ['dew'])
humidity = pd.DataFrame(data[3::10], columns = ['humidity'])
wind_speed = pd.DataFrame(data[5::10], columns = ['wind_speed'])
pressure = pd.DataFrame(data[7::10], columns = ['pressure'])
precip = pd.DataFrame(data[8::10], columns = ['precip'])
cloud = pd.DataFrame(data[9::10], columns = ['cloud'])
dfs = [hour, temp,dew,humidity, wind_speed, pressure, precip, cloud]
df_final = reduce(lambda left, right: | pd.merge(left, right, left_index=True, right_index=True) | pandas.merge |
import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
@pytest.mark.parametrize('nat, idx', [( | Timestamp('NaT') | pandas.Timestamp |
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not installed')
class TestCommon(unittest.TestCase):
def test_convert_list(self):
obj = r('list(a=1, b=2, c=3)')
converted = com.convert_robj(obj)
expected = {'a': [1], 'b': [2], 'c': [3]}
tm.assert_dict_equal(converted, expected)
def test_convert_nested_list(self):
obj = r('list(a=list(foo=1, bar=2))')
converted = com.convert_robj(obj)
expected = {'a': {'foo': [1], 'bar': [2]}}
tm.assert_dict_equal(converted, expected)
def test_convert_frame(self):
# built-in dataset
df = r['faithful']
converted = com.convert_robj(df)
assert np.array_equal(converted.columns, ['eruptions', 'waiting'])
assert np.array_equal(converted.index, np.arange(1, 273))
def _test_matrix(self):
r('mat <- matrix(rnorm(9), ncol=3)')
r('colnames(mat) <- c("one", "two", "three")')
r('rownames(mat) <- c("a", "b", "c")')
return r['mat']
def test_convert_matrix(self):
mat = self._test_matrix()
converted = com.convert_robj(mat)
assert np.array_equal(converted.index, ['a', 'b', 'c'])
assert np.array_equal(converted.columns, ['one', 'two', 'three'])
def test_convert_r_dataframe(self):
is_na = robj.baseenv.get("is.na")
seriesd = tm.getSeriesData()
frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
# Null data
frame["E"] = [np.nan for item in frame["A"]]
# Some mixed type data
frame["F"] = ["text" if item %
2 == 0 else np.nan for item in range(30)]
r_dataframe = com.convert_to_r_dataframe(frame)
assert np.array_equal(
com.convert_robj(r_dataframe.rownames), frame.index)
assert np.array_equal(
com.convert_robj(r_dataframe.colnames), frame.columns)
assert all(is_na(item) for item in r_dataframe.rx2("E"))
for column in frame[["A", "B", "C", "D"]]:
coldata = r_dataframe.rx2(column)
original_data = frame[column]
assert np.array_equal(com.convert_robj(coldata), original_data)
for column in frame[["D", "E"]]:
for original, converted in zip(frame[column],
r_dataframe.rx2(column)):
if pd.isnull(original):
assert is_na(converted)
else:
assert original == converted
def test_convert_r_matrix(self):
is_na = robj.baseenv.get("is.na")
seriesd = tm.getSeriesData()
frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
# Null data
frame["E"] = [np.nan for item in frame["A"]]
r_dataframe = com.convert_to_r_matrix(frame)
assert np.array_equal(
com.convert_robj(r_dataframe.rownames), frame.index)
assert np.array_equal(
com.convert_robj(r_dataframe.colnames), frame.columns)
assert all(is_na(item) for item in r_dataframe.rx(True, "E"))
for column in frame[["A", "B", "C", "D"]]:
coldata = r_dataframe.rx(True, column)
original_data = frame[column]
assert np.array_equal(com.convert_robj(coldata),
original_data)
# Pandas bug 1282
frame["F"] = ["text" if item %
2 == 0 else np.nan for item in range(30)]
try:
wrong_matrix = com.convert_to_r_matrix(frame)
except TypeError:
pass
except Exception:
raise
def test_dist(self):
for name in ('eurodist',):
df = com.load_data(name)
dist = r[name]
labels = r['labels'](dist)
assert np.array_equal(df.index, labels)
assert np.array_equal(df.columns, labels)
def test_timeseries(self):
"""
Test that the series has an informative index.
Unfortunately the code currently does not build a DateTimeIndex
"""
for name in (
'austres', 'co2', 'fdeaths', 'freeny.y', 'JohnsonJohnson',
'ldeaths', 'mdeaths', 'nottem', 'presidents', 'sunspot.month', 'sunspots',
'UKDriverDeaths', 'UKgas', 'USAccDeaths',
'airmiles', 'discoveries', 'EuStockMarkets',
'LakeHuron', 'lh', 'lynx', 'nhtemp', 'Nile',
'Seatbelts', 'sunspot.year', 'treering', 'uspop'):
series = com.load_data(name)
ts = r[name]
assert np.array_equal(series.index, r['time'](ts))
def test_numeric(self):
for name in ('euro', 'islands', 'precip'):
series = com.load_data(name)
numeric = r[name]
names = numeric.names
assert np.array_equal(series.index, names)
def test_table(self):
iris3 = pd.DataFrame({'X0': {0: '0', 1: '1', 2: '2', 3: '3', 4: '4'},
'X1': {0: 'Sepal L.',
1: 'Sepal L.',
2: 'Sepal L.',
3: 'Sepal L.',
4: 'Sepal L.'},
'X2': {0: 'Setosa',
1: 'Setosa',
2: 'Setosa',
3: 'Setosa',
4: 'Setosa'},
'value': {0: '5.1', 1: '4.9', 2: '4.7', 3: '4.6', 4: '5.0'}})
hec = pd.DataFrame(
{
'Eye': {0: 'Brown', 1: 'Brown', 2: 'Brown', 3: 'Brown', 4: 'Blue'},
'Hair': {0: 'Black', 1: 'Brown', 2: 'Red', 3: 'Blond', 4: 'Black'},
'Sex': {0: 'Male', 1: 'Male', 2: 'Male', 3: 'Male', 4: 'Male'},
'value': {0: '32.0', 1: '53.0', 2: '10.0', 3: '3.0', 4: '11.0'}})
titanic = pd.DataFrame(
{
'Age': {0: 'Child', 1: 'Child', 2: 'Child', 3: 'Child', 4: 'Child'},
'Class': {0: '1st', 1: '2nd', 2: '3rd', 3: 'Crew', 4: '1st'},
'Sex': {0: 'Male', 1: 'Male', 2: 'Male', 3: 'Male', 4: 'Female'},
'Survived': {0: 'No', 1: 'No', 2: 'No', 3: 'No', 4: 'No'},
'value': {0: '0.0', 1: '0.0', 2: '35.0', 3: '0.0', 4: '0.0'}})
for name, expected in zip(('HairEyeColor', 'Titanic', 'iris3'),
(hec, titanic, iris3)):
df = | com.load_data(name) | pandas.rpy.common.load_data |
from model.toolkits.parse_conf import parse_config_vina, parse_protein_vina, parse_ligand_vina
import os
import pandas as pd
import numpy as np
from pathlib import Path
import argparse
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import Descriptors, rdMolDescriptors, AllChem, QED
try:
from openbabel import pybel
except:
import pybel
# from metrics_utils import logP, QED, SA, weight, NP
from functools import partial
from multiprocessing import Pool
from tqdm.auto import tqdm
def walk_folder(path, suffix):
# processed = []
files = os.listdir(path)
print(f"{len(files)} files have been detected!")
outpdbqt = []
for file in files:
# print(file)
if suffix in file:
outpdbqt.append(file)
# base_name = os.path.basename(file)
# # print(base_name)
# simple_name = base_name.replace('_', '.').split('.')
# simple_name = simple_name[0]
# processed.append({'simple_name': simple_name,
# 'base_name': base_name, 'full_name': file})
# # print(processed)
return outpdbqt
def prepare_ecfp(args):
dataset = args.dataset
path = args.path
df = pd.read_csv(dataset)
# smi_df = pd.read_csv(args.smi)
# smi_df = smi_df.set_index('ChEMBL ID')
df['index'] = df['Molecule']
df = df.set_index('index')
# counts = 0
# for index in df.index:
# smi_index = index.strip().split("_")[0]
# counts += 1
# try:
# # print(smi_df.loc[smi_index, 'Smiles'])
# smiRaw = smi_df.loc[smi_index, 'Smiles']
# mol = pybel.readstring("smi", smiRaw)
# # strip salt
# mol.OBMol.StripSalts(10)
# mols = mol.OBMol.Separate()
# # print(pybel.Molecule(mols))
# mol = pybel.Molecule(mols[0])
# for imol in mols:
# imol = pybel.Molecule(imol)
# if len(imol.atoms) > len(mol.atoms):
# mol = imol
# smi_clean = mol.write('smi')
# smi_clean = smi_clean.replace('\n', '')
# smi_clean = smi_clean.split()[0]
# df.loc[index, 'smi'] = smi_clean
# print(f'NO.{counts}: {smi_clean} was processed successfully')
# except Exception as e:
# print(e)
# continue
df = df.dropna(axis=0, how='any')
smiList = df['smi']
index = df['Molecule']
# print(smiList)
new_index, ecfpList = [], []
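    # 1024-bit Morgan fingerprint with radius 3 (ECFP6-like) per molecule; molecules that fail to parse are skipped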
for i in range(len(index)):
try:
smi = smiList[i]
if i % 1000 == 0:
print(f"index: {i}; smi= {smi}")
mol = Chem.MolFromSmiles(smi)
ecfp = AllChem.GetMorganFingerprintAsBitVect(
mol, 3, nBits=1024)
ecfp=[index[i]]+list(ecfp)
ecfpList.append(ecfp)
# new_index.append()
except:
continue
# molList = [Chem.MolFromSmiles(smi)
# for smi in smiList]
# ecfpList = [list(AllChem.GetMorganFingerprintAsBitVect(
# mol, 3, nBits=1024)) for mol in molList]
# print(ecfpList)
colName = ['index']+[f'ecfp{i}' for i in range(len(ecfpList[0])-1)]
# print(colName)
dfEcfp = pd.DataFrame(ecfpList, columns=colName)
# dfEcfp['index'] = new_index
dfEcfp = dfEcfp.set_index('index')
# print(dfEcfp)
# print(df)
dfEcfp = pd.concat([df, dfEcfp], axis=1)
dfEcfp = dfEcfp.dropna(axis=0, how='any')
suffix = '_ecfpSmi.csv'
outdf = dataset.replace('.csv', suffix)
# dfEcfp = dfEcfp.dropna(axis=0, how='any')
# if not os.path.exists(outdf):
dfEcfp.to_csv(outdf, index=False)
def prepare_DScorePPropIFP(args, getScore=True, getSMILES=True):
dataset = args.dataset
# smiDataset = args.smi
df = pd.read_csv(dataset)
df = df.set_index('Molecule')
smi_df = | pd.read_csv(args.smi) | pandas.read_csv |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow this in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
        expected = Timestamp('20130102')
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
        factor = Categorical([], ["a", "b", "c"])
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
        self.assertEqual(Timedelta(10.0, unit='d').value, expected)
#coding=utf-8
# Keyboard-pattern analysis
# (1) Read the passwords from the CSDN and Yahoo databases
# (2) Define 14 common keyboard-walk password strings
# (3) Substring-match each password read from the databases against the defined strings (ignoring single letters and digits)
# (4) Keep only the relatively high-frequency passwords and generate a CSV with the most frequent passwords and their frequencies
import pandas as pd
import numpy as np
import csv
np.set_printoptions(suppress=True)
##############################################
# (1) Read the data
##############################################
yahoo_data = pd.read_csv('Yahoo-original-mail-passwd.csv',engine='python',sep='\t', quoting=csv.QUOTE_NONE,names=["email","passwd"], quotechar='"', error_bad_lines=False)
csdn_data = pd.read_csv('csdn-original-username-mail-passwd.csv',engine='python',sep='\t', quoting=csv.QUOTE_NONE,names=["name","email","passwd"],quotechar='"', error_bad_lines=False)
# read the passwords
yahoo_passwd = pd.Series(yahoo_data['passwd'].values)
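# Hedged sketch (the original script is truncated here): one possible implementation of
# step (3), substring-matching the leaked passwords against predefined keyboard-walk
# strings. The pattern tuple below is illustrative, not the original 14 patterns.
def _match_keyboard_patterns(passwords, patterns=("123456", "qwerty", "asdfgh", "zxcvbn")):
    hits = {}
    for pwd in passwords.dropna().astype(str):
        for pat in patterns:
            # ignore single letters/digits: only substrings of length >= 2 are counted
            if len(pat) >= 2 and pat in pwd:
                hits[pat] = hits.get(pat, 0) + 1
    return pd.Series(hits).sort_values(ascending=False)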
"""Functions for transofrmation of films and books datasets.
Functions
---------
get_books_ratings - transform books dataset
get_films_ratings - transform films dataset
generate_datasets - generate films and books datasets
"""
from typing import Set
import pandas as pd
from pathlib import Path
from os import mkdir, path
BOOKS_LOCATION = 'raw_data/books.csv'
FILMS_LOCATIONS = ['raw_data/title.basics.tsv',
'raw_data/title.ratings.tsv']
BOOKS_COLS = {'original_title': 'title', 'ratings_count': 'num_votes'}
FILMS_COLS = {'originalTitle': 'title', 'startYear': 'year',
'averageRating': 'average_rating', 'numVotes': 'num_votes'}
def get_books_ratings(location: str) -> pd.DataFrame:
"""
Read data from books rating dataset, select
<NAME>' books and remove unnecessary data.
:param location: location of the dataset
:return: transformed data
>>> get_books_ratings(BOOKS_LOCATION)
title average_rating num_votes
0 The Time Machine 7.74 276076
1 The War of the Worlds 7.60 159752
2 The Invisible Man 7.24 84778
3 The Island of Dr. Moreau 7.44 60346
"""
dataframe = pd.read_csv(location, low_memory=False)
dataframe = dataframe.loc[(dataframe['authors'].str.contains(
'<NAME>')) & (~dataframe['language_code'].isnull())]
# transform rating from 0-5 to 0-10 system
dataframe['average_rating'] *= 2
# only keep columns with title, rating and ratings count
dataframe = dataframe.loc[:, ['original_title', 'average_rating',
'ratings_count']].reset_index(drop=True)
# rename columns
dataframe.rename(columns=BOOKS_COLS, inplace=True)
return dataframe
def get_films_ratings(location_1: str, location_2: str, books: Set[str]) -> pd.DataFrame:
"""
Read and transform data from two film datasets, only
selecting corresponding films for the given set of books.
:param location_1: location of film titles dataset
:param location_1: location of film ratings dataset
:param books: a set of books to select films to
:return: a dataframe with films ratings
>>> get_films_ratings(*FILMS_LOCATIONS, {'The Time Machine', 'The Island of Dr. Moreau',\
'The Invisible Man', 'The War of the Worlds'})
title year average_rating num_votes
0 The Invisible Man 1933 7.7 30172
1 The War of the Worlds 1953 7.1 32429
2 The Time Machine 1960 7.6 35786
3 The Island of Dr. Moreau 1977 5.9 5677
4 The Island of Dr. Moreau 1996 4.6 30894
5 The Time Machine 2002 6.0 117796
6 The Invisible Man 2020 7.1 152154
7 The Invisible Man 2017 3.3 168
"""
# read title basics data
df_basics = pd.read_csv(location_1, sep='\t', low_memory=False)
    df_ratings = pd.read_csv(location_2, sep='\t', low_memory=False)
import empyrical
import pandas as pd
def main(payload):
port_vals_df = _convert_port_vals_to_df(payload["portVals"])
# Calculates per data point returns
port_vals_returns = port_vals_df["value"].pct_change()
cum_returns = empyrical.cum_returns(port_vals_returns, starting_value=0)
# aggregate_returns = empyrical.aggregate_returns(port_vals_returns, convert_to='weekly')
max_drawdowns = empyrical.max_drawdown(port_vals_returns)
annual_return = empyrical.annual_return(
port_vals_returns, period="daily", annualization=None
)
annual_volatility = empyrical.annual_volatility(
port_vals_returns, period="daily", alpha=2.0, annualization=None
)
calmar_ratio = empyrical.calmar_ratio(
port_vals_returns, period="daily", annualization=None
)
omega_ratio = empyrical.omega_ratio(
port_vals_returns, risk_free=0.0, required_return=0.0, annualization=1
)
sharpe_ratio = empyrical.sharpe_ratio(
port_vals_returns, risk_free=0, period="daily", annualization=None
)
sortino_ratio = empyrical.sortino_ratio(
port_vals_returns,
required_return=0,
period="daily",
annualization=None,
_downside_risk=None,
)
downside_risk = empyrical.downside_risk(
port_vals_returns, required_return=0, period="daily", annualization=None
)
stability_of_timeseries = empyrical.stability_of_timeseries(port_vals_returns)
tail_ratio = empyrical.tail_ratio(port_vals_returns)
cagr = empyrical.cagr(port_vals_returns, period="daily", annualization=None)
# TODO: These are for benchmarking against stocks and indexes
# information_ratio = empyrical.information_ratio(port_vals_returns, 0.3)
# alpha_beta = empyrical.alpha_beta(port_vals_returns, factor_returns, risk_free=0.0, period='daily', annualization=None)
# alpha = empyrical.alpha(port_vals_returns, factor_returns, risk_free=0.0, period='daily', annualization=None, _beta=None)
# beta = empyrical.beta(port_vals_returns, factor_returns, risk_free=0.0)
return {
"cards": [
{"label": "Max Drawdowns", "value": max_drawdowns},
{
"label": "Annual Return",
"value": annual_return,
"type": "PERCENTAGE",
},
{
"label": "Annual Volatility",
"value": annual_volatility,
"type": "PERCENTAGE",
},
{
"label": "Calmar Ratio",
"value": calmar_ratio,
"type": "FIXED",
},
{
"label": "Omega Ratio",
"value": omega_ratio,
"type": "FIXED",
},
{
"label": "Sharpe Ratio",
"value": sharpe_ratio,
"type": "FIXED",
},
{
"label": "Sortino Ratio",
"value": sortino_ratio,
"type": "FIXED",
},
{
"label": "Downside Risk",
"value": downside_risk,
"type": "FIXED",
},
{
"label": "Stability of Time Series",
"value": stability_of_timeseries,
"type": "FIXED",
},
{
"label": "Tail Ratio",
"value": tail_ratio,
"type": "FIXED",
},
{
"label": "CAGR",
"value": cagr,
"type": "FIXED",
},
],
"graphs": [
{"title": "Portfolio Values", "data": payload["portVals"]},
{
"title": "Portfolio Values Returns",
"data": _convert_df_to_json(port_vals_returns),
},
{
"title": "Cumulative Returns",
"data": _convert_df_to_json(cum_returns),
},
],
"tables": [{"title": "Trades", "data": payload["trades"]}],
}
def _convert_port_vals_to_df(port_vals):
    df = pd.DataFrame.from_dict(port_vals, orient="columns")
    return df
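# Hedged stand-in: the body of _convert_df_to_json() used by main() is not included in
# this fragment, so its exact output shape is an assumption. This sketch emits a list of
# {"date", "value"} records from a returns Series, mirroring how the graph payloads are
# consumed above.
def _convert_df_to_json(series):
    return [{"date": str(idx), "value": None if pd.isnull(val) else float(val)}
            for idx, val in series.items()]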
import pandas as pd
from traja.dataset import dataset
def test_category_wise_sampling_few_categories():
data = list()
num_categories = 5
for category in range(num_categories):
for sequence in range(40 + int(category / 14)):
data.append([sequence, sequence, category])
df = pd.DataFrame(data, columns=['x', 'y', 'ID'])
# Hyperparameters
batch_size = 1
num_past = 10
num_future = 5
train_split_ratio = 0.5
validation_split_ratio = 0.2
dataloaders = dataset.MultiModalDataLoader(df,
batch_size=batch_size,
n_past=num_past,
n_future=num_future,
num_workers=1,
train_split_ratio=train_split_ratio,
validation_split_ratio=validation_split_ratio)
verify_category_wise_sampled_dataloaders(dataloaders, train_split_ratio, validation_split_ratio, num_categories)
def test_category_wise_sampling():
data = list()
num_categories = 150
for category in range(num_categories):
for sequence in range(40):
data.append([sequence, sequence, category])
    df = pd.DataFrame(data, columns=['x', 'y', 'ID'])
import pandas as pd
from bs4 import BeautifulSoup
import requests
import re
from tqdm import tqdm
def get_fund_holding(symbol):
url = 'http://finance.sina.com.cn/fund/quotes/{}/bc.shtml'.format(symbol)
html = requests.get(url)
bs = BeautifulSoup(html.content, features="lxml")
tbl = bs.find('table', {'id':'fund_sdzc_table'})
if tbl is None or tbl.tbody.text=='\n':
return
pat = re.compile('\d\d\d\d-\d\d-\d\d')
report_date = pd.to_datetime( pat.findall(bs.find('div', {'class':'zqx_zcpz_date'}).text)[0] )
stocks = tbl.attrs['codelist'].split(',')
ts_codes = [ s[2:]+'.'+s[:2].upper() for s in stocks]
holding = pd.read_html(tbl.prettify())[0]
data_dict = dict(zip(ts_codes, holding[('占净值比例(%)', '持股比例')].str[:-1].astype(float)))
data = pd.DataFrame.from_dict(data_dict, 'index', columns=['holding'])
# lib_fund_holding.write(fund, data, metadata={'report_date':report_date})
return data
if __name__ == "__main__":
import numpy as np
symbols, _ = np.genfromtxt( './refData/fund_list.csv', dtype = str, delimiter = ',', unpack = True )
symbols = [ symbol.zfill(6) for symbol in symbols ]
res = []
for symbol in tqdm(symbols):
df = get_fund_holding(symbol)
if df is not None:
res.append( df.reset_index().assign(fund=symbol) )
    fund_holding = pd.concat(res, ignore_index=True)
import pandas as pd
from Datasets.utils import read_parquet, get_bib_info, clean
import json
import sys
import time
import csv
import random
from tqdm import tqdm
sys.path.append("Models/")
from Models import *
from position_rank import get_weights
def mask(text1, text2):
"""
    A simple vectorization helper: assign each unique phrase an integer id shared
    across both lists and return the two id sequences.
"""
base = 0
vectors = {}
vector1, vector2 = [], []
for phrase in text1:
if phrase not in vectors:
vectors[phrase] = base
base += 1
vector1.append(vectors[phrase])
for phrase in text2:
if phrase not in vectors:
vectors[phrase] = base
base += 1
vector2.append(vectors[phrase])
return vector1, vector2
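# Worked example (illustrative phrases): mask(["deep learning", "nlp"], ["nlp", "graphs"])
# assigns ids in order of first appearance ({"deep learning": 0, "nlp": 1, "graphs": 2})
# and returns ([0, 1], [1, 2]).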
def get_recall(l1,l2):
c = 0.0
C = float(len(l1))
for each in l2:
if each in l1:
c += 1.0
return c/C
def get_precision(l1,l2):
c = 0.0
C = float(len(l2))
for each in l2:
if each in l1:
c += 1.0
return c/C
def get_f1(r,p):
if r + p == 0:
return 0.0
return (2.0*p*r)/(p+r)
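# Worked example (illustrative numbers): with gold = ["nlp", "graphs", "vision"] and
# predicted = ["nlp", "graphs", "cnn", "rnn"], recall = 2/3, precision = 2/4 = 0.5 and
# f1 = 2 * 0.5 * (2/3) / (0.5 + 2/3) ≈ 0.571.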
def get_Rprecision (phrase1, phrase2):
"""
    relaxed R-precision: uses "in" instead of checking the exact position of words
"""
c = 0.0
for w1 in phrase2.split():
if w1 in phrase1:
c += 1
return c/max(len(phrase2.split()), len(phrase1.split()))
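# Worked example (illustrative phrases): get_Rprecision("deep neural network", "neural network")
# finds both words of the second phrase inside the first, so c = 2 and the score is
# 2 / max(2, 3) = 2/3.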
def get_all_Rprecision (gold_keywords, predicted_keywords):
scores=[]
for gphrase in gold_keywords:
rscores=[]
for pphrase in predicted_keywords:
rscores.append(get_Rprecision(gphrase,pphrase))
scores.append(max(rscores))
return sum(scores) / len(scores)
def get_scores (gold_keywords, predicted_keywords):
recall = get_recall(gold_keywords, predicted_keywords)
precision = get_precision(gold_keywords, predicted_keywords)
f1 = get_f1(recall, precision)
Rprecision = get_all_Rprecision(gold_keywords, predicted_keywords)
return f1, recall, precision, Rprecision
def get_all_scores (gold_keywords, predicted_keywords, text=None, adjust=False):
"""
SemEval-2010 Task 5, micro averaged f1, recall, precision
"""
metrics = get_scores(gold_keywords, predicted_keywords)
adjusted_gold = []
adjusted_metrics = []
if adjust:
if text:
for each in gold_keywords:
if each in text:
adjusted_gold.append(each)
if len(adjusted_gold) < 1:
adjusted_metrics = []
else:
adjusted_metrics = get_scores(adjusted_gold, predicted_keywords)
return metrics, adjusted_metrics
def get_key_abs(filepath,year1=1900,year2=2020, bib_files=[], types=[], journals=[], count=None, rand=False):
"""{'science-history-journals': 'Journals on the history, philosophy, and popularization of mathematics and
science', 'compsci': 'Computer science journals and topics', 'acm': 'ACM Transactions', 'cryptography':
'Cryptography', 'fonts': 'Fonts and typography', 'ieee': 'IEEE journals', 'computational': 'Computational/quantum
chemistry/physics journals', 'numerical': 'Numerical analysis journals', 'probstat': 'Probability and statistics
journals', 'siam': 'SIAM journals', 'math': 'Mathematics journals', 'mathbio': 'Mathematical and computational
biology'} """
jfile = open("Datasets/bib_info.json").read()
tables, names = json.loads(jfile)
new_tables = {}
for table in tables:
new_tables[table] = []
for f in tables[table]:
new_tables[table].append(list(f.keys())[0])
df = read_parquet(filepath)
#df =df[df["bib_file"]=="ibmjrd.bib"]
#print(df.columns)
#exit()
if bib_files:
try:
df = df[df["bib_file"].isin(bib_files)]
except:
df = df[df["bibsource"].isin(bib_files)]
if types:
types_list = []
for type in types:
types_list.extend(new_tables[type])
df = df[df['bib_file'].isin(types_list)]
if journals:
df = df[df["journal"].isin(journals)]
# year
try:
df["year"] = df["year"].astype("int")
df = df[df["year"] >= year1]
df = df[df["year"] <= year2]
except:
pass
if rand and count:
df = df.sample(n=count)
else:
df = df[:count]
keywords = df["keywords"].tolist()
abstracts = df["abstract"].tolist()
c1,c2=0.0,0.0
print(df.shape)
print(df["year"].value_counts())
processed_keywords = []
for i in range(len(keywords)):
if ";" in keywords[i]:
keyword = keywords[i].split(";")
elif "," in keywords[i]:
keyword = keywords[i].split(",")
elif "\t" in keywords[i]:
keyword = keywords[i].split("\t")
else:
keyword = keywords[i].split(" to")
new = []
for key in keyword:
if "---" in key:
key = key.split("---")
new.extend([clean(k) for k in key])
else:
new.append(clean(key))
processed_keywords.append(new)
return processed_keywords, abstracts
def eval_file(filepath, model, year1=1900, year2=2020, bib_files=[], types=[], journals=[], limit=None, rand=False, log=True,
model_param = "",outputpaths= ["output.json","output.tsv"], bib_weights = {}):
t1 = time.time()
keywords_gold, abstracts = get_key_abs(filepath, year1, year2, bib_files, types, journals, limit, rand)
if bib_weights:
keywords_gold_w, t = get_key_abs(filepath=bib_weights["dataset"], year1 = bib_weights["year1"], year2= bib_weights["year2"], types = bib_weights["types"])
weights = get_weights(keywords_gold_w)
model = BibRank(weights)
all_scores = []
all_scores_adjust = []
T0 = 0.0
T1 = 0.0
for i in tqdm(range(len(keywords_gold))):
t3 = time.time()
predicted_keywords = model.get_keywords(abstracts[i])
t4 = time.time()
try:
scores = get_all_scores(keywords_gold[i], predicted_keywords[0], abstracts[i], adjust=True)
except:
scores = [[0.0, 0.0, 0.0,0.0] , [0.0, 0.0, 0.0,0.0]]
t5 = time.time()
all_scores.append(scores[0])
if scores[1]:
all_scores_adjust.append(scores[1])
T0 += (t4 - t3)
T1 += (t5 - t4)
all_scores = pd.DataFrame(all_scores)
    all_scores_adjust = pd.DataFrame(all_scores_adjust)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Computes broadband power, offset and slope of power spectrum
Based on selected epochs (e.g. ASCIIS) in the list of files a power spectrum
is computed. Based on this power spectrum the broadband power is calculated,
followed by the offset and slope using the FOOOF algorithm.
Reference paper FOOOF: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2018) Parameterizing Neural
Power Spectra. bioRxiv, 299859. doi: https://doi.org/10.1101/299859
reference Github: https://fooof-tools.github.io/fooof/index.html
"""
__author__ = '<NAME>'
__contact__ = '<EMAIL>' # or <EMAIL>
__date__ = '2020/09/14' ### Date it was created
__status__ = 'Finished'
####################
# Review History #
####################
# Reviewed and Updated by Eduarda Centeno 20201030
####################
# Libraries #
####################
# Standard imports
import time
import os
import glob
import ast
from datetime import date
# Third party imports
import numpy as np # version 1.19.1
import matplotlib.pyplot as plt # version 3.3.0
import pandas as pd # version 1.1.0
from scipy import signal # version 1.4.1
from fooof import FOOOF # version 0.1.3
# Define Functions ------------------------------------------------------------
def find_paths(main_dir, subject, extension, **kwargs):
""" Flexible way to find files in subdirectories based on keywords
Parameters
----------
main_dir: str
Give the main directory where the subjects' folders are stored
subject: str
Give the name of the subject to be analyzed
extension: str
Give the extension type
**kwargs: str
Give keywords that will be used in the filtering of paths
!Important!
It is possible to use the kwargs 'start' & 'end' (int) OR
'selection' (list or str) for selecting epochs. The 'selection'
list should contain the exact way in which the Tr is written, e.g.
Tr01, or Tr_1, etc.
Examples
-------
Ex.1
find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
subject='sub-9690',
extension='.asc',
key1='T1',
key2='BNA',
key3='Tr_7')
This example will result in a list with a single path:
['.../T1/BNA/1_100_WITH_200_WITH_246_VE_89.643to102.750_Tr_7.asc']
Ex2.
find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
subject='sub-9690',
extension='.asc',
key1='T1',
key2='BNA',
start=20,
end=23)
This example will result in a list with several paths:
['.../T1/BNA/1_100_WITH_200_WITH_246_VE_260.037to273.143_Tr_20.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_273.144to286.250_Tr_21.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_286.251to299.358_Tr_22.asc',
'.../T1/BNA/1_100_WITH_200_WITH_246_VE_299.358to312.465_Tr_23.asc']
Ex3.
find_paths(main_dir='/data/doorgeefluik/',
subject='mumo_002',
extension='.asc',
key1='OD1',
selection=['Tr01', 'Tr04'])
Returns
-------
updatedfilter: list
List with path strings
Notes
-------
Be careful that final slicing for 'start' & 'end' is done assuming that
the sorting step was correct. Thus, it is based on index not on finding the
specific start-end values in the string. This was done because the tested
paths had various ways of using Tr (e.g. Tr_1 or Tr_01, or Tr1 or Tr_01) -
what caused inconsistencies in the output.
"""
# Check if arguments are in the correct type
assert isinstance(main_dir, str), 'Argument must be str'
assert isinstance(subject, str), 'Argument must be str'
assert isinstance(extension, str), 'Argument must be str'
# Filtering step based on keywords
firstfilter = glob.glob(main_dir + subject + '/**/*' + extension,
recursive=True)
updatedfilter = firstfilter
print('\n..............NaN keys will be printed.................')
start = None
end = None
selection = None
for key, value in kwargs.items():
# In case the key value is NaN (possible in subjects dataframe)
if not isinstance(value,list) and pd.isnull(value):
print(key + '= NaN')
continue
elif key == 'start':
assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
start = int(value)
elif key == 'end':
assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
end = int(value)
elif key == 'selection':
if isinstance(value, list):
selection = value
elif isinstance(value, str):
selection = value.replace(';',',') # Step that convert ; to , (used in example.csv)
selection = ast.literal_eval(selection)
assert isinstance(selection, list), 'Argument should end up being a list of Tr numbers strings'
assert all(isinstance(item, str) for item in selection), 'Argument must be a list of of Tr numbers strings'
else:
start = None
end = None
selection = None
# Update list accoring to key value
updatedfilter = list(filter(lambda path: value in path, updatedfilter))
# Check if too many arguments were passed!
print('\n..............Checking if input is correct!.................')
#print(start, end, selection)
if (start and end) != None and selection != None:
raise RuntimeError('User should use Start&End OR Selection')
else:
print('All good to continue! \n')
pass
# To find index of Tr (last appearance)
location = updatedfilter[0].rfind('Tr')
# Sort list according to Tr* ending (+1 was necessary to work properly)
updatedfilter.sort(key=lambda path:int(''.join(filter(str.isdigit, path[location+1 :]))))
# After the list is sorted, slice by index.
if (start and end) != None:
print('Start&End were given. \n' +
'-- Start is: ' + str(start) +
'\n--End is: ' + str(end))
updatedfilter = updatedfilter[start-1:end]
# for number in range(start, end):
# updatedfilter = [
# list(filter(lambda k: str(number) in k[location:],
# updatedfilter))[0] for number in range(start, end)
# ]
# After the list is sorted, interesect with selection.
elif selection != None:
print('\nA selection of values was given.' +
'\nThe selection was: ' + str(selection))
updatedlist=[]
for item in selection:
updatedlist += list(filter(lambda path: item + extension in path[location:], updatedfilter))
updatedfilter = updatedlist
return updatedfilter
def make_csv(csv_path, output_path, extension = '.asc'):
"""Function to insert the number of epochs to include in analysis into csv.
Number of epochs is calculated by comparing the number of epochs available
for each subject and including the minimum amount.
Parameters
----------
csv_path : str,
path to the csv containing information on subjects to include
output_path: str,
complete path to output new csv (e.g. '/path/to/folder/new_csv.csv')
extension : str,
file extension of meg files (e.g. '.asc')
default = '.asc'
Returns
-------
None
saves the extended csv to the same directory where found old csv
(i.e. overwrites old csv)
epochs_df: pandas DataFrame,
dataframe containing the filepaths to the epochs included for every subject
"""
df = pd.read_csv(csv_path, delimiter = ',', header =0)
nr_epochs = []
for index, row in df.iterrows():
asc_paths = find_paths(main_dir=row['Path'],
subject=row['Case_ID'],
extension=extension,
timepoint=row['MM'],
atlas=row['Atlas'])
#store nr of epochs available for each subject
nr_epochs.append(len(asc_paths))
#find smallest number of epochs available
min_nr_epochs = min(nr_epochs)
#add start and stop epochs to df
df['Start'] = np.repeat(1,len(df['Path']))
df['End'] = np.repeat(min_nr_epochs, len(df['Path']))
#save new csv file that includes the epochs to analyse
df.to_csv(output_path, index = False, sep = ',')
#load new csv file with start and end epochs
new_csv = pd.read_csv(output_path)
subs = []
paths = []
#search for asc files between start and end epoch range specified in csv
for index, row in new_csv.iterrows():
subs.append(row['Case_ID'])
asc_paths = find_paths(main_dir=row['Path'],
subject=row['Case_ID'],
extension=extension,
timepoint=row['MM'],
atlas=row['Atlas'],
start = row['Start'],
end = row['End'])
#append list of asc_paths for subject to list
paths.append(asc_paths)
#store lists of asc_paths (for every subject) in dataframe
epochs_df = pd.DataFrame(paths)
#index rows to subject IDs
epochs_df.set_index([pd.Index(subs)], 'Subs', inplace = True)
return(epochs_df)
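# Hedged usage sketch (paths are placeholders, not project data):
# epochs_df = make_csv(csv_path='/path/to/example_MEG_alternative.csv',
#                      output_path='/path/to/example_MEG_with_epochs.csv',
#                      extension='.asc')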
def cal_power_spectrum(timeseries, nr_rois=np.arange(92), fs=1250,
window='hamming', nperseg=4096, scaling='spectrum',
plot_figure=False, title_plot='average power spectrum'):
""" Calculate (and plot) power spectrum of timeseries
Parameters
----------
timeseries: DataFrame with ndarrays
Rows are timepoints, columns are rois/electrodes
Give list with rois/electrodes you want to include,
default=np.arange(92)
fs: int, optional
Sample frequency, default=1250
window: str or tuple, optional
Type of window you want to use, check spectral.py for details,
default='hamming'
nperseg : int, optional
Length of each segment, default=4096
scaling : str, optional
'density' calculates the power spectral density (V**2/Hz), 'spectrum'
calculates the power spectrum (V**2), default='spectrum'
plot_figure: bool
Creates a figure of the mean + std over all rois/electrodes,
default=False
title_plot: str
Give title of the plot, default='average power spectrum'
Returns
-------
f: ndarray
Array with sample frequencies (x-axis of power spectrum plot)
pxx: ndarray
Columns of power spectra for each roi/VE
"""
pxx = np.empty([int(nperseg/2+1), np.size(nr_rois)])
i = 0
for roi in nr_rois:
(f, pxx[:,i]) = signal.welch(timeseries[roi].values, fs, window,
nperseg, scaling=scaling)
i = i + 1
if plot_figure==True:
plt.figure()
plt.plot(f, np.mean(pxx,1), color='teal')
plt.plot(f, np.mean(pxx,1)+np.std(pxx,1), color='teal', linewidth=0.7)
plt.plot(f, np.mean(pxx,1)-np.std(pxx,1), color='teal', linewidth=0.7)
plt.fill_between(f, np.mean(pxx,1)+np.std(pxx,1), np.mean(pxx,1)
-np.std(pxx,1), color='teal', alpha=0.2)
plt.xlim(0, 50)
plt.xlabel('Frequency (Hz)')
plt.title(title_plot)
plt.show()
return f, pxx
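# Hedged usage sketch: a synthetic two-channel recording (10 s at 1250 Hz) illustrating the
# expected input layout (rows = samples, columns = ROIs). All values are illustrative only.
# _t = np.arange(0, 10, 1.0 / 1250)
# _demo_ts = pd.DataFrame({0: np.sin(2 * np.pi * 10 * _t), 1: np.random.randn(_t.size)})
# _f, _pxx = cal_power_spectrum(_demo_ts, nr_rois=np.arange(2), fs=1250, plot_figure=False)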
def find_nearest(array, value):
""" Find nearest value of interest in array (used for frequencies,
no double value issues)
Parameters
----------
array: array
Give the array in which you want to find index of value nearest-by
value: int or float
The value of interest
Return
------
idx: int
Index of value nearest by value of interest
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
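# Worked example (illustrative array): find_nearest(np.array([0.0, 0.5, 1.0]), 0.6)
# returns index 1, because 0.5 is the sample frequency closest to 0.6.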
def cal_FOOOF_parameters(pxx, f, freq_range=[0.5, 48]):
""" Obtain slope and offset using the FOOOF algorithm
Reference paper: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2018) Parameterizing Neural
Power Spectra. bioRxiv, 299859. doi: https://doi.org/10.1101/299859
Reference Github: https://fooof-tools.github.io/fooof/index.html
Parameters
----------
pxx: ndarray
Column of power spectra
f: 1d-array
Array with sample frequencies (x-asix of power spectrum plot)
freq_range: list, optional
Gives the upper and lower boundaries to calculate the broadband power,
default=[0.5, 48]
Returns
-------
FOOOF_offset: float
Offset
FOOOF_slope: float
Slope
"""
# initialize FOOOF oject
fm = FOOOF()
# create variables
fm.fit(f, pxx, freq_range)
FOOOF_offset = fm.background_params_[0]
FOOOF_slope = fm.background_params_[1]
time.sleep(1) # heavy algorithm
return FOOOF_offset, FOOOF_slope
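# Minimal usage sketch (not called anywhere in this script): fit the FOOOF
# model on a synthetic 1/f-like spectrum. It needs the fooof package used
# above; the synthetic spectrum and helper name are illustrative assumptions.
def _demo_cal_FOOOF_parameters():
    f = np.linspace(0.5, 48, 200)
    pxx = 1.0 / f  # idealized aperiodic spectrum
    return cal_FOOOF_parameters(pxx, f, freq_range=[0.5, 48])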
def run_loop_powerspectrum(subject_list, extension='.asc',
nr_rois=np.arange(92), Fs=1250, window_length=4096,
freq_range=[0.5, 48], plot_figure=False):
""" Calculate power spectrum for all cases within the subject_list
Parameters
----------
    subject_list: str
        Path to the csv file that lists the subjects to process.
extension: str, optional
Give the extension of ASCIIs, '.txt' or '.asc', default='.asc',
nr_rois: int, optional
Give list with rois you want to analyse, default=np.arange(92)
Fs: int, optional
Sample frequency, default=1250
window_length: int, optional
Window length to calculate power spectrum, default=4096
freq_range: list, optional
Gives the upper and lower boundaries to calculate the broadband power,
default=[0.5, 48]
plot_figure: bool
To plot average power spectrum plot per epoch or not, default=False
Return
------
mean_pxx: ndarray (size: len(subjects), len(power spectrum), nr_rois)
        Power spectrum for all subjects, and all rois/VE, averaged over
nr_epochs
broadband_power : ndarray (size: len(subjects), nr_rois)
Broadband power between freq_range
f: ndarray
Array with sample frequencies (x-axis of power spectrum plot)
"""
print('\n____STARTING TO COMPUTE POWER SPECTRUM!____')
subjects = pd.read_csv(subject_list, delimiter=',', header=0)
print('\nThis is the content of the subjects_list file: \n' + str(subjects))
mean_pxx = np.empty([len(subjects), int(window_length/2+1), np.size(nr_rois)])
broadband_power = np.empty([len(subjects), np.size(nr_rois)])
freq = np.empty([len(subjects), int(window_length/2+1)])
for index, row in subjects.iterrows():
print('\n\n//////////// Subject ' + str(index) + ' on subject_list ////////////')
files_list = find_paths(main_dir=row['Path'],
subject=row['Case_ID'],
extension=extension,
timepoint=row['MM'],
atlas=row['Atlas'],
start=row['Start'],
end=row['End'],
selection=row['Selection'])
print('\nThe paths found are: \n' + str(files_list))
if len(files_list) == 0:
            print('No ASCIIs available for: ' + str(row['Case_ID']))
continue
elif len(files_list) == 1:
single_ascii = files_list[0]
timeseries = pd.read_csv(single_ascii, index_col=False,
header=None, delimiter='\t')
f, pxx = cal_power_spectrum(timeseries,
nr_rois=nr_rois,
fs=Fs,
plot_figure=plot_figure,
title_plot='power spectrum')
            mean_pxx[index,:,:] = pxx
            broadband_power[index,:] = np.sum(
                mean_pxx[index,
                         find_nearest(f, freq_range[0]):
                         find_nearest(f, freq_range[1]), :],
                axis=0)
            freq[index,:] = f
else:
sub_pxx = np.zeros((len(files_list), int(window_length/2+1),
np.size(nr_rois)))
#mean_pxx[index,:,:] = 'nan'
#broadband_power[index,:] = 'nan'
for file, name in zip(range(len(files_list)),files_list):
location = name.rfind('Tr')
timeseries = pd.read_csv(files_list[file], index_col=False,
header=None, delimiter='\t')
# Compute power spectrum
f, pxx = cal_power_spectrum(timeseries, nr_rois=nr_rois, fs=Fs,
plot_figure=plot_figure,
title_plot= 'avg power spectrum - epoch: '
+ ''.join(filter(str.isdigit, name[location:])))
sub_pxx[file,:,:] = pxx
freq[index,:] = f
mean_pxx[index,:,:] = np.nanmean(sub_pxx, axis=0)
broadband_power[index,:] = np.sum(
mean_pxx[index,
find_nearest(f, freq_range[0]):
find_nearest(f, freq_range[1]),:],
axis=0)
return mean_pxx, broadband_power, freq
# -----------------------------------------------------------------------------
###########################
# Settings #
###########################
# set nice level to 10, especially FOOOF algorithm is heavy!
os.nice(10)
# 1. Correctly create the list of subjects you want to process;
# an example is given here: 'example_MEG_list.csv'
subject_list = '/path/to/example_MEG_alternative.csv'
# 2. Define the type of file extension you are looking for
extension = '.asc' # extension type
# 3. Select which roi or rois you want to analyze
# if you want to analyze 1 roi, specify its number (nr_rois = (10,))
nr_rois = [0, 5, 9]  # use (10,) to run only roi 11 -- note that Python indexes at 0!
# if you want to analyze multiple rois, create list with these rois
# (for example nr_rois = np.arange(78) for all 78 cortical AAL rois)
# 4. Set sample frequency (1250 Hz for Elekta data)
Fs = 1250 # sample frequency
# 5. Set frequency range you want to study
freq_range=[0.5, 48] # frequency range you want to analyze
# 6. Give output directory
dir_output = '/path/to/output/folder/'
# 7. Do you want to see the plots?
plot_choice = False
# 7a. Do you want to save the output?
save_output = False # you can save output
###########################
# Run analysis #
###########################
# mean_pxx contains the average power spectra over nr_epochs, for all subjects
# for all rois, broadband_power is the area under the average power spectra
# over the frequency range for all subjects and rois, f gives the frequencies
# of the power spectrum (can be useful when plotting power spectrum)
mean_pxx, broadband_power, f = run_loop_powerspectrum(subject_list,
extension, nr_rois=nr_rois, Fs=Fs,
window_length=4096, freq_range=freq_range, plot_figure=plot_choice)
# save output
if save_output == True:
subjects = pd.read_csv(subject_list, delimiter=',', header=0)
print('\n.....Saving power spectra and frequency data......')
for index, row in subjects.iterrows():
if len(mean_pxx.shape) > 2:
df_pxx_f = | pd.DataFrame(mean_pxx[index,:,:]) | pandas.DataFrame |
# Split the range into 321 equal bins and count how many values fall into each bin
import pandas as pd
import numpy as np
df = | pd.read_csv('submit-final.csv', index_col=0) | pandas.read_csv |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
This file contains training and testing settings to be used in this benchmark,
mainly:
TRAIN_BASE_END: Base training end date common across all rounds
TRAIN_ROUNDS_ENDS: a set of dates denoting end of training period for each
of the 6 rounds of the benchmark
TEST_STARTS_ENDS: a set of dates denoting start and end of testing period
for each of the 6 rounds of the benchmark
"""
import pandas as pd
TRAIN_BASE_END = pd.to_datetime("2016-11-01")
TRAIN_ROUNDS_ENDS = pd.to_datetime(
["2016-12-01", "2016-12-01", "2017-01-01", "2017-01-01", "2017-02-01", "2017-02-01",]
)
TEST_STARTS_ENDS = [
pd.to_datetime(("2017-01-01", "2017-02-01")),
pd.to_datetime(("2017-02-01", "2017-03-01")),
pd.to_datetime(("2017-02-01", "2017-03-01")),
pd.to_datetime(("2017-03-01", "2017-04-01")),
pd.to_datetime(("2017-03-01", "2017-04-01")),
| pd.to_datetime(("2017-04-01", "2017-05-01")) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
This script can be used to convert a .mat simulation result file into a
.csv file with a subset of model variables, as defined in the "outputs" list.
The .csv file is saved in the same directory as the .mat file, and is based
on the case, climate, and scenario.
"""
from buildingspy.io.outputfile import Reader
import os
import pandas as pd
import glob
use_result_file = False
outputs = ['res.PHeaAhu', 'res.PHeaReh', 'res.PCooSen', 'res.PCooLat', 'res.PFan',
'AHU.returnAir.m_flow','AHU.TSup.T','AHU.VOut1.V_flow','weather.weaBus.TDryBul','weather.weaBus.HGloHor',
'Building.zoneVAV1.VSupRoo_flow.V_flow','Building.zoneVAV2.VSupRoo_flow.V_flow','Building.zoneVAV3.VSupRoo_flow.V_flow',
'Building.zoneVAV4.VSupRoo_flow.V_flow','Building.zoneVAV5.VSupRoo_flow.V_flow','Building.zoneVAV6.VSupRoo_flow.V_flow',
'Building.zoneVAV7.VSupRoo_flow.V_flow','Building.zoneVAV8.VSupRoo_flow.V_flow','Building.zoneVAV9.VSupRoo_flow.V_flow',
'Building.zoneVAV10.VSupRoo_flow.V_flow','Building.zoneVAV11.VSupRoo_flow.V_flow','Building.zoneVAV12.VSupRoo_flow.V_flow',
'Building.zoneVAV13.VSupRoo_flow.V_flow','Building.zoneVAV14.VSupRoo_flow.V_flow','Building.zoneVAV15.VSupRoo_flow.V_flow',
'Building.zoneVAV16.VSupRoo_flow.V_flow','Building.zoneVAV17.VSupRoo_flow.V_flow','Building.zoneVAV18.VSupRoo_flow.V_flow',
'Building.zoneVAV19.VSupRoo_flow.V_flow','Building.zoneVAV20.VSupRoo_flow.V_flow','Building.zoneVAV21.VSupRoo_flow.V_flow',
'Building.zoneVAV1.TSupRoo.T','Building.zoneVAV2.TSupRoo.T','Building.zoneVAV3.TSupRoo.T',
'Building.zoneVAV4.TSupRoo.T','Building.zoneVAV5.TSupRoo.T','Building.zoneVAV6.TSupRoo.T',
'Building.zoneVAV7.TSupRoo.T','Building.zoneVAV8.TSupRoo.T','Building.zoneVAV9.TSupRoo.T',
'Building.zoneVAV10.TSupRoo.T','Building.zoneVAV11.TSupRoo.T','Building.zoneVAV12.TSupRoo.T',
'Building.zoneVAV13.TSupRoo.T','Building.zoneVAV14.TSupRoo.T','Building.zoneVAV15.TSupRoo.T',
'Building.zoneVAV16.TSupRoo.T','Building.zoneVAV17.TSupRoo.T','Building.zoneVAV18.TSupRoo.T',
'Building.zoneVAV19.TSupRoo.T','Building.zoneVAV20.TSupRoo.T','Building.zoneVAV21.TSupRoo.T',
'res.EHea', "res.ECooSen", "res.ECooLat", "res.EFan",
'res.DiscomfortHeating', 'res.DiscomfortCooling',
'Building.zoneVAV1.zon.TAir','Building.zoneVAV2.zon.TAir','Building.zoneVAV3.zon.TAir',
'Building.zoneVAV4.zon.TAir','Building.zoneVAV5.zon.TAir','Building.zoneVAV6.zon.TAir',
'Building.zoneVAV7.zon.TAir','Building.zoneVAV8.zon.TAir','Building.zoneVAV9.zon.TAir',
'Building.zoneVAV10.zon.TAir','Building.zoneVAV11.zon.TAir','Building.zoneVAV12.zon.TAir',
'Building.zoneVAV13.zon.TAir','Building.zoneVAV14.zon.TAir','Building.zoneVAV15.zon.TAir',
'Building.zoneVAV16.zon.TAir','Building.zoneVAV17.zon.TAir','Building.zoneVAV18.zon.TAir',
'Building.zoneVAV19.zon.TAir','Building.zoneVAV20.zon.TAir','Building.zoneVAV21.zon.TAir',
'Building.TRooHeaSet','Building.TRooCooSet']
print('Loading results...')
if use_result_file:
# use result file path directly
result_file = "Base_LA_hLoad_lHours/BICBase.mat"
path = os.path.abspath(os.path.join(__file__,"..","..","simulations",result_file))
r = Reader(path, 'dymola')
print('Done loading results.')
# Parse results into dataframe
print('Parsing and saving results...')
df = pd.DataFrame()
for variable in outputs:
try:
(time, values) = r.values(variable)
except KeyError:
print('Warning: {0} not in data'.format(variable))
continue
df_var = | pd.DataFrame(index=time,data=values,columns=[variable]) | pandas.DataFrame |
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import get_handle
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta')
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> df = pd.DataFrame(values, columns=["i"])
>>> df.to_stata('filename.dta')
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass
>>> import os
>>> os.remove("./filename.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index._data.year
d["month"] = date_index._data.month
if days:
days_in_ns = dates.view(np.int64) - to_datetime(
d["year"], format="%Y"
).view(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date format")
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
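# Illustrative sketch (not part of pandas): round-trip a datetime Series
# through the weekly (%tw) Stata Internal Format using the two helpers above.
# Weekly SIF only keeps week resolution, so the round trip returns the start
# of each week. The helper name is a hypothetical example.
def _sif_weekly_roundtrip_demo() -> Series:
    dates = to_datetime(Series(["1961-01-01", "1962-06-15"]))
    sif = _datetime_to_stata_elapsed_vec(dates, "%tw")
    return _stata_elapsed_date_to_datetime_vec(sif, "%tw")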
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '{0}' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from {0} to {1}, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
class CategoricalConversionWarning(Warning):
pass
categorical_conversion_warning = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in a categorical variable with different
categories. This occurs since it is not possible to know all possible values
until the entire dataset has been read. To avoid this warning, you can either
read the dataset without an iterator, or manually convert the categorical data
by setting ``convert_categoricals`` to False and then accessing the value
labels through the value_labels method of the reader.
"""
def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""
Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ""
# original, if small, if large
conversion_data = (
(np.bool_, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.int64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
f"Column {col} has a maximum value of infinity which is outside "
"the range supported by Stata."
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
f"Column {col} has a maximum value ({value}) outside the range "
f"supported by Stata ({float64_max})"
)
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
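# Illustrative sketch (not part of pandas): bool and uint8 columns have no
# direct Stata equivalent, so _cast_to_stata_types recasts them to int8 and
# int16 respectively. The helper name is a hypothetical example.
def _cast_to_stata_types_demo() -> DataFrame:
    frame = DataFrame(
        {
            "flag": np.array([True, False]),
            "small": np.array([1, 200], dtype=np.uint8),
        }
    )
    return _cast_to_stata_types(frame)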
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = 0
self.txt: list[bytes] = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
offsets: list[int] = []
values: list[int] = []
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch,
)
category = category.encode(encoding)
offsets.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
values.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 characters."
)
# Ensure int32
self.off = np.array(offsets, dtype=np.int32)
self.val = np.array(values, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labels.
Parameters
----------
byteorder : str
Byte order of the output
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
encoding = self._encoding
bio = BytesIO()
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(text + null_byte)
return bio.getvalue()
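# Illustrative sketch (not part of pandas): StataValueLabel extracts the
# (code, label) pairs and byte offsets that the writer later serializes.
# The helper name is a hypothetical example.
def _value_label_demo() -> list:
    cat = Series(["low", "high", "low"], dtype="category", name="grade")
    return StataValueLabel(cat).value_labels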
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : {int, float}
The Stata missing value code
Notes
-----
More information: <https://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES: dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
key = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
key = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value: int | float):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
@property
def string(self) -> str:
"""
The Stata representation of the missing value: '.', '.a'..'.z'
Returns
-------
str
The representation of the missing value.
"""
return self._str
@property
def value(self) -> int | float:
"""
The binary representation of the missing value.
Returns
-------
{int, float}
The binary representation of the missing value.
"""
return self._value
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{type(self)}({self})"
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype: np.dtype) -> int | float:
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
if dtype == np.int8: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int8"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int16: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int16"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float64: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
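# Illustrative sketch (not part of pandas): the generic '.' missing value for
# int8 data is the integer code 101 and '.a' is 102. The helper name is a
# hypothetical example.
def _missing_value_demo() -> tuple:
    dot = StataMissingValue(101).string    # '.'
    dot_a = StataMissingValue(102).string  # '.a'
    return dot, dot_a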
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = dict(
list(zip(range(1, 245), [np.dtype("a" + str(i)) for i in range(1, 245)]))
+ [
(251, np.dtype(np.int8)),
(252, np.dtype(np.int16)),
(253, np.dtype(np.int32)),
(254, np.dtype(np.float32)),
(255, np.dtype(np.float64)),
]
)
self.DTYPE_MAP_XML = {
32768: np.dtype(np.uint8), # Keys to GSO
65526: np.dtype(np.float64),
65527: np.dtype(np.float32),
65528: np.dtype(np.int32),
65529: np.dtype(np.int16),
65530: np.dtype(np.int8),
}
# error: Argument 1 to "list" has incompatible type "str";
# expected "Iterable[int]" [arg-type]
self.TYPE_MAP = list(range(251)) + list("bhlfd") # type: ignore[arg-type]
self.TYPE_MAP_XML = {
# Not really a Q, unclear how to handle byteswap
32768: "Q",
65526: "d",
65527: "f",
65528: "l",
65529: "h",
65530: "b",
}
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254, # float
100: 255, # double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
"_all",
"_N",
"_skip",
"_b",
"_pi",
"str#",
"in",
"_pred",
"strL",
"_coef",
"_rc",
"using",
"_cons",
"_se",
"with",
"_n",
)
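# Illustrative sketch (not part of pandas): StataParser holds the static type
# maps, e.g. Stata type code 254 decodes to a 4-byte float. The helper name
# is a hypothetical example.
def _stata_parser_demo() -> np.dtype:
    return StataParser().DTYPE_MAP[254]  # dtype('float32')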
class StataReader(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
def __init__(
self,
path_or_buf: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
super().__init__()
self.col_sizes: list[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = ""
self._chunksize = chunksize
self._using_iterator = False
if self._chunksize is None:
self._chunksize = 1
elif not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype: np.dtype | None = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
with get_handle(
path_or_buf,
"rb",
storage_options=storage_options,
is_text=False,
compression=compression,
) as handles:
# Copy to BytesIO, and ensure no encoding
# Argument 1 to "BytesIO" has incompatible type "Union[Any, bytes, None,
# str]"; expected "bytes"
self.path_or_buf = BytesIO(handles.handle.read()) # type: ignore[arg-type]
self._read_header()
self._setup_dtype()
def __enter__(self) -> StataReader:
"""enter context manager"""
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""exit context manager"""
self.close()
def close(self) -> None:
"""close the handle if its open"""
self.path_or_buf.close()
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
nvar_type = "H" if self.format_version <= 118 else "I"
nvar_size = 2 if self.format_version <= 118 else 4
self.nvar = struct.unpack(
self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
)[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> tuple[list[int | str], list[str | np.dtype]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for _ in range(self.nvar)
]
def f(typ: int) -> int | str:
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
typlist = [f(x) for x in raw_typlist]
def g(typ: int) -> str | np.dtype:
if typ <= 2045:
return str(typ)
try:
# error: Incompatible return value type (got "Type[number]", expected
# "Union[str, dtype]")
return self.DTYPE_MAP_XML[typ] # type: ignore[return-value]
except KeyError as err:
raise ValueError(f"cannot convert stata dtype [{typ}]") from err
dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self) -> list[str]:
        # 33 in older formats, 129 in formats 118 and 119
b = 33 if self.format_version < 118 else 129
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self) -> list[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
def _get_lbllist(self) -> list[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
def _get_variable_labels(self) -> list[str]:
if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_types = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_dtypes = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
self._dtype = np.dtype(dtypes)
return self._dtype
def _calcsize(self, fmt: int | str) -> int:
if isinstance(fmt, int):
return fmt
return struct.calcsize(self.byteorder + fmt)
def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
encoding = self._encoding
msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict: dict[str, dict[float | int, str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = {}
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val": # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = {}
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
decoded_va = va[0:-1].decode(self._encoding)
else:
# Stata says typ 129 can be binary, so use str
decoded_va = str(va)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
self._using_iterator = True
return self.read(nrows=self._chunksize)
def get_chunk(self, size: int | None = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(
self,
nrows: int | None = None,
convert_dates: bool | None = None,
convert_categoricals: bool | None = None,
index_col: str | None = None,
convert_missing: bool | None = None,
preserve_dtypes: bool | None = None,
columns: Sequence[str] | None = None,
order_categoricals: bool | None = None,
) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = | DataFrame.from_records(data) | pandas.core.frame.DataFrame.from_records |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
        #test a loc that is neither 0 nor -1 (covers both positive and negative forms)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
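        # convert the Timestamp to a numpy datetime64 scalar so the comparison
        # against the raw ndarray below is done on matching types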
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
| Float64Index([1.0, np.nan]) | pandas.core.index.Float64Index |
import inspect
import json
import os
import re
from urllib.parse import quote
from urllib.request import urlopen
import pandas as pd
import param
from .configuration import DEFAULTS
class TutorialData(param.Parameterized):
label = param.String(allow_None=True)
raw = param.Boolean()
verbose = param.Boolean()
return_meta = param.Boolean()
use_cache = param.Boolean()
_source = None
_base_url = None
_data_url = None
_description = None
def __init__(self, **kwds):
super().__init__(**kwds)
self._cache_dir = DEFAULTS["cache_kwds"]["directory"]
self._remove_href = re.compile(r"<(a|/a).*?>")
os.makedirs(self._cache_dir, exist_ok=True)
self._init_owid()
@property
def _cache_path(self):
cache_file = f"{self.label}.pkl"
return os.path.join(self._cache_dir, cache_file)
@property
def _dataset_options(self):
options = set([])
for method in dir(self):
if method.startswith("_load_") and "owid" not in method:
options.add(method.replace("_load_", ""))
return list(options) + list(self._owid_labels_df.columns)
@staticmethod
def _specify_cache(cache_path, **kwds):
if kwds:
cache_ext = "_".join(
f"{key}={val}".replace(os.sep, "") for key, val in kwds.items()
)
cache_path = f"{os.path.splitext(cache_path)[0]}_{cache_ext}.pkl"
return cache_path
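    # Illustrative example (not from the original source):
    #   _specify_cache("annual_co2.pkl", header=None) -> "annual_co2_header=None.pkl"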
def _cache_dataset(self, df, cache_path=None, **kwds):
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
df.to_pickle(cache_path)
def _read_cache(self, cache_path=None, **kwds):
if not self.use_cache:
return None
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
try:
return pd.read_pickle(cache_path)
except Exception:
if os.path.exists(cache_path):
os.remove(cache_path)
return None
@staticmethod
def _snake_urlify(s):
# Replace all hyphens with underscore
s = s.replace(" - ", "_").replace("-", "_")
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", "", s)
        # Replace all runs of whitespace with an underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
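    # Illustrative example (not from the original source):
    #   _snake_urlify("Per-capita CO2 emissions") -> "per_capita_co2_emissions"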
def _init_owid(self):
cache_path = os.path.join(self._cache_dir, "owid_labels.pkl")
self._owid_labels_df = self._read_cache(cache_path=cache_path)
if self._owid_labels_df is not None:
return
owid_api_url = (
"https://api.github.com/"
"repos/owid/owid-datasets/"
"git/trees/master?recursive=1"
)
with urlopen(owid_api_url) as f:
sources = json.loads(f.read().decode("utf-8"))
owid_labels = {}
owid_raw_url = "https://raw.githubusercontent.com/owid/owid-datasets/master/"
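        # Walk the repository tree and map each dataset folder to the raw
        # URLs of its data (.csv) and metadata (.json) files.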
for source_tree in sources["tree"]:
path = source_tree["path"]
if ".csv" not in path and ".json" not in path:
continue
label = "owid_" + self._snake_urlify(path.split("/")[-2].strip())
if label not in owid_labels:
owid_labels[label] = {}
url = f"{owid_raw_url}/{quote(path)}"
if ".csv" in path:
owid_labels[label]["data"] = url
elif ".json" in path:
owid_labels[label]["meta"] = url
self._owid_labels_df = pd.DataFrame(owid_labels)
self._cache_dataset(self._owid_labels_df, cache_path=cache_path)
def _load_owid(self, **kwds):
self._data_url = self._owid_labels_df[self.label]["data"]
meta_url = self._owid_labels_df[self.label]["meta"]
with urlopen(meta_url) as response:
meta = json.loads(response.read().decode())
self.label = meta["title"]
self._source = (
" & ".join(source["dataPublishedBy"] for source in meta["sources"])
+ " curated by Our World in Data (OWID)"
)
self._base_url = (
" & ".join(source["link"] for source in meta["sources"])
+ " through https://github.com/owid/owid-datasets"
)
self._description = re.sub(self._remove_href, "", meta["description"])
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = [self._snake_urlify(col) for col in df.columns]
return df
def _load_annual_co2(self, **kwds):
self._source = "NOAA ESRL"
self._base_url = "https://www.esrl.noaa.gov/"
self._data_url = (
"https://www.esrl.noaa.gov/"
"gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt"
)
self._description = (
"The carbon dioxide data on Mauna Loa constitute the longest record "
"of direct measurements of CO2 in the atmosphere. They were started "
"by <NAME> of the Scripps Institution of Oceanography in "
"March of 1958 at a facility of the National Oceanic and Atmospheric "
"Administration [Keeling, 1976]. NOAA started its own CO2 measurements "
"in May of 1974, and they have run in parallel with those made by "
"Scripps since then [Thoning, 1989]."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(
header=None,
comment="#",
sep="\s+", # noqa
names=["year", "co2_ppm", "uncertainty"],
)
base_kwds.update(kwds)
df = pd.read_csv(self._data_url, **base_kwds)
self._cache_dataset(df, **kwds)
return df
def _load_tc_tracks(self, **kwds):
self._source = "IBTrACS v04 - USA"
self._base_url = "https://www.ncdc.noaa.gov/ibtracs/"
self._data_url = (
"https://www.ncei.noaa.gov/data/"
"international-best-track-archive-for-climate-stewardship-ibtracs/"
"v04r00/access/csv/ibtracs.last3years.list.v04r00.csv"
)
self._description = (
"The intent of the IBTrACS project is to overcome data availability "
"issues. This was achieved by working directly with all the Regional "
"Specialized Meteorological Centers and other international centers "
"and individuals to create a global best track dataset, merging storm "
"information from multiple centers into one product and archiving "
"the data for public use."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(keep_default_na=False)
base_kwds.update(kwds)
df = | pd.read_csv(self._data_url, **base_kwds) | pandas.read_csv |
import ipyleaflet
import ipywidgets
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon, Point
import datetime
import requests
import xml.etree.ElementTree as ET
import calendar
import numpy as np
import pathlib
import os
class ANA_interactive_map:
def __init__(self, path_inventario):
        self.df = pd.read_csv(path_inventario, engine='python', sep=';', parse_dates=['UltimaAtualizacao'])
self.df[['Latitude', 'Longitude']] = self.df[['Latitude', 'Longitude']].apply(lambda x: x.str.replace(',','.'))
self.df['Latitude'] = self.df['Latitude'].astype('float')
self.df['Longitude'] = self.df['Longitude'].astype('float')
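        # Build point geometries from the station coordinates (EPSG:4674, SIRGAS 2000)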
self.gdf = gpd.GeoDataFrame(self.df, geometry=gpd.points_from_xy(self.df.Longitude, self.df.Latitude), crs='epsg:4674')
self.m01 = ipyleaflet.Map(zoom=2, center=(-16, -47))
self.layer()
self.controls_on_Map()
self.control_buttonDownload.on_click(self.download_buttom)
self.control_shapefileButtom.on_click(self.shapefile_buttom)
display(self.m01)
def date_location(self, *args):
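        # Show only stations whose last update ('UltimaAtualizacao') is more
        # recent than the date currently selected on the slider.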
self.heatmap_byLast.locations = [tuple(s) for s in self.df.loc[self.df['UltimaAtualizacao'] > self.date_slider.value, ['Latitude','Longitude']].to_numpy()]
def download_ANA_stations(self, list_codes, typeData, folder_toDownload):
numberOfcodes = len(list_codes)
count = 0
path_folder = pathlib.Path(folder_toDownload)
for station in list_codes:
params = {'codEstacao': station, 'dataInicio': '', 'dataFim': '', 'tipoDados': '{}'.format(typeData), 'nivelConsistencia': ''}
response = requests.get('http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica', params)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
list_data = []
list_consistenciaF = []
list_month_dates = []
for i in root.iter('SerieHistorica'):
codigo = i.find("EstacaoCodigo").text
consistencia = i.find("NivelConsistencia").text
date = i.find("DataHora").text
date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
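                # Expand the monthly record into one calendar date per day so the
                # daily values parsed below line up with real dates.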
last_day = calendar.monthrange(date.year, date.month)[1]
month_dates = [date + datetime.timedelta(days=i) for i in range(last_day)]
data = []
list_consistencia = []
for day in range(last_day):
if params['tipoDados'] == '3':
value = 'Vazao{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(int(consistencia))
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(int(consistencia))
except AttributeError:
data.append(None)
list_consistencia.append(int(consistencia))
if params['tipoDados'] == '2':
value = 'Chuva{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(consistencia)
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(consistencia)
except AttributeError:
data.append(None)
list_consistencia.append(consistencia)
list_data = list_data + data
list_consistenciaF = list_consistenciaF + list_consistencia
list_month_dates = list_month_dates + month_dates
if len(list_data) > 0:
df = pd.DataFrame({'Date': list_month_dates, 'Consistence': list_consistenciaF, 'Data': list_data})
filename = '{}_{}.csv'.format(typeData, station)
df.to_csv(path_folder / filename)
count += 1
self.control_loadingDownload.value = float(count+1)/numberOfcodes
else:
count += 1
self.control_loadingDownload.value = float(count+1)/numberOfcodes
# pass
def download_buttom(self, *args):
try:
# last_draw = self.feature_collection['features'][-1]['geometry']
last_draw = self.control_draw.last_draw['geometry']
last_polygon = Polygon([(i[0], i[1]) for i in last_draw['coordinates'][0]])
except:
pass
if self.control_choiceDownload.value == 'Rain':
option = 2
if self.control_choiceDownload.value == 'Flow':
option = 3
if self.control_selectDownload.value == 'All':
code_list = self.gdf.loc[self.gdf['geometry'].within(last_polygon), 'Codigo'].to_list()
self.download_ANA_stations(list_codes=code_list, typeData=option, folder_toDownload=self.control_pathDownload.value)
elif self.control_selectDownload.value == 'byDate':
code_list = self.gdf.loc[(self.gdf['geometry'].within(last_polygon)) & (self.gdf['UltimaAtualizacao']>self.date_slider.value), 'Codigo'].to_list()
self.download_ANA_stations(list_codes=code_list, typeData=option, folder_toDownload=self.control_pathDownload.value)
elif self.control_selectDownload.value == 'Watershed':
for i in self.shape['geometry']:
code_list = self.gdf.loc[self.gdf['geometry'].within(i), 'Codigo'].to_list()
self.download_ANA_stations(list_codes=code_list, typeData=option, folder_toDownload=self.control_pathDownload.value)
def dropdown_shapefile(self, *args):
if self.control_selectDownload.value == 'Watershed':
self.control_shapefileText = ipywidgets.Text(placeholder='Insert Shapefile PATH HERE')
hbox_shape = ipywidgets.HBox([self.control_shapefileText, self.control_shapefileButtom])
            self.widget_control04 = ipyleaflet.WidgetControl(widget=hbox_shape, position='bottomright')
            self.m01.add_control(self.widget_control04)
else:
try:
self.control_shapefileText.close()
self.control_shapefileButtom.close()
                self.m01.remove_control(self.widget_control04)
self.m01.remove_layer(self.geo_data)
except:
pass
def shapefile_buttom(self, *args):
if self.control_selectDownload.value == 'Watershed':
try:
self.shape = gpd.read_file(self.control_shapefileText.value)
self.geo_data = ipyleaflet.GeoData(geo_dataframe=self.shape, name='Bacias',style={'color': 'red', 'fillColor': '#c51b8a', 'opacity':0.05, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.6},
hover_style={'fillColor': 'red', 'fillOpacity': 0.2})
self.m01.add_layer(self.geo_data)
except:
pass
else:
try:
# self.m01.remove_layer(geo_data)
pass
except:
pass
def controls_on_Map(self):
control_layer = ipyleaflet.LayersControl(position='topright')
self.m01.add_control(control_layer)
control_fullscreen = ipyleaflet.FullScreenControl()
self.m01.add_control(control_fullscreen)
self.control_draw = ipyleaflet.DrawControl()
self.m01.add_control(self.control_draw)
control_scale = ipyleaflet.ScaleControl(position='bottomleft')
self.m01.add_control(control_scale)
slider_heatmap_radius = ipywidgets.IntSlider(description='Radius', min=1, max=50, value=15)
ipywidgets.jslink((slider_heatmap_radius, 'value'),(self.heatmap_all,'radius'))
widget_control01 = ipyleaflet.WidgetControl(widget=slider_heatmap_radius, position='bottomright')
self.m01.add_control(widget_control01)
self.date_slider = ipywidgets.SelectionSlider(options= | pd.date_range(start='2000-01-01',end='2020-01-01', freq='M') | pandas.date_range |
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
# Find the filepath of all .res NRIXS files
resfilepath_list = [filepath for filepath in glob.glob('*/*.res')]
# Consistency dictionary: For saving space in df
consist_dict = {'ok': 'O', 'acceptable': 'A', 'concerning': 'S'}
# Initialize df structures to store all values from phox .ptl files in
all_fitParam_df = pd.DataFrame()
all_fitQuality_df = pd.DataFrame()
all_valsFromData_df = pd.DataFrame()
all_valsFromRefData_df = pd.DataFrame()
all_valsFromPDOS_df = pd.DataFrame()
# resfilepath = resfilepath_list[27]
for resfilepath in resfilepath_list:
filepath = re.findall('([A-Za-z0-9/_]+/)[A-Za-z0-9_]+.res',resfilepath)[0]
filename = re.findall('/([A-Za-z0-9_]+).res',resfilepath)[0]
ptlfilepath = filepath+'Output/'+filename+'_phox_ptl.txt'
psthfilepath = filepath+'Output/'+filename+'_psth_ptl.txt'
folder = re.findall('([A-Za-z0-9/_]+)/',filepath)[0]
print(folder)
# Get date information from directory names
datetag = re.findall('([A-Za-z0-9]+)_',filepath)[0]
month = re.findall('[A-Za-z]+',datetag)[0]
year = re.findall('[0-9]+',datetag)[0]
# Initialize df structure to store values from phox .ptl file in
fitParam_df = pd.DataFrame({'Date': [month+' '+year], 'Folder': [folder], 'Index': [filename]})
fitQuality_df = | pd.DataFrame({'Date': [month+' '+year], 'Folder': [folder], 'Index': [filename]}) | pandas.DataFrame |
def test_get_number_rows_cols_for_fig():
from mspypeline.helpers import get_number_rows_cols_for_fig
assert get_number_rows_cols_for_fig([1, 1, 1, 1]) == (2, 2)
assert get_number_rows_cols_for_fig(4) == (2, 2)
def test_fill_dict():
from mspypeline.helpers import fill_dict
def test_default_to_regular():
from mspypeline.helpers import default_to_regular
from collections import defaultdict
d = defaultdict(int)
d["a"] += 1
assert isinstance(d, defaultdict)
d = default_to_regular(d)
assert isinstance(d, dict)
assert not isinstance(d, defaultdict)
def test_get_analysis_design():
from mspypeline.helpers import get_analysis_design
assert get_analysis_design(["A1_1", "A1_2", "A2_1", "A2_2"]) == {
'A1': {'1': 'A1_1', '2': 'A1_2'},
'A2': {'1': 'A2_1', '2': 'A2_2'}
}
assert get_analysis_design(["A_1_1"]) == {"A": {"1": {"1": "A_1_1"}}}
def test_plot_annotate_line():
from mspypeline.helpers import plot_annotate_line
def test_venn_names():
from mspypeline.helpers import venn_names
def test_install_r_dependencies():
from mspypeline.helpers.Utils import install_r_dependencies
def test_get_number_of_non_na_values():
from mspypeline.helpers import get_number_of_non_na_values as gna
assert gna(20) > gna(10) > gna(5) > gna(3)
assert gna(3) == gna(2) and gna(3) == gna(1)
def test_get_intersection_and_unique():
from mspypeline.helpers import get_intersection_and_unique
import pandas as pd
df1 = | pd.DataFrame() | pandas.DataFrame |
# This script helps to create TAble 1 (phenotypes per country)
import pandas as pd
from scipy.stats import chi2_contingency
import matplotlib.pyplot as plt
import numpy as np
# Include all GENES, those containing indels and SNVs (that's why I repeat this step of loading the "alleles" dataframe). This prevents bad grouping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
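# GENES: unique gene symbols left after the carrier/actionable filter above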
GENES = list(set(list(alleles['SYMBOL'])))
df = | pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t') | pandas.read_csv |
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0)
matrix_aa = pd.read_csv('data/axon-axon.csv', header=0, index_col=0)
matrix_da = pd.read_csv('data/dendrite-axon.csv', header=0, index_col=0)
# the columns are string by default and the indices int; now both are int
matrix_ad.columns = | pd.to_numeric(matrix_ad.columns) | pandas.to_numeric |
"""
Classes for comparing outputs of two RSMTool experiments.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import warnings
from collections import defaultdict
from copy import deepcopy
from os.path import exists, join
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from .reader import DataReader
from .utils.files import get_output_directory_extension
_df_eval_columns_existing_raw = ["N",
"h_mean",
"h_sd",
"sys_mean.raw_trim",
"sys_sd.raw_trim",
"corr.raw_trim",
"SMD.raw_trim",
"sys_mean.raw_trim_round",
"sys_sd.raw_trim_round",
"exact_agr.raw_trim_round",
"kappa.raw_trim_round",
"wtkappa.raw_trim",
"adj_agr.raw_trim_round",
"SMD.raw_trim_round",
"R2.raw_trim",
"RMSE.raw_trim"]
_df_eval_columns_existing_scale = ["N",
"h_mean",
"h_sd",
"sys_mean.scale_trim",
"sys_sd.scale_trim",
"corr.scale_trim",
"SMD.scale_trim",
"sys_mean.scale_trim_round",
"sys_sd.scale_trim_round",
"exact_agr.scale_trim_round",
"kappa.scale_trim_round",
"wtkappa.scale_trim",
"adj_agr.scale_trim_round",
"SMD.scale_trim_round",
"R2.scale_trim",
"RMSE.scale_trim"]
_df_eval_columns_renamed = ["N",
"H1 mean",
"H1 SD",
"score mean(b)",
"score SD(b)",
"Pearson(b)",
"SMD(b)",
"score mean(br)",
"score SD(br)",
"Agmt.(br)",
"K(br)",
"QWK(b)",
"Adj. Agmt.(br)",
"SMD(br)",
"R2(b)",
"RMSE(b)"]
raw_rename_dict = dict(zip(_df_eval_columns_existing_raw,
_df_eval_columns_renamed))
scale_rename_dict = dict(zip(_df_eval_columns_existing_scale,
_df_eval_columns_renamed))
class Comparer:
"""Class to perform comparisons between two RSMTool experiments."""
@staticmethod
def _modify_eval_columns_to_ensure_version_compatibilty(df,
rename_dict,
existing_eval_cols,
short_metrics_list,
raise_warnings=True):
"""
Ensure that column names in eval data frames are backwards compatible.
This helper method ensures that the column names for eval data frames
are forward and backward compatible. There are two major changes in
RSMTool (7.0 or greater) that necessitate this: (1) for subgroup
calculations, 'DSM' is now used instead of 'SMD', and (2) QWK
statistics are now calculated on un-rounded scores. Thus, we need to
check these columns to see which sets of names are being used.
Parameters
----------
df : pandas Data Frame
The evaluation data frame.
rename_dict : dict
The rename dictionary.
existing_eval_cols : list
The existing evaluation columns.
short_metrics_list : list
The list of columns for the short metrics file.
raise_warnings : bool, optional
Whether to raise warnings.
Defaults to ``True``.
Returns
-------
rename_dict_new : dict
The updated rename dictionary.
existing_eval_cols_new : list
The updated existing evaluation columns
short_metrics_list_new : list
The updated list of columns for the short metrics file.
smd_name : str
The SMD column name (either 'SMD' or 'DSM')
"""
rename_dict_new = deepcopy(rename_dict)
existing_eval_cols_new = deepcopy(existing_eval_cols)
short_metrics_list_new = deepcopy(short_metrics_list)
smd_name = 'SMD'
# previously, the QWK metric used `trim_round` scores; now, we use just `trim` scores;
# We check whether `trim_round` scores were used and
# raise an error if this is the case
if any(col.endswith('trim_round') for col in df if col.startswith('wtkappa')):
raise ValueError("RSMTool (7.0 or greater) uses "
"unrounded scores for weighted kappa calculations. At least one "
"of your experiments was run "
"using an older version of RSMTool that used "
"rounded scores instead. Please re-run "
"the experiment with the latest version of RSMTool.")
# we check if DSM was calculated (which is what we expect for subgroup evals),
# and if so, we update the rename dictionary and column lists accordingly; we
# also set `smd_name` equal to 'DSM'
if any(col.startswith('DSM') for col in df):
smd_name = 'DSM'
existing_eval_cols_new = [col.replace('SMD', 'DSM')
for col in existing_eval_cols_new]
rename_dict_new = {key.replace('SMD', 'DSM'): val.replace('SMD', 'DSM')
for key, val in rename_dict_new.items()}
return (rename_dict_new,
existing_eval_cols_new,
short_metrics_list_new,
smd_name)
@staticmethod
def make_summary_stat_df(df):
"""
Compute summary statistics for the data in the given frame.
Parameters
----------
df : pandas DataFrame
Data frame containing numeric data.
Returns
-------
res : pandas DataFrame
Data frame containing summary statistics for data
in the input frame.
"""
series = []
for summary_func in [np.mean, np.std, np.median, np.min, np.max]:
# apply function, but catch and ignore warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
series.append(df.apply(summary_func))
res = pd.concat(series, axis=1, sort=True)
res.columns = ['MEAN', 'SD', 'MEDIAN', 'MIN', 'MAX']
return res
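    # Hedged illustration (not from the original source; `df_scores` is a
    # hypothetical frame of numeric columns):
    #   >>> Comparer.make_summary_stat_df(df_scores[["sc1", "raw"]])
    #   # -> one row per column with MEAN, SD, MEDIAN, MIN and MAX values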
@staticmethod
def compute_correlations_between_versions(df_old,
df_new,
human_score='sc1',
id_column='spkitemid'):
"""
Compute correlations between old and new feature values.
This method computes correlations between old and new feature values
in the two given frames as well as the correlations between each
feature value and the human score.
Parameters
----------
df_old : pandas DataFrame
Data frame with feature values for the 'old' model.
df_new : pandas DataFrame
Data frame with feature values for the 'new' model.
human_score : str, optional
Name of the column containing human score. Defaults to "sc1".
Must be the same for both data sets.
id_column : str, optional
Name of the column containing id for each response. Defaults to
"spkitemid". Must be the same for both data sets.
Returns
-------
df_correlations: pandas DataFrame
Data frame with a row for each feature and the following columns:
- "N": total number of responses
- "human_old": correlation with human score in the old frame
- "human_new": correlation with human score in the new frame
- "old_new": correlation between old and new frames
Raises
------
ValueError
If there are no shared features between the two sets.
ValueError
If there are no shared responses between the two sets.
"""
# Only use features that appear in both datasets
features_old = [column for column in df_old
if column not in [id_column, human_score]]
features_new = [column for column in df_new
if column not in [id_column, human_score]]
features = list(set(features_old).intersection(features_new))
if len(features) == 0:
raise ValueError("There are no matching features "
"in these two data sets.")
columns = features + [id_column, human_score]
# merge the two data sets and display a warning
# if there are non-matching ids
df_merged = pd.merge(df_old[columns],
df_new[columns],
on=[id_column],
suffixes=['%%%old', '%%%new'])
if len(df_merged) == 0:
raise ValueError("There are no shared ids between these two datasets.")
if len(df_merged) != len(df_old):
warnings.warn("Some responses from the old data "
"were not present in the new data and therefore "
"were excluded from the analysis.")
if len(df_merged) != len(df_new):
warnings.warn("Some responses from the new data "
"were not present in the old data and therefore "
"were excluded from the analysis.")
# compute correlations between each feature and human score.
# we are using the same approach as used in analysis.py
correlation_list = []
for feature in features:
# compute correlations
df_cor = pd.DataFrame({'Feature': [feature],
'N': len(df_merged),
'human_old': pearsonr(df_merged['{}%%%old'.format(human_score)],
df_merged['{}%%%old'.format(feature)])[0],
'human_new': pearsonr(df_merged['{}%%%new'.format(human_score)],
df_merged['{}%%%new'.format(feature)])[0],
'old_new': pearsonr(df_merged['{}%%%new'.format(feature)],
df_merged['{}%%%old'.format(feature)])[0]})
correlation_list.append(df_cor)
df_correlations = | pd.concat(correlation_list, sort=True) | pandas.concat |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
| Timestamp("2012-11-11 00:00:00+01:00") | pandas.Timestamp |
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import os
from os.path import join
import json
import pandas as pd
import subprocess
USER_FIELDS = [
"created_at",
#"description",
#"entities",
"id",
"location",
"name",
#"pinned_tweet_id",
#"profile_image_url",
"protected",
"public_metrics",
#"url",
"username",
"verified",
#"withheld",
]
TWEET_FIELDS = [
#"attachments",
"author_id",
#"context_annotations",
"conversation_id",
"created_at",
#"entities",
#"geo",
"id",
#"in_reply_to_user_id",
"lang",
#"public_metrics",
"text",
#"possibly_sensitive",
"referenced_tweets",
#"reply_settings",
"source",
#"withheld",
]
EXPANSIONS = ["author_id"]
DTYPES = {
"id": str,
"conversation_id":str,
"author_id":str,
#"created_at":str,
#"retrieved_at":str,
"source":str,
"lang":str,
"text":str,
"reference_type":str,
"referenced_tweet_id":str,
#"author.created_at":str,
"author.location":str,
"author.name":str,
"author.username":str,
"author.verified":str,
"author.protected":str,
"author.public_metrics.followers_count":float,
"author.public_metrics.following_count":float,
"author.public_metrics.tweet_count":float,
"author.public_metrics.listed_count":float}
AUTHOR_COLS = [
"author_id", "lang", "author.created_at", "author.location",
"author.name", "author.username", "author.verified",
"author.protected", "author.public_metrics.followers_count",
"author.public_metrics.following_count",
"author.public_metrics.tweet_count",
"author.public_metrics.listed_count"]
def get_twitter_API_credentials(filename="twitter_API_jana.txt", keydst="twitter_API_keys"):
'''
Returns the bearer tokens to access the Twitter v2 API for a list of users.
'''
credentials = {}
with open(join(keydst, filename), 'r') as f:
for l in f:
if l.startswith("bearer_token"):
credentials[l.split('=')[0]] = l.split('=')[1].strip('\n')
return credentials
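# Editor's usage sketch (not part of the original script): the key file parsed
# above is a plain "key=value" text file whose only consumed key is
# "bearer_token". The helper below shows the typical next step -- building the
# Authorization header for Twitter v2 requests. The filename/keydst defaults
# are the ones defined above, not verified paths.
def build_auth_header(filename="twitter_API_jana.txt", keydst="twitter_API_keys"):
    credentials = get_twitter_API_credentials(filename=filename, keydst=keydst)
    return {"Authorization": f"Bearer {credentials['bearer_token']}"}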
def notify(subject, body, credential_src=os.getcwd(),
credential_fname="email_credentials.txt"):
'''
    Sends an email with the given subject and body via the mail server specified
in the email_credentials.txt file at the specified location. The email
address to send the email to is also specified in the credentials file.
'''
email_credentials = {}
with open(join(credential_src, credential_fname), "r") as f:
for line in f.readlines():
line = line.strip("\n")
email_credentials[line.split("=")[0]] = line.split("=")[1]
fromaddr = email_credentials["fromaddr"]
toaddr = email_credentials["toaddr"]
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(
email_credentials["server"],
int(email_credentials["port"])
)
server.ehlo()
server.starttls()
server.ehlo()
server.login(email_credentials["user"], email_credentials["password"])
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
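# Editor's usage sketch (not part of the original script): how notify() is
# typically wired into the collector's error handling. Subject and body are
# placeholders; credentials still come from email_credentials.txt as above.
def notify_collector_failure(error):
    notify(
        subject="sampled stream collector stopped",
        body=f"The collector raised an exception: {error}",
    )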
def dump_tweets(tweets, t1, t2, dst, uid, gid):
'''Save a list of tweets as binary line-separated json'''
daydirname = "{}-{:02d}-{:02d}".format(t1.year, t1.month, t1.day)
hourdirname = "{:02d}".format(t1.hour)
if not os.path.exists(join(dst, daydirname)):
os.mkdir(join(dst, daydirname))
os.chown(join(dst, daydirname), uid, gid)
if not os.path.exists(join(dst, daydirname, hourdirname)):
os.mkdir(join(dst, daydirname, hourdirname))
os.chown(join(dst, daydirname, hourdirname), uid, gid)
datetime1 = "{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}"\
.format(t1.year, t1.month, t1.day, t1.hour, t1.minute, t1.second)
datetime2 = "{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}"\
.format(t2.year, t2.month, t2.day, t2.hour, t2.minute, t2.second)
fname = f"sampled_stream_{datetime1}_to_{datetime2}.jsonl"
with open(join(dst, daydirname, hourdirname, fname), 'wb') as f:
for tweet in tweets:
json_str = json.dumps(tweet) + "\n"
json_bytes = json_str.encode('utf-8')
f.write(json_bytes)
os.chown(join(dst, daydirname, hourdirname, fname), uid, gid)
def classify_users(t1, t2, dst, m3params):
daydirname = "{}-{:02d}-{:02d}".format(t1.year, t1.month, t1.day)
hourdirname = "{:02d}".format(t1.hour)
datetime1 = "{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}"\
.format(t1.year, t1.month, t1.day, t1.hour, t1.minute, t1.second)
datetime2 = "{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}"\
.format(t2.year, t2.month, t2.day, t2.hour, t2.minute, t2.second)
fname = f"sampled_stream_{datetime1}_to_{datetime2}.jsonl"
scriptname = "run_m3_classification.sh"
scriptpath = m3params["scriptpath"]
m3path = m3params["m3path"]
keyfile = m3params["keyfile"]
cachepath = m3params["cachepath"]
# start an asynchronous sub-process to extract user IDs from the tweet json
# and run the m3 classifier over the user IDs
subprocess.Popen([f"{join(scriptpath, scriptname)} {dst} {daydirname} {hourdirname} {fname} {m3path} {keyfile} {cachepath}"], shell=True)
def get_hour_files(hour_dst):
all_hour_files = os.listdir(hour_dst)
hour_files = [f for f in all_hour_files if f.endswith(".csv")]
if len(all_hour_files) != len(hour_files):
print(f"too many files in {hour_dst}")
hour_tweets = pd.DataFrame()
for f in hour_files:
tmp = pd.read_csv(
join(hour_dst, f),
#error_bad_lines=False,
dtype=DTYPES,
parse_dates=["created_at", "retrieved_at", "author.created_at"]
)
hour_tweets = | pd.concat([hour_tweets, tmp]) | pandas.concat |
### Filename = test_sfw_categorical_autotest_aggregated_functions.py
## ##
# #
# THIS TEST WAS AUTOGENERATED BY generator_categorical_unit_test.py #
# #
## ##
import pandas as pd
import unittest
import riptable as rt
from .groupby_categorical_unit_test_parameters import *
default_base_index = 0
class categorical_test(unittest.TestCase):
def test_aggs_sum_symb_0_10_ncols_1(self):
test_class = categorical_base(1, 0.10, "sum")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.sum(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).sum()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_mean_symb_0_10_ncols_2(self):
test_class = categorical_base(2, 0.10, "mean")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.mean(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).mean()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_median_symb_0_10_ncols_3(self):
test_class = categorical_base(3, 0.10, "median")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.median(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).median()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_min_symb_0_10_ncols_4(self):
test_class = categorical_base(4, 0.10, "min")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.min(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).min()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_max_symb_0_10_ncols_5(self):
test_class = categorical_base(5, 0.10, "max")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.max(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).max()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_var_symb_0_10_ncols_6(self):
test_class = categorical_base(6, 0.10, "var")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.var(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).var()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_sum_symb_0_10_ncols_7(self):
test_class = categorical_base(7, 0.10, "sum")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.sum(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).sum()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_mean_symb_0_25_ncols_1(self):
test_class = categorical_base(1, 0.25, "mean")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.mean(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).mean()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_median_symb_0_25_ncols_2(self):
test_class = categorical_base(2, 0.25, "median")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.median(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).median()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_min_symb_0_25_ncols_3(self):
test_class = categorical_base(3, 0.25, "min")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.min(rt.Dataset(test_class.data))
gb = pd.DataFrame(test_class.data)
gb = gb.groupby(test_class.bin_ids).min()
for k, v in test_class.data.items():
safe_assert(remove_nan(gb[k]), remove_nan(cat[k]))
def test_aggs_max_symb_0_25_ncols_4(self):
test_class = categorical_base(4, 0.25, "max")
cat = rt.Categorical(
values=test_class.bin_ids,
categories=test_class.keys,
base_index=default_base_index,
)
cat = cat.max(rt.Dataset(test_class.data))
gb = | pd.DataFrame(test_class.data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
from scipy.signal import butter, lfilter
from scipy.signal import freqs, freqz
def exponential_smooth(data, smooth_fac):
"""
:param data(np.array)
:param smooth_fac(int): span_interval
:return:
"""
ser = pd.Series(data)
return ser.ewm(span=smooth_fac).mean()
def butter_lowpass(cutOff, fs, order=5):
nyq = 0.5 * fs
normalCutoff = cutOff / nyq
    # digital design: lfilter expects coefficients from a digital (analog=False) Butterworth
    b, a = butter(order, normalCutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutOff, fs, order=4):
b, a = butter_lowpass(cutOff, fs, order=order)
y = lfilter(b, a, data)
return y
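# Editor's usage sketch (not part of the original script): applying the filter
# to a pulse trace. The 30 Hz sampling rate and 3 Hz cutoff are illustrative
# placeholders, not values taken from this script.
def lowpass_example(signal, fs=30.0, cutoff=3.0, order=4):
    return butter_lowpass_filter(signal, cutoff, fs, order=order)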
def draw_frequency_response(b, a, fs):
w, h = freqz(b, a, worN=8000)
plt.subplot(2, 1, 1)
plt.plot(0.5 * fs * w / np.pi, np.abs(h), 'b')
plt.plot(cutoff, 0.5 * np.sqrt(2), 'ko')
plt.axvline(cutoff, color='k')
plt.xlim(0, 0.5 * fs)
plt.title("Lowpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.show()
parser = argparse.ArgumentParser()
parser.add_argument('--file', nargs=2, help='filename of the rppg and biopac data')
parser.add_argument('-se', '--smooth_exp', default=False, action='store_true', help='True if the rppg data is smoothed')
parser.add_argument('-sb', '--smooth_butter', default=False, action='store_true')
parser.add_argument('--smooth_factor', default=None, type=int, help='Smooth factor (span of ewm)')
parser.add_argument('--savefig', default=False, action='store_true', help="True to save figure")
args = parser.parse_args()
rppg_pulse = | pd.read_csv(args.file[0]) | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
from pandas.compat import lrange, range
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
def test_get():
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
| Series([2, np.nan], index=idx) | pandas.Series |
import pandas as pd
import numpy as np
from datetime import datetime
def transformData(RideWaits):
RideWaits["RideId"] = pd.Categorical(RideWaits["RideId"])
#RideWaits["Status"] = pd.Categorical(RideWaits["Status"])
RideWaits["ParkId"] = pd.Categorical(RideWaits["ParkId"])
RideWaits["Tier"] = pd.Categorical(RideWaits["Tier"])
RideWaits["ParkName"] = pd.Categorical(RideWaits["ParkName"])
RideWaits["IntellectualProp"] = pd.Categorical(RideWaits["IntellectualProp"])
#want to create some more intersting columns:
#- is it close to a major event?(Anniversary/Christmas/Thanksgiving/Halloween)
RideWaits["Date"] = pd.to_datetime(RideWaits["Date"], infer_datetime_format = True)
RideWaits["OpeningDate"] = pd.to_datetime(RideWaits["OpeningDate"], infer_datetime_format = True)
RideWaits["Time"] = pd.to_datetime(RideWaits["Time"], format = '%H:%M').dt.time
RideWaits["ParkOpen"] = pd.to_datetime(RideWaits["ParkOpen"], format = '%I:%M %p').dt.strftime('%H:%M')
RideWaits["ParkOpen"] = pd.to_datetime(RideWaits["ParkOpen"], format = '%H:%M').dt.time
RideWaits["ParkClose"] = pd.to_datetime(RideWaits["ParkClose"], format = '%I:%M %p').dt.strftime('%H:%M')
RideWaits["ParkClose"] = pd.to_datetime(RideWaits["ParkClose"], format = '%H:%M').dt.time
RideWaits["DayOfWeek"] = [datetime.weekday(x) for x in RideWaits["Date"]]
RideWaits["EMHOpen"] = pd.to_datetime(RideWaits["EMHOpen"], format = '%I:%M %p', errors = 'coerce').dt.strftime('%H:%M')
RideWaits["EMHClose"] = pd.to_datetime(RideWaits["EMHClose"], format = '%I:%M %p', errors = 'coerce').dt.strftime('%H:%M')
RideWaits["EMHOpen"] = pd.to_datetime(RideWaits["EMHOpen"], format = '%H:%M', errors = 'coerce').dt.time
RideWaits["EMHClose"] = pd.to_datetime(RideWaits["EMHClose"], format = '%H:%M', errors = 'coerce').dt.time
RideWaits["Weekend"] = [0 if x == 0 or x == 1 or x ==2 or x==3 or x==4 else 1 for x in RideWaits["DayOfWeek"]]
RideWaits["Weekend"].value_counts()
RideWaits["CharacterExperience"] = [1 if "Meet" in x else 0 for x in RideWaits["Name"]]
validTime = []
inEMH = []
emhDay = []
timeSinceStart = []
timeSinceMidDay = []
magicHourType = []
timeSinceOpenMinutes = []
for index, row in RideWaits.iterrows():
#print(row)
tempTime = datetime.now()
cTime = row["Time"]
pOpen = row["ParkOpen"]
pClose = row["ParkClose"]
currentParkTime = tempTime.replace(hour = cTime.hour, minute = cTime.minute, second = 0, microsecond = 0)
parkOpen = tempTime.replace(hour = pOpen.hour, minute = pOpen.minute, second = 0, microsecond = 0)
parkClose = tempTime.replace(hour = pClose.hour, minute = pClose.minute, second = 0, microsecond = 0)
if parkClose < parkOpen:
parkClose = parkClose.replace(day = parkClose.day + 1)
#setup extra magic hours if there are any
if (pd.notnull(row["EMHOpen"])) & (pd.notnull(row["EMHClose"])):
eOpen = row["EMHOpen"]
#print(eOpen)
eClose = row["EMHClose"]
#print(eClose)
emhOpen = tempTime.replace(hour = eOpen.hour, minute = eOpen.minute, second = 0, microsecond = 0)
emhClose = tempTime.replace(hour = eClose.hour, minute = eClose.minute, second = 0, microsecond = 0)
if emhClose < emhOpen:
emhClose = emhClose.replace(day = emhClose.day + 1)
emh = "ok"
emhDay.append(1)
if emhClose.hour == parkOpen.hour:
magicHourType.append("Morning")
else:
magicHourType.append("Night")
else:
emh = "none"
emhDay.append(0)
magicHourType.append("None")
#setup special ticketed event? is this necessary so we can predict waits during the christmas party
#print(emh)
if (currentParkTime < parkClose) & (currentParkTime >= parkOpen):
#print("Current Time is: " + str(currentParkTime) + " and ParkHours are "+ str(parkOpen) +" to " + str(parkClose) + " " +str(validtime))
tSinceOpen = currentParkTime.hour - parkOpen.hour
tSinceOpenMinutes = currentParkTime - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
if currentParkTime.hour < parkOpen.hour:
tSinceOpen = currentParkTime.hour + 24 - parkOpen.hour
tSinceOpenMinutes = currentParkTime.replace(day = currentParkTime.day + 1) - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14 + 24)
validTime.append(1)
inEMH.append(0)
else:
if (emh == "ok"):
if (currentParkTime < emhClose) & (currentParkTime >= emhOpen):
validTime.append(1)
inEMH.append(1)
if (emhClose.hour == parkOpen.hour):
tSinceOpen = currentParkTime.hour - emhOpen.hour
tSinceOpenMinutes = currentParkTime - emhOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
else:
if currentParkTime.hour < parkOpen.hour:
tSinceOpen = currentParkTime.hour + 24 - parkOpen.hour
tSinceOpenMinutes = currentParkTime.replace(day = currentParkTime.day + 1) - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14 + 24)
else:
tSinceOpen = currentParkTime.hour - parkOpen.hour
tSinceOpenMinutes = currentParkTime - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
else:
validTime.append(0)
inEMH.append(0)
tSinceOpen = 0
tSinceMidDay = 0
tSinceOpenMinutes = 0
else:
validTime.append(0)
inEMH.append(0)
tSinceOpen = 0
tSinceMidDay = 0
tSinceOpenMinutes = 0
timeSinceStart.append(tSinceOpen)
timeSinceMidDay.append(tSinceMidDay)
timeSinceOpenMinutes.append(tSinceOpenMinutes)
RideWaits["inEMH"] = inEMH
RideWaits["validTime"] = validTime
RideWaits["EMHDay"] = emhDay
RideWaits["TimeSinceOpen"] = timeSinceStart
RideWaits["TimeSinceMidday"] = timeSinceMidDay
RideWaits["MagicHourType"] = magicHourType
RideWaits["MinutesSinceOpen"] = [x.total_seconds()/60 if x is not 0 else None for x in timeSinceOpenMinutes]
#RideWaits["SimpleStatus"] = pd.Categorical(RideWaits["SimpleStatus"])
RideWaits = RideWaits[RideWaits["validTime"] == 1]
RideWaits["Month"] = RideWaits["Date"].dt.month
RideWaits["TimeSinceRideOpen"] = (RideWaits["Date"] - RideWaits["OpeningDate"]).dt.days
#RideWaits["Month"] = RideWaits["Date"].dt.month
RideWaits["MagicHourType"] = | pd.Categorical(RideWaits["MagicHourType"]) | pandas.Categorical |
import json
from types import SimpleNamespace
import pandas as pd
from hana_ml.dataframe import ConnectionContext
from hana_ml.model_storage import ModelStorage
from typing import List
from hana_automl.algorithms.base_algo import BaseAlgorithm
from hana_automl.algorithms.ensembles.blendcls import BlendingCls
from hana_automl.algorithms.ensembles.blendreg import BlendingReg
from hana_automl.automl import AutoML
from hana_automl.pipeline.modelres import ModelBoard
from hana_automl.preprocess.preprocessor import Preprocessor
from hana_automl.preprocess.settings import PreprocessorSettings
from hana_automl.utils.error import StorageError
PREPROCESSORS = "AUTOML_PREPROCESSOR_STORAGE"
ensemble_prefix = "ensemble"
leaderboard_prefix = "leaderboard"
class Storage(ModelStorage):
"""Storage for models and more.
Attributes
----------
connection_context: hana_ml.dataframe.ConnectionContext
Connection info for HANA database.
schema : str
Database schema.
Examples
--------
>>> from hana_automl.storage import Storage
>>> from hana_ml import ConnectionContext
>>> cc = ConnectionContext('address', 39015, 'user', 'password')
>>> storage = Storage(cc, 'your schema')
"""
def __init__(self, connection_context: ConnectionContext, schema: str):
super().__init__(connection_context, schema)
self.cursor = connection_context.connection.cursor()
self.create_prep_table = (
f"CREATE TABLE {self.schema}.{PREPROCESSORS} "
f"(MODEL NVARCHAR(256), VERSION INT, "
f"JSON NVARCHAR(5000), TRAIN_ACC DOUBLE, VALID_ACC DOUBLE, ALGORITHM NVARCHAR(256), METRIC NVARCHAR(256));"
)
if not table_exists(self.cursor, self.schema, PREPROCESSORS):
self.cursor.execute(self.create_prep_table)
preprocessor = Preprocessor()
self.cls_dict = preprocessor.clsdict
self.reg_dict = preprocessor.regdict
def save_model(self, automl: AutoML, if_exists="upgrade"):
"""
Saves a model to database.
Parameters
----------
automl: AutoML
The model.
if_exists: str
Defaults to "upgrade". Not recommended to change.
Note
----
If you have ensemble enabled in AutoML model, method will determine it automatically and split
ensemble model in multiple usual models.
Examples
--------
>>> from hana_automl.automl import AutoML
>>> automl.fit(df='table in HANA', target='some target', steps=3)
>>> automl.model.name = "new model"
>>> storage.save_model(automl)
"""
if not table_exists(self.cursor, self.schema, PREPROCESSORS):
self.cursor.execute(self.create_prep_table)
if isinstance(automl.model, BlendingCls) or isinstance(
automl.model, BlendingReg
):
if automl.model.name is None:
raise StorageError(
"Name your ensemble! Set name via automl.model.name='model name'"
)
if isinstance(automl.model, BlendingCls):
ensemble_name = "_ensemble_cls_"
if isinstance(automl.model, BlendingReg):
ensemble_name = "_ensemble_reg_"
model_counter = 1
for model in automl.model.model_list: # type: ModelBoard
if automl.model.name is None or automl.model.name == "":
raise StorageError("Please give your model a name.")
name = automl.model.name + ensemble_name + str(model_counter)
model.algorithm.model.name = name
json_settings = json.dumps(model.preprocessor.__dict__)
if self.model_already_exists(name, model.algorithm.model.version):
self.cursor.execute(
f"UPDATE {self.schema}.{PREPROCESSORS} SET "
f"VERSION={model.algorithm.model.version}, "
f"JSON='{str(json_settings)}' "
f"TRAIN_ACC={model.train_score} "
f"VALID_ACC={model.valid_score} "
f"ALGORITHM='{model.algorithm.title}'"
f"METRIC='{automl.leaderboard_metric}' "
f"WHERE MODEL='{name}';"
)
else:
self.cursor.execute(
f"INSERT INTO {self.schema}.{PREPROCESSORS} "
f"(MODEL, VERSION, JSON, TRAIN_ACC, VALID_ACC, ALGORITHM, METRIC) "
f"VALUES "
f"('{name}', {model.algorithm.model.version}, '{str(json_settings)}', {model.train_score}, {model.valid_score}, '{model.algorithm.title}', '{automl.leaderboard_metric}'); "
)
super().save_model(
model.algorithm.model, if_exists="replace"
) # to avoid duplicates
model_counter += 1
else:
if automl.model.name is None or automl.model.name == "":
raise StorageError("Please name your model! automl.model.name='model name'")
if table_exists(self.cursor, self.schema, "HANAML_MODEL_STORAGE"):
if len(self.__find_models(automl.model.name, ensemble_prefix)) > 0:
raise StorageError(
"There is an ensemble with the same name in storage. Please change the name of "
"the "
"model."
)
super().save_model(automl.model, if_exists)
json_settings = json.dumps(automl.preprocessor_settings.__dict__)
self.cursor.execute(
f"INSERT INTO {PREPROCESSORS} (MODEL, VERSION, JSON, TRAIN_ACC, VALID_ACC, ALGORITHM, METRIC) "
f"VALUES ('{automl.model.name}', {automl.model.version}, '{json_settings}', "
f"{automl.leaderboard[0].train_score}, {automl.leaderboard[0].valid_score}, '{automl.algorithm.title}', '{automl.leaderboard_metric}'); "
)
def list_preprocessors(self, name: str = None) -> pd.DataFrame:
"""
Show preprocessors for models in database.
Parameters
----------
name: str, optional
Model name.
Returns
-------
res: pd.DataFrame
DataFrame containing all preprocessors in database.
Note
----
Do not delete or save preprocessors apart from model!
They are saved/deleted/changed automatically WITH model.
Examples
--------
>>> storage.list_preprocessors()
MODEL VERSION JSON
1. test 1 {'tuned_num'...}
"""
if (name is not None) and name != "":
ensembles = self.__find_models(name, ensemble_prefix)
if len(ensembles) > 0:
result = pd.DataFrame(
columns=[
"MODEL",
"VERSION",
"JSON",
"TRAIN_ACC",
"VALID_ACC",
"ALGORITHM",
"METRIC"
]
)
for model in ensembles:
self.cursor.execute(
f"SELECT * FROM {self.schema}.{PREPROCESSORS} WHERE MODEL='{model[0]}';"
)
res = self.cursor.fetchall()
col_names = [i[0] for i in self.cursor.description]
df = pd.DataFrame(res, columns=col_names)
result = result.append(df, ignore_index=True)
return result
else:
self.cursor.execute(
f"SELECT * FROM {self.schema}.{PREPROCESSORS} WHERE MODEL='{name}';"
)
res = self.cursor.fetchall()
col_names = [i[0] for i in self.cursor.description]
return | pd.DataFrame(res, columns=col_names) | pandas.DataFrame |
import pandas as pd
import numpy as np
import psycopg2
from sklearn.model_selection import KFold
import Constants
import sys
from pathlib import Path
output_folder = Path(sys.argv[1])
output_folder.mkdir(parents=True, exist_ok=True)
# update database credentials if MIMIC data stored in postgres database
conn = psycopg2.connect(
"dbname=mimic user=darius host='/var/run/postgresql' password=password")
pats = pd.read_sql_query('''
select subject_id, gender, dob, dod from public.patients
''', conn)
n_splits = 12
pats = pats.sample(frac=1, random_state=42).reset_index(drop=True)
kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
for c, i in enumerate(kf.split(pats, groups=pats.gender)):
pats.loc[i[1], 'fold'] = str(c)
adm = pd.read_sql_query('''
select subject_id, hadm_id, insurance, language,
religion, ethnicity,
admittime, deathtime, dischtime,
HOSPITAL_EXPIRE_FLAG, DISCHARGE_LOCATION,
diagnosis as adm_diag
from public.admissions
''', conn)
df = pd.merge(pats, adm, on='subject_id', how='inner')
def merge_death(row):
if not(pd.isnull(row.deathtime)):
return row.deathtime
else:
return row.dod
df['dod_merged'] = df.apply(merge_death, axis=1)
notes = pd.read_sql_query('''
select category, chartdate, charttime, hadm_id, row_id as note_id, text from public.noteevents
where iserror is null
''', conn)
# drop all outpatients. They only have a subject_id, so can't link back to insurance or other fields
notes = notes[~(pd.isnull(notes['hadm_id']))]
df = pd.merge(left=notes, right=df, on='hadm_id', how='left')
df.ethnicity.fillna(value='UNKNOWN/NOT SPECIFIED', inplace=True)
others_set = set()
def cleanField(string):
mappings = {'HISPANIC OR LATINO': 'HISPANIC/LATINO',
'BLACK/AFRICAN AMERICAN': 'BLACK',
'UNABLE TO OBTAIN': 'UNKNOWN/NOT SPECIFIED',
'PATIENT DECLINED TO ANSWER': 'UNKNOWN/NOT SPECIFIED'}
bases = ['WHITE', 'UNKNOWN/NOT SPECIFIED', 'BLACK', 'HISPANIC/LATINO',
'OTHER', 'ASIAN']
if string in bases:
return string
elif string in mappings:
return mappings[string]
else:
for i in bases:
if i in string:
return i
others_set.add(string)
return 'OTHER'
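# Editor's illustration (not part of the original script): sample inputs and
# the buckets cleanField() maps them to, traced from the mappings and base
# list above. The literal strings are arbitrary examples.
def _cleanfield_examples():
    return {
        "BLACK/AFRICAN AMERICAN": cleanField("BLACK/AFRICAN AMERICAN"),  # explicit mapping -> 'BLACK'
        "WHITE - RUSSIAN": cleanField("WHITE - RUSSIAN"),                # substring match -> 'WHITE'
        "PORTUGUESE": cleanField("PORTUGUESE"),                          # no match -> 'OTHER'
    }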
df['ethnicity_to_use'] = df['ethnicity'].apply(cleanField)
df = df[df.chartdate >= df.dob]
ages = []
for i in range(df.shape[0]):
ages.append((df.chartdate.iloc[i] - df.dob.iloc[i]).days/365.24)
df['age'] = ages
df.loc[(df.category == 'Discharge summary') |
(df.category == 'Echo') |
(df.category == 'ECG'), 'fold'] = 'NA'
icds = (pd.read_sql_query('select * from public.diagnoses_icd', conn)
.groupby('hadm_id')
.agg({'icd9_code': lambda x: list(x.values)})
.reset_index())
df = | pd.merge(left=df, right=icds, on='hadm_id') | pandas.merge |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import numpy as np
import pandas as pd
import pytest
from pandas._testing import assert_frame_equal
from wetterdienst import Settings
from wetterdienst.exceptions import InvalidEnumeration
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationRequest,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.util import build_parameter_set_identifier
from wetterdienst.util.enumeration import parse_enumeration_from_template
def test_parse_enumeration_from_template():
assert (
parse_enumeration_from_template("climate_summary", DwdObservationDataset)
== DwdObservationDataset.CLIMATE_SUMMARY
)
assert (
parse_enumeration_from_template("CLIMATE_SUMMARY", DwdObservationDataset)
== DwdObservationDataset.CLIMATE_SUMMARY
)
assert parse_enumeration_from_template("kl", DwdObservationDataset) == DwdObservationDataset.CLIMATE_SUMMARY
with pytest.raises(InvalidEnumeration):
parse_enumeration_from_template("climate", DwdObservationDataset)
def test_coerce_field_types():
"""Test coercion of fields"""
# Special cases
# We require a stations object with hourly resolution in order to accurately parse
# the hourly timestamp (pandas would fail parsing it because it has a strange
# format)
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=DwdObservationDataset.SOLAR, # RS_IND_01,
resolution=DwdObservationResolution.HOURLY,
period=DwdObservationPeriod.RECENT,
).all()
# Here we don't query the actual data because it takes too long
# we rather use a predefined DataFrame to check for coercion
df = pd.DataFrame(
{
"station_id": ["00001"],
"dataset": ["climate_summary"],
"date": ["1970010100"],
"qn": ["1"],
"rs_ind_01": [1],
"end_of_interval": ["1970010100:00"],
"v_vv_i": ["p"],
}
)
df = request.values._coerce_date_fields(df, "00001")
df = request.values._coerce_meta_fields(df)
df = request.values._coerce_parameter_types(df)
expected_df = pd.DataFrame(
{
"station_id": pd.Categorical(["00001"]),
"dataset": pd.Categorical(["climate_summary"]),
"date": [pd.Timestamp("1970-01-01").tz_localize("utc")],
"qn": pd.Series([1], dtype=pd.Int64Dtype()),
"rs_ind_01": pd.Series([1], dtype=pd.Int64Dtype()),
"end_of_interval": [np.NaN],
"v_vv_i": pd.Series(["p"], dtype=pd.StringDtype()),
}
)
| assert_frame_equal(df, expected_df, check_categorical=False) | pandas._testing.assert_frame_equal |
import unittest
from enda.timeseries import TimeSeries
import pandas as pd
import pytz
class TestTimeSeries(unittest.TestCase):
def test_collapse_dt_series_into_periods(self):
# periods is a list of (start, end) pairs.
periods = [
(pd.to_datetime('2018-01-01 00:15:00+01:00'), pd.to_datetime('2018-01-01 00:45:00+01:00')),
(pd.to_datetime('2018-01-01 10:15:00+01:00'), pd.to_datetime('2018-01-01 15:45:00+01:00')),
(pd.to_datetime('2018-01-01 20:15:00+01:00'), pd.to_datetime('2018-01-01 21:45:00+01:00')),
]
# expand periods to build a time-series with gaps
dti = pd.DatetimeIndex([])
for s, e in periods:
dti = dti.append(pd.date_range(s, e, freq="30min"))
self.assertEqual(2+12+4, dti.shape[0])
# now find periods in the time-series
# should work with 2 types of freq arguments
for freq in ["30min", pd.to_timedelta("30min")]:
computed_periods = TimeSeries.collapse_dt_series_into_periods(dti, freq)
self.assertEqual(len(computed_periods), len(periods))
for i in range(len(periods)):
self.assertEqual(computed_periods[i][0], periods[i][0])
self.assertEqual(computed_periods[i][1], periods[i][1])
def test_collapse_dt_series_into_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, freq="30min")
def test_collapse_dt_series_into_periods_3(self):
dti = pd.DatetimeIndex([
| pd.to_datetime('2018-01-01 00:00:00+01:00') | pandas.to_datetime |
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import pandas_dtype
from pandas import (
Float64Index,
Index,
Int64Index,
)
import pandas._testing as tm
class TestAstype:
def test_astype_float64_to_object(self):
float_index = Float64Index([0.0, 2.5, 5.0, 7.5, 10.0])
result = float_index.astype(object)
assert result.equals(float_index)
assert float_index.equals(result)
assert isinstance(result, Index) and not isinstance(result, Float64Index)
def test_astype_float64_mixed_to_object(self):
# mixed int-float
idx = Float64Index([1.5, 2, 3, 4, 5])
idx.name = "foo"
result = idx.astype(object)
assert result.equals(idx)
assert idx.equals(result)
assert isinstance(result, Index) and not isinstance(result, Float64Index)
@pytest.mark.parametrize("dtype", ["int16", "int32", "int64"])
def test_astype_float64_to_int_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Float64Index([0, 1, 2])
result = idx.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
idx = Float64Index([0, 1.1, 2])
result = idx.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_astype_float64_to_float_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Float64Index([0, 1, 2])
result = idx.astype(dtype)
expected = idx
tm.assert_index_equal(result, expected)
idx = Float64Index([0, 1.1, 2])
result = idx.astype(dtype)
expected = Index(idx.values.astype(dtype))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_cannot_cast_to_datetimelike(self, dtype):
idx = Float64Index([0, 1.1, 2])
msg = (
f"Cannot convert Float64Index to dtype {pandas_dtype(dtype)}; "
f"integer values are required for conversion"
)
with pytest.raises(TypeError, match=re.escape(msg)):
idx.astype(dtype)
@pytest.mark.parametrize("dtype", [int, "int16", "int32", "int64"])
@pytest.mark.parametrize("non_finite", [np.inf, np.nan])
def test_cannot_cast_inf_to_int(self, non_finite, dtype):
# GH#13149
idx = | Float64Index([1, 2, non_finite]) | pandas.Float64Index |
import csv
import json
import os
import re
from collections import OrderedDict
from io import StringIO
import pandas as pd
import requests
from django.core.management.base import BaseCommand
from django.forms.models import model_to_dict
from va_explorer.va_data_management.models import CauseCodingIssue
from va_explorer.va_data_management.models import CauseOfDeath
from va_explorer.va_data_management.models import VerbalAutopsy
# Default host-port when running from docker-compose.local.yml
PYCROSS_HOST = os.environ.get('PYCROSS_HOST', 'http://127.0.0.1:5001')
INTERVA_HOST = os.environ.get('INTERVA_HOST', 'http://127.0.0.1:5002')
# TODO: Temporary script to run COD assignment algorithms; this should
# eventually become something that's handle with celery
class Command(BaseCommand):
help = 'Run cause coding algorithms'
def handle(self, *args, **options):
# Load all verbal autopsies that don't have a cause coding
# TODO: This should eventuall check to see that there's a cause coding for every supported algorithm
verbal_autopsies_without_causes = VerbalAutopsy.objects.filter(causes__isnull=True)
# Get into CSV format, also prefixing keys with - as expected by pyCrossVA (e.g. Id10424 becomes -Id10424)
va_data = [model_to_dict(va) for va in verbal_autopsies_without_causes]
va_data = [dict([(f'-{k}', v) for k, v in d.items()]) for d in va_data]
va_data_csv = | pd.DataFrame.from_records(va_data) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
def cv_index(n_fold, feature, label):
skf = KFold(n_fold, shuffle=True, random_state=7840)
index_list = []
for i, j in skf.split(feature, label):
index_list.append((i, j))
return index_list
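# Editor's usage sketch (not part of the original script): consuming the fold
# index pairs produced by cv_index(). X and y are placeholders for any pandas
# objects of matching length.
def iterate_folds_example(X, y, n_fold=3):
    for fold_no, (train_idx, val_idx) in enumerate(cv_index(n_fold, X, y)):
        yield fold_no, X.iloc[train_idx], X.iloc[val_idx], y.iloc[train_idx], y.iloc[val_idx]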
def data_selector(data_name):
if data_name == 'cmc':
return x_train_cmc, x_test_cmc, y_train_cmc, y_test_cmc, index_cmc
elif data_name == 'setap':
return x_train_setap, x_test_setap, y_train_setap, y_test_setap, index_setap
elif data_name == 'audit':
return x_train_audit, x_test_audit, y_train_audit, y_test_audit, index_audit
elif data_name == 'titanic':
return x_train_tt, x_test_tt, y_train_tt, y_test_tt, index_tt
elif data_name == 'dota':
return x_train_dota, x_test_dota, y_train_dota, y_test_dota, index_dota
no_of_folds = 3
# Dataset cmc
data_cmc = pd.read_csv("data/cmc.data", header=None)
data_cmc[9] = np.where(data_cmc[9] == 1, 0, 1)
data_cmc_label = data_cmc.pop(9)
x_train_cmc, x_test_cmc, y_train_cmc, y_test_cmc = train_test_split(data_cmc,
data_cmc_label,
random_state=7840,
test_size=0.25)
index_cmc = cv_index(no_of_folds, x_train_cmc, y_train_cmc)
# Dataset SETAP
data_setap = pd.read_csv("data/setap.csv")
data_setap['label'] = np.where(data_setap['label'] == 'A', 0, 1)
data_setap_label = data_setap.pop('label')
x_train_setap, x_test_setap, y_train_setap, y_test_setap = train_test_split(data_setap,
data_setap_label,
random_state=7840,
test_size=0.25)
index_setap = cv_index(no_of_folds, x_train_setap, y_train_setap)
# Dataset audit
data_audit = pd.read_csv("data/audit_risk.csv")
data_audit['LOCATION_ID'] = pd.to_numeric(data_audit['LOCATION_ID'], errors='coerce')
data_audit['LOCATION_ID'] = data_audit['LOCATION_ID'].fillna(data_audit['LOCATION_ID'].mode()[0])
data_audit['Money_Value'] = data_audit['Money_Value'].fillna(data_audit['Money_Value'].mean())
data_audit_label = data_audit.pop('Risk')
x_train_audit, x_test_audit, y_train_audit, y_test_audit = train_test_split(data_audit,
data_audit_label,
random_state=7840,
test_size=0.25,)
index_audit = cv_index(no_of_folds, x_train_audit, y_train_audit)
# Dataset titanic
data_tt = pd.read_csv("data/titanic_train.csv")
data_tt['Age'] = data_tt['Age'].fillna(data_tt['Age'].mean())
data_tt['Embarked'] = data_tt['Embarked'].fillna(data_tt['Embarked'].mode()[0])
data_tt['Pclass'] = data_tt['Pclass'].apply(str)
for col in data_tt.dtypes[data_tt.dtypes == 'object'].index:
for_dummy = data_tt.pop(col)
data_tt = pd.concat([data_tt, | pd.get_dummies(for_dummy, prefix=col) | pandas.get_dummies |
import pandas as pd
import numpy as np
import scipy.sparse as spl
from concurrent.futures import ProcessPoolExecutor
import sys
threads = 4
all_tasks = [
[5, 8000, ['5t', '5nt'], 0.352],
[10, 12000, ['10t', '10nt'], 0.38],
[25, 40000, ['25f'], 0.43386578246281293],
[25, 9000, ['25r'], 0.4],
[100, 4000, ['100r'], 0.39],
]
split, knn_k, test_task, powb = all_tasks[int(sys.argv[1])]
def recode(column, min_val=0):
uniques = column.unique()
codes = range(min_val, len(uniques) + min_val)
code_map = dict(zip(uniques, codes))
return (column.map(code_map), code_map)
def reverse_code(column, code_map):
inv_map = {v: k for k, v in code_map.items()}
return column.map(inv_map)
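# Editor's illustration (not part of the original script): recode/reverse_code
# form a round trip. The literal values below are arbitrary.
def _recode_roundtrip_example():
    col = pd.Series(["u1", "u2", "u1"])
    codes, code_map = recode(col)             # codes: 0, 1, 0
    restored = reverse_code(codes, code_map)  # back to: u1, u2, u1
    return codes, restored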
playlist_meta = pd.read_csv('data/million_playlist_dataset/playlist_meta.csv')
playlist_meta_c = pd.read_csv('data/challenge_set/playlist_meta.csv')
playlist_meta = pd.concat([playlist_meta, playlist_meta_c], axis=0, ignore_index=True)
song_meta = pd.read_csv('data/million_playlist_dataset/song_meta_no_duplicates.csv')
playlist_meta['pid_code'], pid_codes = recode(playlist_meta['pid'])
song_meta['song_code'], song_codes = recode(song_meta['song_id'])
train = pd.read_csv('data/million_playlist_dataset/playlists.csv')
test = | pd.read_csv('data/challenge_set/playlists.csv') | pandas.read_csv |
import nltk
nltk.download('punkt')
from newspaper import Article, Config
from pygooglenews import GoogleNews
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
from pathlib import Path
class Newspaper_agent:
TMP_DIRECTORY = Path("./tmp_data")
if not TMP_DIRECTORY.exists():
TMP_DIRECTORY.mkdir()
def __init__(
self,
query,
keywords,
country_code_final,
language_code_final,
country,
language,
):
self.query = query
self.keywords = keywords
self.country_code_final = country_code_final
self.language_code_final = language_code_final
self.country = country
self.language = language
self.custom_tags = | pd.read_csv("data_news/Custom_Websites_Tags.csv") | pandas.read_csv |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
    This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
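# Editor's illustration (not part of the original module): is_nested_object is
# True only for an object-dtype Series whose elements are themselves Series.
def _demo_is_nested_object() -> None:
    import pandas as pd
    nested = pd.Series([pd.Series([1, 2]), pd.Series([3])])
    flat = pd.Series([1, 2, 3])
    assert is_nested_object(nested)
    assert not is_nested_object(flat)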
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
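# Editor's illustration (not part of the original module): the three branches
# of maybe_box_datetimelike above, exercised with arbitrary sample values.
def _demo_maybe_box_datetimelike() -> None:
    assert isinstance(maybe_box_datetimelike(np.datetime64("2021-01-01")), Timestamp)
    assert isinstance(maybe_box_datetimelike(np.timedelta64(1, "D")), Timedelta)
    # with dtype=object the value is passed through unchanged
    unboxed = maybe_box_datetimelike(np.datetime64("2021-01-01"), dtype=object)
    assert isinstance(unboxed, np.datetime64)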
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
    or could be an astype of float64->float32)
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
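# Editor's illustration (not part of the original module): with dtype="infer",
# float results that are exactly integral are downcast to int64, while lossy
# conversions leave the input unchanged. Sample arrays are arbitrary.
def _demo_maybe_downcast_to_dtype() -> None:
    downcast = maybe_downcast_to_dtype(np.array([1.0, 2.0]), "infer")
    unchanged = maybe_downcast_to_dtype(np.array([1.5, 2.0]), "infer")
    assert downcast.dtype == np.dtype("int64")
    assert unchanged.dtype == np.dtype("float64")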
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
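# Editor's illustration (not part of the original module): an exact float->bool
# downcast succeeds, while a lossy one returns the input unchanged.
def _demo_maybe_downcast_numeric() -> None:
    exact = maybe_downcast_numeric(np.array([0.0, 1.0]), np.dtype(bool))
    lossy = maybe_downcast_numeric(np.array([0.5, 1.0]), np.dtype(bool))
    assert exact.dtype == np.dtype(bool)
    assert lossy.dtype == np.dtype("float64")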
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
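# Editor's illustration (not part of the original module): summing booleans
# promotes the result dtype to int64; other dtype/how pairs pass through.
def _demo_maybe_cast_result_dtype() -> None:
    assert maybe_cast_result_dtype(np.dtype(bool), "sum") == np.dtype(np.int64)
    assert maybe_cast_result_dtype(np.dtype(np.float64), "sum") == np.dtype(np.float64)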
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
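# Usage sketch (illustrative; assumes the nullable integer extension types are available):
# build an Int64-backed IntegerArray from plain integers, falling back to the input on failure.
# >>> from pandas.core.arrays.integer import IntegerArray, Int64Dtype
# >>> maybe_cast_to_extension_array(IntegerArray, np.array([1, 2, 3]), dtype=Int64Dtype())
# returns an IntegerArray of [1, 2, 3]; a non-convertible input would come back unchanged.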
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
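# Complementary sketch of the upcast path (inputs assumed): an int array masked with NaN
# is promoted to float64 and `changed` is True.
# >>> result, changed = maybe_upcast_putmask(np.arange(3), np.array([True, False, True]), np.nan)
# >>> result
# array([nan,  1., nan])
# >>> changed
# True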
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
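# Doctest-style sketch of the promotion rules above (outputs assumed from the branches):
# >>> maybe_promote(np.dtype(np.int64), np.nan)   # integer dtype + NaN fill -> float64
# (dtype('float64'), nan)
# >>> maybe_promote(np.dtype(bool), 3)            # bool dtype + integer fill -> object
# (dtype('O'), 3)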
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if out dtype is np.complex64_, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to a pandas extension type is inferred as
object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
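# Doctest-style sketch (outputs assumed from the branches above):
# >>> infer_dtype_from_scalar(5)
# (dtype('int64'), 5)
# >>> infer_dtype_from_scalar(Timestamp("2020-01-01", tz="UTC"), pandas_dtype=True)[0]
# datetime64[ns, UTC]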
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
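# Illustrative sketch: datetime-like keys are boxed to Timestamps, e.g.
# >>> dict_compat({np.datetime64("2021-01-01"): 1})
# {Timestamp('2021-01-01 00:00:00'): 1}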
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes exactly,
with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default False
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
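# Usage sketch (inputs assumed): an integer array with a NaN fill value is upcast to float64.
# >>> values, fv = maybe_upcast(np.array([1, 2, 3]), fill_value=np.nan)
# >>> values.dtype, fv
# (dtype('float64'), nan)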
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
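# Illustrative sketch (assumes _int8_max is the usual iinfo bound): a small number of
# categories lets the codes be stored as int8.
# >>> coerce_indexer_dtype(np.array([0, 1, 2, 1]), categories=range(3)).dtype
# dtype('int8')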
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
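# Sketch of the NaT guard above (doctest-style, values assumed):
# >>> arr = np.array(["2020-01-01", "NaT"], dtype="M8[ns]")
# >>> astype_nansafe(arr, np.dtype(np.int64))
# Traceback (most recent call last):
#     ...
# ValueError: Cannot convert NaT values to integer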
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, defaults True
Whether object dtypes should be converted to ``BooleanDtypes()``.
convert_floating : bool, defaults True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_floating:
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
input_array.dtype.name, "Float64"
)
# if we could also convert to integer, check if all floats
# are actually integers
if convert_integer:
arr = input_array[notna(input_array)]
if (arr.astype(int) == arr).all():
inferred_dtype = "Int64"
else:
inferred_dtype = inferred_float_dtype
else:
inferred_dtype = inferred_float_dtype
else:
if is_float_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
else:
if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
inferred_dtype = input_array.dtype
else:
inferred_dtype = input_array.dtype
return inferred_dtype
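# Usage sketch (illustrative): a plain int64 Series maps to the nullable Int64 extension dtype.
# >>> import pandas as pd
# >>> convert_dtypes(pd.Series([1, 2, 3]))
# Int64Dtype()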
def maybe_castable(arr: np.ndarray) -> bool:
# return False to force a non-fastpath
assert isinstance(arr, np.ndarray) # GH 37024
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == "M":
return is_datetime64_ns_dtype(arr.dtype)
elif kind == "m":
return is_timedelta64_ns_dtype(arr.dtype)
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting")
homeOrAway=[1,2],result=[1,2,4]
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computes the average runs scored at different grounds and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the average runs scored by the batsman against different oppositions
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + " - Runs in games won/lost/drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file installed with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
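# Illustrative note (values assumed): the cumulative average above is cumsum(runs) divided
# by the innings number, e.g.
# >>> runs = pd.Series([10, 20, 30])
# >>> (runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)).tolist()
# [10.0, 15.0, 20.0]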
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanMeanStrikeRate
# This function plot the Mean Strike Rate of the batsman against Runs scored as a continous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Filter runs that are are between 2 bins
batsman['Runs']=pd.to_numeric(batsman['Runs'])
a=(batsman['Runs'] > bins[i-1]) & (batsman['Runs'] <= bins[i])
df=batsman[a]
SR.append(np.mean(df['SR']))
atitle = name + "-" + "Strike rate in run ranges"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(midBin, SR, alpha=0.5)
plt.plot(midBin, SR,color="r", alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('Strike Rate')
plt.title(atitle)
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanMovingAverage
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
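# Illustrative sketch (values assumed): a centred 3-point mean; edge values are damped
# because 'same' convolution pads with zeros beyond the series.
# >>> movingaverage([3, 3, 3, 3, 3], 3)
# array([2., 3., 3., 3., 2.])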
def batsmanMovingAverage(file,name="A Squarecut") :
'''
Calculate and plot the Moving Average of the batsman in his career
Description
This function calculates and plots the Moving Average of the batsman in his career
Usage
batsmanMovingAverage(file,name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMovingAverage(pathToFile,"<NAME>")
'''
# Compute the moving average of the time series
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
date= pd.to_datetime(batsman['Start Date'])
atitle = name + "'s Moving average (Runs)"
# Plot the runs in grey colour
plt.plot(date,runs,"-",color = '0.75')
# Compute and plot moving average
y_av = movingaverage(runs, 50)
plt.xlabel('Date')
plt.ylabel('Runs')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanPerfBoxHist
# This function makes a box plot showing the mean, median and the 25th & 75th percentile runs. The
# histogram shows the frequency of scoring runs in different run ranges
#
###########################################################################################
# Plot the batting performance as a combined box plot and histogram
def batsmanPerfBoxHist(file, name="A Hitter"):
'''
Make a boxplot and a histogram of the runs scored by the batsman
Description
Make a boxplot and histogram of the runs scored by the batsman. Plot the Mean, Median, 25th and 75th quantile
Usage
batsmanPerfBoxHist(file, name="A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsman4s(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
plt.subplot(2,1,1)
sns.boxplot(batsman['Runs'])
plt.subplot(2,1,2);
atitle = name + "'s" + " - Runs Frequency vs Runs"
plt.hist(batsman['Runs'],bins=20, edgecolor='black')
plt.xlabel('Runs')
plt.ylabel('Frequency')
plt.title(atitle,size=16)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from statsmodels.tsa.arima_model import ARIMA
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: batsmanPerfForecast
# This function forecasts the batsmans performance based on past performance -
# To update
###########################################################################################
def batsmanPerfForecast(file, name="A Squarecut"):
'''
# To do: Currently ARIMA is used.
Forecast the batting performance based on past performances using Holt-Winters forecasting
Description
This function forecasts the performance of the batsman based on past performances using HoltWinters forecasting model
Usage
batsmanPerfForecast(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanPerfForecast(pathToFile,"Sachin Tendulkar")
# Note: The above example uses the file tendulkar.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=batsman['Runs'].astype('float')
# Fit a ARIMA model
date= pd.to_datetime(batsman['Start Date'])
df=pd.DataFrame({'date':date,'runs':runs})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
plt.gcf().clear()
print(residuals.describe())
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanPerfHomeAway
# This plots the batsman's performance in home versus abroad
#
###########################################################################################
def batsmanPerfHomeAway(file,name="A Hitter"):
'''
This function analyses the performance of the batsman at home and overseas
Description
This function plots the runs scored by the batsman at home and overseas
Usage
batsmanPerfHomeAway(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist bowlerContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarSp <-getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanPerfHomeAway(pathToFile,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create separate DFs for home and away
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Runs']= pd.to_numeric(df['Runs'])
atitle = name + " - Runs at home & overseas"
ax = sns.boxplot(x='venue',y='Runs',data=df)
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 30 Jun 2015
# Function: batsmanRunsFreqPerf
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Plot the performance of the batsman as a continous graph
# Create a performance plot between Runs and RunsFrequency
def batsmanRunsFreqPerf(file, name="A Hookshot"):
'''
Calculate and run frequencies in ranges of 10 runs and plot versus Runs the performance of the batsman
Description
This function calculates frequencies of runs in 10 run buckets and plots this percentage
Usage
batsmanRunsFreqPerf(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsFreqPerf(pathToFile,"Sachin Tendulkar")
'''
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(df['Runs'])
# Plot histogram
runs.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Runs histogram"
plt.title(atitle)
plt.xlabel('Runs')
plt.grid(axis='y', alpha=0.75)
plt.text(180, 90,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanRunsLikelihood
# This function used K-Means to compute and plot the runs likelihood for the batsman
# To do - Include scatterplot
###########################################################################################
def batsmanRunsLikelihood(file, name="A Squarecut") :
'''
This function uses K-Means to determine the likelihood of the batsman to get runs
Description
This function uses K-Means clustering on past innings (runs, balls faced and minutes at crease) to estimate the likelihood of the batsman scoring runs.
Usage
batsmanRunsLikelihood(file, name = "A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsLikelihood(pathToFile,"<NAME>")
'''
batsman =clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
data = batsman[['Runs','BF','Mins']]
# Create 3 different clusters
kmeans = KMeans(n_clusters=3,max_iter=300)
# Compute the clusters
kmeans.fit(data)
y_kmeans = kmeans.predict(data)
# Get the cluster centroids
centers = kmeans.cluster_centers_
centers
# Add a title
atitle= name + '-' + "Runs Likelihood"
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Draw vertical line 1st centroid
x=[centers[0][0],centers[0][0]]
y=[centers[0][1],centers[0][1]]
z=[0,centers[0][2]]
ax.plot(x,y,z,'k-',color='r',alpha=0.8, linewidth=2)
# Draw vertical line 2nd centroid
x=[centers[1][0],centers[1][0]]
y=[centers[1][1],centers[1][1]]
z=[0,centers[1][2]]
ax.plot(x,y,z,'k-',color='b',alpha=0.8, linewidth=2)
# Draw vertical line 3rd centroid
x=[centers[2][0],centers[2][0]]
y=[centers[2][1],centers[2][1]]
z=[0,centers[2][2]]
ax.plot(x,y,z,'k-',color='k',alpha=0.8, linewidth=2)
ax.set_xlabel('BallsFaced')
ax.set_ylabel('Minutes')
ax.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
from sklearn.linear_model import LinearRegression
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanRunsPredict
# This function predicts the runs that will be scored by the batsman for a given number
# of balls faced and minutes at crease
#
###########################################################################################
def batsmanRunsPredict(file, newDF, name="A Coverdrive"):
'''
Predict the runs for the batsman given the Balls Faced and Minutes in crease
Description
Fit a linear regression plane between Runs scored and Minutes in Crease and Balls Faced. This will be used to predict the batsman runs for time in crease and balls faced
Usage
batsmanRunsPredict(file, newdataframe, name="A Coverdrive")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
newdataframe
This is a data frame with 2 columns BF(Balls Faced) and Mins(Minutes)
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns a data frame with the predicted runs for the Balls Faced and Minutes at crease
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting",
# homeOrAway=c(1,2), result=c(1,2,4))
# Use a single value for BF and Mins
BF= 30
Mins= 20
# retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "tendulkar.csv", package = "cricketr")
batsmanRunsPredict(pathToFile,"<NAME>",newdataframe=data.frame(BF,Mins))
#or give a data frame
BF = np.linspace( 10, 400,15)
Mins = np.linspace(30,220,15)
newDF= pd.DataFrame({'BF':BF,'Mins':Mins})
#values <- batsmanRunsPredict("../cricketr/data/tendulkar.csv","<NAME>",
#print(values)
'''
batsman = clean(file)
df=batsman[['BF','Mins','Runs']]
df['BF']=pd.to_numeric(df['BF'])
df['Mins']=pd.to_numeric(df['Mins'])
df['Runs']=pd.to_numeric(df['Runs'])
xtrain=df.iloc[:,0:2]
ytrain=df.iloc[:,2]
linreg = LinearRegression().fit(xtrain, ytrain)
newDF['Runs']=linreg.predict(newDF)
return(newDF)
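# Usage sketch (the CSV name and player are assumptions, not bundled data):
# >>> newDF = pd.DataFrame({'BF': np.linspace(10, 400, 15), 'Mins': np.linspace(30, 220, 15)})
# >>> batsmanRunsPredict("tendulkar.csv", newDF, name="Sachin Tendulkar")
# returns newDF with an added 'Runs' column of fitted linear-regression predictions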
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanRunsRanges
# This plots the percentage runs in different run ranges
#
###########################################################################################
def batsmanRunsRanges(file, name= "A Hookshot") :
'''
Compute and plot a histogram of the runs scored in ranges of 10
Description
Compute and plot a histogram of the runs scored in ranges of 10
Usage
batsmanRunsRanges(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsRanges(pathToFile,"<NAME>")
'''
# Clean file
batsman = clean(file)
runs= pd.to_numeric(batsman['Runs'])
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Compute binWidth. Subtract '2' to separate the bars
binWidth=bins[1]-bins[0]-2
# Plot a barplot
plt.bar(midBin, hist, width=binWidth, color="blue")
plt.xlabel('Run ranges')
plt.ylabel('Frequency')
# Add a title
atitle= name + '-' + "Runs % vs Run frequencies"
plt.title(atitle)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
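# A self-contained sketch (synthetic data) of the binning idiom used in
# batsmanRunsRanges() above: np.histogram() returns bin edges, so bars are drawn
# at the bin mid-points with a width slightly smaller than the bin width.
def _example_runs_histogram():
    import numpy as np
    import matplotlib.pyplot as plt
    runs = np.random.randint(0, 200, size=300)   # synthetic innings scores
    hist, bins = np.histogram(runs, bins=20)
    midBin = (bins[:-1] + bins[1:]) / 2.0        # mid-point of each bin
    binWidth = bins[1] - bins[0] - 2             # bin width minus a small gap
    plt.bar(midBin, hist, binWidth, color="blue")
    plt.show()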
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: battingPerf3d
# This function creates a 3D scatter plot of Runs scored vs Balls Faced and Minutes in crease.
# A regression plane is fitted to this.
#
###########################################################################################
def battingPerf3d(file, name="A Hookshot") :
'''
Make a 3D scatter plot of the Runs scored versus the Balls Faced and Minutes at Crease.
Description
Make a 3D plot of the Runs scored by batsman vs Minutes in crease and Balls faced. Fit a linear regression plane
Usage
battingPerf3d(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar<- getPlayerData(35320,file="tendulkar.csv",type="batting",
#homeOrAway=[1,2],result=[1,2,4])
battingPerf3d(pathToFile,"Sachin Tendulkar")
'''
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
batsman = clean(file)
# Make a 3 D plot and fit a regression plane
atitle = name + "- Runs vs BallsFaced & Minutes"
df2=batsman[['BF','Mins','Runs']]
df2['BF']=pd.to_numeric(df2['BF'])
df2['Mins']=pd.to_numeric(df2['Mins'])
df2['Runs']=pd.to_numeric(df2['Runs'])
X=df2.iloc[:,0:2]
Y=df2.iloc[:,2]
# Fit a regression plane
linreg = LinearRegression().fit(X,Y)
bf= np.linspace(0,400,20)
mins=np.linspace(0,620,20)
xx, yy = np.meshgrid(bf,mins)
xx1=xx.reshape(-1)
yy1=yy.reshape(-1)
test=pd.DataFrame({"BallsFaced": xx1, "Minutes":yy1})
predictedRuns=linreg.predict(test).reshape(20,20)
plt3d = plt.figure().add_subplot(projection='3d')  # gca(projection=...) was removed in newer matplotlib
plt3d.scatter(df2['BF'],df2['Mins'],df2['Runs'])
plt3d.plot_surface(xx.reshape(20,20),yy,predictedRuns, alpha=0.2)
plt3d.set_xlabel('BallsFaced')
plt3d.set_ylabel('Minutes')
plt3d.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
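# A compact, synthetic sketch of the grid-prediction idiom used in battingPerf3d():
# mesh the two predictors, flatten the grid for LinearRegression.predict(), then
# reshape the predictions back to the grid shape for plot_surface().
def _example_regression_plane():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X = pd.DataFrame({'BF': rng.uniform(0, 400, 100), 'Mins': rng.uniform(0, 600, 100)})
    y = 0.5 * X['BF'] + 0.1 * X['Mins'] + rng.normal(0, 5, 100)   # synthetic runs
    linreg = LinearRegression().fit(X, y)
    xx, yy = np.meshgrid(np.linspace(0, 400, 20), np.linspace(0, 620, 20))
    grid = pd.DataFrame({'BF': xx.ravel(), 'Mins': yy.ravel()})
    predictedRuns = linreg.predict(grid).reshape(xx.shape)        # 20 x 20 surface
    return xx, yy, predictedRuns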
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsGround
# This function plots the average wickets taken by the bowler at each ground. The xlabels indicate
# the number of innings at the ground
# To do - Append number of matches to Ground
###########################################################################################
def bowlerAvgWktsGround(file, name="A Chinaman"):
'''
This function computes and plots the average wickets taken at different grounds
Description
This function computes the average wickets taken by the bowler at different grounds. It also shows the number of innings at each venue
Usage
bowlerAvgWktsGround(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsGround(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Wkts','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Wickets at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=4, color='r', linestyle=':')
plt.title(atitle)
ax=sns.barplot(x='Ground', y="Wkts_mean", data=df1)
#plt.bar(df1['Ground'],df1['Wkts_mean'])
plt.text(15, 4,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsOpposition
# This function plots the average wickets taken by the bowler against different oppositions. The xlabels indicate
# the number of innings against each opposition
# To do - Append no of matches in Opposition
###########################################################################################
def bowlerAvgWktsOpposition(file, name="A Chinaman"):
'''
This function computes and plots the average wickets taken against different oppositions
Description
This function computes the average wickets taken against different oppositions by the bowler. It also shows the number of innings against each opposition
Usage
bowlerAvgWktsOpposition(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerAvgWktsGround
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsOpposition(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Opposition','Wkts']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Wickets vs Opposition"
plt.xticks(rotation='vertical')
plt.axhline(y=3, color='r', linestyle=':')
ax=sns.barplot(x='Opposition', y="Wkts_mean", data=df1)
plt.title(atitle)
plt.text(2, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerContributionWonLost
# This plots the bowler's contribution to won and lost matches
#
###########################################################################################
def bowlerContributionWonLost(file,name="A Doosra"):
'''
Display the bowler's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the bowler in matches that were won and lost as box plots
Usage
bowlerContributionWonLost(file, name = "A Doosra")
Arguments
file
CSV file of bowler from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerContributionWonLost(pathToFile,"<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create DFs for won and lost/drawn
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack DFs
df= pd.concat([won,lost])
df['Wkts']= pd.to_numeric(df['Wkts'])
ax = sns.boxplot(x='status',y='Wkts',data=df)
atitle = name + " - Wickets in games won/lost-drawn"
plt.xlabel('Status')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgEconRate
# This function computes and plots the cumulative average economy rate of a bowler
#
###########################################################################################
def bowlerCumulativeAvgEconRate(file,name="A Googly"):
'''
Bowler's cumulative average economy rate
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(file,name)
Arguments
file
Data frame
name
Name of bowler
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgEconRate(pathToFile,"<NAME>")
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
economyRate=pd.to_numeric(bowler['Econ'])
cumEconomyRate = economyRate.cumsum()/pd.Series(np.arange(1, len(economyRate)+1), economyRate.index)
atitle = name + "- Cumulative Economy Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Economy Rate')
plt.title(atitle)
plt.plot(cumEconomyRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
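# The cumulative-average idiom above divides the running total by the innings
# count at each point. A tiny, self-contained illustration:
def _example_cumulative_average():
    import numpy as np
    import pandas as pd
    econ = pd.Series([3.0, 2.0, 4.0, 5.0])
    cum_avg = econ.cumsum() / pd.Series(np.arange(1, len(econ) + 1), econ.index)
    # cum_avg holds the mean of the first 1, 2, 3, ... values: 3.0, 2.5, 3.0, 3.5
    return cum_avg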
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgWickets
# This function computes and plots the cumulative average wickets of a bowler
#
###########################################################################################
def bowlerCumulativeAvgWickets(file,name="A Googly"):
'''
Bowler's cumulative average wickets
Description
This function computes and plots the cumulative average wickets of a bowler
Usage
bowlerCumulativeAvgWickets(file,name)
Arguments
file
Data frame
name
Name of bowler
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgWickets(pathToFile,"<NAME>")
## End(Not run)
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wktRate=pd.to_numeric(bowler['Wkts'])
cumWktRate = wktRate.cumsum()/pd.Series(np.arange(1, len(wktRate)+1), wktRate.index)
atitle = name + "- Cumulative Mean Wicket Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Mean Wickets')
plt.title(atitle)
plt.plot(cumWktRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerEconRate
# This function plots the mean economy rate versus the wickets taken by the bowler
#
###########################################################################################
def bowlerEconRate(file, name="A Bowler") :
'''
Compute and plot the Mean Economy Rate versus wickets taken
Description
This function computes the mean economy rate for the wickets taken and plot this
Usage
bowlerEconRate(file, name = "A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble <- getPlayerData(30176,dir=".", file="kumble.csv",type="batting",
# homeOrAway=[1,2],result=[1,2,4])
bowlerEconRate(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
bowler['Econ']=pd.to_numeric(bowler['Econ'])
atitle = name + " - Mean economy rate vs Wkts"
df=bowler[['Wkts','Econ']].groupby('Wkts').mean()
df = df.reset_index(inplace=False)
ax=plt.plot('Wkts','Econ',data=df)
plt.xlabel('Wickets')
plt.ylabel('Economy Rate')
plt.title(atitle)
plt.text(6, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerMovingAverage
# This function computes and plots the Moving Average of the Wickets taken for a bowler
# across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
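# A short, self-contained check of the moving-average helper above. np.convolve
# with mode 'same' keeps the output the same length as the input, so the first
# and last few points are averaged over a partially zero-padded window.
def _example_movingaverage():
    import numpy as np
    wkts = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=float)
    smoothed = movingaverage(wkts, 3)
    # interior points are true 3-point means, e.g. smoothed[1] == (1 + 2 + 3) / 3 == 2.0
    return smoothed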
def bowlerMovingAverage(file,name="A Doosra") :
'''
Compute and plot the moving average of the wickets taken for a bowler
Description
This function plots the wickets taken by a bowler as a time series and plots the moving average over the career
Usage
bowlerMovingAverage(file, name = "A Doosra")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerMovingAverage(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=pd.to_numeric(bowler['Wkts'])
date= pd.to_datetime(bowler['Start Date'])
atitle = name + "'s Moving average (Wickets)"
# Plot the wickets in grey colour
plt.plot(date,wkts,"-",color = '0.75')
y_av = movingaverage(wkts, 50)
plt.xlabel('Date')
plt.ylabel('Wickets')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: bowlerPerfForecast
# This function forecasts the bowler's performance based on past performance
#
###########################################################################################
def bowlerPerfForecast(file, name="A Googly"):
'''
# Note: this Python port is currently based on ARIMA
Forecast the bowler's performance based on past performances
Description
This function forecasts the performance of the bowler based on past performances. This version uses an ARIMA model (the original R function used Holt-Winters forecasting)
Usage
bowlerPerfForecast(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerEconRate, bowlerMovingAverage, bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerPerfForecast(pathToFile,"Anil Kumble")
'''
bowler= cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=bowler['Wkts'].astype('float')
date= pd.to_datetime(bowler['Start Date'])
df=pd.DataFrame({'date':date,'Wickets':wkts})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
residuals.plot(kind='kde')
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
plt.gcf().clear()
print(residuals.describe())
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerPerfHomeAway
# This plots the bowler's performance home and abroad
#
###########################################################################################
def bowlerPerfHomeAway(file,name="A Googly") :
'''
This function analyses the performance of the bowler at home and overseas
Description
This function plots the Wickets taken by the bowler at home and overseas
Usage
bowlerPerfHomeAway(file, name = "A Googly")
Arguments
file
CSV file of the bowler from ESPN Cricinfo (for e.g. Kumble's profile no:30176)
name
Name of bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerPerfHomeAway(path,"<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
#
home = playersp[playersp['ha'] == 1]
away = playersp[playersp['ha']==2]
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Wkts']= pd.to_numeric(df['Wkts'])
atitle = name + " - Wickets - Home & overseas"
ax = sns.boxplot(x='venue',y='Wkts',data=df)
plt.xlabel('Venue')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsFreqPercent
# This function plots the Frequency percentage of wickets taken for the bowler
#
###########################################################################################
def bowlerWktsFreqPercent(file, name="A Bowler"):
'''
Plot the Wickets Frequency as a percentage against wickets taken
Description
This function calculates the Wickets frequency as a percentage of total wickets taken and plots this against the wickets taken.
Usage
bowlerWktsFreqPercent(file, name="A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerWktsFreqPercent(pathToFile,"Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a table of wickets
wkts = pd.to_numeric(bowler['Wkts'])
wkts.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Wickets histogram"
plt.title(atitle)
plt.xlabel('Wickets')
plt.grid(axis='y', alpha=0.75)
plt.text(5,10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsRunsPlot
# This function makes a boxplot of Wickets versus Runs conceded
###########################################################################################
def bowlerWktsRunsPlot(file, name="A Googly"):
'''
Compute and plot the runs conceded versus the wickets taken
Description
This function creates boxplots on the runs conceded for wickets taken for the bowler
Usage
bowlerWktsRunsPlot(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerHistWickets
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerWktsRunsPlot(pathToFile,"Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
atitle = name + "- Wickets vs Runs conceded"
ax = sns.boxplot(x='Wkts', y='Runs', data=bowler)
plt.title(atitle)
plt.xlabel('Wickets')
plt.show()
plt.gcf().clear()
return
import pandas as pd
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function : clean
# This function cleans the batsman's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def clean(batsmanCSV):
'''
Create a batsman data frame given the batsman's CSV file
Description
The function removes rows from the batsman dataframe where the batsman did not bat (DNB) or the team did not bat (TDNB). Converts not outs '*' (97*, 128*) to 97,128 by stripping the '*' character. It picks all the complete cases and returns the data frame
Usage
clean(file)
Arguments
file
CSV file with the batsman data obtained with getPlayerData
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the cleaned batsman dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html https://gigadom.wordpress.com/
See Also
cleanBowlerData getPlayerData batsman4s batsmanMovingAverage
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
clean(pathToFile)
'''
df = pd.read_csv(batsmanCSV,na_values=['-'])
a = df['Runs'] != "DNB"
batsman = df[a]
# Remove rows with 'TDNB'
c =batsman['Runs'] != "TDNB"
batsman = batsman[c]
# Remove rows with absent
d = batsman['Runs'] != "absent"
batsman = batsman[d]
# Remove the '*' indicating not out
batsman['Runs']= batsman['Runs'].str.replace('*','', regex=False)
# Drop rows which have NA
batsman = batsman.dropna()
#Return the data frame
return(batsman)
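# A tiny, synthetic illustration (not part of the original library) of the
# row-filtering and '*'-stripping steps performed by clean() above, applied
# directly to an in-memory frame instead of a CSV file:
def _example_clean_steps():
    import pandas as pd
    df = pd.DataFrame({'Runs': ['112', 'DNB', '97*', 'TDNB', 'absent', '0']})
    df = df[~df['Runs'].isin(['DNB', 'TDNB', 'absent'])]        # drop non-innings rows
    df['Runs'] = df['Runs'].str.replace('*', '', regex=False)   # 97* -> 97 (not out)
    return df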
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function : cleanBowlerData
# This function cleans the bowler's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def cleanBowlerData(file):
'''
Clean the bowlers data frame
Description
Clean the bowler's CSV file and remove rows with DNB (Did not bowl) & TDNB (Team did not bowl). Also normalize 8-ball overs to the 6-ball equivalent for earlier bowlers
Usage
cleanBowlerData(file)
Arguments
file
The <bowler>.csv file
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
A cleaned bowler data frame with complete cases
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
clean
Examples
# Get bowling data and store in file for future
# kumble <- getPlayerData(30176,dir="./mytest", file="kumble.csv",type="bowling",
# homeOrAway=[1],result=[1,2])
cleanBowlerData(pathToFile)
'''
# Read the <bowler>.csv file
df = pd.read_csv(file,na_values=['-'])
# Remove rows with did not bowl
a = df['Overs']!= "DNB"
df = df[a]
# Remove rows with 'TDNB' - team did not bowl
c =df['Overs'] != "TDNB"
df = df[c]
# Get all complete cases
bowlerComplete = df.dropna(axis=1)
# Normalize overs bowled in matches that used 8 balls per over to the equivalent number of 6-ball overs
if bowlerComplete.columns[2] =="BPO":
bowlerComplete['Overs'] = pd.to_numeric(bowlerComplete['Overs']) *8/6
return(bowlerComplete)
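# A small numeric illustration of the over-normalization step at the end of
# cleanBowlerData(): overs bowled in 8-ball-over matches are rescaled to their
# 6-ball equivalent.
def _example_overs_normalization():
    import pandas as pd
    overs_8ball = pd.Series([10.0, 15.0])            # overs bowled at 8 balls per over
    overs_6ball = pd.to_numeric(overs_8ball) * 8 / 6
    # 10 eight-ball overs contain 80 balls, i.e. about 13.33 six-ball overs
    return overs_6ball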
import pandas as pd
import os
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function : getPlayerData
# This function gets the data of batsman/bowler and returns the data frame. This data frame can
# stored for use in other functions
##########################################################################################
def getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",type="batting",
homeOrAway=[1,2],result=[1,2,4],create=True) :
'''
Get the player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
Description
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
Usage
getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
Arguments
profile
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
opposition
The numerical value of the opposition country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
host
The numerical value of the host country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
dir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
file
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
type
type of data required. This can be "batting" or "bowling"
homeOrAway
This is vector with either 1,2 or both. 1 is for home 2 is for away
result
This is a vector that can take values 1,2,4. 1 - won match 2- lost match 4- draw
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerDataSp
Examples
## Not run:
# Both home and away. Result = won,lost and drawn
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar1.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar2.csv",
type="batting",homeOrAway=c(2),result=c(1,2))
# Get bowling data and store in file for future
kumble <- getPlayerData(30176,dir="../cricketr/data",file="kumble1.csv",
type="bowling",homeOrAway=c(1),result=c(1,2))
#Get the Tendulkar's Performance against Australia in Australia
tendulkar <-getPlayerData(35320, opposition = 2,host=2,dir=".",
file="tendulkarVsAusInAus.csv",type="batting")
'''
# Initial url to ""
url =""
suburl1 = "http://stats.espncricinfo.com/ci/engine/player/"
suburl2 ="?class=1;"
suburl3 = "template=results;"
suburl4 = "view=innings"
#Set opposition
theOpposition = "opposition=" + opposition + ";"
# Set host country
hostCountry = "host=" + host + ";"
# Create a profile.html with the profile number
player = str(profile) + ".html"
# Set the home or away
str1=str2=""
#print(len(homeOrAway))
for i in homeOrAway:
if i == 1:
str1 = str1 + "home_or_away=1;"
elif i == 2:
str1 = str1 + "home_or_away=2;"
HA= str1
# Set the type batting or bowling
t = "type=" + type + ";"
# Set the result based on input
str2=""
for i in result:
if i == 1:
str2 = str2+ "result=1;"
elif i == 2:
str2 = str2 + "result=2;"
elif i == 4:
str2 = str2 + "result=4;"
result = str2
# Create composite URL
url = suburl1 + player + suburl2 + hostCountry + theOpposition + HA + result + suburl3 + t + suburl4
#print(url)
# Read the data from ESPN Cricinfo
dfList= pd.read_html(url)
# Choose appropriate table from list of returned tables
df=dfList[3]
colnames= df.columns
# Select columns based on batting or bowling
if type=="batting" :
# Select columns [1:9,11,12,13]
cols = list(range(0,9))
cols.extend([10,11,12])
elif type=="bowling":
# Check if there are the older version of 8 balls per over (BPO) column
# [1:8,10,11,12]
# Select BPO column for older bowlers
if colnames[1] =="BPO":
# [1:8,10,11,12]
cols = list(range(0,9))
cols.extend([10,11,12])
else:
# Select columns [1:7,9,10,11]
cols = list(range(0,8))
cols.extend([8,9,10])
#Subset the necessary columns
df1 = df.iloc[:, cols]
if not os.path.exists(dir):
os.mkdir(dir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(dir,file)
if create:
# Write to file
df1.to_csv(path)
# Return the data frame
return(df1)
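# The docstring examples above are in R syntax (carried over from the cricketr
# package). A hypothetical, equivalent Python call is sketched below; the profile
# number 35320 is Sachin Tendulkar's ESPN Cricinfo profile and the directory and
# file names are placeholders only.
def _example_getPlayerData():
    tendulkar = getPlayerData(35320, dir="./data", file="tendulkar.csv",
                              type="batting", homeOrAway=[1, 2], result=[1, 2, 4])
    return tendulkar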
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: getPlayerDataSp
# This function is a specialized version of getPlayer Data. This function gets the players data
# along with details on matches' venue( home/abroad) and the result (won,lost,drawn) as
# 2 separate columns
#
###########################################################################################
def getPlayerDataSp(profileNo,tdir="./data",tfile="player001.csv",ttype="batting"):
'''
Get the player data along with venue and result status
Description
This function is a specialized version of getPlayer Data. This function gets the players data along with details on matches' venue (home/abroad) and the result of match(won,lost,drawn) as 2 separate columns (ha & result). The column ha has 1:home and 2: overseas. The column result has values 1:won , 2;lost and :drawn match
Usage
getPlayerDataSp(profileNo, tdir = "./data", tfile = "player001.csv",
ttype = "batting")
Arguments
profileNo
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
tdir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
tfile
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
ttype
type of data required. This can be "batting" or "bowling"
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe along with the homeAway and the result columns
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerData
Examples
## Not run:
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerDataSp(35320,tdir="../cricketr/data", tfile="tendulkarsp.csv",ttype="batting")
# Get bowling data and store in file for future
kumble <- getPlayerDataSp(30176,tdir="../cricketr/data",tfile="kumblesp.csv",ttype="bowling")
## End(Not run)
'''
# Get the data for the player i
# Home & won
hw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[1],type=ttype,create=False)
# Home & lost
hl = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[2],type=ttype,create=False)
# Home & drawn
hd = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[4],type=ttype,create=False)
# Away and won
aw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[1],type=ttype,create=False)
#Away and lost
al = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[2],type=ttype,create=False)
# Away and drawn
ad = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[4],type=ttype,create=False)
# Set the values as follows
# ha := home = 1, away =2
# result= won = 1, lost = 2, drawn=4
hw['ha'] = 1
hw['result'] = 1
hl['ha'] = 1
hl['result'] = 2
hd['ha'] = 1
hd['result'] = 4
aw['ha'] = 2
aw['result'] = 1
al['ha'] = 2
al['result'] = 2
ad['ha'] = 2
ad['result'] = 4
if not os.path.exists(tdir):
os.mkdir(tdir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(tdir,tfile)
df= pd.concat([hw,hl,hd,aw,al,ad])
# Write to file
df.to_csv(path,index=False)
return(df)
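# As above, a hypothetical Python equivalent of the R-style docstring examples
# for getPlayerDataSp(); the directory and file names are placeholders only.
def _example_getPlayerDataSp():
    kumble_sp = getPlayerDataSp(30176, tdir="./data", tfile="kumblesp.csv", ttype="bowling")
    return kumble_sp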
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeAvgRuns
# This function computes and plots the relative cumulative average runs of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeAvgRuns(filelist, names):
'''
Relative batsman's cumulative average runs
Description
This function computes and plots the relative cumulative average runs of batsmen
Usage
relativeBatsmanCumulativeAvgRuns(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeStrikeRate relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
relativeBatsmanCumulativeAvgRuns(batsmen,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
df=clean(file)
runs=pd.to_numeric(df['Runs'])
df1[names[idx]] = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Runs')
plt.title('Relative batsmen cumulative average runs')
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeStrikeRate
# This function computes and plots the relative cumulative average strike rate of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeStrikeRate (filelist, names):
'''
Relative batsmen cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of batsmen
Usage
relativeBatsmanCumulativeStrikeRate(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
relativeBatsmanCumulativeStrikeRate(batsmen,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
df=clean(file)
strikeRate=pd.to_numeric(df['SR'])
df1[names[idx]] = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title('Relative batsmen cumulative strike rate')
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBowlerCumulativeAvgEconRate
# This function computes and plots the relative cumulative average economy rate of bowlers
#
###########################################################################################
def relativeBowlerCumulativeAvgEconRate(filelist, names):
'''
Relative Bowler's cumulative average economy rate
Description
This function computes and plots the relative cumulative average economy rate of bowlers
Usage
relativeBowlerCumulativeAvgEconRate(frames, names)
Arguments
frames
This is a list of <bowler>.csv files obtained with an initial getPlayerData()
names
A list of Twenty20 bowlers names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgWickets relativeBatsmanCumulativeStrikeRate
Examples
frames = ["kumble.csv","warne.csv","murali.csv"]
names = ["Kumble","Warne","Murali"]
relativeBowlerCumulativeAvgEconRate(frames,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
#print(idx)
#print(file)
bowler = cleanBowlerData(file)
economyRate=pd.to_numeric(bowler['Econ'])
df1[names[idx]]= economyRate.cumsum()/pd.Series(np.arange(1, len(economyRate)+1), economyRate.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Economy Rate')
plt.title('Relative Cumulative Average Economy Rate')
plt.text(150, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBowlerCumulativeAvgWickets
# This function computes and plots the relative cumulative average wickets of bowlers
#
###########################################################################################
def relativeBowlerCumulativeAvgWickets(filelist, names):
'''
Relative bowlers cumulative average wickets
Description
This function computes and plots the relative cumulative average wickets of a bowler
Usage
relativeBowlerCumulativeAvgWickets(frames, names)
Arguments
frames
This is a list of <bowler>.csv files obtained with an initial getPlayerData()
names
A list of Twenty20 bowlers names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgEconRate relativeBatsmanCumulativeStrikeRate
Examples
## Not run:
# Retrieve the file path of a data file installed with cricketr
frames = ["kumble.csv","warne.csv","murali.csv"]
names = ["Kumble","Warne","Murali"]
relativeBowlerCumulativeAvgWickets(frames,names)
'''
df1=pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 17:19:41 2020
@author: <NAME>
"""
import pandas as pd
def int_br(x):
return int(x.replace('.',''))
def float_br(x):
return float(x.replace('.', '').replace(',','.'))
dia = '2805'
file_HU = '~/ownCloud/sesab/exporta_boletim_epidemiologico_csv_{}.csv'.format(dia)
datahu = pd.read_csv(file_HU, sep=';', decimal=',', converters={'CASOS CONFIRMADOS': int_br})
rday = 'DATA DO BOLETIM'
datahu[rday] = pd.to_datetime(datahu[rday], dayfirst=True)
datahu['DayNum'] = datahu[rday].dt.dayofyear
ref = pd.Timestamp(year=2020, month=2, day=27).dayofyear
datahu['ts0'] = datahu['DayNum'] - ref
colsutils = ['DATA DO BOLETIM', 'ts0', 'CASOS CONFIRMADOS', 'CASOS ENFERMARIA',
'CASOS UTI','TOTAL OBITOS']
dfi = datahu[colsutils]
dff = pd.DataFrame(columns=colsutils)
import pandas as pd
import numpy as np
from multiprocessing import cpu_count
from functools import partial
from scipy.optimize import minimize
from trading.accountcurve import accountCurve
from core.utility import draw_sample, weight_forecast
from multiprocessing_on_dill import Pool
from contextlib import closing
""" Bootstrap.py - find the best weights for forecasts on a single instrument. """
def optimize_weights(instrument, sample):
"""Optimize the weights on a particular sample"""
guess = [1.0] * sample.shape[1]
bounds = [(0.0,5.0)] * sample.shape[1]
def function(w, instrument, sample):
"""This is the function that is minimized iteratively using scipy.optimize.minimize to find the best weights (w)"""
wf = weight_forecast(sample, w)
# We introduce a capital term, as certain currencies like HKD are very 'numerate', which means we need millions of HKD to get a
# significant position
position = instrument.position(forecasts = wf, nofx=True, capital=10E7).rename(instrument.name).to_frame().dropna()
# position = instrument.position(forecasts = wf, nofx=True).rename(instrument.name).to_frame().dropna()
l = accountCurve([instrument], positions = position, panama_prices=instrument.panama_prices().dropna(), nofx=True)
s = l.sortino()
try:
assert np.isnan(s) == False
except:
print(sample, position)
raise
return -s
result = minimize(function, guess, (instrument, sample),\
method = 'SLSQP',\
bounds = bounds,\
tol = 0.01,\
constraints = {'type': 'eq', 'fun': lambda x: sample.shape[1] - sum(x)},\
options = {'eps': .1},
)
return result.x
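# A stripped-down, synthetic sketch of the optimisation set-up used in
# optimize_weights(): SLSQP with per-weight bounds and an equality constraint
# forcing the weights to sum to the number of forecasts. The quadratic objective
# below is only a stand-in for the real Sortino-based objective above.
def _example_weight_optimisation(n_forecasts=3):
    import numpy as np
    from scipy.optimize import minimize
    guess = [1.0] * n_forecasts
    bounds = [(0.0, 5.0)] * n_forecasts
    objective = lambda w: np.sum((np.asarray(w) - 1.0) ** 2)
    result = minimize(objective, guess,
                      method='SLSQP',
                      bounds=bounds,
                      constraints={'type': 'eq', 'fun': lambda x: n_forecasts - sum(x)})
    return result.x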
def mp_optimize_weights(samples, instrument, **kw):
"""Calls the Optimize function, on different CPU cores"""
with closing(Pool()) as pool:
return pool.map(partial(optimize_weights, instrument), samples)
def bootstrap(instrument, n=(cpu_count() * 4), **kw):
"""Use bootstrapping to optimize the weights for forecasts on a particular instrument. Sets up the samples and gets it going."""
forecasts = instrument.forecasts(**kw).dropna()
weights_buffer = pd.DataFrame()
import numpy as np
import pandas as pd
def get_market_list(client, *args):
marketList = pd.DataFrame(client.get_products()['data'])
if len(args)>0:
quoteBase = args[0]
marketList = marketList[marketList['quoteAsset']==quoteBase]
marketList['volume_24h'] = marketList['tradedMoney']
marketList = marketList[['symbol', 'volume_24h']]
tickers = pd.DataFrame(client.get_ticker())
tickers['priceChangePercent'] = pd.to_numeric(tickers['priceChangePercent'])
tickerList = pd.DataFrame()
tickerList['symbol'] = tickers['symbol']
tickerList['change_24h'] = tickers['priceChangePercent']
marketList = pd.merge(marketList, tickerList, on='symbol')
return marketList
def market_classify(client):
marketList = pd.DataFrame(client.get_products()['data'])
btcMarketList = marketList[marketList['quoteAsset']=='BTC']
usdtMarketList = marketList[marketList['quoteAsset']=='USDT']
tmp = btcMarketList.merge(usdtMarketList, on="baseAsset", how="left", indicator=True)
btcOnlyMarketList = list(tmp[tmp["_merge"] == "left_only"].drop(columns=["_merge"])['symbol_x'])
tmp = usdtMarketList.merge(btcMarketList, on="baseAsset", how="left", indicator=True)
usdtOnlyMarketList = list(tmp[tmp["_merge"] == "left_only"].drop(columns=["_merge"])['symbol_x'])
return btcOnlyMarketList, usdtOnlyMarketList
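# market_classify() above uses a merge with indicator=True as an "anti-join":
# rows flagged '_merge' == 'left_only' exist only in the left frame. A small,
# self-contained illustration of that pattern:
def _example_anti_join():
    import pandas as pd
    left = pd.DataFrame({'baseAsset': ['BNB', 'ETH', 'XRP']})
    right = pd.DataFrame({'baseAsset': ['ETH']})
    tmp = left.merge(right, on='baseAsset', how='left', indicator=True)
    left_only = tmp[tmp['_merge'] == 'left_only'].drop(columns=['_merge'])
    # left_only contains BNB and XRP, i.e. the assets present only in the left list
    return left_only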
def get_trades(client, market, timeDuration, timeFrame):
klines = client.get_historical_klines(symbol=market,
interval=timeFrame,
start_str=timeDuration)
n_transactions = sum([item[8] for item in klines])
toId = client.get_historical_trades(symbol=market, limit=1)[0]['id']
listId = np.arange(toId-n_transactions+1, toId-10,500)
trades = []
for fromId in listId:
trades = trades+client.get_historical_trades(symbol=market,
fromId=str(fromId))
trades = pd.DataFrame(trades)
trades['price'] = pd.to_numeric(trades['price'])
trades['qty'] = pd.to_numeric(trades['qty'])
trades['time'] = pd.to_datetime(trades['time'], unit='ms')
return trades
def get_candles(client, market, timeFrame, timeDuration):
klines = client.get_historical_klines(symbol=market,
interval=timeFrame,
start_str=timeDuration)
klines = pd.DataFrame(klines)
candles = pd.DataFrame()
candles['open_time'] = klines[0]
candles['close_time'] = klines[6]
candles['n_trades'] = klines[8]
candles['open'] = pd.to_numeric(klines[1])
candles['high'] = pd.to_numeric(klines[2])
candles['low'] = pd.to_numeric(klines[3])
candles['close'] = pd.to_numeric(klines[4])
candles['assetVolume'] = pd.to_numeric(klines[5])
candles['buyAssetVolume'] = pd.to_numeric(klines[9])
candles['sellAssetVolume'] = candles['assetVolume']-candles['buyAssetVolume']
candles['quoteVolume'] = pd.to_numeric(klines[7])
candles['buyQuoteVolume'] = pd.to_numeric(klines[10])
import pymorphy2
import re
import string
import os
import time
import collections as cl
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from collections import defaultdict
from matplotlib.backends.backend_pdf import PdfPages
def numnum(y):
return sum([i[1] for i in y])
def remove_border(axes=None, top=False, right=False, left=True, bottom=True):
ax = axes or plt.gca()
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
def hihi(l, cc, s, mc=None, kost=None):
labels, values = zip(*l)
indexes = np.arange(len(labels))
plt.plot(indexes, values, cc)
plt.grid(axis = 'x', color = 'k', linestyle=':')
plt.grid(axis = 'y', color ='k', linestyle=':')
plt.title(s, fontsize = 16, loc = 'left', fontweight='light')
if not mc:
plt.ylabel('количество употреблений')
plt.xlabel('количество слов')
else:
plt.ylabel('количество употреблений')
plt.xticks(indexes, labels, fontsize=10)
def pipi(w, k, s):
y = cl.Counter(w[k]).most_common()
z1, v1 = zip(*y)
plt.pie(v1, labels=z1, autopct='%1.1f%%', startangle=90,
colors=['#df2020', 'orange', 'w'])
plt.title(s, fontsize = 16, loc = 'left', fontweight='light')
def fifi(ww, s, cc, k=None):
if k:
NounWerte = cl.Counter(ww[k]).most_common()
NN = numnum(NounWerte)
else:
l = []
for ke in ww:
n = [ke for i in range(len(ww[ke]))]
l.extend(n)
NounWerte = cl.Counter(l).most_common()
NN = numnum(NounWerte)
Etiketten, Werte = zip(*NounWerte)
pos = np.arange(len(Werte))
plt.barh(pos, Werte, color = cc)
for p, W in zip(pos, Werte):
plt.annotate(str(W)+' – '+str(nn(NN, W)), xy=(W + 1, p), fontsize = 10,
va='center')
plt.yticks(pos, Etiketten, fontsize = 10)
xt = plt.xticks()[0]
plt.xticks(xt, [' '] * len(xt))
remove_border(left=False, bottom=False)
plt.grid(axis = 'x', color ='w', linestyle=':')
plt.gca().invert_yaxis()
plt.title(s, fontsize = 16, loc = 'left', fontweight='light')
def tete(s=None, n=None, ss=None):
if type(n) == int:
if n < 99999:
plt.text(0,.4,str(n), fontsize=85, fontweight='light')
else:
plt.text(0,.4,'>'+str(int(n/1000))+'k', fontsize=85,
fontweight='light')
if ss:
ss2 = ss
ss = str(n)
ss = ss + '\n' + '**' + ss2
else:
ss = str(n) + '\n'
else:
plt.text(0,.4,str(n), fontsize=85, fontweight='light')
plt.text(0,.7,s, fontsize=18, fontweight='light', )
if ss:
plt.text(0,.2,'*'+ss,
fontsize=18, fontweight='light', color='#df2020')
plt.xticks([])
plt.yticks([])
remove_border(left=False, bottom=False)
Interpunktion = (string.punctuation+'\u2014\u2013\u2012\u2010\u2212'+
'«»‹›‘’“”„`…–')
Wort_Tokenize = re.compile(r"([^\w_\u2019\u2010\u002F-]|[+])")
def Word_Tokenizer(text):
return [t.lower() for t in Wort_Tokenize.split(text)
if t and not t.isspace() and not t in Interpunktion
and not any(map(str.isdigit, t))]
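# A brief illustration of the tokenizer above: punctuation, whitespace and any
# token containing a digit are discarded, and the surviving tokens are lower-cased.
# The sample sentence is arbitrary.
def _example_word_tokenizer():
    tokens = Word_Tokenizer('Мама мыла раму 2 раза!')
    # expected to yield ['мама', 'мыла', 'раму', 'раза'] (the digit token is dropped)
    return tokens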
def Satz(n):
S = set()
[S.update(set(i)) for i in n.values()]
return S
nn = lambda x, y: '%.1f%%' %((100/x)*y)
def main():
while True:
ms = {'s':'Small text', 'b':'Big text'}
print('Choose mode.\n\ns - for ' + ms['s'] + '\n\nb - for ' + ms['b'])
Mode = input()
if Mode in ms:
print('\n\n***SchoenTextStat is in ' + ms[Mode] + ' mode***\n\n')
break
else:
print('\nThere is no such mode')
Pfad = input('Path to the file: ')
RName = os.path.basename(Pfad)
if not os.path.isfile(Pfad):
print("\n~~~~~~~~~~~~~~~~~~~~~~File doesn't exist.~~~~~~~~~~~~~~~~~~~~~~\n\n\n",
"I see my path, but I don't know where it leads.\n",
"Not knowing where I'm going is what inspires me to travel it.",
"\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
pass
Starten = time.time()
with open(Pfad, 'r', encoding='utf-8') as f:
Gross_Liste = Word_Tokenizer(f.read())
Morph = pymorphy2.MorphAnalyzer()
Wordforms = set(Gross_Liste)
Gefaelschtes = []
OMO = []
Lemmas_Woerterbuch = defaultdict(set)
Wordforms_Woerterbuch = defaultdict(list)
Faelle_Woerterbuch = defaultdict(list)
Verben_Woerterbuch = defaultdict(list)
for Wort in Wordforms:
if 'ё' in Wort:
Wort = Wort.replace('ё','е')
MP_W = Morph.parse(Wort)
Informationen = MP_W[0]
IT = Informationen.tag
Wortart = IT.POS
if Wortart:
if len(MP_W) > 1:
o = [Wort, len(MP_W)]
OMO.append(o)
Lemma = Informationen.normal_form
if str(Informationen.methods_stack[0][0]) == '<FakeDictionary>':
m = [Wort, IT.cyr_repr, Informationen.normal_form, round(Informationen.score, 2)]
Gefaelschtes.append(m)
Wordforms_Woerterbuch[Morph.lat2cyr(Wortart)].append(Wort)
Lemmas_Woerterbuch[Morph.lat2cyr(Wortart)].add(Lemma)
if Wortart == 'NOUN' or Wortart == 'ADJF':
Case = IT.case
Faelle_Woerterbuch[Morph.lat2cyr(Wortart)].append(Morph.lat2cyr(Case))
if Wortart == 'VERB':
Nummer, Zeit, Person = IT.number, IT.tense, IT.person
if Nummer:
Verben_Woerterbuch['Число'].append(Morph.lat2cyr(Nummer))
if Zeit:
Verben_Woerterbuch['Время'].append(Morph.lat2cyr(Zeit))
if Person:
Verben_Woerterbuch['Лицо'].append(Morph.lat2cyr(Person))
Gross_Nummer = len(Gross_Liste) #Словоупотреблений
Wordforms_Nummer = len(Wordforms) #Словоформ (с проблемными)
Lemmas_Nummer = len(Satz(Lemmas_Woerterbuch)) #Лемм по множеству
Prozentsatz_dem_Lemmas = nn(Wordforms_Nummer, Lemmas_Nummer)
Index_des_Reichtums = round((Lemmas_Nummer/Wordforms_Nummer), 2)
with open('stop_words.txt', 'r', encoding='utf-8') as f:
Stop = f.read().split()
Nein_Stop = []
for i in cl.Counter(Gross_Liste).most_common():
if not i[0] in Stop:
Nein_Stop.append(i)
GL_C = cl.Counter(Gross_Liste).most_common()
if len(GL_C) > 10:
gl = []
for i in GL_C:
gl.append(i)
if i[1] == 1:
GL_C = gl
break
if not os.path.isdir('./result'):
os.mkdir('./result')
pp = PdfPages('./result/result_'+RName+'.pdf')
fig = plt.figure(figsize=(16,6))
tete('', 'Результат\nанализа текста', RName)
pp.savefig(fig)
plt.close()
fig = plt.figure(figsize=(16,4.65))
plt.subplot(1,3,1)
tete('Количество\nсловоупотреблений', Gross_Nummer)
plt.subplot(1,3,2)
tete('Количество\nсловоформ', Wordforms_Nummer)
plt.subplot(1,3,3)
tete('Количество\nлемм', Lemmas_Nummer, Prozentsatz_dem_Lemmas+
' от числа\nсловоформ')
pp.savefig(fig)
plt.close()
if Mode == 'b':
fig = plt.figure(figsize=(16,6))
plt.subplot(1,2,1)
hihi(GL_C, '#df2020', 'Характер распределения\nслов в тексте (max, min)', kost=True)
plt.subplot(1,2,2)
hihi(GL_C[:10], '#df2020', 'Распределение '+str(len(GL_C[:10]))+
' наиболее частых\nслов в тексте',
mc=True, kost=True)
top10 = GL_C[:10]
yy = .75*top10[0][1]
top10N = sum([i[1] for i in top10])
plt.text(4, yy, '*'+str(nn(Gross_Nummer, top10N))+
'\nот общего\nколичества слов',
fontsize=25, color='black', fontweight='light')
pp.savefig(fig)
plt.close()
if Mode == 's':
fig = plt.figure(figsize=(16,6))
hihi(GL_C, '#df2020', 'Характер распределения\nслов в тексте (max, min)', kost=True)
pp.savefig(fig)
plt.close()
fig = plt.figure(figsize=(16,6))
hihi(GL_C[:10], '#df2020', 'Распределение '+str(len(GL_C[:10]))+
' наиболее частых\nслов в тексте',
mc=True, kost=True)
top10 = GL_C[:10]
yy = .75*top10[0][1]
top10N = sum([i[1] for i in top10])
plt.text(4, yy, '*'+str(nn(Gross_Nummer, top10N))+
'\nот общего\nколичества слов',
fontsize=25, color='black', fontweight='light')
pp.savefig(fig)
plt.close()
LNS = len(Nein_Stop)
targ = 10
if LNS > 0:
if LNS < 10:
targ = LNS
fig = plt.figure(figsize=(16,6))
hihi(Nein_Stop[:targ], '#df2020',
'Распределение '+str(targ)+
' наиболее частых\nслов в тексте (без стоп-слов)',
mc=True, kost=True)
yy2 = .75*Nein_Stop[0][1]
Nein_GN = sum([i[1] for i in Nein_Stop])
Nein_StopN = sum([i[1] for i in Nein_Stop[:targ]])
plt.text(7, yy2, '*'+str(nn(Gross_Nummer, Nein_GN))+
'\nот общего\nколичества',
fontsize=25, color='black', fontweight='light')
pp.savefig(fig)
plt.close()
fig = plt.figure(figsize=(16,6))
fifi(Wordforms_Woerterbuch, 'Распределение cловоформ\nпо частям речи',
'#df2020')
pp.savefig(fig)
plt.close()
fig = plt.figure(figsize=(16,6))
fifi(Lemmas_Woerterbuch,
'Распределение лемм\nпо частям речи', 'orange')
pp.savefig(fig)
plt.close()
fig = plt.figure(figsize=(16,4.65))
plt.subplot(1,2,1)
tete(u"Коэффициент\nлексического богатства текста", Index_des_Reichtums,
'отношение числа различных\nлемм к общему числу словоформ')
plt.subplot(1,2,2)
tete(u"Процент\nомонимичных словоформ", nn(Wordforms_Nummer, len(OMO)))
pp.savefig(fig)
plt.close()
fig, ax = plt.subplots(figsize=(16,6))
Ue = []
for k, v in Faelle_Woerterbuch.items():
for kk, vv in cl.Counter(v).items():
lll = [vv, kk, k]
Ue.append(lll)
NNz1 = max([numnum(cl.Counter(Faelle_Woerterbuch['СУЩ']).most_common()),
numnum(cl.Counter(Faelle_Woerterbuch['ПРИЛ']).most_common())])
NNz2 = min([numnum(cl.Counter(Faelle_Woerterbuch['СУЩ']).most_common()),
numnum(cl.Counter(Faelle_Woerterbuch['ПРИЛ']).most_common())])
Ue.sort(reverse=True)
df = pd.DataFrame(Ue, columns=['кол', 'п', 'чр'])
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Methods for processing VERIFICATION data.
"""
import os
import re
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pickle
import requests
from collections import OrderedDict
from mosx.MesoPy import Meso
from mosx.obs.methods import get_obs_hourly, reindex_hourly
from mosx.util import generate_dates, get_array, get_ghcn_stid
def get_cf6_files(config, station_id, num_files=1):
"""
After code by <NAME>
Retrieves CF6 climate verification data released by the NWS. Parameter num_files determines how many recent files
are downloaded.
:param station_id: station ID to obtain cf6 files for
"""
# Create directory if it does not exist
site_directory = config['SITE_ROOT']
# Construct the web url address. Check if a special 3-letter station ID is provided.
nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'
stid3 = station_id[1:].upper()
nws_url = nws_url % stid3
# Determine how many files (iterations of product) we want to fetch
if num_files == 1:
if config['verbose']:
print('get_cf6_files: retrieving latest CF6 file for %s' % station_id)
else:
if config['verbose']:
print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, station_id))
# Fetch files
for r in range(1, num_files + 1):
# Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files
version = 'version=%d&glossary=0' % r
nws_site = '&'.join((nws_url, version))
response = requests.get(nws_site)
cf6_data = response.text
# Remove the header
try:
body_and_footer = cf6_data.split('CXUS')[1] # Mainland US
except IndexError:
try:
body_and_footer = cf6_data.split('CXHW')[1] # Hawaii
except IndexError:
body_and_footer = cf6_data.split('CXAK')[1] # Alaska
body_and_footer_lines = body_and_footer.splitlines()
if len(body_and_footer_lines) <= 2:
body_and_footer = cf6_data.split('000')[2]
# Remove the footer
body = body_and_footer.split('[REMARKS]')[0]
# Find the month and year of the file
current_year = re.search(r'YEAR: *(\d{4})', body).groups()[0]
try:
current_month = re.search(r'MONTH: *(\D{3,9})', body).groups()[0]
current_month = current_month.strip() # Gets rid of newlines and whitespace
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%B %Y')
except: # Some files have a different formatting, although this may be fixed now.
current_month = re.search(r'MONTH: *(\d{2})', body).groups()[0]
current_month = current_month.strip()
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%m %Y')
# Write to a temporary file, check if output file exists, and if so, make sure the new one has more data
datestr = file_date.strftime('%Y%m')
filename = '%s/%s_%s.cli' % (site_directory, station_id.upper(), datestr)
temp_file = '%s/temp.cli' % site_directory
with open(temp_file, 'w') as out:
out.write(body)
def file_len(file_name):
with open(file_name) as f:
for i, l in enumerate(f):
pass
return i + 1
if os.path.isfile(filename):
old_file_len = file_len(filename)
new_file_len = file_len(temp_file)
if old_file_len < new_file_len:
if config['verbose']:
print('get_cf6_files: overwriting %s' % filename)
os.remove(filename)
os.rename(temp_file, filename)
else:
if config['verbose']:
print('get_cf6_files: %s already exists' % filename)
else:
if config['verbose']:
print('get_cf6_files: writing %s' % filename)
os.rename(temp_file, filename)
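# Example usage (hypothetical config and station ID):
# get_cf6_files(config, 'KSEA', num_files=2) # fetch the two most recent CF6 files for KSEA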
def _cf6(config, station_id):
"""
After code by <NAME>
This function is used internally only.
Generates verification values from climate CF6 files stored in SITE_ROOT. These files can be generated
externally by get_cf6_files.py. This function is not necessary if climo data from _climo is found, except for
recent values which may not be in the NCDC database yet.
:param config:
:param station_id: station ID to obtain cf6 files for
:return: dict: wind values from CF6 files
"""
if config['verbose']:
print('_cf6: searching for CF6 files in %s' % config['SITE_ROOT'])
allfiles = os.listdir(config['SITE_ROOT'])
filelist = [f for f in allfiles if f.startswith(station_id.upper()) and f.endswith('.cli')]
filelist.sort()
if len(filelist) == 0:
raise IOError('No CF6 files found.')
if config['verbose']:
print('_cf6: found %d CF6 files.' % len(filelist))
# Interpret CF6 files
if config['verbose']:
print('_cf6: reading CF6 files')
cf6_values = {}
for file in filelist:
year, month = re.search(r'(\d{4})(\d{2})', file).groups()
infile = open('%s/%s' % (config['SITE_ROOT'], file), 'r')
for line in infile:
matcher = re.compile(
    r'( \d|\d{2}) ( \d{2}|-\d{2}| \d| -\d|\d{3})')
if matcher.match(line):
# We've found an ob line!
lsp = line.split()
day = int(lsp[0])
curdt = datetime(int(year), int(month), day)
cf6_values[curdt] = {}
# Max temp
if lsp[1] == 'M':
cf6_values[curdt]['max_temp'] = -999.0
else:
cf6_values[curdt]['max_temp'] = float(lsp[1])
# Min temp
if lsp[2] == 'M':
cf6_values[curdt]['min_temp'] = 999.0
else:
cf6_values[curdt]['min_temp'] = float(lsp[2])
# Precipitation
if lsp[7] == 'M':
cf6_values[curdt]['precip'] = -999.0
elif lsp[7] == 'T':
cf6_values[curdt]['precip'] = 0
else:
cf6_values[curdt]['precip'] = float(lsp[7])
# Wind
if lsp[11] == 'M':
cf6_values[curdt]['wind'] = 0.0
else:
cf6_values[curdt]['wind'] = float(lsp[11]) * 0.868976
return cf6_values
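# _cf6 returns a dict keyed by datetime; an illustrative entry:
# {datetime(2018, 1, 1): {'max_temp': 45.0, 'min_temp': 33.0, 'precip': 0.12, 'wind': 21.7}, ...}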
def _climo(config, station_id, dates=None):
"""
Fetches climo data using ulmo package to retrieve NCDC archives.
:param config:
:param station_id: station ID to obtain cf6 files for
:param dates: list of datetime objects
:return: dict of high temp, low temp, max wind, and precipitation values
"""
import ulmo
if config['verbose']:
print('_climo: fetching data from NCDC (may take a while)...')
climo_dict = {}
ghcn_stid = get_ghcn_stid(config, station_id)
try:
D = ulmo.ncdc.ghcn_daily.get_data(ghcn_stid, as_dataframe=True, elements=['TMAX','TMIN','WSF2','PRCP'])
wind = D['WSF2']
use_wind = True
except KeyError: #no maximum wind data, perhaps because station is outside U.S.
D = ulmo.ncdc.ghcn_daily.get_data(ghcn_stid, as_dataframe=True, elements=['TMAX','TMIN','PRCP'])
use_wind = False
if dates is None:
dates = list(D['TMAX'].index.to_timestamp().to_pydatetime())
for date in dates:
try:
a = D['TMAX'].loc[date]
climo_dict[date] = {}
climo_dict[date]['max_temp'] = D['TMAX'].loc[date]['value']*0.18+32.0
climo_dict[date]['min_temp'] = D['TMIN'].loc[date]['value']*0.18+32.0
if use_wind:
climo_dict[date]['wind'] = D['WSF2'].loc[date]['value'] / 10. * 1.94384
climo_dict[date]['precip'] = D['PRCP'].loc[date]['value'] / 254.0
except KeyError: #missing data
if config['verbose']:
print('_climo: climo data missing for %s' % date)
return climo_dict
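# Example usage (hypothetical station and dates):
# climo = _climo(config, 'KSEA', dates=[datetime(2018, 1, 1), datetime(2018, 1, 2)])
# climo[datetime(2018, 1, 1)]['max_temp'] # daily high in degrees F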
def pop_rain(series):
"""
Converts a series of rain values into 0 or 1 depending on whether there is measurable rain
:param series:
:return:
"""
new_series = series.copy()
new_series[series >= 0.01] = 1.
new_series[series < 0.01] = 0.
return new_series
def categorical_rain(series):
"""
Converts a series of rain values into categorical precipitation quantities a la MOS.
:param series:
:return:
"""
new_series = series.copy()
for j in range(len(series)):
if series.iloc[j] < 0.01:
new_series.iloc[j] = 0.
elif series.iloc[j] < 0.10:
new_series.iloc[j] = 1.
elif series.iloc[j] < 0.25:
new_series.iloc[j] = 2.
elif series.iloc[j] < 0.50:
new_series.iloc[j] = 3.
elif series.iloc[j] < 1.00:
new_series.iloc[j] = 4.
elif series.iloc[j] < 2.00:
new_series.iloc[j] = 5.
elif series.iloc[j] >= 2.00:
new_series.iloc[j] = 6.
else: # missing, or something else that's strange
new_series.iloc[j] = 0.
return new_series
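# Illustrative behaviour of the two converters:
# pop_rain(pd.Series([0.0, 0.005, 0.30])) -> 0., 0., 1.
# categorical_rain(pd.Series([0.0, 0.30, 1.50])) -> 0., 3., 5.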
def verification(config, output_files=None, csv_files=None, use_cf6=True, use_climo=True, force_rain_quantity=False):
"""
Generates verification data from MesoWest and saves to a file, which is used to train the model and check test
results.
:param config:
:param output_files: str: output file path if just one station, or list of output file paths if multiple stations
:param csv_files: str: path to csv file containing observations if just one station, or list of paths to csv files if multiple stations
:param use_cf6: bool: if True, uses data from CF6 files (only for U.S. stations)
:param use_climo: bool: if True, uses data from NCDC climatology
:param force_rain_quantity: if True, returns the actual quantity of rain (rather than POP); useful for validation
files
:return:
"""
if config['multi_stations']: #Train on multiple stations
station_ids = config['station_id']
if len(station_ids) != len(output_files): #There has to be the same number of output files as station IDs, so raise error if not
raise ValueError("There must be the same number of output files as station IDs")
if len(station_ids) != len(csv_files): #There has to be the same number of output files as station IDs, so raise error if not
raise ValueError("There must be the same number of csv files as station IDs")
else:
station_ids = [config['station_id']]
if output_files is not None:
output_files = [output_files]
if csv_files is not None:
csv_files = [csv_files]
for i in range(len(station_ids)):
station_id = station_ids[i]
if output_files is None:
output_file = '%s/%s_verif.pkl' % (config['SITE_ROOT'], station_id)
else:
output_file = output_files[i]
if csv_files is None:
csv_file = '%s/%s_verif.csv' % (config['SITE_ROOT'], station_id)
else:
csv_file = csv_files[i]
dates = generate_dates(config)
api_dates = generate_dates(config, api=True, api_add_hour=config['forecast_hour_start'] + 24)
datename = 'date_time_minus_%d' % config['forecast_hour_start']
if config['verbose']:
print('verification: obtaining observations from csv file')
all_obspd = pd.read_csv(csv_file)
vars_request=['air_temp','precip_accum_one_hour', 'wind_speed', 'air_temp_low_6_hour', 'air_temp_high_6_hour','precip_accum_six_hour']
for var in vars_request[:]: #see if variable is available, and remove from vars_request list if not
try:
obspd = all_obspd[var]
if var == 'precip_accum_one_hour' and (sum(all_obspd['precip_accum_one_hour']) == 0 or np.isnan(sum(all_obspd['precip_accum_one_hour']))): #sometimes precip_accum_one_hour column exists even if there is no real data
vars_request.remove('precip_accum_one_hour')
except KeyError: #no such variable, so remove from vars_request list
vars_request.remove(var)
obspd = all_obspd[['date_time']+vars_request] #subset of data used as verification
obspd['date_time']=np.array([datetime.strptime(date, '%Y-%m-%d %H:%M:%S') for date in obspd['date_time'].values],dtype='datetime64[s]')
if config['verbose']:
print('verification: setting time back %d hours for daily statistics' % config['forecast_hour_start'])
dateobj = pd.to_datetime(obspd['date_time']) - timedelta(hours=config['forecast_hour_start'])
obspd['date_time'] = dateobj
obspd = obspd.rename(columns={'date_time': datename})
# Reformat data into hourly and daily
# Hourly
def hour(dates):
date = dates.iloc[0]
if type(date) == str: #if data is from csv file, date will be a string instead of a datetime object
#depending on which version of NumPy or pandas you use, the first or second statement will work
try:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
except:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S+00:00')
return datetime(date.year, date.month, date.day, date.hour)
def last(values):
return values.iloc[-1]
aggregate = {datename: hour}
if 'air_temp_high_6_hour' in vars_request and 'air_temp_low_6_hour' in vars_request:
aggregate['air_temp_high_6_hour'] = np.max
aggregate['air_temp_low_6_hour'] = np.min
aggregate['air_temp'] = {'air_temp_max': np.max, 'air_temp_min': np.min}
if 'precip_accum_six_hour' in vars_request:
aggregate['precip_accum_six_hour'] = np.max
aggregate['wind_speed'] = np.max
if 'precip_accum_one_hour' in vars_request:
aggregate['precip_accum_one_hour'] = np.max
if config['verbose']:
print('verification: grouping data by hour for hourly observations')
# Note that obs in hour H are reported at hour H, not H+1
obs_hourly = obspd.groupby([pd.DatetimeIndex(obspd[datename]).year,
pd.DatetimeIndex(obspd[datename]).month,
pd.DatetimeIndex(obspd[datename]).day,
pd.DatetimeIndex(obspd[datename]).hour]).agg(aggregate)
# Rename columns
col_names = obs_hourly.columns.values
col_names_new = []
for c in range(len(col_names)):
if col_names[c][0] == 'air_temp':
col_names_new.append(col_names[c][1])
else:
col_names_new.append(col_names[c][0])
obs_hourly.columns = col_names_new
# Daily
def day(dates):
date = dates.iloc[0]
if type(date) == str: #if data is from csv file, date will be a string instead of a datetime object
#depending on which version of NumPy or pandas you use, the first or second statement will work
try:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
except:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S+00:00')
return datetime(date.year, date.month, date.day)
def min_or_nan(a):
'''
Returns the minimum of a 1D array if there are at least 4 non-NaN values, and NaN otherwise. This ensures that
days with incomplete data yield NaN, rather than a misleading value, when grouping into daily data.
'''
if np.count_nonzero(~np.isnan(a)) < 4: #incomplete data
return np.nan
else:
return np.min(a)
def max_or_nan(a):
'''
Returns the maximum of a 1D array if there are at least 4 non-NaN values, and NaN otherwise. This ensures that
days with incomplete data yield NaN, rather than a misleading value, when grouping into daily data.
'''
if np.count_nonzero(~np.isnan(a)) < 4: #incomplete data
return np.nan
else:
return np.max(a)
aggregate[datename] = day
aggregate['air_temp_min'] = np.min
aggregate['air_temp_max'] = np.max
if 'air_temp_high_6_hour' in vars_request and 'air_temp_low_6_hour' in vars_request:
aggregate['air_temp_low_6_hour'] = min_or_nan
aggregate['air_temp_high_6_hour'] = max_or_nan
aggregate['wind_speed'] = np.max
if 'precip_accum_one_hour' in vars_request:
aggregate['precip_accum_one_hour'] = np.sum
if 'precip_accum_six_hour' in vars_request:
aggregate['precip_accum_six_hour'] = np.sum
try:
aggregate.pop('air_temp')
except:
pass
if config['verbose']:
print('verification: grouping data by day for daily verifications')
obs_daily = obs_hourly.groupby([pd.DatetimeIndex(obs_hourly[datename]).year,
pd.DatetimeIndex(obs_hourly[datename]).month,
pd.DatetimeIndex(obs_hourly[datename]).day]).agg(aggregate)
obs_hourly_copy = obs_hourly.copy()
obs_hourly_copy.set_index(datename,inplace=True)
if config['verbose']:
print('verification: checking matching dates for daily obs and CF6')
if use_climo:
try:
climo_values = _climo(config, station_id, dates)
except BaseException as e:
if config['verbose']:
print("verification: warning: '%s' while reading climo data" % str(e))
climo_values = {}
else:
if config['verbose']:
print('verification: not using climo.')
climo_values = {}
if use_cf6:
num_months = int(min((datetime.utcnow() - dates[0]).days / 30, 24))  # get_cf6_files expects an integer count
try:
get_cf6_files(config, station_id, num_months)
except BaseException as e:
if config['verbose']:
print("verification: warning: '%s' while getting CF6 files" % str(e))
try:
cf6_values = _cf6(config, station_id)
except BaseException as e:
if config['verbose']:
print("verification: warning: '%s' while reading CF6 files" % str(e))
cf6_values = {}
else:
if config['verbose']:
print('verification: not using CF6.')
cf6_values = {}
climo_values.update(cf6_values) # CF6 has precedence
count_rows = 0
for index, row in obs_daily.iterrows():
date = row[datename]
use_cf6_precip = False
if 'air_temp_high_6_hour' in vars_request:
max_temp_var = 'air_temp_high_6_hour'
else:
max_temp_var = 'air_temp_max'
if 'air_temp_low_6_hour' in vars_request:
min_temp_var = 'air_temp_low_6_hour'
else:
min_temp_var = 'air_temp_min'
if 'precip_accum_six_hour' in vars_request:
precip_var = 'precip_accum_six_hour'
obs_precip = round(row[precip_var],2)
elif 'precip_accum_one_hour' in vars_request:
precip_var = 'precip_accum_one_hour'
obs_precip = round(row[precip_var],2)
else:
precip_var = 'precip_accum_six_hour'
obs_precip = np.nan
use_cf6_precip = True #no precip data in METARs
obs_max_temp = row[max_temp_var]
obs_min_temp = row[min_temp_var]
obs_wind = row['wind_speed']
obs_daily.loc[index, 'wind_speed'] = obs_wind
if np.isnan(obs_max_temp) and np.isnan(obs_min_temp): #if high or low temperature is missing, chances are some precipitation data is missing too
use_cf6_precip = True
# Check for missing or incorrect 6-hour precipitation amounts. If there are any, use sum of 1-hour precipitation amounts if none are missing.
if 'precip_accum_six_hour' in vars_request: #6-hour precipitation amounts were used
daily_precip = 0.0
for hour in [5,11,17,23]: #check the 4 times which should have 6-hour precipitation amounts
try:
obs_6hr_precip = round(obs_hourly_copy['precip_accum_six_hour'][pd.Timestamp(date.year,date.month,date.day,hour)],2)
except KeyError: #incomplete data for date
use_cf6_precip = True
break
if np.isnan(obs_6hr_precip):
obs_6hr_precip = 0.0
sum_hourly_precip = 0.0
for hour2 in range(hour-5,hour+1): #check and sum 1-hour precipitation amounts
obs_hourly_precip = obs_hourly_copy['precip_accum_one_hour'][pd.Timestamp(date.year,date.month,date.day,hour2)]
# Core functions
#
# this file contains reusable core functions like filtering on university
# and adding year and month name info
# these are functions which are generally used in every product
# roadmap: I want to move all of these loose functions
# into functions grouped together in classes
from nlp_functions import remove_punctuation
from nlp_functions import get_abstract_if_any
from nlp_functions import comma_space_fix
#from static import PATH_START, PATH_START_PERSONAL
#from static import PATH_START_SERVER , PATH_START_PERSONAL_SERVER
#from static import UNPAYWALL_EMAIL
#from static import PATH_STATIC_RESPONSES
#from static import PATH_STATIC_RESPONSES_ALTMETRIC
#from static import PATH_STATIC_RESPONSES_SCOPUS_ABS
#from static import MAX_NUM_WORKERS # not used everywhere so care
import pandas as pd
import calendar
import numpy as np
import requests
from pybliometrics.scopus import ScopusSearch
from pybliometrics.scopus import AbstractRetrieval
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from functools import partial
### from functools import wraps
import time
from datetime import datetime # new
from datetime import timedelta
import re
import mysql.connector
from mysql.connector import Error
from altmetric import Altmetric
import pickle
import functools
from unittest.mock import Mock
from requests.models import Response
#import sys
from nlp_functions import faculty_finder
from pybliometrics.scopus import config
from pybliometrics.scopus.exception import Scopus429Error
import static
def overloaded_abstract_retrieval(identifier, view='FULL', refresh=True, id_type='eid'):
"""
The only thing this extra layer does is swap api-keys on error 429
Any multi-threading etc is done elsewhere (and may need its own testing as always)
"""
try:
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
time.sleep(0.05)
except Scopus429Error:
# Use the last item of _keys, drop it and assign it as
# current API key
# update: keep swapping until it works
still_error = True
while still_error:
if len(static.SCOPUS_KEYS) > 0:
config["Authentication"]["APIKey"] = static.SCOPUS_KEYS.pop()
try:
time.sleep(1) # only when key has changed so 1s is fine
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
still_error = False
except Scopus429Error: # NO! only for 429
print('error, key pop will happen at top of while top')
except:
print('non429 error')
still_error = False
res = None # ?
else:
still_error = False
res = None # ?
return res
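# Example usage (hypothetical EID):
# ab = overloaded_abstract_retrieval(identifier='2-s2.0-85000000000', view='FULL', refresh=True, id_type='eid')
# if ab is not None: print(ab.title)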
def make_doi_list_from_csv(source_path, output_path, do_return=True):
# this function returns a list of DOIs from a source scopus frontend file
# in: source_path: a full path ending with .csv which contains a csv which has a column 'DOI'
# output_path: a full path ending with .csv which will be where the result is returned as csv
# out: a csv is generated and saved, and is returned as dataframe as well
#
df = pd.read_csv(source_path)
df[~df.DOI.isnull()].DOI.to_csv(output_path, header=False)
if do_return:
return df[~df.DOI.isnull()].DOI
else:
return None
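# Example usage (hypothetical paths):
# dois = make_doi_list_from_csv('scopus_frontend_export.csv', 'doi_list.csv', do_return=True)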
def filter_on_uni(df_in, affiliation_column, cur_uni, affiliation_dict_basic):
"""" returns the dataframe filtered on the chosen university
in: df with column 'Scopus affiliation IDs' with list of affiliation ids in scopus style
cur_uni: a university name appearing in the dictionary affiliation_dict_basic
affiliation_dict_basic: a dictionary with keys unis and values affiliation ids
out: df filtered over rows
"""
# now the return has all info per university
# ! scival may change their delimiters here, so please check once a while if it works as intended
# put an extra check here to be safe
return df_in[df_in.apply(lambda x: not (set(x[affiliation_column].split('| '))
.isdisjoint(set(affiliation_dict_basic[cur_uni]))), axis=1)]
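# Example usage (hypothetical affiliation ids):
# affiliation_dict_basic = {'VU': ['60008734', '60029124']}
# df_vu = filter_on_uni(df, 'Scopus affiliation IDs', 'VU', affiliation_dict_basic)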
def add_year_and_month_old(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: x[5:7])
df_in['month_since_2018'] = df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.astype('int').apply(lambda x: calendar.month_name[x])
return df_in
def add_year_and_month(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: None if x is None else x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: None if x is None else x[5:7])
df_in['month_since_2018'] = df_in.apply(lambda x: None if x.month is None else int(x.month) + (int(x.year)-2018)*12, axis=1)
#df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.apply(lambda x: None if x is None else calendar.month_name[int(x)])
return df_in
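# Example usage (assuming a 'coverDate' column formatted as 'YYYY-MM-DD'):
# df = add_year_and_month(df, 'coverDate')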
def add_pure_year(df_in, date_col='Current publication status > Date'):
"""" adds one columns to a dataframe: a 'pure_year' based on pure info.
The input must fit the PURE form as 'Anything+YY'
We assume the year is after 2000! there are no checks for this
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start][anything]YYYY[end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
if date_col is None:
df_in['pure_year'] = np.nan
else:
df_in['pure_year'] = df_in[date_col].apply(lambda x: float('20' + x[-2:]))
return df_in
def get_scopus_abstract_info(paper_eid):
"""
Returns the users df_in with extra columns with scopus abstract info per row or with diagnostics
:param df_in: must have doi and eid
:return:
"""
# init
no_author_group = True # we want this too
error = False
ab = None
error_message = 'no error'
if paper_eid == None:
# paper_without eid
error_message = 'paper eid is none'
error = True
else:
try:
ab = overloaded_abstract_retrieval(identifier=paper_eid, view='FULL', refresh=True, id_type='eid')
except:
error = True
error_message = 'abstract api error'
if not(error):
# chk if API errors out on authorgroup call and log it
try:
ab.authorgroup
no_author_group = False
except:
no_author_group = True
##### this belongs in another function, with its own diagnostics + only run ff if this succeeds in topfn
####if not(no_author_group):
#### (bool_got_vu_author, a, b) = find_first_vu_author() # yet to make this
# also if no error, save the result for returns
return {'abstract_object': ab,
'no_author_group_warning': no_author_group,
'abstract_error': error,
'abstract_error_message': error_message}
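# Example usage (hypothetical EID):
# res = get_scopus_abstract_info('2-s2.0-85000000000')
# if not res['abstract_error']: ab = res['abstract_object']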
def split_scopus_subquery_affils(subquery_affils, number_of_splits=4,
subquery_time = ''):
"""
! This function needs testing
This function takes in subquery_affils from make_affiliation_dicts_afids()
and translates it into a list of subqueries to avoid query length limits
in: subquery_affils from make_affiliation_dicts_afids()
number_of_splits: an integer between 2 and 10
subquery_time: an optional query to paste after every subquery
out: a list of subqueries to constrain scopussearch to a subset of affils
during stacking be sure to de-duplicate (recommended on EID)
"""
if (number_of_splits <= 10) & (number_of_splits > 1) & (number_of_splits % 1 == 0):
pass # valid number_of_splits
# you do not have to worry about number_of_splits < #afids because
# in python asking indices range outside indices range yields empty lists
# s.t. stacking them here does nothing
# needs checking though
else:
print('invalid number_of_splits, replacing with 4')
number_of_splits = 4
affil_count = len(subquery_affils.split('OR')) # number of affiliation ids
if affil_count <= 12: # to avoid weird situations
print('affil_count is small, returning single subquery')
my_query_set = subquery_affils + subquery_time
else:
# do it
my_query_set = []
step_size = int(np.floor(affil_count / number_of_splits)+1)
counter = 0
for cur_step in np.arange(0,number_of_splits):
if counter == 0:
cur_subquery = 'OR'.join(subquery_affils.split('OR')[0:step_size]) + ' ) '
elif counter == number_of_splits-1: # this is the last one
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) # + ' ) ) '
else:
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) + ' ) '
# stack results in a list, check if we need extra [] or not !
cur_subquery = cur_subquery + subquery_time
my_query_set.append(cur_subquery)
counter = counter + 1 # useless but OK
#print('-----')
#print(my_query_set)
#print('-----')
return my_query_set
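# Example usage (illustrative): split a long affiliation clause into 4 subqueries
# subqueries = split_scopus_subquery_affils(subquery_affils, number_of_splits=4,
#                                           subquery_time=' AND PUBYEAR > 2017 AND PUBYEAR < 2020')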
def get_first_chosen_affiliation_author(ab, chosen_affid):
"""
:param ab:
:return:
"""
# init
first_vu_author = None
cur_org = None
has_error = False
first_vu_author_position = None # care reverse!!! you need a length here or extra unreverse
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
1
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
cur_org = author.organization
if author.given_name == None:
author_given_name = '?'
else:
author_given_name = author.given_name
if author.surname == None:
author_surname = '?'
else:
author_surname = author.surname
first_vu_author = author_given_name + ' ' + author_surname
except:
has_error = True
return {'first_affil_author': first_vu_author,
'first_affil_author_org': cur_org,
'first_affil_author_has_error': has_error}
def get_count_of_chosen_affiliation_authors(ab, chosen_affid):
"""
:param ab:
:return:
"""
# init
author_count_valid = False
author_count = 0
has_error = False
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
1
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
# then we have a vu-author. Count and continue
# notice there is no safety net if an author appears multiple times for some reason
author_count = author_count + 1
author_count_valid = True
except:
has_error = True
# then the author_count_valid remains False
return {'affil_author_count': author_count,
'affil_author_count_valid': author_count_valid,
'affil_author_count_has_error': has_error}
# upw start
## 1st at bottom
## 2nd
# remember, these are not for general purpose, but specific decorators for api-harvester-type functions crystal_()
def check_id_validity(func):
# first layer is a pass right now and that is OK
def decorator_check_id_validity(func):
@functools.wraps(func)
def wrapper_check_id_validity(cur_id, my_requests):
#
# pre-process
valid_doi_probably = False
if cur_id is not None:
if pd.notnull(cur_id):
if cur_id != 'nan':
try:
cur_id = cur_id.lower()
valid_doi_probably = True
except:
try:
cur_id = str(cur_id).lower() # not sure but OK
valid_doi_probably = True # stay on safe side then and loose tiny bit of performance
except:
# then give up
print('warning: failed to str(cur_doi).lower()')
if not valid_doi_probably:
# chance cur_id s.t. the crystal function can skip the checks and directly insert invalid-id-result
cur_id = 'invalid' # the only change
# end of pre-process
#
# run the core function
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# no post-process
#
return r, relevant_keys, cur_id_lower, prepend, id_type
return wrapper_check_id_validity
return decorator_check_id_validity(func)
#############################################add_deal_info
## 3rd
def check_errors_and_parse_outputs(func):
# first layer is a pass right now and that is OK
def decorator_check_errors_and_parse_outputs(func):
@functools.wraps(func)
def wrapper_check_errors_and_parse_outputs(cur_id, my_requests=requests): # !!!!
#
# pre-processing
#
#
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# post-processing
#
# init a dict and fill with right keys and zeros
dict_init = {} # values are filled with None as starting point
for key in relevant_keys:
dict_init[prepend + key] = None # really init empty and stays empty if error
dict_init[prepend + id_type] = None # can only be data['doi'] (!) # legacy
dict_init[prepend + id_type + '_lowercase'] = cur_id_lower
dict_init['own_' + id_type + '_lowercase'] = cur_id_lower
dict_init['orig_' + id_type] = cur_id # legacy
#
dict_to_add = dict_init
# ! somehow need to recognize doi_lowercase too...
#
try:
if 'error' in r.json().keys():
# the following code has been checked to work as intended
has_error = True
error_message = r.json()['message']
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
else:
# case: no error
#print(r)
#print(r.json())
has_error = False
error_message = 'no error'
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
# get data
try:
data = r.json()['results'][0]
except:
data = r.json()
# overwrite dict_to_add with data
for key in relevant_keys:
try:
dict_to_add[prepend + key] = data[key] # even upw_doi goes automatically : )
except KeyError:
dict_to_add[prepend + key] = None # if the key is not there, the result is None
dict_to_add[prepend + id_type] = cur_id # fix
except:
has_error = True
error_message = "error in r.json() or deeper"
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
return pd.Series(dict_to_add) # r, relevant_keys # different output # output has been changed
return wrapper_check_errors_and_parse_outputs
return decorator_check_errors_and_parse_outputs(func)
#############################################
## 4th
def faster(func):
# makes stuff for lists of ids and enables multi-threading and persistent sessions : ) amazing
# first layer is a pass right now and that is OK
def decorator_iterate_list(func):
@functools.wraps(func)
def wrapper_iterate_list(doi_list, silent=True, multi_thread=True, my_requests=None, allow_session_creation=True):
""" returns unpaywall info for a given doi list, includes result success/failure and diagnostics
:param doi_list: doi list as a list of strings, re-computes if doi are duplicate
does not de-dupe or dropna for generality, but you can do doi_list = df_in.doi.dropna().unique()
if you so desire
silent: whether you want silent behaviour or not, defaults to printing nothing
multi_thread: whether you want to multi_thread unpaywall (code has been tested), on by default
you do not have to worry about worker counts, a default law is integrated for that
my_requests: by default None, but can be exchanged for a requests-session on demand
with default, called functions will themselves enter 'requests' to reduce communication costs
allow_session_creation: if my_requests=None, this allows the fn to make its own session
:return: subset of unpaywall columns info + diagnostics as a pandas DataFrame, vertically doi's in lowercase-form.
duplicate doi's in the list are ignored, and the output has 1 row per unique DOI
Notice: this should be the only function to call fn_get_upw_info for more than 1 DOI (for developers)
, s.t. the multi-threading code can be here without duplicate code
"""
# all processing
# empty dataframe
df_unpaywall = pd.DataFrame()
if multi_thread: # valid across session used or not
max_num_workers = static.MAX_NUM_WORKERS
num_workers = np.max(
[1, int(np.floor(np.min([max_num_workers, np.floor(float(len(doi_list)) / 4.0)])))])
if (my_requests is None) & (allow_session_creation is True) & (len(doi_list) >= 20):
# then optionally make your own session # + avoid overhead for small jobs
# perform with a session
with requests.Session() as sessionA:
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=sessionA) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print(
'unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=sessionA)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
else:
# perform without a session
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=my_requests) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print('unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=my_requests)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
# either way, return the result
return df_unpaywall
return wrapper_iterate_list
return decorator_iterate_list(func)
## 5th
def appender(func, cur_id_name='doi'):
"""
Returns the given dataframe with extra columns with unpaywall info and result success/failure and diagnostics
Merging is done with lower-cased DOI's to avoid duplicate issues. The DOI name is case-insensitive
:param df_in: df_in as a pandas dataframe, must have a column named 'doi' with doi's as string
:return: pandas dataframe with extra columns with subset of unpaywall info and result success/failure and diagnostic
all new doi info is lowercase
"""
def decorator_appender(func):
@functools.wraps(func)
def wrapper_appender(df_in, silent=True, cut_dupes=False, avoid_double_work=True,
multi_thread=True, my_requests=None, allow_session_creation=True):
if cur_id_name == 'eid':
print('warning: scopus abstract accelerator has not been validated yet !')
# make doi_list
if avoid_double_work:
doi_list = df_in.drop_duplicates(cur_id_name)[cur_id_name].to_list() # notice no dropna to keep functionality the same
# also no lower-dropna for simplicity
else:
doi_list = df_in[cur_id_name].to_list()
if cut_dupes:
print('deprecated code running')
# I think it should yield exactly the same result, but needs testing that is all
# overwrites
doi_list = df_in[cur_id_name].dropna().unique()
# get unpaywall info
df_unpaywall = func(doi_list, silent, multi_thread, my_requests, allow_session_creation)
# merge to add columns
# prepare doi_lower
df_in.loc[:, 'id_lowercase'] = df_in[cur_id_name].str.lower()
df_merged = df_in.merge(df_unpaywall.drop_duplicates('own_' + cur_id_name + '_lowercase'),
left_on='id_lowercase', right_on='own_' + cur_id_name + '_lowercase', how='left')
# drop duplicates in df_unpaywall to avoid having duplicates in the result due repeating DOI's or Nones
# assumption: all none returns are the exact same
if not silent:
print('done with add_unpaywall_columns')
return df_merged
return wrapper_appender
return decorator_appender(func)
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# this try making the code 10x slower
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
add_unpaywall_columns = crystal_unpaywall # the final function goes through the new pipe
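# Example usage (df_in must have a 'doi' column; multi-threading is on by default):
# df_out = add_unpaywall_columns(df_in, silent=False)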
# recreate the legacy unpaywall functions for now
#
def legacy_crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# this try making the code 10x slower
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
fn_get_upw_info = check_errors_and_parse_outputs(check_id_validity(legacy_crystal_unpaywall)) # avoid, legacy
fn_get_all_upw_info = faster(fn_get_upw_info) # these are only for legacy and should be avoided
###add_unpaywall_columns = appender(fn_get_all_upw_info) # the final function goes through the new pipe
#
# I do not like this kind of handling as it breaks some functools functionality
# I will refactor legacy code later some time
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_altmetric(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
"""
prepend = 'altmetric_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
# some settings
api_ver = 'v1' # may change in future, so here it is. For api-key re-edit with altmetric package
api_url = "http://api.altmetric.com/%s/" % api_ver
url = api_url + 'doi' + "/" + cur_id
relevant_keys = ['title', 'cited_by_policies_count', 'score'] # OK for now, care some may miss, patch for that !
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_ALTMETRIC, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
r = my_requests.get(url, params={}, headers={})
return r, relevant_keys, cur_id_lower, prepend, id_type
add_altmetric_columns = crystal_altmetric
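# Example usage (same contract as add_unpaywall_columns; added columns are prefixed 'altmetric_'):
# df_out = add_altmetric_columns(df_in)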
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['obje', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
# if the api does not error and we have a title, then the call succeeded and we got info back
#
# then do rest of actions
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
### some returns are caught here as well sadly...
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# you have to validate this code because scopus has weird features going in which mess up data when overloading
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract = appender(func=crystal_scopus_abstract, cur_id_name='eid')
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract2(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
2 only gives abstract_text
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['text', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting the abstract out (outside the API call)
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
# if the api does not error and we have a title, then the call succeeded and we got info back
#
# then do rest of actions
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting the abstract out (outside the API call)
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# you have to validate this code because scopus has weird features going in which mess up data when overloading
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract2 = appender(func=crystal_scopus_abstract2, cur_id_name='eid')
class api_extractor:
"""
DEPRECATED: please stop using this... I will make a new one later, for now updates and patches are stopped
This class is an api extractor: it extracts info across api's.
Has multi-threading :)
Is not an eager operator so ScopusSearch query is only executed when needed and not on initialization
source_list: which sources to use, like unpaywall
query: query to put in scopussearch
Under construction: only does unpaywall data right now to test multi-threading
Also, I need an extra step for scopussearch datacleaning split-off
Double-check that you use the right functions (corresponding_author_functions), e.g. for the unpaywall drop-duplicate step in the merge
Plan for now: add a ScopusSearch bypass and use it to test the speed gain on larger volumes
"""
def __init__(self,
query='TITLE(DATA SCIENCE) AND PUBDATETXT(February 2018)',
source_list=['all'],
max_num_workers=32):
self.source_list = source_list
self.query = query
self.scopus_search_info = None
self.scopus_search_info_ready = False
self.max_num_workers = max_num_workers
def get_scopus_search_info(self, cur_query):
"""
Gets the scopus search info and return it as dataframe of obj.results
Not yet handling errors of API...
"""
use_sleep_and_retry = True
if use_sleep_and_retry:
no_res = True
cntr=0
while no_res:
try:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
no_res = False
except:
cntr = cntr + 1
print(str(cntr) + ' ' + cur_query)
time.sleep(1)
else:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
return res
def feed_scopus_search_info(self, df_in, do_return=False, do_overwrite=False):
"""
This methods allows you to directly feed in a dataframe with scopussearch info,
of the form pandas.DataFrame(ScopusSearch().results)
"""
if (self.scopus_search_info_ready is False) | do_overwrite is True:
self.scopus_search_info = df_in
self.scopus_search_info_ready = True
if do_return:
return self.scopus_search_info
else:
print('scopus search info not updated because info was already entered and do_overwrite was provided False')
def extract(self, use_multi_thread=True, skip_scopus_search=False, skip_unpaywall=False,
use_parallel_apis=False):
"""
extract all chosen info
"""
# the functions like get_scopus_search_info and fn_get_upw_info,
# should always be single-thread in themselves,
# and we make them multi-thread outside of their own functions
#
# !!! we can further speed up by requesting from api providers in parallel
# that way we can further avoid api rate limits
# for this we need advanced functionality
# after writing the code, turn the default use_parallel_apis to True
#
#
# always redo scopus-search unless explicitly asked skip_scopus_search
# init
if not(self.scopus_search_info is None):
df_temp = self.scopus_search_info.copy()
doi_list = df_temp[~df_temp.DOI.isnull()].DOI.drop_duplicates().to_list()
#
# doi list issue happens here and in getupwdata line 161: search to_list, and doi/DOI difference
# here: add fn (read jupyter)
df_upw = pd.DataFrame()
df_ss = pd.DataFrame()
if use_multi_thread:
#ss
if skip_scopus_search is False:
# !!! please thoroughly test this
print('untested functionality called: multithread scopus search: careful!') # see fast_scopus_search_test.py for dev!
my_query = self.query # use own query
mini_queries = split_query_to_months(my_query)
count_queries = len(mini_queries)
# num_workers law: PLEASE TEST IT for optimum point or not
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(count_queries)/4.0)])))])
#
multi_result = multithreading(self.get_scopus_search_info, mini_queries, num_workers)
for cur_series in multi_result:
# we are appending dataframes, not series
df_ss = df_ss.append(cur_series, ignore_index=True)
###doi_list = df_ss.doi # check this !
## This is the point where parallel-api functionality should start(!)
if use_parallel_apis:
1
# please first make the apis work in single_thread
# then in regular multi-thread
# and finally in parallel_apis_multi_thread.
# 1. set sources using the skip_ arguments
# 2. choose max_workers using not on #dois but #dois*doi-apis + #eids*eid-apis
# 3. make a list with 1 element per job, including all details like
# [ [doi_1,'unpaywall'], [doi_1,'unpaywall'], [eid_1,'scival']. ...]
# 4. push that into multi-threading, but use a different function
# use the function I started below named get_parallel_api_info()
# this function picks up the source in element2 in a list element and
# directs to the right api function
# this makes the code superclean to support all forms of threading
# while keeping full functionality
# also, it needs to add a column with 'source' for differentiation
# 5. follow the unpaywall code below and append and done
# 6. for proper testing, split by source column back into df_upw/etc/etc
# and give the serial_api routine also a combined df for comparability
# 7. do extensive testing
# 8. do timing: how large is the speed gain quantitatively?
# this is probably best to test on high-end of very-high-end machines
# because we need to hit the api rate limits with serial_apis to see an effect
else:
#upw
if skip_unpaywall is False:
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(len(doi_list))/4.0)])))])
multi_result = multithreading(fn_get_upw_info, doi_list, num_workers)
for cur_series in multi_result:
df_upw = df_upw.append(cur_series, ignore_index=True)
#if ~skip_scival:
# 1
else:
# single-thread
# ss
if skip_scopus_search is False:
# query fed separately btw
# 2 lines for clarity for now
scopus_search_results = self.get_scopus_search_info(self.query) # care
self.feed_scopus_search_info(scopus_search_results) # store in properties
df_ss = scopus_search_results # combining results is trivial for single-thread
###doi_list = df_ss.doi # check this !
# upw
if skip_unpaywall is False:
for cur_doi in doi_list:
series_to_add = fn_get_upw_info(cur_doi)
df_upw = df_upw.append(series_to_add, ignore_index=True)
# scopussearch: the save and .self are issue for multithread, incl
# overwrite of results properties
# you need to fix that
# also, the num_workers law: you need to decide that differently too
# you prolly have 1 - 120 months, and 1 workers does 1 month a time
# so you need like #months/3 or a comparable version of the law below
return df_upw, df_ss # ! merge or combine or store properly later
def get_parallel_api_info(self, cur_id, source):
# please check if the multi-threader unpacks list elements, if so use 1 argument
# and unpack within the function to id/source
# to distinguish later, add the source as a column (is per DOI/EID)
source_dict = {'api_source' : source }
if source == 'unpaywall':
series_to_add = fn_get_upw_info(cur_id) # cur_id:cur_doi here
if source == 'scival':
1
series_to_add = series_to_add.append(pd.Series(source_dict))
return series_to_add
def change_max_num_workers(self, max_num_workers):
self.max_num_workers = max_num_workers
def split_query_to_months(query, silent=False):
"""
warning: did not pass testing, some data records may not be retrieved
This function splits a ScopusSearch query into multiple ones
It takes a query with year indication, and plits it to 1 query per month
This in turn allows the multi-threading functions of this import framework
to reduce the computation time
Otherwise, you will wait a very long serverside wait time and then get a
lot of data at once with massive download times and possibly more failures
input: a valid ScopusSearch query string which ends with exactly:
PUBYEAR > XXXX AND PUBYEAR < YYYY
with no other appearance of PUBYEAR text
and there is at least one valid year
Also, there should not be any month specification, only complete years
And incomplete years are not allowed (current year at time of call)
Also, the pubyear clauses should be extra clauses with ands at top level
please respect this format as the regex functionality is not perfect
advanced: the month january is also split up, because it generally is twice as large
as the other months
"""
# this code can be improved with regex
# extract years
final_year = str(int(query.split('PUBYEAR < ')[1]) - 1)
first_year = str(int(query.split('PUBYEAR > ')[1][0:4]) + 1)
rest_of_query = query.split('PUBYEAR > ')[0] # probably ending with ' AND'
# make year list
years = np.arange(int(first_year), int(final_year)+1)
# define month abbreviations (can split out later)
#calendar.month_name[ value between 1 and 12]
# example: PUBDATETXT(February 2018)
query_parts = []
for year in years:
for month_number in np.arange(1,12+1):
if month_number == 1:
# january is split again in two by open access y/n
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(1)')
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(0)')
else:
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')')
# careful with using ints and strs together
if not silent:
print('query has been split up in ' + str(len(query_parts)) + ' queries for multi-threading')
return query_parts
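# Hedged usage example (the query string and worker function are illustrative,
# not taken from this codebase):
#   q = 'TITLE-ABS-KEY(machine learning) AND PUBYEAR > 2017 AND PUBYEAR < 2020'
#   parts = split_query_to_months(q)
#   # 2 years * (11 regular months + 2 january/open-access splits) = 26 queries
#   results = multithreading(fn_get_scopus_search_info, parts, workers=8)  # hypothetical worker fn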
def multithreading(func, args,
workers):
with ThreadPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
def multithreading_starmap(func, args,
workers):
# concurrent.futures executors do not provide starmap; unpack each argument tuple manually
with ThreadPoolExecutor(workers) as ex:
res = ex.map(lambda p: func(*p), args)
return list(res)
def multiprocessing(func, args,
workers):
with ProcessPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
def my_timestamp():
# return a string with the current time info
now = datetime.now()
return '_'.join(['', str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute), str(now.second)])
def add_deal_info(path_deals, path_isn, df_b):
"""
This function adds columns with deal information to your dataframe
:param path_deals: path to csv with deals, must have columns: 'ISN':'deal_ISN',
'Titel':'deal_journal_title',
'Deal naam':'deal_name',
'Deal korting':'deal_discount',
'Deal type':'deal_owner',
'Deal bijgewerkt':'deal_modified',
'ISSN':'deal_ISSN'
:param path_isn: path to csv with table from isn to issn numbers, must have columns ISN and ISSN as translation,
:param df_b: dataframe with at least the columns: issn, eIssn, upw_oa_color
The parameters should not have any columns matching the names of columns the function is trying to add
:return: your input dataframe df_b with extra columns
"""
# load in data from apc deals and isn-issn translation table
# apc deals
df_d_base = pd.read_csv(path_deals)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ========================================================================
# the metrics for docking power (AUC, Rp, success rate), just for ML-based models
# ========================================================================
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import os, sys, csv
import argparse
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
import multiprocessing
from multiprocessing import Manager
from sklearn.utils import resample
def get_auc(y_test, y_pred, pos_label=1):
## if a larger prediction value indicates a better (positive) pose, use pos_label=1; otherwise use pos_label=0
if np.all(y_test == 1) or np.all(y_test == 0):
return np.nan
else:
fpr, tpr, thresholds = roc_curve(y_test, y_pred, pos_label=pos_label)
myauc = auc(fpr, tpr)
return myauc
def calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=2.0, topn=1):
'''calculate the success rate'''
total_mol = len(df_groupby)
if pos_label == 0:
success_mol = df_groupby.apply(lambda x: 1 if x.rmsd.loc[x.nsmallest(topn,score_name).index].min() <= rmsd_cut else 0).sum()
else:
success_mol = df_groupby.apply(lambda x: 1 if x.rmsd.loc[x.nlargest(topn,score_name).index].min() <= rmsd_cut else 0).sum()
return success_mol/total_mol
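# Worked example (hypothetical numbers): for one pdb_id with 20 poses, topn=1 and
# rmsd_cut=2.0, the best-scored pose is selected (nsmallest when a lower score is
# better, i.e. pos_label=0; nlargest otherwise) and the target counts as a success
# only if that pose has rmsd <= 2.0 A. The rate is #successful targets / #targets.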
def obtain_metircs(df_test, i, return_dict, myresample=True, score_name='pred_value', pos_label=1):
if myresample:
df_test = resample(df_test, random_state=i, replace=True)
inter_auc = get_auc(df_test.label, df_test[score_name], pos_label)
if pos_label ==0:
inter_rank_score = df_test[[score_name, 'rmsd']].corr('spearman').iloc[0,1]
else:
inter_rank_score = -df_test[[score_name, 'rmsd']].corr('spearman').iloc[0,1]
df_out = pd.DataFrame(df_test.pdb_id.drop_duplicates(keep='first'))
df_out.index = df_out.pdb_id
df_groupby = df_test.groupby(by='pdb_id')
df_out['intra_auc'] = df_groupby.apply(lambda x: get_auc(x.label, x[score_name], pos_label))
df_out['intra_rank_score'] = df_groupby.apply(lambda x: x[[score_name, 'rmsd']].corr('spearman').iloc[0,1])
if pos_label == 1:
df_out['intra_rank_score'] = -df_out['intra_rank_score']
del df_out['pdb_id']
#df_out.to_csv('%s_intra_target_%s.csv'%(score_name, i))
intra_auc = df_out.intra_auc.mean()
intra_rank_score = df_out.intra_rank_score.mean()
sr_20_top1 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=2.0, topn=1)
sr_20_top3 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=2.0, topn=3)
sr_20_top5 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=2.0, topn=5)
sr_20_top20 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=2.0, topn=20)
sr_10_top1 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=1.0, topn=1)
sr_10_top3 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=1.0, topn=3)
sr_10_top5 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=1.0, topn=5)
sr_10_top20 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=1.0, topn=20)
sr_05_top1 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=0.5, topn=1)
sr_05_top3 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=0.5, topn=3)
sr_05_top5 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=0.5, topn=5)
sr_05_top20 = calc_success_rate2(df_groupby, score_name, pos_label, rmsd_cut=0.5, topn=20)
return_dict[i] = [i, inter_auc, inter_rank_score,
intra_auc, intra_rank_score,
sr_20_top1, sr_20_top3, sr_20_top5, sr_20_top20,
sr_10_top1, sr_10_top3, sr_10_top5, sr_10_top20,
sr_05_top1, sr_05_top3, sr_05_top5, sr_05_top20]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-input_file','--input_file', required=True, ##xgb_out.csv
help='Input data file (.csv), (.bz2 or .zip are supported).')
parser.add_argument('-ref_file','--ref_file', default='/home/shenchao/AI_pose_filter/cross-dock_refined/resample/features/features2/rmsd_statistics.csv',
help='Input reference data file (.csv), (.bz2 or .zip are supported).')
parser.add_argument('-core_file','--core_file', default=None) ##'/home/shenchao/AI_pose_filter/cross-dock_refined/resample/features/features2/core_pdbid.txt'
#parser.add_argument('-bad_file','--bad_file', default=None) ##/home/shenchao/AI_pose_filter/cross-dock_refined/resample/CASF/casf_crossdock/bad_combinations.txt
parser.add_argument('-o', '--out_file', default='statistics_resample.csv',
help='the output file. (default: statistics_resample.csv)')
parser.add_argument('-remain_crystalposes', '--remain_crystalposes', action='store_true', default=False,
help='whether to remain the crystalized poses.')
parser.add_argument('-t', '--type', default='core', choices=['core','casf','crosscore-all','crosscore-redock','crosscore-cross'],
help='the type. (default: core)')
parser.add_argument('-d', '--data_name', default='nnscore',
choices = ['nnscore','nnscore-vina','vina','nnscore+rank','e3fp','elem','ecif','ecif+vina','ecif+vina+rank'],
help='the name of the data utilized. (default: NNscore)')
parser.add_argument('-resample', '--resample', action='store_true', default=False,
help='whether to conduct the resampling.')
parser.add_argument('-i', '--i', default=10000, type=int,
help='The reample times.')
args = parser.parse_args()
i_list = [int(x) for x in np.linspace(0,100000,args.i)]
if not args.resample:
i_list = [0]
df_ref = pd.read_csv(args.ref_file, header=0, index_col=0)
df = pd.read_csv(args.input_file, header=0, index_col=0)
df = pd.concat([df, df_ref.loc[df.index][['rmsd']]], axis=1)
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes
def test_can_cast_safely_same_kind():
# 'i' -> 'i'
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="int64")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**31], dtype="int64")._column
assert not data.can_cast_safely(to_dtype)
# 'u' -> 'u'
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("uint64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint64")._column
to_dtype = np.dtype("uint32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**33], dtype="uint64")._column
assert not data.can_cast_safely(to_dtype)
# 'f' -> 'f'
data = cudf.Series([np.inf, 1.0], dtype="float64")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series(
[np.finfo("float32").max * 2, 1.0], dtype="float64"
)._column
to_dtype = np.dtype("float32")
assert not data.can_cast_safely(to_dtype)
def test_can_cast_safely_mixed_kind():
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
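# (float32 has a 24-bit significand, so 2**24 + 1 rounds to 2**24 and the cast is lossy)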
data = cudf.Series([1, 2, 2**24 + 1], dtype="int32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="uint32")._column
assert not data.can_cast_safely(to_dtype)
to_dtype = np.dtype("float64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1.0, 2.0, 3.0], dtype="float32")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
# not integer float
data = cudf.Series([1.0, 2.0, 3.5], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([10.0, 11.0, 2000.0], dtype="float64")._column
assert data.can_cast_safely(to_dtype)
# float out of int range
data = cudf.Series([1.0, 2.0, 1.0 * (2**31)], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
# negative signed integers casting to unsigned integers
data = cudf.Series([-1, 0, 1], dtype="int32")._column
to_dtype = np.dtype("uint32")
assert not data.can_cast_safely(to_dtype)
def test_to_pandas_nullable_integer():
gsr_not_null = cudf.Series([1, 2, 3])
gsr_has_null = cudf.Series([1, 2, None])
psr_not_null = pd.Series([1, 2, 3], dtype="int64")
psr_has_null = pd.Series([1, 2, None], dtype="Int64")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_to_pandas_nullable_bool():
gsr_not_null = cudf.Series([True, False, True])
gsr_has_null = cudf.Series([True, False, None])
psr_not_null = pd.Series([True, False, True], dtype="bool")
psr_has_null = pd.Series([True, False, None], dtype="boolean")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_can_cast_safely_has_nulls():
data = cudf.Series([1, 2, 3, None], dtype="float32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3.1, None], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3],
(1.0, 2.0, 3.0),
[float("nan"), None],
np.array([1, 2.0, -3, float("nan")]),
pd.Series(["123", "2.0"]),
pd.Series(["1.0", "2.", "-.3", "1e6"]),
pd.Series(
["1", "2", "3"],
dtype=pd.CategoricalDtype(categories=["1", "2", "3"]),
),
pd.Series(
["1.0", "2.0", "3.0"],
dtype=pd.CategoricalDtype(categories=["1.0", "2.0", "3.0"]),
),
# Categories with nulls
pd.Series([1, 2, 3], dtype=pd.CategoricalDtype(categories=[1, 2])),
pd.Series(
[5.0, 6.0], dtype=pd.CategoricalDtype(categories=[5.0, 6.0])
),
pd.Series(
["2020-08-01 08:00:00", "1960-08-01 08:00:00"],
dtype=np.dtype("<M8[ns]"),
),
pd.Series(
[pd.Timedelta(days=1, seconds=1), pd.Timedelta("-3 seconds 4ms")],
dtype=np.dtype("<m8[ns]"),
),
[
"inf",
"-inf",
"+inf",
"infinity",
"-infinity",
"+infinity",
"inFInity",
],
],
)
def test_to_numeric_basic_1d(data):
expected = pd.to_numeric(data)
got = cudf.to_numeric(data)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1, 2**11],
[1, 2**33],
[1, 2**63],
[np.iinfo(np.int64).max, np.iinfo(np.int64).min],
],
)
@pytest.mark.parametrize(
"downcast", ["integer", "signed", "unsigned", "float"]
)
def test_to_numeric_downcast_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**11],
[-1.0, -(2.0**11)],
[1.0, 2.0**33],
[-1.0, -(2.0**33)],
[1.0, 2.0**65],
[-1.0, -(2.0**65)],
[1.0, float("inf")],
[1.0, float("-inf")],
[1.0, float("nan")],
[1.0, 2.0, 3.0, 4.0],
[1.0, 1.5, 2.6, 3.4],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["signed", "integer", "unsigned"])
def test_to_numeric_downcast_large_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["float"])
def test_to_numeric_downcast_large_float_pd_bug(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
# Pandas bug: https://github.com/pandas-dev/pandas/issues/19729
with pytest.raises(AssertionError, match="Series are different"):
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
["1", "2", "3"],
[str(np.iinfo(np.int64).max), str(np.iinfo(np.int64).min)],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[""], # pure empty strings
["10.0", "11.0", "2e3"],
["1.0", "2e3"],
["1", "10", "1.0", "2e3"], # int-float mixed
["1", "10", "1.0", "2e3", "2e+3", "2e-3"],
["1", "10", "1.0", "2e3", "", ""], # mixed empty strings
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
if downcast in {"signed", "integer", "unsigned"}:
with pytest.warns(
UserWarning,
match="Downcasting from float to int "
"will be limited by float32 precision.",
):
got = cudf.to_numeric(gs, downcast=downcast)
else:
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
["2e128", "-2e128"],
[
"1.79769313486231e308",
"-1.79769313486231e308",
], # 2 digits relaxed from np.finfo(np.float64).min/max
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_large_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
if downcast == "float":
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
# Pandas bug: https://github.com/pandas-dev/pandas/issues/19729
with pytest.raises(AssertionError, match="Series are different"):
assert_eq(expected, got)
else:
expected = pd.Series([np.inf, -np.inf])
with pytest.warns(
UserWarning,
match="Downcasting from float to int "
"will be limited by float32 precision.",
):
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
pd.Series(["1", "a", "3"]),
pd.Series(["1", "a", "3", ""]), # mix of unconvertible and empty str
],
)
@pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"])
def test_to_numeric_error(data, errors):
if errors == "raise":
with pytest.raises(
ValueError, match="Unable to convert some strings to numerics."
):
cudf.to_numeric(data, errors=errors)
else:
expect = pd.to_numeric(data, errors=errors)
got = cudf.to_numeric(data, errors=errors)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("input_obj", [[1, cudf.NA, 3]])
def test_series_construction_with_nulls(dtype, input_obj):
dtype = cudf.dtype(dtype)
# numpy case
expect = pd.Series(input_obj, dtype=np_dtypes_to_pandas_dtypes[dtype])
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
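# Illustrative sketch (assumed toy values) of what the melt produces for one
# 5-gram "coffee shop near the park" matching pattern N N _ _ _:
#   modifier='coffee', head='shop', w1='near', w2='the', w3='park'
#   compound_df gets one row per context word (near/the/park) with the
#   (modifier, head, year, count) columns repeated, then is filtered to
#   context words that appear in the loaded context vocabulary.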
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def right_side_parser(df): # _ _ _ N N
cur_df=df.copy()
try:
cur_df[['w1','w2','w3','modifier','head']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def syntactic_reducer(df):
pattern=df.iloc[0].comp_class
if pattern==1: # N N _ _ N N
compound_left_df,modifier_left_df,head_left_df=left_side_parser(df)
compound_right_df,modifier_right_df,head_right_df=right_side_parser(df)
final_compound_df=pd.concat([compound_left_df,compound_right_df],ignore_index=True)
final_modifier_df=pd.concat([modifier_left_df,modifier_right_df],ignore_index=True)
final_head_df=pd.concat([head_left_df,head_right_df],ignore_index=True)
elif pattern==2: # N N _ _ _
final_compound_df,final_modifier_df,final_head_df=left_side_parser(df)
elif pattern==3: # _ N N _ _
final_compound_df,final_modifier_df,final_head_df=mid1_parser(df)
elif pattern==4: # _ _ N N _
final_compound_df,final_modifier_df,final_head_df=mid2_parser(df)
elif pattern==5: # _ _ _ N N
final_compound_df,final_modifier_df,final_head_df=right_side_parser(df)
return final_compound_df,final_modifier_df,final_head_df
def compound_extracter(df):
if df.loc[df.comp_class==1].shape[0]!=0:
sides_comp_df,sides_mod_df,sides_head_df=syntactic_reducer(df.loc[df.comp_class==1])
else:
sides_comp_df=pd.DataFrame()
sides_mod_df=pd.DataFrame()
sides_head_df=pd.DataFrame()
if df.loc[df.comp_class==2].shape[0]!=0:
left_comp_df,left_mod_df,left_head_df=syntactic_reducer(df.loc[df.comp_class==2])
else:
left_comp_df=pd.DataFrame()
left_mod_df=pd.DataFrame()
left_head_df=pd.DataFrame()
if df.loc[df.comp_class==3].shape[0]!=0:
mid1_comp_df,mid1_mod_df,mid1_head_df=syntactic_reducer(df.loc[df.comp_class==3])
else:
mid1_comp_df=pd.DataFrame()
from __future__ import print_function, division
from warnings import warn
from nilmtk.disaggregate import Disaggregator
from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
import os
import pickle
import pandas as pd
import numpy as np
from collections import OrderedDict
from keras.optimizers import SGD
from keras.models import Sequential, load_model
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint
import keras.backend as K
import random
import sys
import json
from .util import NumpyEncoder
random.seed(10)
np.random.seed(10)
class SequenceLengthError(Exception):
pass
class ApplianceNotFoundError(Exception):
pass
class DL_disagregator(Disaggregator):
def __init__(self, params):
"""
Parameters to be specified for the model
"""
self.MODEL_NAME = " "
self.models = OrderedDict()
self.sequence_length = params.get('sequence_length',99)
self.n_epochs = params.get('n_epochs', 50 )
self.batch_size = params.get('batch_size',1024)
self.mains_mean = params.get('mains_mean',1800)
self.mains_std = params.get('mains_std',600)
self.appliance_params = params.get('appliance_params',{})
self.save_model_path = params.get('save-model-path', None)
self.load_model_path = params.get('pretrained-model-path',None)
self.models = OrderedDict()
if self.load_model_path:
self.load_model()
if self.sequence_length%2==0:
print ("Sequence length should be odd!")
raise (SequenceLengthError)
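# A hedged example of the params dict this class expects (values shown are the
# illustrative defaults above, not prescriptive):
#   params = {'sequence_length': 99, 'n_epochs': 50, 'batch_size': 1024,
#             'mains_mean': 1800, 'mains_std': 600, 'appliance_params': {},
#             'save-model-path': None, 'pretrained-model-path': None}
#   disaggregator = DL_disagregator(params)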
def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
**load_kwargs):
# If no appliance-wise parameters are provided, then compute them using the first chunk
if len(self.appliance_params) == 0:
self.set_appliance_params(train_appliances)
print("...............Seq2Point partial_fit running...............")
# Do the pre-processing, such as windowing and normalizing
if do_preprocessing:
train_main, train_appliances = self.call_preprocessing(
train_main, train_appliances, 'train') #480374,1 -> 480374,99, 480374,1 -> 480374,1
train_main = pd.concat(train_main,axis=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Collect features per gene for an input gtf
"""
import pybedtools as pbt
import numpy as np
import pandas as pd
from pysam import faidx
import csv
from athena.mutrate import add_local_track
import argparse
from sys import stdout
from os import path
import subprocess
import gzip
def process_gtf(gtf_in):
"""
Read gtf & filter to minimal info required
"""
gtfbt = pbt.BedTool(gtf_in)
# Build lists of eligible gene names and ensembl IDs
genes, ensg_ids, transcripts = [], [], []
ensg_to_gene, gene_to_ensg = {}, {}
for f in gtfbt:
if f.fields[2] == 'transcript':
gname = f.attrs['gene_name']
ensg_id = f.attrs['gene_id']
tname = f.attrs['transcript_id']
if gname not in genes:
genes.append(gname)
if ensg_id not in ensg_ids:
ensg_ids.append(ensg_id)
if tname not in transcripts:
transcripts.append(tname)
if ensg_id not in ensg_to_gene.keys():
ensg_to_gene[ensg_id] = gname
if gname not in gene_to_ensg.keys():
gene_to_ensg[gname] = ensg_id
# Filter & clean records in gtf
def _filter_gtf(feature):
"""
Restrict GTF features to desired elements
"""
if feature.fields[2] in 'exon transcript'.split() \
and feature.attrs['gene_name'] in genes \
and feature.attrs['transcript_id'] in transcripts:
return True
else:
return False
attrs_to_drop = 'gene_id gene_type gene_status transcript_type ' + \
'transcript_status transcript_name protein_id ' + \
'tag ccdsid havana_gene havana_transcript'
attrs_to_drop = attrs_to_drop.split()
def _clean_feature(feature):
"""
Clean unnecessary fields & info from GTF features
"""
for key in attrs_to_drop:
if key in feature.attrs.keys():
feature.attrs.pop(key)
return feature
gtfbt = gtfbt.filter(_filter_gtf).filter(_clean_feature).saveas()
# Make separate BedTools for exons and transcripts
txbt = gtfbt.filter(lambda x: x.fields[2] == 'transcript').saveas()
exonbt = gtfbt.filter(lambda x: x.fields[2] == 'exon').saveas()
return gtfbt, txbt, exonbt, genes, ensg_ids, transcripts, ensg_to_gene, gene_to_ensg
def load_cens_tels(chrom_stats_bed):
"""
Read BED of centromere & telomere coordinates into dictionary
"""
chrom_stats_bt = pbt.BedTool(chrom_stats_bed)
chrom_stats = {}
for x in chrom_stats_bt:
chrom = x.chrom
if chrom not in chrom_stats.keys():
chrom_stats[chrom] = {}
ftype = x[3]
fbt = pbt.BedTool('\t'.join([x.chrom, str(x.start), str(x.end)]) + '\n',
from_string=True)
chrom_stats[chrom][ftype] = fbt
if ftype == 'centromere':
chrom_stats[chrom]['p_length'] = x.start
if ftype == 'q_telomere':
chrom_stats[chrom]['chrom_length'] = x.end
for chrom in chrom_stats.keys():
chrom_len = chrom_stats[chrom]['chrom_length']
cen_end = chrom_stats[chrom]['centromere'][0].end
chrom_stats[chrom]['q_length'] = chrom_len - cen_end
return chrom_stats
def get_neighbor_dists(tx, txdf):
"""
Get absolute distances for one query transcript and all other transcripts
Dev note: necessary to avoid unexplained bottlenecks on some chromosomes
using pbt.absolute_distance()
"""
txdf_sub = txdf[(txdf['chr'] == int(tx.chrom)) & \
(txdf['end'] != tx.end) & \
(txdf['start'] != tx.start + 1)]
txints = txdf_sub.iloc[:, 1:].to_numpy()
def _get_dist(r1, r2):
"""
Calculate absolute distance between two intervals
Adapted from: https://stackoverflow.com/a/16843530
"""
x, y = sorted((r1, r2))
if x[0] <= x[1] < y[0] and all(y[0] <= y[1] for y in (r1, r2)):
return y[0] - x[1]
return 0
return [_get_dist(list(x), [tx.start + 1, tx.end]) for x in txints]
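# Worked example (toy coordinates): _get_dist([100, 200], [450, 700]) sorts the two
# intervals, sees 200 < 450, and returns 450 - 200 = 250; overlapping intervals
# return 0. get_neighbor_dists therefore yields absolute gaps between the query
# transcript and every other transcript on the same chromosome.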
def get_tx_stats(genes, txbt, max_dist=1000000):
"""
Collect dict of lengths & relations of transcripts
"""
txdf = txbt.to_dataframe().iloc[:, [0, 3, 4]]
txdf.columns = ['chr', 'start', 'end']
tx_stats = {}
for tx in txbt:
gene_name = tx.attrs['gene_name']
txlen = tx.length
txcoords = '\t'.join([tx.chrom, str(tx.start), str(tx.end)]) + '\n'
txstrand = tx.strand
dists = get_neighbor_dists(tx, txdf)
mindist = max([np.nanmin(dists), 1])
n_nearby = len([i for i in dists if i <= max_dist])
if gene_name not in tx_stats.keys():
tx_stats[gene_name] = {'tx_coords' : txcoords,
'tx_len' : [txlen],
'tx_strand' : [txstrand],
'nearest_gene' : [mindist],
'genes_within_1mb' : [n_nearby]}
else:
tx_stats[gene_name]['tx_coords'] \
= tx_stats[gene_name]['tx_coords'] + txcoords
tx_stats[gene_name]['tx_len'].append(txlen)
tx_stats[gene_name]['tx_strand'].append(txstrand)
tx_stats[gene_name]['nearest_gene'].append(mindist)
tx_stats[gene_name]['genes_within_1mb'].append(n_nearby)
for gene in genes:
if gene in tx_stats.keys():
tx_stats[gene]['tx_coords'] = pbt.BedTool(tx_stats[gene]['tx_coords'],
from_string=True)
tx_stats[gene]['tx_len'] = np.nanmedian(tx_stats[gene]['tx_len'])
tx_stats[gene]['nearest_gene'] = np.nanmedian(tx_stats[gene]['nearest_gene'])
tx_stats[gene]['genes_within_1mb'] = np.nanmedian(tx_stats[gene]['genes_within_1mb'])
else:
# gene had no transcript record in the GTF; fill with empty/NaN placeholders
tx_stats[gene] = {'tx_coords' : pbt.BedTool('\n', from_string=True),
'tx_len' : np.nan,
'nearest_gene' : np.nan,
'genes_within_1mb' : np.nan}
return tx_stats
def calc_interval_stats(bedt, default=1):
"""
Calculate basic statistics for a BedTool of intervals
"""
ni = len(bedt)
if ni > 0:
minsize = int(np.nanmin([x.length for x in bedt]))
maxsize = int(np.nanmax([x.length for x in bedt]))
medsize = np.nanmedian([x.length for x in bedt])
sumsize = int(np.nansum([x.length for x in bedt.merge()]))
else:
minsize = default
maxsize = default
medsize = default
sumsize = default
istats = {'n' : ni,
'min_size' : minsize,
'median_size' : medsize,
'max_size' : maxsize,
'summed_size' : sumsize}
return istats
def get_exon_intron_stats(genes, tx_stats, exonbt, min_intron_size):
"""
Collect exon & intron stats
"""
exon_stats = {}
intron_stats = {}
# Get BedTool of exons per gene
for ex in exonbt:
gene_name = ex.attrs['gene_name']
excoords = '\t'.join([ex.chrom, str(ex.start), str(ex.end)]) + '\n'
if gene_name not in exon_stats.keys():
exon_stats[gene_name] = {'exon_coords' : excoords}
else:
exon_stats[gene_name]['exon_coords'] \
= exon_stats[gene_name]['exon_coords'] + excoords
for gene in genes:
if gene in exon_stats.keys():
exon_stats[gene]['exon_coords'] = pbt.BedTool(exon_stats[gene]['exon_coords'],
from_string=True)
else:
exon_stats[gene]['exon_coords'] = pbt.BedTool('\n', from_string=True)
# Get BedTool of introns per gene
# Define introns as non-exon sections of canonical transcript that start
# after the first base of the first exon and end before the last base of the
# last exon, and whose length is >= min_intron_size
for gene in genes:
tx = tx_stats[gene]['tx_coords']
exons = exon_stats[gene]['exon_coords']
ex_min = np.nanmin(exons.to_dataframe()['start'])
ex_max = np.nanmax(exons.to_dataframe()['end'])
introns = tx.subtract(exons)\
.filter(lambda x: x.length >= min_intron_size and \
x.start >= ex_min and \
x.end <= ex_max).saveas()
intron_stats[gene] = {'intron_coords' : introns}
# Compute exon & intron stats per gene
for gene in genes:
exon_stats[gene]['stats'] = calc_interval_stats(exon_stats[gene]['exon_coords'])
intron_stats[gene]['stats'] = calc_interval_stats(intron_stats[gene]['intron_coords'])
return exon_stats, intron_stats
def get_chrom_pos_stats(genes, tx_stats, chrom_stats):
"""
Calculate position of each gene relative to centromere/telomere position and
chromosome sizes
"""
chrom_pos_stats = {}
for gene in genes:
tx_bed = tx_stats[gene]['tx_coords']
chrom = tx_bed[0].chrom
if tx_bed[0].end <= chrom_stats[chrom]['centromere'][0].start:
arm = 'p'
else:
arm = 'q'
arm_len = chrom_stats[chrom]['{}_length'.format(arm)]
cen_dist = tx_bed.closest(chrom_stats[chrom]['centromere'], d=True)[0][-1]
cen_dist = np.abs(int(cen_dist))
tel_dist = tx_bed.closest(chrom_stats[chrom]['_'.join([arm, 'telomere'])], d=True)[0][-1]
tel_dist = np.abs(int(tel_dist))
chrom_pos_stats[gene] = {'cen_dist' : cen_dist,
'cen_dist_norm' : cen_dist / arm_len,
'tel_dist' : tel_dist,
'tel_dist_norm' : tel_dist / arm_len}
return chrom_pos_stats
def calc_gc(chrom, start, end, ref_fasta, cpg=False):
"""
Calculate GC content of an interval
Dev note: ran into tmp file memory issues with pybedtools nucleotide_content()
"""
seq = faidx(ref_fasta, '{}:{}-{}'.format(chrom, str(start), str(end)))\
.replace('\n', '').upper().replace('N', '')
n_gc = seq.count('G') + seq.count('C')
if cpg:
n_cpg = seq.count('CG')
return n_gc / len(seq), n_cpg
else:
return n_gc / len(seq)
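# Hedged usage sketch (illustrative coordinates; assumes an indexed FASTA at ref_fasta):
#   gc = calc_gc('chr17', 43044295, 43125483, ref_fasta)                 # GC fraction only
#   gc, n_cpg = calc_gc('chr17', 43044295, 43125483, ref_fasta, cpg=True)
# N bases are stripped before counting, so gc is relative to the non-N sequence length.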
def get_genomic_features(genes, txbt, exonbt, tx_stats, gnomad_tsv, min_intron_size=4,
chrom_stats=None, ref_fasta=None, athena_tracks=None,
no_scaling=False):
"""
Collect various genomic features per gene
"""
# Exon & intron stats
exon_stats, intron_stats = get_exon_intron_stats(genes, tx_stats, exonbt,
min_intron_size)
# Compile feature headers for output file
header_cols = 'gene_length nearest_gene genes_within_1mb cds_length n_exons ' + \
'min_exon_size med_exon_size max_exon_size min_intron_size ' + \
'med_intron_size max_intron_size'
header_cols = header_cols.split()
# Extract basic genomic features per gene
gfeats_tmp = {}
for gene in genes:
# Scale features unless specified otherwise
if not no_scaling:
gfeats = [np.log10(int(tx_stats[gene].get('tx_len', 'NA'))),
np.log10(int(tx_stats[gene].get('nearest_gene', 'NA'))),
int(tx_stats[gene].get('genes_within_1mb', 'NA')),
np.log10(exon_stats[gene]['stats']['summed_size']),
exon_stats[gene]['stats']['n'],
np.log10(exon_stats[gene]['stats']['min_size']),
np.log10(exon_stats[gene]['stats']['median_size']),
np.log10(exon_stats[gene]['stats']['max_size']),
np.log10(intron_stats[gene]['stats']['min_size']),
np.log10(intron_stats[gene]['stats']['median_size']),
np.log10(intron_stats[gene]['stats']['max_size'])]
else:
gfeats = [int(tx_stats[gene].get('tx_len', 'NA')),
tx_stats[gene].get('nearest_gene', 'NA'),
tx_stats[gene].get('genes_within_1mb', 'NA'),
exon_stats[gene]['stats']['summed_size'],
exon_stats[gene]['stats']['n'],
exon_stats[gene]['stats']['min_size'],
exon_stats[gene]['stats']['median_size'],
exon_stats[gene]['stats']['max_size'],
intron_stats[gene]['stats']['min_size'],
intron_stats[gene]['stats']['median_size'],
intron_stats[gene]['stats']['max_size']]
gfeats_tmp[gene] = gfeats
# Add chromosome positioning stats, if optioned
if chrom_stats is not None:
add_header_cols = 'cen_dist cen_dist_norm tel_dist tel_dist_norm'
header_cols = header_cols + add_header_cols.split()
chrom_pos_stats = get_chrom_pos_stats(genes, tx_stats, chrom_stats)
for gene in genes:
gfeats = gfeats_tmp[gene]
gfeats_tmp[gene] = gfeats + list(chrom_pos_stats[gene].values())
# Add GC content, if optioned
if ref_fasta is not None:
header_cols.append('gc_pct')
for x in txbt:
gene = x.attrs['gene_name']
gfeats_tmp[gene].append(calc_gc(x.chrom, x.start, x.end, ref_fasta))
# Add annotations from specified tracks, if optioned
if athena_tracks is not None:
add_header_cols = []
tx_bed_str = ['\t'.join([x.chrom, str(x.start), str(x.end), x.attrs['gene_name']]) \
for x in txbt]
tx_bed = pbt.BedTool('\n'.join(tx_bed_str), from_string=True)
with open(athena_tracks) as tsv:
reader = csv.reader(tsv, delimiter='\t')
for track, action, tname in reader:
add_header_cols.append(tname)
newanno = {x[3]: float(x[4]) for x in \
add_local_track(tx_bed, track, action, 8, True)}
for gene, val in newanno.items():
gfeats_tmp[gene].append(val)
header_cols = header_cols + add_header_cols
# Parse gnomAD mutation rate stats
if gnomad_tsv is not None:
# Load gnomAD data
gnomad = pd.read_csv(gnomad_tsv, delimiter='\t', compression='gzip')
keep_gnomad_cols = 'gene mu_syn mu_mis mu_lof'
gnomad = gnomad.loc[gnomad.gene.isin(genes), keep_gnomad_cols.split()]
# Fill in missing genes and values with overall means
gnomad_means = gnomad.iloc[:, 1:].apply(np.nanmean).to_dict()
gnomad.fillna(gnomad_means, axis=0, inplace=True)
for gene in genes:
if not any(gnomad.gene == gene):
newrow = pd.Series([gene] + list(gnomad_means.values()),
index=gnomad.columns)
gnomad = gnomad.append(newrow, ignore_index=True)
# Add values to gfeats per gene
for gene in genes:
gvals = gnomad.loc[gnomad.gene == gene, :].values.tolist()[0][1:]
gfeats_tmp[gene] += gvals
header_cols += ['gnomad_' + x for x in list(gnomad.columns)[1:]]
# Format output string of all genomic features per gene
header = '\t'.join(header_cols)
genomic_features = {}
for gene in genes:
gfeats_str = '\t'.join([str(x) for x in gfeats_tmp[gene]])
genomic_features[gene] = gfeats_str
return header, genomic_features
def calc_gtex_stats(gtex_matrix):
"""
Compute summary stats for every gene from a GTEx expression matrix
"""
def _summstats(vals):
"""
Return array of summary statistics for a single gene
"""
return np.array([np.nanmin(vals),
np.nanquantile(vals, q=0.25),
np.nanmean(vals),
np.nanquantile(vals, q=0.75),
np.nanmax(vals),
np.nanstd(vals),
((10 ** vals) - 1 > 1).sum()])
xstats = gtex_matrix.iloc[:, 1:].apply(_summstats, axis=1)
xstats_df = pd.DataFrame(np.vstack(np.array(xstats)),
columns='min q1 mean q3 max sd gt0'.split())
return pd.concat([gtex_matrix.loc[:, 'gene'], xstats_df], axis=1)
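# Sketch of the resulting frame (assumes log10(TPM + 1) inputs; values are toy):
#   gene    min   q1   mean  q3   max  sd   gt0
#   BRCA1   0.1   0.4  0.8   1.2  2.3  0.5  41
# 'gt0' counts tissues whose back-transformed TPM exceeds 1.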
def load_gtex(gtex_matrix, expression_matrix=True):
"""
Read & clean GTEx expression matrix of gene X covariate, and (optionally) compute summary stats
"""
gtex = pd.read_csv(gtex_matrix, sep='\t')
gtex.rename(columns={'#gene' : 'gene'}, inplace=True)
if expression_matrix:
# Drop tissues with NaN values for all genes
nonnan_cols = gtex.iloc[:, 1:].apply(lambda vals: not all(np.isnan(vals)))
gtex = gtex.loc[:, ['gene'] + gtex.columns[1:][nonnan_cols].tolist()]
# Handle duplicate gene symbols
dups = [g for g, c in gtex.gene.value_counts().to_dict().items() if c > 1]
if len(dups) > 0:
if expression_matrix:
# Sum untransformed values
# Note: assumes all expression values have been log10(TPM + 1) transformed
# (this is done by default in preprocess_GTEx.py)
for gene in dups:
expr_sum = gtex.loc[gtex.gene == gene, gtex.columns[1:]].\
apply(lambda vals: np.log10(np.nansum((10 ** vals) - 1) + 1))
newrow = pd.Series([gene] + expr_sum.tolist(), index=gtex.columns)
gtex = gtex.loc[gtex.gene != gene, :]
gtex = gtex.append(newrow, ignore_index=True)
else:
# Otherwise, compute mean of values
for gene in dups:
gmean = gtex.loc[gtex.gene == gene, gtex.columns[1:]].\
apply(lambda vals: np.nanmean(vals))
newrow = pd.Series([gene] + gmean.tolist(), index=gtex.columns)
gtex = gtex.loc[gtex.gene != gene, :]
gtex = gtex.append(newrow, ignore_index=True)
# Compute summary stats per gene, if necessary
if expression_matrix:
return calc_gtex_stats(gtex)
else:
return gtex
def get_expression_features(genes, ensg_ids, gtex_medians, gtex_mads, gtex_pca):
"""
Collect various expression features per gene
"""
xfeats_tmp = {g : [] for g in genes}
header_cols = []
# Load GTEx medians
if gtex_medians is not None:
xmed_df = load_gtex(gtex_medians)
xmed_cols = 'n_tissues_expressed median_expression_min median_expression_q1 ' + \
'median_expression_mean median_expression_q3 median_expression_max ' + \
'median_expression_sd'
xmed_cols = xmed_cols.split()
for gene in genes:
if any(xmed_df.gene == gene):
for v in 'gt0 min q1 mean q3 max sd'.split():
xfeats_tmp[gene].append(xmed_df[xmed_df.gene == gene].iloc[:, 1:][v].iloc[0])
else:
for col in xmed_cols:
xfeats_tmp[gene].append(0)
header_cols += xmed_cols
# Load GTEx MADs
if gtex_mads is not None:
xmad_df = load_gtex(gtex_mads)
xmad_cols = 'expression_mad_min expression_mad_q1 expression_mad_mean ' + \
'expression_mad_q3 expression_mad_max expression_mad_sd'
xmad_cols = xmad_cols.split()
for gene in genes:
if any(xmad_df.gene == gene):
for v in 'min q1 mean q3 max sd'.split():
xfeats_tmp[gene].append(xmad_df[xmad_df.gene == gene].iloc[:, 1:][v].iloc[0])
else:
for col in xmad_cols:
xfeats_tmp[gene].append(0)
header_cols += xmad_cols
# Load GTEx principal components
if gtex_pca is not None:
pca_df = load_gtex(gtex_pca, expression_matrix=False)
pca_cols = pca_df.columns.tolist()[1:]
for gene in genes:
if any(pca_df.gene == gene):
for v in pca_cols:
xfeats_tmp[gene].append(pca_df.loc[pca_df.gene == gene, v].iloc[0])
else:
for col in pca_cols:
xfeats_tmp[gene].append(0)
header_cols += pca_cols
# Format output string of all expression features per gene
header = '\t'.join(header_cols)
expression_features = {}
for gene in genes:
xfeats_str = '\t'.join([str(x) for x in xfeats_tmp[gene]])
expression_features[gene] = xfeats_str
return header, expression_features
def get_chromatin_features(genes, ensg_ids, roadmap_means, roadmap_sds, roadmap_pca):
"""
Collect various chromatin features per gene
"""
cfeats_tmp = {g : [] for g in genes}
header_cols = []
# Load Roadmap means
if roadmap_means is not None:
cmeans_df = load_gtex(roadmap_means, expression_matrix=False)
cmean_cols = cmeans_df.columns.tolist()[1:]
for gene in genes:
if any(cmeans_df.gene == gene):
for v in cmean_cols:
cfeats_tmp[gene].append(cmeans_df.loc[cmeans_df.gene == gene, v].iloc[0])
else:
for col in cmean_cols:
cfeats_tmp[gene].append(0)
header_cols += ['_'.join(['chromhmm', x, 'mean']) for x in cmean_cols]
# Load Roadmap standard deviations
if roadmap_sds is not None:
csds_df = load_gtex(roadmap_sds, expression_matrix=False)
csd_cols = csds_df.columns.tolist()[1:]
for gene in genes:
if any(csds_df.gene == gene):
for v in csd_cols:
cfeats_tmp[gene].append(csds_df.loc[csds_df.gene == gene, v].iloc[0])
else:
for col in csd_cols:
cfeats_tmp[gene].append(0)
header_cols += ['_'.join(['chromhmm', x, 'sd']) for x in csd_cols]
# Load Roadmap principal components
if roadmap_pca is not None:
pca_df = load_gtex(roadmap_pca, expression_matrix=False)
pca_cols = pca_df.columns.tolist()[1:]
for gene in genes:
if any(pca_df.gene == gene):
for v in pca_cols:
cfeats_tmp[gene].append(pca_df.loc[pca_df.gene == gene, v].iloc[0])
else:
for col in pca_cols:
cfeats_tmp[gene].append(0)
header_cols += pca_cols
# Format output string of all chromatin features per gene
header = '\t'.join(header_cols)
chromatin_features = {}
for gene in genes:
cfeats_str = '\t'.join([str(x) for x in cfeats_tmp[gene]])
chromatin_features[gene] = cfeats_str
return header, chromatin_features
def get_constraint_features(genes, ensg_ids, tx_stats, txbt, exonbt, gene_to_ensg,
gnomad_tsv, exac_cnv_tsv, rvis_tsv, eds_tsv, hi_tsv,
ref_fasta, phastcons_url, promoter_size=1000):
"""
Collect various evolutionary constraint features per gene
"""
cfeats_tmp = {g : [] for g in genes}
# Compile feature headers for output file
header_cols = []
# Parse gnomAD constraint stats
if gnomad_tsv is not None:
# Load gnomAD data
gnomad = pd.read_csv(gnomad_tsv, delimiter='\t', compression='gzip')
keep_gnomad_cols = 'gene pLI pNull pRec oe_mis oe_lof oe_mis_upper ' + \
'oe_lof_upper mis_z lof_z'
gnomad = gnomad.loc[gnomad.gene.isin(genes), keep_gnomad_cols.split()]
# Fill in missing genes and values with overall means
gnomad_means = gnomad.iloc[:, 1:].apply(np.nanmean).to_dict()
gnomad.fillna(gnomad_means, axis=0, inplace=True)
for gene in genes:
if not any(gnomad.gene == gene):
newrow = pd.Series([gene] + list(gnomad_means.values()),
index=gnomad.columns)
gnomad = gnomad.append(newrow, ignore_index=True)
# Add values to cfeats per gene
for gene in genes:
gvals = gnomad.loc[gnomad.gene == gene, :].values.tolist()[0][1:]
cfeats_tmp[gene] += gvals
header_cols += ['gnomad_' + x for x in list(gnomad.columns)[1:]]
# Add ExAC CNV Z-score
if exac_cnv_tsv is not None:
# Load ExAC CNV data
exac = pd.read_csv(exac_cnv_tsv, delimiter='\t')
import os
import re
import sys
import warnings
from argparse import ArgumentParser
warnings.filterwarnings('ignore', category=FutureWarning,
module='rpy2.robjects.pandas2ri')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.api.types import is_object_dtype, is_string_dtype
import rpy2.rinterface_lib.embedded as r_embedded
r_embedded.set_initoptions(
('rpy2', '--quiet', '--no-save', '--max-ppsize=500000'))
import rpy2.robjects as robjects
from joblib import dump, load
from matplotlib import ticker
from rpy2.robjects import numpy2ri, pandas2ri
from rpy2.robjects.packages import importr
numpy2ri.activate()
pandas2ri.activate()
# suppress linux conda qt5 wayland warning
if sys.platform.startswith('linux'):
os.environ['XDG_SESSION_TYPE'] = 'x11'
parser = ArgumentParser()
parser.add_argument('--results-dir', type=str, default='results',
help='results dir')
parser.add_argument('--out-dir', type=str, default='figures/bar',
help='out dir')
parser.add_argument('--filter', type=str, choices=['signif', 'all'],
default='signif',
help='response model filter')
parser.add_argument('--file-format', type=str, nargs='+',
choices=['png', 'pdf', 'svg', 'tif'], default=['png'],
help='save file format')
args = parser.parse_args()
model_results_dir = '{}/models'.format(args.results_dir)
analysis_results_dir = '{}/analysis'.format(args.results_dir)
os.makedirs(args.out_dir, mode=0o755, exist_ok=True)
data_types = ['kraken', 'htseq', 'combo']
metrics = ['roc_auc', 'pr_auc', 'balanced_accuracy']
metric_label = {'roc_auc': 'AUROC',
'pr_auc': 'AUPRC',
'balanced_accuracy': 'BCR'}
model_codes = ['rfe', 'lgr', 'edger', 'limma']
colors = ['#009E73', '#F0E442', '#0072B2', 'black']
title_fontsize = 16
x_axis_fontsize = 5 if args.filter == 'all' else 8
y_axis_fontsize = 12
legend_fontsize = 8
fig_let_fontsize = 48
fig_height = 4
fig_width = 10 if args.filter == 'all' else 6
fig_dpi = 300
bar_width = 0.8
x_tick_rotation = 60 if args.filter == 'all' else 45
y_lim = 1.25
plt.rcParams['figure.max_open_warning'] = 0
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Nimbus Sans', 'DejaVu Sans', 'sans']
r_base = importr('base')
all_stats = pd.read_csv(
'{}/compared_runs.txt'.format(analysis_results_dir), sep='\t')
all_stats = all_stats.apply(
lambda x: x.str.lower() if is_object_dtype(x) or is_string_dtype(x) else x)
all_stats = all_stats.loc[all_stats['analysis'] == 'resp']
all_stats = all_stats.sort_values(by=['cancer', 'versus', 'features', 'how'])
signif_hits = pd.read_csv(
'{}/goodness_hits.txt'.format(analysis_results_dir), sep='\t')
signif_hits = signif_hits.apply(
lambda x: x.str.lower() if is_object_dtype(x) or is_string_dtype(x) else x)
import collections
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
import logging
import multiprocessing
import pandas as pd
import numpy as np
import re
import six
import string
import py_stringsimjoin as ssj
from py_stringsimjoin.filter.overlap_filter import OverlapFilter
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from py_stringmatching.tokenizer.whitespace_tokenizer import WhitespaceTokenizer
from py_stringsimjoin.utils.missing_value_handler import get_pairs_with_missing_value
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, \
add_key_column
from py_entitymatching.blocker.blocker import Blocker
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskOverlapBlocker(Blocker):
def __init__(self):
self.stop_words = ['a', 'an', 'and', 'are', 'as', 'at',
'be', 'by', 'for', 'from',
'has', 'he', 'in', 'is', 'it',
'its', 'on', 'that', 'the', 'to',
'was', 'were', 'will', 'with']
logger.warning(
"WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
self.regex_punctuation = re.compile('[%s]' % re.escape(string.punctuation))
super(DaskOverlapBlocker, self).__init__()
def block_tables(self, ltable, rtable, l_overlap_attr, r_overlap_attr,
rem_stop_words=False, q_val=None, word_level=True, overlap_size=1,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
allow_missing=False, verbose=False, show_progress=True,
n_ltable_chunks=1, n_rtable_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks two tables based on the overlap of token sets of attribute
values. Finds tuple pairs from left and right tables such that the overlap
between (a) the set of tokens obtained by tokenizing the value of
attribute l_overlap_attr of a tuple from the left table, and (b) the
set of tokens obtained by tokenizing the value of attribute
r_overlap_attr of a tuple from the right table, is above a certain
threshold.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_overlap_attr (string): The overlap attribute in left table.
r_overlap_attr (string): The overlap attribute in right table.
rem_stop_words (boolean): A flag to indicate whether stop words
(e.g., a, an, the) should be removed from the token sets of the
overlap attribute values (defaults to False).
q_val (int): The value of q to use if the overlap attributes
values are to be tokenized as qgrams (defaults to None).
word_level (boolean): A flag to indicate whether the overlap
attributes should be tokenized as words (i.e., using whitespace
as delimiter) (defaults to True).
overlap_size (int): The minimum number of tokens that must
overlap (defaults to 1).
l_output_attrs (list): A list of attribute names from the left
table to be included in the output candidate set (defaults
to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the output candidate set (defaults
to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple in ltable with missing value in the
blocking attribute will be matched with
every tuple in rtable and vice versa.
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_ltable_chunks (int): The number of partitions to split the left table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
n_rtable_chunks (int): The number of partitions to split the right table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_overlap_attr` is not of type string.
AssertionError: If `r_overlap_attr` is not of type string.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `q_val` is not of type int.
AssertionError: If `word_level` is not of type boolean.
AssertionError: If `overlap_size` is not of type int.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `show_progress` is not of type
boolean.
AssertionError: If `n_ltable_chunks` is not of type
int.
AssertionError: If `n_rtable_chunks` is not of type
int.
AssertionError: If `l_overlap_attr` is not in the ltable
columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
AssertionError: If `l_output_attrs` are not in the ltable.
AssertionError: If `r_output_attrs` are not in the rtable.
SyntaxError: If `q_val` is set to a valid value and
`word_level` is set to True.
SyntaxError: If `q_val` is set to None and
`word_level` is set to False.
Examples:
>>> from py_entitymatching.dask.dask_overlap_blocker import DaskOverlapBlocker
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ob = DaskOverlapBlocker()
# Use all cores
# # Use word-level tokenizer
>>> C1 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], word_level=True, overlap_size=1, n_ltable_chunks=-1, n_rtable_chunks=-1)
# # Use q-gram tokenizer
>>> C2 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], word_level=False, q_val=2, n_ltable_chunks=-1, n_rtable_chunks=-1)
# # Include all possible missing values
>>> C3 = ob.block_tables(A, B, 'address', 'address', l_output_attrs=['name'], r_output_attrs=['name'], allow_missing=True, n_ltable_chunks=-1, n_rtable_chunks=-1)
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
# Input validations
self.validate_types_params_tables(ltable, rtable, l_output_attrs,
r_output_attrs, l_output_prefix,
r_output_prefix, verbose, n_ltable_chunks, n_rtable_chunks)
self.validate_types_other_params(l_overlap_attr, r_overlap_attr,
rem_stop_words, q_val, word_level, overlap_size)
self.validate_allow_missing(allow_missing)
self.validate_show_progress(show_progress)
self.validate_overlap_attrs(ltable, rtable, l_overlap_attr, r_overlap_attr)
self.validate_output_attrs(ltable, rtable, l_output_attrs, r_output_attrs)
self.validate_word_level_qval(word_level, q_val)
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
# validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
# validate input table chunks
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int,
'Parameter n_rtable_chunks')
validate_chunks(n_ltable_chunks)
validate_chunks(n_rtable_chunks)
if n_ltable_chunks == -1:
n_ltable_chunks = multiprocessing.cpu_count()
ltable_chunks = np.array_split(ltable, n_ltable_chunks)
# preprocess/tokenize ltable
if word_level == True:
tokenizer = WhitespaceTokenizer(return_set=True)
else:
tokenizer = QgramTokenizer(qval=q_val, return_set=True)
preprocessed_tokenized_ltbl = []
# Construct DAG for preprocessing/tokenizing ltable chunks
start_row_id = 0
for i in range(len(ltable_chunks)):
result = delayed(self.process_tokenize_block_attr)(ltable_chunks[i][
l_overlap_attr],
start_row_id,
rem_stop_words, tokenizer)
preprocessed_tokenized_ltbl.append(result)
start_row_id += len(ltable_chunks[i])
preprocessed_tokenized_ltbl = delayed(wrap)(preprocessed_tokenized_ltbl)
# Execute the DAG
if show_progress:
with ProgressBar():
logger.info('Preprocessing/tokenizing ltable')
preprocessed_tokenized_ltbl_vals = preprocessed_tokenized_ltbl.compute(
scheduler="processes", num_workers=multiprocessing.cpu_count())
else:
preprocessed_tokenized_ltbl_vals = preprocessed_tokenized_ltbl.compute(
scheduler="processes", num_workers=multiprocessing.cpu_count())
ltable_processed_dict = {}
for i in range(len(preprocessed_tokenized_ltbl_vals)):
ltable_processed_dict.update(preprocessed_tokenized_ltbl_vals[i])
# build inverted index
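# (the inverted index maps each token of the tokenized ltable overlap attribute
# to the ltable row ids containing it, so candidates can be found by lookup)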
inverted_index = self.build_inverted_index(ltable_processed_dict)
if n_rtable_chunks == -1:
n_rtable_chunks = multiprocessing.cpu_count()
rtable_chunks = np.array_split(rtable, n_rtable_chunks)
# Construct the DAG for probing
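# Each rtable chunk is tokenized the same way and probed against the inverted
# index; pairs sharing at least `overlap_size` tokens become candidate tuples.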
probe_result = []
start_row_id = 0
for i in range(len(rtable_chunks)):
result = delayed(self.probe)(rtable_chunks[i][r_overlap_attr],
inverted_index, start_row_id, rem_stop_words,
tokenizer, overlap_size)
probe_result.append(result)
start_row_id += len(rtable_chunks[i])
probe_result = delayed(wrap)(probe_result)
# Execute the DAG for probing
if show_progress:
with ProgressBar():
logger.info('Probing using rtable')
probe_result = probe_result.compute(scheduler="processes",
num_workers=multiprocessing.cpu_count())
else:
probe_result = probe_result.compute(scheduler="processes",
num_workers=multiprocessing.cpu_count())
# construct a minimal dataframe that can be used to add more attributes
flat_list = [item for sublist in probe_result for item in sublist]
tmp = pd.DataFrame(flat_list, columns=['fk_ltable_rid', 'fk_rtable_rid'])
fk_ltable = ltable.iloc[tmp.fk_ltable_rid][l_key].values
fk_rtable = rtable.iloc[tmp.fk_rtable_rid][r_key].values
id_vals = list(range(len(flat_list)))
candset = pd.DataFrame.from_dict(
{'_id': id_vals, l_output_prefix+l_key: fk_ltable, r_output_prefix+r_key: fk_rtable})
# set the properties for the candidate set
cm.set_key(candset, '_id')
cm.set_fk_ltable(candset, l_output_prefix + l_key)
cm.set_fk_rtable(candset, r_output_prefix + r_key)
cm.set_ltable(candset, ltable)
cm.set_rtable(candset, rtable)
ret_candset = gh.add_output_attributes(candset, l_output_attrs=l_output_attrs,
r_output_attrs=r_output_attrs,
l_output_prefix=l_output_prefix,
r_output_prefix=r_output_prefix,
validate=False)
# handle missing values
if allow_missing:
missing_value_pairs = get_pairs_with_missing_value(ltable, rtable, l_key,
r_key, l_overlap_attr,
r_overlap_attr,
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix, False, False)
missing_value_pairs.insert(0, '_id', range(len(ret_candset),
len(ret_candset)+len(missing_value_pairs)))
if len(missing_value_pairs) > 0:
ret_candset = | pd.concat([ret_candset, missing_value_pairs], ignore_index=True, sort=False) | pandas.concat |
import sys
import pandas
import numpy
import math
import numpy as np
import networkx as nx
from sklearn.preprocessing import normalize
def ComputeRankWeightage(row):
return (1+ row['max_actor_rank'] - row['actor_movie_rank'])/(1+ row['max_actor_rank'] - row['min_actor_rank'])
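# Worked example: with min_actor_rank=1, max_actor_rank=5 and actor_movie_rank=2,
# the weightage is (1 + 5 - 2) / (1 + 5 - 1) = 4/5 = 0.8; the top-billed actor
# (rank 1) gets 1.0 and the lowest-billed actor (rank 5) gets 1/5.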
def ComputeTimestampWeights(row, min_timestamp, max_timestamp):
return ((pandas.to_datetime(row['timestamp'])-min_timestamp).days + 1)/((max_timestamp-min_timestamp).days+1)
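# Worked example (sketch): if the tags span 99 days, a tag applied 49 days after
# the earliest timestamp gets (49 + 1) / (99 + 1) = 0.5, so newer tags weigh more.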
def AddWeightages(row):
return numpy.round(row['actor_rank_weightage'] + row['timestamp_weightage'], decimals=4)
def ComputeTF(row):
return row['tag_weightage'] / row['total_actor_weightage']
def ProcessWeightsToTF(combineddata):
combineddata['all_weightages'] = combineddata.apply(AddWeightages, axis=1)
combineddata['tag_weightage'] = combineddata.groupby(['actorid','tagid'])['all_weightages'].transform('sum')
combineddata = combineddata[['actorid', 'tagid', 'tag_weightage']].drop_duplicates(subset=['actorid', 'tagid'])
combineddata['total_actor_weightage'] = combineddata.groupby(['actorid'])['tag_weightage'].transform('sum')
combineddata['tf'] = combineddata.apply(ComputeTF, axis=1)
return combineddata
def ComputeIDF(row, total_actors):
return math.log10(total_actors / row['count_of_actors'])
def ComputeTFIDF(row):
return row['tf']*row['idf']
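# Worked example: with 1000 actors in total, a tag used by 10 of them has
# idf = log10(1000 / 10) = 2; tfidf then scales each actor's tf by that idf.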
def ProcessTFandIDFtoTFIDF(tfdata, idfdata):
tfidfdata = tfdata.merge(idfdata, on='tagid')
tfidfdata['tfidf'] = tfidfdata.apply(ComputeTFIDF, axis=1)
return tfidfdata[['actorid','tagid','tfidf']]
def GenerateAllActorsTFIDF():
allactormoviesdata = | pandas.read_csv("movie-actor.csv") | pandas.read_csv |
# This script gets the amount of funding gained within 9 months
# of the first date
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from tqdm import tqdm
# Need to merge two patreon stat CSV files
df_patreon1 = pd.read_csv('files/20190714_github_patreon_stats.csv')
df_patreon2 = pd.read_csv('files/20190627_patreon_stats.csv')
df_patreon = | pd.concat([df_patreon1, df_patreon2], ignore_index=True) | pandas.concat |
import os
import numpy as np
import pandas as pd
from datetime import datetime
from datetime import timedelta
from dateutil import parser
# from scipy.interpolate import NearestNDInterpolator
import matplotlib.pyplot as plt
# from emtracks.mapinterp import get_df_interp_func
# copied interpolation here. FIXME!
from mapinterp import get_df_interp_func
# Note: see docdb-#6908 for geometry details
# data directory
scriptdir = os.path.dirname(os.path.realpath(__file__))
datadir = os.path.join(scriptdir, '..', 'data/')
print(datadir)
# create map interpolation functions
DS_cyl = get_df_interp_func(filename=datadir+'Mu2e_DSMap_V13.p',
gauss=False, mm=False,
Blabels=['Br', 'Bphi', 'Bz'])
DS_car = get_df_interp_func(filename=datadir+'Mu2e_DSMap_V13.p',
gauss=False, mm=False,
Blabels=['Bx', 'By', 'Bz'])
# measurement noise for field values
# sigma_Bi = 0.
# sigma_NMR = 0.
sigma_Bi = 1e-4
sigma_NMR = 5e-6
# solenoid current
current_PS = 9200.
current_TSu = 1730.
current_TSd = 1730.
current_DS = 6114.
sigma_current = 0.
# sigma_current = 1e-4
'''
- Coordinates for now:
- Hall probe: with Hall element and electronics facing "out of page"
- Y points to top of card
- X points to the right
- Z out of page
'''
# Globals for DSFM locations and probe orientations
# reflector radius
# counter-clockwise
# Phi is w.r.t. BP up (i.e. defines Phi=0)
Rs_reflect_BP = 1e-3*np.array([1450./2., 700./2., 1450./2., 700./2.])
Phis_reflect_BP = np.pi*np.array([1., 1.5, 0., 0.5]) # CHECK
Rs_reflect_SP = 1e-3*np.array(4*[241./2.]) # CHECK NUMBER!!
Phis_reflect_SP = np.pi/4. + np.pi*np.array([1., 1.5, 0., 0.5]) # CHECK
# radius on propeller, signed by which side of the propeller
Rs_small = 1e-3*np.array([0., 54., -95.,]) # CHECK SIGNS!!
Rs_large = 1e-3*np.array([44., -319., 488., -656., 800.])
# coordinate orientations of Hall probes
# x = 0, y = 1, z = 2 -- in HP coordinates
# ATTEMPT 1
# these arrays specify which HP coordinate represents Br, Bphi, Bz
'''
Coords_small = np.array([[1, 0, -2],
[-1, -0, -2],
[1, 0, -2],
]) # CHECK SIGNS XY!
Coords_large = np.array([[-1, 0, 2],
[1, -0, 2],
[1, -0, 2],
[1, -0, 2],
[1, -0, 2],
])
'''
# ATTEMPT 2
_ = np.array([{'Bx': ['Bphi', 1.], 'By': ['Br', 1.], 'Bz': ['Bz', -1.]},
{'Bx': ['Bphi', -1.], 'By': ['Br', -1.], 'Bz': ['Bz', -1.]},
{'Bx': ['Bphi', 1.], 'By': ['Br', 1.], 'Bz': ['Bz', -1.]},
])
Coords_SP_dict = _
_ = np.array([{'Bx': ['Bphi', 1.], 'By': ['Br', -1.], 'Bz': ['Bz', 1.]},
{'Bx': ['Bphi', -1.], 'By': ['Br', 1.], 'Bz': ['Bz', 1.]},
{'Bx': ['Bphi', -1.], 'By': ['Br', 1.], 'Bz': ['Bz', 1.]},
{'Bx': ['Bphi', -1.], 'By': ['Br', 1.], 'Bz': ['Bz', 1.]},
{'Bx': ['Bphi', -1.], 'By': ['Br', 1.], 'Bz': ['Bz', 1.]},
])
Coords_BP_dict = _
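# Each entry maps a probe-frame component (Bx/By/Bz) to the cylindrical component
# it senses and its sign, e.g. for SP probe 1 the probe's Bx reads +Bphi and its
# Bz reads -Bz (see how B_dict is indexed in return_row below).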
# all probes
Rs = np.concatenate([Rs_small, Rs_large])
# location label & hardware ID
HP_labs = np.array(['SP1', 'SP2', 'SP3', 'BP1', 'BP2', 'BP3', 'BP4', 'BP5'])
HP_IDs = np.array(['4C0000000D55C93A', '8E0000000D51483A', '6A0000000D61333A',
'C50000000D5E473A', 'DF0000000D5E803A', 'C90000000D53983A',
'FA0000000D60163A', '2F0000000D5EC73A']) # using probes 1-8
# coefficients for simple voltage model: V_i = slope * B_i + off
# from real measurements (NOT MATCHED TO PROBE ID!!)
# to get realistic voltages
HP_V_slope = np.array([2.871e6, 2.813e6, 2.940e6, 2.973e6,
2.957e6, 2.863e6, 2.875e6, 2.831e6])
HP_V_off = np.array([-6.169e4, -6.300e4, -6.546e4, -6.136e4,
-6.346e4, -6.160e4, -5.939e4, -5.737e4])
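# e.g. (sketch of the linear model above) for the first probe, B = 1 T gives
# V ~ 2.871e6 * 1.0 - 6.169e4 ~ 2.81e6 in raw units; a measured field can be
# recovered as B = (V - off) / slope.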
# split into big and small propeller where necessary
HP_labs_SP = HP_labs[0:3]
HP_labs_BP = HP_labs[3:]
HP_IDs_SP = HP_IDs[0:3]
HP_IDs_BP = HP_IDs[3:]
HP_V_slope_SP = HP_V_slope[0:3]
HP_V_slope_BP = HP_V_slope[3:]
HP_V_off_SP = HP_V_off[0:3]
HP_V_off_BP = HP_V_off[3:]
# Define steps for DSFM. Big propeller "top" along +X defines Phi=0
# Small propeller offset by +45 deg = pi/4 rad by design
Phis = np.linspace(0, 2*np.pi, 17)[:-1]
Phis_SP = Phis + np.pi/4.
# NMR always at the same location in XY plane
NMR_Phi = np.pi/2.
# probe on BP at R=-319 mm, want it in -y direction
NMR_R = -319. * 1e-3 # m
X_NMR = NMR_R * np.cos(NMR_Phi)
Y_NMR = NMR_R * np.sin(NMR_Phi)
print(f'NMR Location: x = {X_NMR:0.3f} m, y = {Y_NMR:0.3f} m')
# RR_BP, PP_BP = np.meshgrid(Rs_large, Phis)
# RR_SP, PP_SP = np.meshgrid(Rs_small, Phis_SP)
# Calculating Z locations for SP, BP, NMR
END_MEAS_SP = 2980. * 1e-3
START_MEAS_SP = 13685. * 1e-3
step_Z = 0.05 # 5 cm step, for now
Zs_SP = np.arange(START_MEAS_SP, END_MEAS_SP, -step_Z)
delta_Z_BP_SP = -1335. * 1e-3
Zs_BP = Zs_SP - delta_Z_BP_SP
delta_Z_NMR_SP = -1557.87 * 1e-3
Zs_NMR = Zs_SP - delta_Z_NMR_SP
# columns to write to file
# Draft:
# TIMESTAMP, X_NMR, Y_NMR, Z_NMR, HP_1_#, HP_1_ID, HP_1_X, HP_1_Y, HP_1_Z, ...
# HP_1_Bx_Meas, HP_1_Bx_Mu2e, HP_1_By_Meas, HP_1_By_Mu2e, HP_1_Bz_Meas, HP_1_Bz_Mu2e,
# HP_1_V1, HP_1_V2, HP_1_V3, ... ALL Hall probes - 7 , 8 , 9
single_reflect_cols = ['rho', 'theta', 'z']
_ = list(np.concatenate([[f'Reflect_SP_{j}_{i}' for i in single_reflect_cols]
for j in ['A', 'B', 'C', 'D']]))
reflect_SP_col_list = _
_ = list(np.concatenate([[f'Reflect_BP_{j}_{i}' for i in single_reflect_cols]
for j in ['A', 'B', 'C', 'D']]))
reflect_BP_col_list = _
# print(reflect_SP_col_list)
# print(reflect_BP_col_list)
single_HP_cols = ['ID', 'X', 'Y', 'Z', 'Vx', 'Vy', 'Vz', 'Temperature',
'Bx_Meas', 'By_Meas', 'Bz_Meas', 'Br', 'Bphi', 'Bz']
HP_SP_col_list = list(np.concatenate([[f'HP_SP{j}_{i}' for i in single_HP_cols]
for j in [1, 2, 3]]))
HP_BP_col_list = list(np.concatenate([[f'HP_BP{j}_{i}' for i in single_HP_cols]
for j in [1, 2, 3, 4, 5]]))
col_list = ['TIMESTAMP', 'Mapper_Angle', 'Mapper_Z', 'X_NMR', 'Y_NMR', 'Z_NMR',
'B_NMR'] + HP_SP_col_list + HP_BP_col_list \
+ reflect_BP_col_list + reflect_SP_col_list
# define an empty dataframe
# df_EMMA = pd.DataFrame(columns=col_list)
# pick a starttime
t0 = parser.parse('2021-07-21 12:00:00')
# define time between steps
dt_Phi = timedelta(seconds=120) # assume 2 minutes per step for azimuthal move
dt_Z = timedelta(seconds=240) # 4 minutes when moving z
# define an interesting temperature scalar field throughout the DS
def temp_DS(pos):
return 22. - 0.1 * (pos[2] - 4.) + 1. * (pos[1])
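# e.g. temp_DS([0., 0.5, 6.]) = 22. - 0.1*(6. - 4.) + 1.*0.5 = 22.3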
# function to return one row for the dataframe where each row is a "step"
# of EMMA
def return_row(time, Z_ind, Phi_ind, ):
row = {}
# phi
Phi = Phis[Phi_ind]
Phi_BP = Phi
Phi_SP = Phis_SP[Phi_ind]
# time
row['TIMESTAMP'] = str(time)
# mapper
row['Mapper_Angle'] = Phi
row['Mapper_Z'] = Zs_NMR[Z_ind]
# NMR
row['X_NMR'] = X_NMR
row['Y_NMR'] = Y_NMR
row['Z_NMR'] = Zs_NMR[Z_ind]
B_NMR = np.linalg.norm(DS_car([X_NMR, Y_NMR, Zs_NMR[Z_ind]]))
if B_NMR < 0.7: # NMR cutoff
B_NMR = 0.0
if sigma_NMR > 0.:
B_NMR += np.random.normal(loc=0.0, scale=sigma_NMR)
row['B_NMR'] = B_NMR
# Hall probes
# small propeller
#print('SP')
for i in range(len(HP_IDs_SP)):
#print(i)
pre = f'HP_{HP_labs_SP[i]}'
row[f'{pre}_ID'] = HP_IDs_SP[i]
# coords
X_ = Rs_small[i] * np.cos(Phi_SP)
Y_ = Rs_small[i] * np.sin(Phi_SP)
Z_ = Zs_SP[Z_ind]
row[f'{pre}_X'] = X_
row[f'{pre}_Y'] = Y_
row[f'{pre}_Z'] = Z_
# field
Br, Bphi, Bz = DS_cyl([X_, Y_, Z_])
if sigma_Bi > 0.:
Br += np.random.normal(loc=0.0, scale=sigma_Bi)
Bphi += np.random.normal(loc=0.0, scale=sigma_Bi)
Bz += np.random.normal(loc=0.0, scale=sigma_Bi)
B_dict = {'Br': Br, 'Bphi': Bphi, 'Bz': Bz}
Bx_ = Coords_SP_dict[i]['Bx'][1] * B_dict[Coords_SP_dict[i]['Bx'][0]]
By_ = Coords_SP_dict[i]['By'][1] * B_dict[Coords_SP_dict[i]['By'][0]]
Bz_ = Coords_SP_dict[i]['Bz'][1] * B_dict[Coords_SP_dict[i]['Bz'][0]]
# voltages
Vx_ = HP_V_off_SP[i] + HP_V_slope_SP[i] * Bx_
Vy_ = HP_V_off_SP[i] + HP_V_slope_SP[i] * By_
Vz_ = HP_V_off_SP[i] + HP_V_slope_SP[i] * Bz_
# temperature
Temp_ = temp_DS([X_, Y_, Z_])
# write to row
row[f'{pre}_Vx'] = int(Vx_)
row[f'{pre}_Vy'] = int(Vy_)
row[f'{pre}_Vz'] = int(Vz_)
row[f'{pre}_Temperature'] = Temp_
row[f'{pre}_Bx_Meas'] = Bx_
row[f'{pre}_By_Meas'] = By_
row[f'{pre}_Bz_Meas'] = Bz_
row[f'{pre}_Br'] = Br
row[f'{pre}_Bphi'] = Bphi
row[f'{pre}_Bz'] = Bz
# big propeller
#print('BP')
for i in range(len(HP_IDs_BP)):
#print(i)
pre = f'HP_{HP_labs_BP[i]}'
row[f'{pre}_ID'] = HP_IDs_BP[i]
# coords
X_ = Rs_large[i] * np.cos(Phi_BP)
Y_ = Rs_large[i] * np.sin(Phi_BP)
Z_ = Zs_BP[Z_ind]
row[f'{pre}_X'] = X_
row[f'{pre}_Y'] = Y_
row[f'{pre}_Z'] = Z_
# field
Br, Bphi, Bz = DS_cyl([X_, Y_, Z_])
if sigma_Bi > 0.:
Br += np.random.normal(loc=0.0, scale=sigma_Bi)
Bphi += np.random.normal(loc=0.0, scale=sigma_Bi)
Bz += np.random.normal(loc=0.0, scale=sigma_Bi)
B_dict = {'Br': Br, 'Bphi': Bphi, 'Bz': Bz}
Bx_ = Coords_BP_dict[i]['Bx'][1] * B_dict[Coords_BP_dict[i]['Bx'][0]]
By_ = Coords_BP_dict[i]['By'][1] * B_dict[Coords_BP_dict[i]['By'][0]]
Bz_ = Coords_BP_dict[i]['Bz'][1] * B_dict[Coords_BP_dict[i]['Bz'][0]]
# voltages
Vx_ = HP_V_off_BP[i] + HP_V_slope_BP[i] * Bx_
Vy_ = HP_V_off_BP[i] + HP_V_slope_BP[i] * By_
Vz_ = HP_V_off_BP[i] + HP_V_slope_BP[i] * Bz_
# temperature
Temp_ = temp_DS([X_, Y_, Z_])
# write to row
row[f'{pre}_Vx'] = int(Vx_)
row[f'{pre}_Vy'] = int(Vy_)
row[f'{pre}_Vz'] = int(Vz_)
row[f'{pre}_Temperature'] = Temp_
row[f'{pre}_Bx_Meas'] = Bx_
row[f'{pre}_By_Meas'] = By_
row[f'{pre}_Bz_Meas'] = Bz_
row[f'{pre}_Br'] = Br
row[f'{pre}_Bphi'] = Bphi
row[f'{pre}_Bz'] = Bz
# reflectors
ref_labs = ['A', 'B', 'C', 'D']
base_width = 75. # [mm], ESTIMATE!! CHECK BASE WIDTH
# BP
for i, lab in enumerate(ref_labs):
r = Rs_reflect_BP[i]*1e3
t = Phi + Phis_reflect_BP[i]
z = Zs_NMR[Z_ind]*1e3
x = r * np.cos(t)
y = r * np.sin(t)
if (x > -base_width/2) and (x < base_width/2) and (y < 0):
r = np.nan
t = np.nan
z = np.nan
row[f'Reflect_BP_{lab}_rho'] = r # [mm] in TDMS
row[f'Reflect_BP_{lab}_theta'] = t # [rad] in TDMS
row[f'Reflect_BP_{lab}_z'] = z # [mm] in TDMS
# SP
for i, lab in enumerate(ref_labs):
r = Rs_reflect_SP[i]*1e3
t = Phi + Phis_reflect_SP[i]
z = Zs_SP[Z_ind]*1e3
x = r * np.cos(t)
y = r * np.sin(t)
if (x > -base_width/2) and (x < base_width/2) and (y < 0):
r = np.nan
t = np.nan
z = np.nan
row[f'Reflect_SP_{lab}_rho'] = r # [mm] in TDMS
row[f'Reflect_SP_{lab}_theta'] = t # [rad] in TDMS
row[f'Reflect_SP_{lab}_z'] = z # [mm] in TDMS
# current
if sigma_current > 0.:
c_PS = current_PS + np.random.normal(loc=0.0, scale=sigma_current)
c_TSu = current_TSu + np.random.normal(loc=0.0, scale=sigma_current)
c_TSd = current_TSd + np.random.normal(loc=0.0, scale=sigma_current)
c_DS = current_DS + np.random.normal(loc=0.0, scale=sigma_current)
else:
c_PS = current_PS
c_TSu = current_TSu
c_TSd = current_TSd
c_DS = current_DS
row['PS_Current'] = c_PS
row['TSu_Current'] = c_TSu
row['TSd_Current'] = c_TSd
row['DS_Current'] = c_DS
return row
if __name__ == '__main__':
# generate dataframe looping through Z and Phi steps
rows_list = []
t = t0
for Z_ind in range(len(Zs_NMR)):
# t += dt_Z
for Phi_ind in range(len(Phis)):
row = return_row(t, Z_ind, Phi_ind)
rows_list.append(row)
t += dt_Phi
t += dt_Z
df_EMMA = | pd.DataFrame(rows_list) | pandas.DataFrame |
from typing import Tuple, Sequence, Mapping, Optional, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from anndata import AnnData
from copy import copy
import pandas as pd
import ntpath
import numpy as np
import matplotlib.pyplot as plt
import scanpy as sc
import logging
import seaborn as sns
import warnings
# output scanpy logs
logging.basicConfig(level=logging.INFO)
sc.settings.verbosity = 3
# don't show this numpy warning
warnings.filterwarnings("ignore", message="All-NaN slice encountered")
# basic plotting settings
plt.rcParams["xtick.labelsize"] = 8
plt.rcParams["ytick.labelsize"] = 8
plt.rcParams["font.size"] = 8
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Arial"
plt.rcParams["figure.figsize"] = 7.2, 4.45
plt.rcParams["figure.titlesize"] = 9
plt.rcParams["figure.dpi"] = 120
plt.rcParams["axes.titlesize"] = 9
plt.rcParams["axes.labelsize"] = 8
plt.rcParams["axes.axisbelow"] = True
plt.rcParams["axes.linewidth"] = 0.5
plt.rcParams["lines.linewidth"] = 0.7
plt.rcParams["lines.markersize"] = 2
plt.rcParams["legend.fontsize"] = 8
plt.rcParams["boxplot.flierprops.marker"] = "."
plt.rcParams["boxplot.flierprops.markerfacecolor"] = "k"
plt.rcParams["boxplot.flierprops.markersize"] = 2
plt.rcParams["pdf.fonttype"] = 42 # to make pdf text available for illustrator
plt.rcParams["ps.fonttype"] = 42 # to make pdf text available for illustrator
figwd = 7.2 # standard figure width
cellsize = 20 # size to plot cells
wspace = 1 # space between scanpy plots to make room for legends
hspace = 0.5 # space between scanpy plots to make room for legends
def create_meta_data(input_dir: str, output_dir: str):
"""Create a meta data table from PD output and mapping tables.
Requires the following tables in the `input_dir`:
* PD Protein
* PD InputFiles
* file_sample_mapping
* plate_layout_mapping
* sort_layout
* label_layout
* sample_layout
* facs_data
See the example files to understand the structure of these tables.
Alternatively, create meta data on your own and use :func:`~sceptre.load_dataset`
Parameters
----------
input_dir
The path to the input directory.
output_dir
The path to the output directory.
Returns
-------
:obj:`None`
Saves the meta table in `output_dir`.
"""
import os
# PD tables
for file in os.listdir(input_dir):
if "_Proteins.txt" in file:
prot = pd.read_table(input_dir + file, low_memory=False)
if "_InputFiles.txt" in file:
files = pd.read_table(input_dir + file)
files["File Name"] = files["File Name"].apply(lambda x: ntpath.basename(x))
# mapping tables
file_sample_mapping = pd.read_table("{}file_sample_mapping.txt".format(input_dir))
plate_layout_mapping = pd.read_table(
"{}plate_layout_mapping.txt".format(input_dir)
).set_index("Plate")
# plate data tables
plate_data = {k: {} for k in plate_layout_mapping.index.unique()}
for plate in plate_data.keys():
plate_data[plate]["sort_layout"] = pd.read_table(
"{}{}".format(input_dir, plate_layout_mapping.loc[plate, "Sort Layout"]),
index_col=0,
)
plate_data[plate]["label_layout"] = pd.read_table(
"{}{}".format(input_dir, plate_layout_mapping.loc[plate, "Label Layout"]),
index_col=0,
).fillna("")
plate_data[plate]["sample_layout"] = pd.read_table(
"{}{}".format(input_dir, plate_layout_mapping.loc[plate, "Sample Layout"]),
index_col=0,
)
plate_data[plate]["facs_data"] = pd.read_table(
"{}{}".format(input_dir, plate_layout_mapping.loc[plate, "Facs Data"])
)
plate_data[plate]["facs_data"] = plate_data[plate]["facs_data"].drop(
["Row", "Column"], axis=1
)
plate_data[plate]["facs_data"] = plate_data[plate]["facs_data"].set_index(
"Well"
)
# create cell metadata
# add each channel from each file to the rows
meta = pd.DataFrame(
[
x.split(" ")[2:]
for x in prot.columns[prot.columns.str.contains("Abundance")]
],
columns=["File ID", "Channel"],
)
# add the file name
meta = meta.merge(
files.set_index("File ID")["File Name"],
left_on="File ID",
right_index=True,
validate="many_to_one",
)
# add the plate and sample
_ = len(meta)
meta = meta.merge(file_sample_mapping, on="File Name", validate="many_to_one")
if len(meta) < _:
raise ValueError("Error in file_sample_mapping.txt")
# add the well information via the sample_layout and label_layout for each plate
for i in meta.index:
p, s, c = meta.loc[i, ["Plate", "Sample", "Channel"]]
p_d = plate_data[p]
well = (
p_d["sample_layout"][
(p_d["sample_layout"] == s) & (p_d["label_layout"] == c)
]
.stack()
.index.tolist()
)
if len(well) > 1:
raise ValueError(
"Error in plate layout data: Plate {}, Sample {}, Channel {}".format(
p, s, c
)
)
elif len(well) == 0:
row, col, well = pd.NA, pd.NA, pd.NA
else:
row = well[0][0]
col = well[0][1]
well = "".join(well[0])
meta.loc[i, ["Row", "Column", "Well"]] = row, col, well
# use the sort layout to map the sorted population and add the facs data
if not pd.isna(well):
meta.loc[i, "Sorted Population"] = plate_data[p]["sort_layout"].loc[
row, col
]
# add the facs data
# meta.loc[i] = meta.loc[i].append(plate_data[p]['facs_data'].loc[well, :])
else:
meta.loc[i, "Sorted Population"] = pd.NA
# add the facs data for each plate
_ = []
for p in meta["Plate"].unique():
_.append(
meta.loc[meta["Plate"] == p].merge(
plate_data[p]["facs_data"],
left_on="Well",
right_index=True,
how="left",
)
)
meta = pd.concat(_)
meta = meta.rename(columns={"Population": "Gated Population"})
meta.to_csv(output_dir + "meta.txt", sep="\t", index=False)
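# Usage sketch (hypothetical paths): create_meta_data("pd_export/", "results/")
# writes results/meta.txt with one row per (file ID, channel), joined with the
# well, sorted-population and FACS information from the mapping tables.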
def load_dataset(proteins: str, psms: str, msms: str, files: str, meta: str):
"""Load the dataset from specified paths.
Parameters
----------
proteins
The path to the PD protein table.
psms
The path to the PD PSMs table.
msms
The path to the PD MSMS table.
files
The path to the PD InputFiles table.
meta
The path to the Meta table.
Returns
-------
A dict containing all required tables.
"""
prot = pd.read_table(proteins, low_memory=False)
# To use the Gene Symbol as index:
# Set nan Gene Symbol to protein accession
# and if Gene Symbol is not unique, add the protein accession to make duplicates unique.
nans = prot["Gene Symbol"].isna()
for i in nans.index:
if nans[i]:
prot.loc[i, "Gene Symbol"] = prot.loc[i, "Accession"]
duplicates = prot["Gene Symbol"].duplicated(keep=False)
for i in duplicates.index:
if duplicates[i]:
prot.loc[i, "Gene Symbol"] = (
prot.loc[i, "Gene Symbol"] + "_" + prot.loc[i, "Accession"]
)
psms = | pd.read_table(psms, low_memory=False) | pandas.read_table |
import collections
import os
import sys
import joblib
import numpy as np
import pandas as pd
import torch
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import normalize
from torch.utils.data import DataLoader, DistributedSampler
sys.path.append("fltk/synthpriv/synthetic")
from fltk.datasets.distributed.dataset import DistDataset
from fltk.synthpriv.datasets.base import DataframeDataset
from fltk.synthpriv.synthetic.models import dp_wgan, pate_gan, ron_gauss
from fltk.synthpriv.synthetic.models.IMLE import imle
from fltk.synthpriv.synthetic.models.Private_PGM import private_pgm
# good values for Adult dataset from borealis repo
# model : {epsilon : sigma}
DEFAULTS = {
"imle": {
2: 0.8,
5: 0.7,
8: 0.6,
},
"pate-gan": {
2: 1e-4,
5: 3e-4,
8: 3e-4,
},
"dp-wgan": {
2: 1.0,
5: 0.9,
8: 0.8,
},
}
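# Example lookup (sketch): the suggested noise scale for a model/epsilon pair,
# e.g. DEFAULTS["imle"][8] -> 0.6, matching the SyntheticDataset defaults below.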
class SyntheticDataset(DistDataset):
def __init__(self, real_dataset, args, device, id, model="imle", sigma=0.6, target_epsilon=8):
super().__init__(args)
self.real_dataset = real_dataset
cache_path = f"data/IMLE_synthetic_{real_dataset.__class__.__name__}_eps={target_epsilon}_sig={sigma}_{id}.pkl"
if not os.path.exists(cache_path):
try:
with torch.cuda.device(device):
self.fit(model, sigma, target_epsilon)
except Exception as e:
print(e)
exit(1)
joblib.dump((self.train_df, self.train_labels, self.test_df, self.test_labels), cache_path)
else:
self.train_df, self.train_labels, self.test_df, self.test_labels = joblib.load(cache_path)
self.train_df = np.round(self.train_df)
self.train_labels = np.round(self.train_labels)
self.test_df = self.real_dataset.test_df
self.test_labels = self.real_dataset.test_labels
for col in self.real_dataset.train_df.columns:
print(col)
print(np.unique(self.real_dataset.train_df[col]))
print(np.unique(self.train_df[col]))
try:
self.name = "Synthetic" + real_dataset.__class__.__name__
self.train_dataset = DataframeDataset(self.train_df, self.train_labels)
self.test_dataset = DataframeDataset(self.test_df, self.test_labels)
self.n_workers = 16
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=1, rank=0, shuffle=True)
self.test_sampler = DistributedSampler(self.test_dataset, num_replicas=1, rank=0, shuffle=True)
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.args.batch_size,
sampler=self.train_sampler,
num_workers=self.n_workers,
prefetch_factor=int(self.args.batch_size / self.n_workers),
pin_memory=True,
)
self.test_loader = DataLoader(
self.test_dataset,
batch_size=self.args.batch_size,
sampler=self.test_sampler,
num_workers=self.n_workers,
prefetch_factor=int(self.args.batch_size / self.n_workers),
pin_memory=True,
)
except Exception as e:
print(e)
exit(1)
def fit(self, model_name, sigma, target_epsilon):
print(f"Fitting synthetic {self.real_dataset.__class__.__name__}")
cols = list(self.real_dataset.train_df.columns) + ["income"]
train = | pd.DataFrame(columns=cols) | pandas.DataFrame |