prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90)
---|---|---|
"""
Synthetic Data Generation using a Bayesian Network
Based on the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
PrivBayes: Private Data Release via Bayesian Networks. (2017)
"""
import numpy as np
import pandas as pd
from pyhere import here
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
from sklearn.metrics import mutual_info_score
from itertools import combinations
from collections import namedtuple
from copy import deepcopy
import warnings
warnings.filterwarnings('ignore')
from synthesis._base_synthesis import _BaseSynthesizer
import synthesis.tools.dp_utils as dp_utils
import synthesis.tools.utils as utils
from thomas.core.cpt import CPT
from thomas.core.factor import Factor
from thomas.core.bayesiannetwork import BayesianNetwork
NodeParentPair = namedtuple('NodeParentPair', ['node', 'parents'])
score_functions = {
'mi': 'mutual_information',
'F': 'F_score', # note requires binary encoded data
'R': 'R_score'
}
class PrivBayes(_BaseSynthesizer):
"""PrivBayes: Private Data Release via Bayesian Networks (Zhang et al 2017)"""
def __init__(self, epsilon: float = 1.0, degree_network=None,
theta_usefulness=4, score_function='mi', random_state=None,
epsilon_split=0.4, n_records_synth=None, network_init=None,
max_cpt_size=10000000, verbose=2):
self.epsilon = epsilon
self.degree_network = degree_network
self.theta_usefulness = theta_usefulness
self.score_function = score_function
self.random_state = random_state
self.epsilon_split = epsilon_split
self.n_records_synth = n_records_synth
self.network_init = network_init
self.max_cpt_size=max_cpt_size
self.verbose = verbose
def fit(self, X, y=None):
self._check_init_args(X)
X = self._check_input_data(X)
self._n_records_fit, self._n_columns_fit = X.shape
# X = X.loc[columns] if columns is not None else X
self.random_state_ = check_random_state(self.random_state)
# todo integrate random state
self._greedy_bayes(X)
self._compute_conditional_distributions(X)
# todo specify name in init?
self.bayesian_network_ = BayesianNetwork.from_CPTs('PrivBayes', self.cpt_.values())
return self
def transform(self, X):
# n_records = self._n_records_fit if n_records is None else n_records
n_records = self.n_records_synth if self.n_records_synth is not None else self._n_records_fit
X = self._check_input_data(X)
Xt = self._generate_data(X, n_records)
if self.verbose:
print("\nSynthetic Data Generated\n")
return Xt
# def _check_input_data(self, X):
# # converts to dataframe in case of numpy input and make all columns categorical.
# X = pd.DataFrame(X).astype(str, copy=False)
# assert X.shape[1] > 1, "input needs at least 2 columns"
# # prevent integer column indexing issues
# X.columns = X.columns.astype(str)
# if hasattr(self, '_header'):
# assert set(X.columns) == set(self._header), "input contains different columns than seen in fit"
# else:
# self._header = list(X.columns)
# return X
def _check_init_args(self, X):
assert (self.degree_network is None) or (self.degree_network < X.shape[1]), "degree of network > " \
"number of columns in X"
if self.epsilon_split is None:
self.epsilon_split = [0.4, 0.6]
else:
if isinstance(self.epsilon_split, float):
self.epsilon_split = [self.epsilon_split, 1-self.epsilon_split]
self.epsilon_split = list(self.epsilon_split)
n_records, n_columns = X.shape
if not self.degree_network:
self.degree_network = self._compute_degree_network(n_records, n_columns)
# cap the network degree so that the conditional probability tables still fit into memory
if self.max_cpt_size:
max_degree_network = self._max_degree_network(X)
if self.degree_network > max_degree_network:
if self.verbose >= 1:
print("Degree network capped from {} to {} to be able to fit CPT into memory"
.format(self.degree_network, max_degree_network))
self.degree_network = max_degree_network
if self.verbose >= 1:
print("Degree of network (k): {}\n".format(self.degree_network))
return self
def _greedy_bayes(self, X):
n_records, n_columns = X.shape
nodes, nodes_selected = self._init_network(X)
# normally equal to n_columns - 1 as only the root is selected, unless user implements new
# network init.
self._n_nodes_dp_computed = len(nodes) - len(nodes_selected)
for i in range(len(nodes_selected), len(nodes)):
if self.verbose >= 2:
print("{}/{} - Evaluating next node to add to network".format(i+1, n_columns))
nodes_remaining = nodes - nodes_selected
n_parents = min(self.degree_network, len(nodes_selected))
node_parent_pairs = [
NodeParentPair(n, tuple(p)) for n in nodes_remaining
for p in combinations(nodes_selected, n_parents)
]
if self.verbose >= 2:
print("Number of NodeParentPair candidates: {}".format(len(node_parent_pairs)))
scores = self._compute_scores(X, node_parent_pairs)
if self.epsilon:
sampled_pair = self._exponential_mechanism(X, node_parent_pairs, scores)
else:
sampled_pair = node_parent_pairs[np.argmax(scores)]
if self.verbose >= 1:
print("Selected node: {} - with parents: {}\n".format(sampled_pair.node, sampled_pair.parents))
nodes_selected.add(sampled_pair.node)
self.network_.append(sampled_pair)
if self.verbose >= 1:
print("Learned Network Structure\n")
return self
def _init_network(self, X):
self._binary_columns = [c for c in X.columns if X[c].unique().size <= 2]
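# binary columns are tracked because they get the tighter mutual-information sensitivity bound
# in _compute_scaling_factor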
nodes = set(X.columns)
if self.network_init is not None:
nodes_selected = set(n.node for n in self.network_init)
# print("Pre-defined network init: {}".format(self.network_))
for i, pair in enumerate(self.network_init):
if self.verbose >= 1:
print("{}/{} - init node {} - with parents: {}".format(i+1, len(self.network_init),
pair.node, pair.parents))
self.network_ = self.network_init.copy()
return nodes, nodes_selected
# if set_network is not called we start with a random first node
self.network_ = []
nodes_selected = set()
root = np.random.choice(tuple(nodes))
self.network_.append(NodeParentPair(node=root, parents=None))
nodes_selected.add(root)
if self.verbose >= 1:
print("1/{} - Root of network: {}\n".format(X.shape[1], root))
return nodes, nodes_selected
def set_network(self, network):
assert all(isinstance(n, NodeParentPair) for n in network), "input network does not consist of " \
"NodeParentPairs"
self.network_init = network
return self
def _compute_scores(self, X, node_parent_pairs):
cached_scores = self._get_cached_scores(node_parent_pairs)
# todo fix cache_scores
scores = np.empty(len(node_parent_pairs))
for idx, pair in enumerate(node_parent_pairs):
if self.score_function == 'mi':
scores[idx] = self._compute_mutual_information_sklearn(X, pair)
return scores
def _get_cached_scores(self, node_parent_pairs):
return []
def _compute_mutual_information(self, X, pair):
p_node = Factor(X.groupby(pair.node).size()).normalize()
p_parents = Factor(X.groupby(list(pair.parents)).size()).normalize()
p_nodeparents = Factor(X.groupby([*pair.parents, pair.node]).size()).normalize()
# todo: have to get values from Factor: 'numpy.ndarray' object has no attribute '_data'
mi = np.sum(p_nodeparents.values * np.log(p_nodeparents/(p_node*p_parents)))
return mi
def _compute_mutual_information_sklearn(self, X, pair):
df_node = X[pair.node].values
if len(pair.parents) == 1:
df_parent = X[pair.parents[0]].values
else:
# todo find alternative method to combine parent cols
df_parent = X.loc[:, pair.parents].apply(lambda x: ' '.join(x.values), axis=1).values
return mutual_info_score(df_node, df_parent)
def _exponential_mechanism(self, X, node_parent_pairs, scores):
# todo check if dp correct -> e.g. 2*scaling?
scaling_factors = self._compute_scaling_factor(X, node_parent_pairs)
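# Exponential mechanism: Pr[pair] ∝ exp(score / (2 * scale)); the scaling factor already folds in
# the score-function sensitivity and the structure-learning epsilon budget (see _compute_scaling_factor)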
sampling_distribution = np.exp(scores / (2 * scaling_factors))
normalized_sampling_distribution = sampling_distribution / sampling_distribution.sum()
pair_idx = np.arange(len(node_parent_pairs))
sampled_pair_idx = np.random.choice(pair_idx, p=normalized_sampling_distribution)
sampled_pair = node_parent_pairs[sampled_pair_idx]
return sampled_pair
def _compute_scaling_factor(self, X, node_parent_pairs):
n_records = X.shape[0]
scaling_factors = np.empty(len(node_parent_pairs))
if self.score_function == 'mi':
for idx, pair in enumerate(node_parent_pairs):
if pair.node in self._binary_columns or \
(len(pair.parents) == 1 and pair.parents[0] in self._binary_columns):
sensitivity = (np.log(n_records) / n_records) + \
(((n_records-1)/n_records) * np.log(n_records/(n_records-1)))
else:
sensitivity = (2/n_records)*np.log((n_records+1)/2) + \
(((n_records-1)/n_records) * np.log((n_records+1)/(n_records-1)))
scaling_factors[idx] = self._n_nodes_dp_computed * sensitivity / (self.epsilon*self.epsilon_split[0])
return scaling_factors
def _compute_degree_network(self, n_records, n_columns):
"""
Determine the degree of the network (k) by finding the highest integer k for which the required
level of theta-usefulness is still met. The criterion measures the ratio of information to noise.
Lemma 4.8 in the paper.
Note there are some inconsistencies between the original paper from 2014 and the updated version in 2017
- avg_scale_info: full epsilon in paper 2014 | epsilon_2 in paper2017
- avg_scale_noise: k + 3 in paper 2014 lemma 3 | k + 2 in paper 2017 lemma 4.8
"""
k = n_columns - 1
while k > 1:
# avg_scale_info = self.epsilon * (1 - self.epsilon_split[0]) * n_records
avg_scale_info = self.epsilon * self.epsilon_split[1] * n_records
avg_scale_noise = (n_columns - k) * (2 ** (k + 2))
if (avg_scale_info / avg_scale_noise) >= self.theta_usefulness:
break
k -= 1
return k
def _max_degree_network(self, X):
"""calculate max degree network to ensure the CPTs will fit into memory"""
ranked_column_cardinalities = utils.rank_columns_on_cardinality(X)
cum_cardinality = 1
degree_network = 0
for k, cardinality in enumerate(ranked_column_cardinalities):
cum_cardinality *= cardinality
if cum_cardinality >= self.max_cpt_size:
break
degree_network += 1
return degree_network
def _compute_conditional_distributions(self, X):
P = dict()
# note we only compute n_columns - (degree_network+1), as the CPTs from the other nodes
# in range [0, degree_network] can be inferred -> ensures (eps_2 / (d-k))-differential privacy
local_epsilon = self.epsilon * self.epsilon_split[1] / (self._n_columns_fit - self.degree_network)
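# e.g. with epsilon=1.0, epsilon_split=[0.4, 0.6], 10 fitted columns and degree k=3:
# local_epsilon = 1.0 * 0.6 / (10 - 3) ≈ 0.086 for each materialized CPT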
# first materialize noisy distributions for the nodes whose number of parents equals the network degree k.
# earlier nodes can be inferred from these distributions without adding extra noise
for idx, pair in enumerate(self.network_[self.degree_network:]):
cpt_size = utils.get_size_contingency_table(X[[*pair.parents, pair.node]])
if self.verbose >= 2:
print('Learning conditional probabilities: {} - with parents {} ~ estimated size: {}'.format(pair.node,
pair.parents,
cpt_size))
attributes = [*pair.parents, pair.node]
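# dp_joint_distribution (synthesis.tools.dp_utils, implementation not shown here) is expected to
# return the empirical joint distribution of these attributes perturbed with noise calibrated to
# local_epsilon; the commented-out call below is the non-private counterpart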
dp_joint_distribution = dp_utils.dp_joint_distribution(X[attributes], epsilon=local_epsilon)
# dp_joint_distribution = utils.joint_distribution(X[attributes])
cpt = CPT(dp_joint_distribution, conditioned_variables=[pair.node])
# todo: use custom normalization to fill missing values with uniform
cpt = utils.normalize_cpt(cpt, dropna=False)
P[pair.node] = cpt
# retain the noisy joint distribution of the (k+1)-th node to infer the distributions of its parent nodes
if idx == 0:
infer_from_distribution = Factor(dp_joint_distribution)
infer_from_distribution = infer_from_distribution.sum_out(pair.node)
# for pair in self.network_[:self.k]:
# go iteratively from node at k to root of network, sum out child nodes and get cpt.
for pair in reversed(self.network_[:self.degree_network]):
if pair.parents is not None:
attributes = [*pair.parents, pair.node]
else:
attributes = [pair.node]
cpt_size = utils.get_size_contingency_table(X[attributes])
if self.verbose >= 2:
print('Learning conditional probabilities: {} - with parents {} ~ estimated size: {}'.format(pair.node,
pair.parents,
cpt_size))
# infer_from_distribution = infer_from_distribution.sum_out(pair.node)
# conditioned_var = pair.parents[-1]
cpt = CPT(infer_from_distribution, conditioned_variables=[pair.node])
cpt = utils.normalize_cpt(cpt, dropna=False)
P[pair.node] = cpt
infer_from_distribution = infer_from_distribution.sum_out(pair.node)
self.cpt_ = P
return self
def _generate_data(self, X, n_records):
Xt = np.empty([n_records, X.shape[1]], dtype=object)
for i in range(n_records):
if self.verbose >= 1:
print('Number of records generated: {} / {}'.format(i+1, n_records), end='\r')
record = self._sample_record()
Xt[i] = list(record.values())
# np to df with original column ordering
Xs = | pd.DataFrame(Xt, columns=[c.node for c in self.network_]) | pandas.DataFrame |
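# --- Usage sketch for the PrivBayes class above (illustration only, not part of the original
# --- module; assumes the full `synthesis` package and its dependencies such as `thomas` are
# --- importable, and that `df` is a categorical pandas DataFrame with at least two columns):
# pb = PrivBayes(epsilon=1.0, verbose=0)
# pb.fit(df)                   # learns a DP Bayesian network structure and noisy CPTs
# df_synth = pb.transform(df)  # samples n_records_synth rows (or as many rows as seen in fit)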
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
s = | pd.Series() | pandas.Series |
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import train_test_split
from datetime import date
from sklearn.metrics import mean_absolute_error
import datetime as dt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
stores = pd.read_csv('stores.csv')
features = pd.read_csv('features.csv')
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
sample = pd.read_csv('sampleSubmission.csv')
train_df = train.sort_values('Date')
test_df = test.sort_values('Date')
train_df = train.merge(stores, how='left').merge(features, how='left')
print(train_df.shape)
#print(train_df.head(2))
print(" ")
print(train_df.columns)
test_df = test.merge(stores, how='left').merge(features, how='left')
print(test_df.shape)
#print(train_df.head(2))
print(" ")
print(test_df.columns)
train_df['Date'] = pd.to_datetime(train_df['Date'], format='%d-%m-%Y')
test_df['Date'] = | pd.to_datetime(test_df['Date'], format='%Y-%m-%d') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Methods to perform coverage analysis.
@author: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from typing import List, Optional
from shapely import geometry as geo
from datetime import datetime, timedelta
from skyfield.api import load, wgs84, EarthSatellite
from ..schemas.point import Point
from ..schemas.satellite import Satellite
from ..schemas.instrument import Instrument, DutyCycleScheme
from ..utils import (
compute_min_altitude,
swath_width_to_field_of_regard,
compute_max_access_time,
compute_orbit_period,
)
def collect_observations(
point: Point,
satellite: Satellite,
instrument: Instrument,
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect single satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellite: The observing satellite
:type satellite: :class:`tatc.schemas.satellite.Satellite`
:param instrument: The instrument used to make observations
:type instrument: :class:`tatc.schemas.instrument.Instrument`
:param start: The start of the mission window
:type start: :class:`datetime.datetime`
:param end: The end of the mission window
:type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: An instance of :class:`geopandas.GeoDataFrame` containing all
recorded observations
:rtype: :class:`geopandas.GeoDataFrame`
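Example (sketch; assumes valid tatc Point/Satellite/Instrument objects and timezone-aware
start/end datetimes):
obs = collect_observations(point, satellite, instrument, start, end)
obs[["point_id", "start", "epoch", "end", "access", "revisit"]]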
"""
# build a topocentric point at the designated geodetic point
topos = wgs84.latlon(point.latitude, point.longitude)
# load the timescale and define starting and ending points
ts = load.timescale()
t0 = ts.from_datetime(start)
t1 = ts.from_datetime(end)
# load the ephemerides
eph = load("de421.bsp")
# convert orbit to tle
orbit = satellite.orbit.to_tle()
# construct a satellite for propagation
sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name)
# compute the initial satellite height (altitude)
satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m
# compute the minimum altitude angle required for observation
min_altitude = compute_min_altitude(
satellite_height,
instrument.field_of_regard
if sample_distance is None
else swath_width_to_field_of_regard(satellite_height, sample_distance),
)
# compute the maximum access time to filter bad data
max_access_time = timedelta(
seconds=compute_max_access_time(satellite_height, min_altitude)
)
# TODO: consider instrument operational intervals
ops_intervals = pd.Series(
[pd.Interval(pd.Timestamp(start), pd.Timestamp(end), "both")]
)
# find the set of observation events
t, events = sat.find_events(topos, t0, t1, altitude_degrees=min_altitude)
if omit_solar:
# basic dataframe without solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
}
)
else:
# extended dataframe including solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
"sat_sunlit": pd.Series([], dtype="bool"),
"solar_alt": pd.Series([], dtype="float64"),
"solar_az": pd.Series([], dtype="float64"),
"solar_time": pd.Series([], dtype="float64"),
}
)
# define variables for stepping through the events list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# check for geocentricity
if len(events) > 0 and np.all(events == 1):
# find the satellite altitude, azimuth, and distance at t0
sat_alt, sat_az, sat_dist = (sat - topos).at(t[0]).altaz()
# if ommiting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": None,
"solar_alt": None,
"solar_az": None,
"solar_time": None
}, index=[0]
)
], ignore_index=True)
# compute the access time for the observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
for j in range(len(events)):
if events[j] == 0:
# record the rise time
t_rise = t[j].utc_datetime()
elif events[j] == 1:
# record the culmination time
t_culminate = t[j].utc_datetime()
# find the satellite altitude, azimuth, and distance
sat_alt, sat_az, sat_dist = (sat - topos).at(t[j]).altaz()
if not omit_solar or instrument.req_target_sunlit is not None:
# find the solar altitude, azimuth, and distance
solar_obs = (
(eph["earth"] + topos).at(t[j]).observe(eph["sun"]).apparent()
)
solar_alt, solar_az, solar_dist = solar_obs.altaz()
# find the local solar time
solar_time = solar_obs.hadec()[0].hours + 12
if not omit_solar or instrument.req_self_sunlit is not None:
# find whether the satellite is sunlit
sat_sunlit = sat.at(t[j]).is_sunlit(eph)
elif events[j] == 2:
# record the set time
t_set = t[j].utc_datetime()
# only record an observation if a previous rise and culminate
# events were recorded (sometimes they are out-of-order)
if t_rise is not None and t_culminate is not None:
# check if the observation meets minimum access duration,
# ground sunlit conditions, and satellite sunlit conditions
if (
instrument.min_access_time <= t_set - t_rise <= max_access_time * 2
and instrument.is_valid_observation(
eph,
ts.from_datetime(t_culminate),
sat.at(ts.from_datetime(t_culminate)),
)
and (
instrument.duty_cycle >= 1
or any(ops_intervals.apply(lambda i: t_culminate in i))
)
):
# if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": sat_sunlit,
"solar_alt": solar_alt.degrees,
"solar_az": solar_az.degrees,
"solar_time": solar_time,
}, index=[0]
)
], ignore_index=True)
# reset the variables for stepping through the event list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# compute the access time for each observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def collect_multi_observations(
point: Point,
satellites: List[Satellite],
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect multiple satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellites: The observing satellites
:type satellites: list of :class:`tatc.schemas.satellite.Satellite`
:param start: The start of the mission window
:type start: :class:`datetime.datetime`
:param end: The end of the mission window
:type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: an instance of :class:`geopandas.GeoDataFrame` containing all
recorded observations
:rtype: :class:`geopandas.GeoDataFrame`
"""
gdfs = [
collect_observations(
point, satellite, instrument, start, end, omit_solar, sample_distance
)
for constellation in satellites
for satellite in (constellation.generate_members())
for instrument in satellite.instruments
]
# merge the observations into one data frame
df = pd.concat(gdfs, ignore_index=True)
# sort the values by start datetime
df = df.sort_values("start")
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
def aggregate_observations(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Aggregate constellation observations for a geodetic point of interest.
:param gdf: The individual observations.
:type gdf: :class:`geopandas.GeoDataFrame`
:return: An instance of :class:`geopandas.GeoDataFrame` The data frame
with aggregated observations.
:rtype: :class:`geopandas.GeoDataFrame`
"""
if len(gdf.index) == 0:
empty_df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": | pd.Series([], dtype="object") | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# see license https://github.com/DerwenAI/kglab#license-and-copyright
"""
SPARQL query abstractions.
"""
import re
import typing
import pandas as pd # type: ignore # pylint: disable=E0401
import pyvis # type: ignore # pylint: disable=E0401
import rdflib # type: ignore # pylint: disable=E0401
from kglab.gpviz import GPViz
from kglab.pkg_types import RDF_Node
from kglab.util import get_gpu_count
from .base import Queryable
## pre-constructor set-up
if get_gpu_count() > 0:
import cudf # type: ignore # pylint: disable=E0401
class SparqlQueryable (Queryable):
"""
SPARQL implementation for Queryable abstract class.
"""
def __init__ (
self,
kg: typing.Any,
) -> None:
"""
Constructor.
"""
self.kg = kg
######################################################################
## SPARQL queries
def query ( # pylint: disable=W0221
self,
query: str,
*,
bindings: dict = None,
) -> typing.Iterable:
"""
Wrapper for [`rdflib.Graph.query()`](https://rdflib.readthedocs.io/en/stable/apidocs/rdflib.html?highlight=query#rdflib.Graph.query) to perform a SPARQL query on the RDF graph.
query:
text for the SPARQL query
bindings:
initial variable bindings
yields:
[`rdflib.query.ResultRow`](https://rdflib.readthedocs.io/en/stable/_modules/rdflib/query.html?highlight=ResultRow#) named tuples, to iterate through the query result set
"""
if not bindings:
bindings = {}
for row in self.kg._g.query( # pylint: disable=W0212
query,
initBindings=bindings,
):
yield row
def query_as_df ( # pylint: disable=W0221
self,
query: str,
*,
bindings: dict = None,
simplify: bool = True,
pythonify: bool = True,
) -> pd.DataFrame:
"""
Wrapper for [`rdflib.Graph.query()`](https://rdflib.readthedocs.io/en/stable/apidocs/rdflib.html?highlight=query#rdflib.Graph.query) to perform a SPARQL query on the RDF graph.
query:
text for the SPARQL query
bindings:
initial variable bindings
simplify:
convert terms in each row of the result set into a readable representation for each term, using N3 format
pythonify:
convert instances of [`rdflib.term.Literal`](https://rdflib.readthedocs.io/en/stable/apidocs/rdflib.html?highlight=Literal#rdflib.term.Identifier) to their Python literal representation
returns:
the query result set represented as a [`pandas.DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html); uses the [RAPIDS `cuDF` library](https://docs.rapids.ai/api/cudf/stable/) if GPUs are enabled
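Example (sketch; assumes `kg` is an already-populated kglab KnowledgeGraph):
df = SparqlQueryable(kg).query_as_df("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 10")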
"""
if not bindings:
bindings = {}
row_iter = self.kg._g.query(query, initBindings=bindings) # pylint: disable=W0212
if simplify:
rows_list = [ self.n3fy_row(r.asdict(), pythonify=pythonify) for r in row_iter ]
else:
rows_list = [ r.asdict() for r in row_iter ]
if self.kg.use_gpus:
df = cudf.DataFrame(rows_list)
else:
df = | pd.DataFrame(rows_list) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
from utils.aioLogger import aioLogger
from typing import List
from config.aioConfig import CESDataConfig
from utils.aioError import aioPreprocessError
import re
import matplotlib.pyplot as plt
class CESCsvReader:
"""read data from csv file df, save it in #* self.df"""
def __init__(self, csv_file: Path, use_offset: bool = True) -> None:
self.use_offset = use_offset
self.csv_file = csv_file
self.logger = aioLogger(self.__class__.__name__).logger
self.logger.info(f"{self.csv_file} loaded for processing...")
self.id = self._get_id_from_file_name()
self._csv_file_correction()
self.read_csv_to_df()
# self.show_plots()
def show_plots(self):
fig, ax = plt.subplots(nrows=2, figsize=(12, 6))
self.df[self.offset_sensor_columns].plot(ax=ax[0])
self.df[["stability", "pssc"]].plot(ax=ax[1])
plt.show()
def _convert_df_to_numeric(self, df: pd.DataFrame) -> pd.DataFrame:
return df.apply(pd.to_numeric, errors="ignore")
def read_csv_to_df(self) -> None:
df = pd.read_csv(self.csv_file, skiprows=CESDataConfig.SKIP_ROWS, sep=";")
df = self._drop_according_to_column_and_row(df)
offset_column_name: str = "offsets"
df_info_columns = df.columns.difference(CESDataConfig.SENSOR_COLUMNS)
df_info_columns = df_info_columns.difference([offset_column_name])
df_offset = self.unpack_data_series(df[offset_column_name])
df_offset = self.set_num_of_row_to_zero_from_start(df_offset)
df_offset = self._convert_df_to_numeric(df_offset)
self.offset_sensor_columns = df_offset.columns
df_info = df[df_info_columns].copy()
if self.use_offset:
self.df = pd.concat([df_info, df_offset], axis=1)
else:
df_sensor = self._convert_df_to_numeric(df[CESDataConfig.SENSOR_COLUMNS])
if self._validate_sensor_df(df_sensor):
self.df = pd.concat([df_info, df_sensor], axis=1)
else:
raise aioPreprocessError(
f"preprocess for csv {self.csv_file} NOK ! please double check"
)
def _drop_according_to_column_and_row(
self, df: pd.DataFrame, thresh_percentage: float = 0.5
):
df.dropna(
thresh=len(df) * thresh_percentage, axis=1, inplace=True
) # remove empty columns
df.dropna(
thresh=len(df.columns) * thresh_percentage, axis=0, how="all", inplace=True
) # remove n/a rows
return df
def unpack_data_series(
self, ds: pd.Series, new_column_name_prefix: str = "offset_C"
):
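# Explodes a Series whose cells hold per-sensor offset lists into a long frame and pivots it into
# wide columns offset_C1..offset_Cn, one per sensor position (illustrative: a cell holding three
# offsets yields columns offset_C1, offset_C2, offset_C3; actual cell parsing is done by
# convert_list_str_to_list)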
ds = ds.apply(self.convert_list_str_to_list)
item_length = len(ds.iloc[0])
new_ds = ds.explode()
df = pd.DataFrame(data=new_ds).reset_index()
df.columns = ["idx_orig", "value"]
def add_sensor_column(row):
number_to_append = row.name % item_length
sensor_offset_name = f"{new_column_name_prefix}{number_to_append+1}"
return sensor_offset_name
df["sensor"] = df.apply(add_sensor_column, axis=1)
pivot = | pd.pivot(data=df, index="idx_orig", columns="sensor", values="value") | pandas.pivot |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-07-05 16:53:19
# @Author : mayongze (<EMAIL>)
# @Link : https://github.com/mayongze
# @Version : 1.1.1.20170705
import os
import URPCrawlerDAO
import URPMain
import DBHelper
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from pylab import *
# without setting a font, plt labels cannot display Chinese characters
mpl.rcParams['font.sans-serif'] = ['SimHei']
def flunk_pie(cname,sex = None):
strSql = "SELECT s.s_sex,sum(CASE WHEN g.grade >= 60 THEN 1 ELSE 0 END) AS 及格人数,sum( CASE WHEN g.grade < 60 THEN 1 ELSE 0 END ) AS 不及格人数,sum( CASE WHEN 1=1 THEN 1 ELSE 0 END ) AS 没有成绩人数 FROM grade as g ,students as s,course WHERE g.semester = '2016-2017-2' AND s.sno = g.sno AND course.c_name = '{0}' AND g.semester = '2016-2017-2' AND course.c_no = g.cno GROUP BY s_sex".format(cname)
if sex:
title = "2016-2017 春季学期 %s(%s) 成绩统计" %(cname,sex)
else:
sex = 0
title = "2016-2017 春季学期 %s 成绩统计" %cname
dic = {}
resultList = dbHepler.fetchall(strSql)
for item in resultList:
dic[item[0]] = item[1:]
print(dic)
if '男' not in dic:
dic['男']=[0,0,0]
if '女' not in dic:
dic['女']=[0,0,0]
X = [dic['男'][0],dic['女'][0],dic['男'][1],dic['女'][1]]
labels=['(男)及格-%s人'%X[0],'(女)及格-%s人'%X[1],'(男)不及格-%s人'%X[2],'(女)不及格-%s人'%X[3]]
expl = [0,0,0.01,0.01] # explode: offset the failing-grade wedges slightly from the center
colors = ["blue","red","coral","green"] #设置颜色(循环显示)
#fig = plt.figure()
plt.figure(1, figsize=(7,6)) # square figure
plt.title(title)
plt.pie(X,labels=labels,explode=expl, colors=colors,autopct='%1.2f%%',pctdistance=0.8, shadow=True) # draw the pie chart (data, matching labels, percentages shown with two decimal places)
plt.show()
def flunkCourseRank_barh():
strSql = "SELECT course.c_name,count(g.grade) FROM grade AS g,course WHERE g.semester = '2016-2017-2' AND g.flunkcount > 0 AND g.cno = course.c_no GROUP BY course.c_name ORDER BY count(g.cno) DESC"
tmp = dbHepler.fetchall(strSql)
courseList,grade = [],[]
for item in tmp:
#if int(item[1]) > 5:
courseList.append(item[0])
grade.append(item[1])
label = courseList
x = grade
idx = np.arange(len(x))
color = cm.jet(np.array(x)/max(x))
plt.barh(idx, x, color=color)
#plt.yticks(idx+1,label)
plt.yticks(idx,label,fontsize=5)
plt.grid(axis='x')
plt.xlabel('不及格人数')
plt.ylabel('科目')
plt.title('2016-2017 春季学期 不及格科目')
plt.show()
def flunkMajorStatistics(majorKey):
majorKey = 'C14'
strSql = "SELECT s.s_major,sum(CASE WHEN g.isFlunk = 1 THEN 1 ELSE 0 END) as 挂科人数,sum(CASE WHEN g.isFlunk = 0 THEN 1 ELSE 0 END) as 没挂过科人数,sum(CASE WHEN 1 = 1 THEN 1 ELSE 0 END) as 总人数 FROM (SELECT g.sno,( CASE WHEN sum(CASE WHEN g.grade < 60 and g.grade !='' THEN 1 ELSE 0 END ) > 0 THEN 1 ELSE 0 END ) AS isFlunk FROM grade AS g WHERE g.semester = '2016-2017-2' GROUP BY g.sno ORDER BY g.sno ASC ) AS g, students AS s WHERE s.sno = g.sno AND s.s_class LIKE '%{0}%' GROUP BY s_major".format(majorKey)
tmp = dbHepler.fetchall(strSql)
C14major={}
for item in tmp:
C14major[item[0].rstrip("(城市学院)")] = int(item[1]) / (int(item[2]) + int(item[1]))
majorKey = 'C15'
strSql = "SELECT s.s_major,sum(CASE WHEN g.isFlunk = 1 THEN 1 ELSE 0 END) as 挂科人数,sum(CASE WHEN g.isFlunk = 0 THEN 1 ELSE 0 END) as 没挂过科人数,sum(CASE WHEN 1 = 1 THEN 1 ELSE 0 END) as 总人数 FROM (SELECT g.sno,( CASE WHEN sum(CASE WHEN g.grade < 60 and g.grade !='' THEN 1 ELSE 0 END ) > 0 THEN 1 ELSE 0 END ) AS isFlunk FROM grade AS g WHERE g.semester = '2016-2017-2' GROUP BY g.sno ORDER BY g.sno ASC ) AS g, students AS s WHERE s.sno = g.sno AND s.s_class LIKE '%{0}%' GROUP BY s_major".format(majorKey)
tmp = dbHepler.fetchall(strSql)
C15major={}
for item in tmp:
C15major[item[0].rstrip("(城市学院)")] = int(item[1]) / (int(item[2]) + int(item[1]))
majorKey = 'C16'
strSql = "SELECT s.s_major,sum(CASE WHEN g.isFlunk = 1 THEN 1 ELSE 0 END) as 挂科人数,sum(CASE WHEN g.isFlunk = 0 THEN 1 ELSE 0 END) as 没挂过科人数,sum(CASE WHEN 1 = 1 THEN 1 ELSE 0 END) as 总人数 FROM (SELECT g.sno,( CASE WHEN sum(CASE WHEN g.grade < 60 and g.grade !='' THEN 1 ELSE 0 END ) > 0 THEN 1 ELSE 0 END ) AS isFlunk FROM grade AS g WHERE g.semester = '2016-2017-2' GROUP BY g.sno ORDER BY g.sno ASC ) AS g, students AS s WHERE s.sno = g.sno AND s.s_class LIKE '%{0}%' GROUP BY s_major".format(majorKey)
tmp = dbHepler.fetchall(strSql)
C16major={}
for item in tmp:
C16major[item[0].rstrip("(城市学院)")] = int(item[1]) / (int(item[2]) + int(item[1]))
s14 = pd.Series(C14major)
s15 = pd.Series(C15major)
s16 = pd.Series(C16major)
df = | pd.DataFrame([s14,s15,s16]) | pandas.DataFrame |
"""
This creates Figure 4, fitting of multivalent binding model to Gc Data.
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from scipy.optimize import minimize
from copy import copy
from .figureCommon import subplotLabel, getSetup
from ..PCA import nllsq_EC50
from ..MBmodel import runFullModel, cytBindingModel
path_here = os.path.dirname(os.path.dirname(__file__))
def makeFigure():
"""Get a list of the axis objects and create a figure"""
ax, f = getSetup((10, 7), (3, 4), multz={9: 1})
axlabel = copy(ax)
del axlabel[1]
del axlabel[1]
del axlabel[1]
del axlabel[2]
subplotLabel(axlabel)
ax[0].axis("off")
ax[1].axis("off")
ax[2].axis("off")
ax[3].axis("off")
ax[5].axis("off")
minSolved = minimize(runFullModel, x0=-12.0, args=([0.5, 1], False, True))
print(minSolved)
modelDF = runFullModel(time=[0.5, 1.0], saveDict=False, singleCell=True) # Change to save
print(r2_score(modelDF.Experimental.values, modelDF.Predicted.values))
Pred_Exp_plot(ax[4], modelDF)
legend = ax[4].get_legend()
labels = (x.get_text() for x in legend.get_texts())
ax[5].legend(legend.legendHandles, labels, loc="upper left", prop={"size": 8.5}) # use this to place universal legend later
ax[4].get_legend().remove()
R2_Plot_Cells(ax[6], modelDF)
R2_Plot_Ligs(ax[7], modelDF)
MonVsBivalent(ax[8], modelDF, ligs=True)
R2_Plot_Conc(ax[9], modelDF)
timePlot(ax[10])
return f
def Pred_Exp_plot(ax, df):
"""Plots all experimental vs. Predicted Values"""
sns.scatterplot(x="Experimental", y="Predicted", hue="Cell", style="Valency", data=df, ax=ax, alpha=0.35)
ax.set(xlim=(0, 60000), ylim=(0, 60000))
def R2_Plot_Cells(ax, df):
"""Plots all accuracies per cell"""
accDF = pd.DataFrame(columns={"Cell Type", "Valency", "Accuracy"})
cellTypes = ['Treg $IL2Ra^{hi}$', 'Treg', 'Treg $IL2Ra^{lo}$', 'Thelper $IL2Ra^{hi}$', 'Thelper', 'Thelper $IL2Ra^{lo}$', 'CD8', 'NK']
for cell in cellTypes:
for val in df.Valency.unique():
preds = df.loc[(df.Cell == cell) & (df.Valency == val)].Predicted.values
exps = df.loc[(df.Cell == cell) & (df.Valency == val)].Experimental.values
r2 = r2_score(exps, preds)
accDF = accDF.append(pd.DataFrame({"Cell Type": [cell], "Valency": [val], "Accuracy": [r2]}))
sns.barplot(x="Cell Type", y="Accuracy", hue="Valency", data=accDF, ax=ax)
ax.set(ylim=(0, 1), ylabel=r"Accuracy ($R^2$)")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha="right")
def R2_Plot_Ligs(ax, df):
"""Plots all accuracies per ligand"""
accDF = pd.DataFrame(columns={"Ligand", "Valency", "Accuracy"})
for ligand in df.Ligand.unique():
for val in df.loc[df.Ligand == ligand].Valency.unique():
preds = df.loc[(df.Ligand == ligand) & (df.Valency == val)].Predicted.values
exps = df.loc[(df.Ligand == ligand) & (df.Valency == val)].Experimental.values
r2 = r2_score(exps, preds)
if val == 1:
accDF = accDF.append(pd.DataFrame({"Ligand": [ligand + " (Mono)"], "Valency": [val], "Accuracy": [r2]}))
else:
accDF = accDF.append(pd.DataFrame({"Ligand": [ligand + " (Biv)"], "Valency": [val], "Accuracy": [r2]}))
sns.barplot(x="Ligand", y="Accuracy", data=accDF, ax=ax)
ax.set(ylim=(0, 1), ylabel=r"Accuracy ($R^2$)")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha="right")
def R2_Plot_Conc(ax, df):
"""Plots all accuracies per concentration"""
accDF = | pd.DataFrame(columns={"Concentration", "Valency", "Accuracy"}) | pandas.DataFrame |
import pytest
import pandas as pd
from xml.etree import ElementTree as ET
from src import *
from src.holiday import *
error_holiday_res = '''<OpenAPI_ServiceResponse>
<cmmMsgHeader>
<returnCode>500</returnCode>
<errMsg>게이트웨이 내부 서비스 오류</errMsg>
</cmmMsgHeader>
</OpenAPI_ServiceResponse>'''
right_holiday_res = '''
<response><header><resultCode>00</resultCode><resultMsg>NORMAL SERVICE.</resultMsg></header><body><items><item><dateKind>01</dateKind><dateName>1월1일</dateName><isHoliday>Y</isHoliday><locdate>20200101</locdate><seq>1</seq></item><item><dateKind>01</dateKind><dateName>설날</dateName><isHoliday>Y</isHoliday><locdate>20200124</locdate><seq>1</seq></item><item><dateKind>01</dateKind><dateName>설날</dateName><isHoliday>Y</isHoliday><locdate>20200125</locdate><seq>1</seq></item><item><dateKind>01</dateKind><dateName>설날</dateName><isHoliday>Y</isHoliday><locdate>20200126</locdate><seq>1</seq></item><item><dateKind>01</dateKind><dateName>설날</dateName><isHoliday>Y</isHoliday><locdate>20200127</locdate><seq>1</seq></item></items><numOfRows>100</numOfRows><pageNo>1</pageNo><totalCount>5</totalCount></body></response>'''
right_null_res = '''<response><header><resultCode>00</resultCode><resultMsg>NORMAL SERVICE.</resultMsg></header><body><items /><numOfRows>100</numOfRows><pageNo>1</pageNo><totalCount>0</totalCount></body></response>'''
def test_get_time():
dt = get_time()
assert isinstance(dt.year, int)
assert isinstance(dt.month, int)
def test_get_response_status_code():
err_root = ET.fromstring(error_holiday_res)
assert get_response_status_code(err_root) == '500'
right_root = ET.fromstring(right_holiday_res)
assert get_response_status_code(right_root) == '00'
def test_get_holiday_info():
assert all([config.PUBLIC_DATA_DOMAIN, config.PUBLIC_DATA_HOLIDAY_URI,
config.PUBLIC_DATA_PORTAL_KEY, config.DEFAULT_NUM_PAGE])
assert get_holiday_info(2020, 1, config.PUBLIC_DATA_HOLIDAY_HOLIDAY_OP).status_code == 200
def test_parse_response():
holiday_df = | pd.DataFrame(columns=['date', 'name', 'type', 'is_holiday']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from sys import argv
dates=("2020-04-01", "2020-04-08", "2020-04-15", "2020-04-22",
"2020-04-29" ,"2020-05-06", "2020-05-13","2020-05-20", "2020-05-27", "2020-06-03",
"2020-06-10", "2020-06-17", "2020-06-24", "2020-07-01", "2020-07-08",
"2020-07-15", "2020-07-22", "2020-07-29", "2020-08-05", "2020-08-12",
"2020-08-19", "2020-08-26", "2020-09-02", "2020-09-16", "2020-09-23",
"2020-09-30", "2020-10-07", "2020-10-14", "2020-10-21")
days_list=(
60, 67, 74, 81, 88, 95, 102, 109, 116, 123, 130,
137, 144, 151, 158, 165, 172,179,186,193,200,207,
214, #skip 221, data missing 2020-09-09
228,235, 242, 249,256,263)
df = pd.DataFrame()
for i,date in enumerate(dates):
states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT']
n_sims = int(argv[1])
start_date = '2020-03-01'
days = days_list[i]
forecast_type = "R_L" #default None
forecast_date = date #format should be '%Y-%m-%d'
end_date = pd.to_datetime(start_date,format='%Y-%m-%d') + timedelta(days=days-1)
sims_dict={
'state': [],
'onset date':[],
}
for n in range(n_sims):
if n <2000:
sims_dict['sim'+str(n)] = []
print("forecast up to: {}".format(end_date))
date_col = [day.strftime('%Y-%m-%d') for day in pd.date_range(start_date,end_date)]
for i,state in enumerate(states):
df_results = pd.read_parquet("./results/"+state+start_date+"sim_"+forecast_type+str(n_sims)+"days_"+str(days)+".parquet",columns=date_col)
df_local = df_results.loc['total_inci_obs']
sims_dict['onset date'].extend(date_col)
sims_dict['state'].extend([state]*len(date_col))
n=0
print(state)
for index, row in df_local.iterrows():
if n==2000:
break
#if index>=2000:
# continue
#else:
if np.all(row.isna()):
continue
else:
sims_dict['sim'+str(n)].extend(row.values)
n +=1
print(n)
while n < 2000:
print("Resampling")
for index, row in df_local.iterrows():
if n==2000:
break
if np.all(row.isna()):
continue
else:
sims_dict['sim'+str(n)].extend(row.values)
n +=1
df_single = pd.DataFrame.from_dict(sims_dict)
df_single["data date"] = forecast_date
key ='local_obs'
df_single[df_single.select_dtypes(float).columns] = df_single.select_dtypes(float).astype(int)
#df.to_csv('./analysis/UoA_'+forecast_date+str(key)+'.csv')
df = df.append(df_single)
df['data date'] = | pd.to_datetime(df['data date']) | pandas.to_datetime |
import argparse, time,re, os,csv,functools, signal,sys, json
import logging,datetime, threading,concurrent.futures
from logging import handlers
from time import gmtime, strftime
from urllib.parse import urlparse
from os.path import splitext
import pandas as pd
import numpy as np
# Local Imports
from Lib.GCS.wrapper import Wrapper
from Lib.GCS.origin_settings import Origin_Settings
from Lib.GCS.log import ConsoleLogging
def ArgsParser():
parser = argparse.ArgumentParser(description='',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--switchKey', type=str, help='Account_ID to Query for multi account management (switch key)',
required=False)
parser.add_argument('--verbose', action='store_true', help='Turn on Verbose Mode.')
parser.add_argument('--section', type=str, help='EdgeRc section to be used.',
required=False,default='papi')
parser.add_argument('--type', type=str.lower, choices=['as','os','har'], help='Type of report to be done [account-summary,offload,http-archive]',
required=False,default='as')
parser.add_argument('--cpcodes', nargs='+', type=int, help='List of cpcodes to query. Used only in Offload Analysis.',
required=False)
parser.add_argument('--start', type=str, help='Report start date in format YYYY-MM-DD; if not provided, default is start of last month. Used only in Offload Analysis.',
required=False)
parser.add_argument('--end', type=str, help='Report end date in format YYYY-MM-DD; if not provided, default is start of last month. Used only in Offload Analysis.',
required=False)
parser.add_argument('--domain', type=str, help='Main Domain to be reviewed in HAR, usually it will be the same as the page view URL. Used only in Har Analysis.',
required=False)
parser.add_argument('--first-parties', nargs='+', type=str, help='List of first party domains --domain will be appended to this list. If only one domain is in quesion, --domain is all you need. Used only in Har Analysis.',
required=False)
parser.add_argument('--file', type=str, help='File location to be analysed. Used only in Har Analysis.',
required=False)
parser.add_argument('--groupby', type=str.lower, choices=['ext','url'], help='Used only in Offload Analysis. ',
required=False,default='ext')
args = vars(parser.parse_args())
return parser, args
class Aggregator:
def __init__(self,console,args,section_name):
self.args = None
self.parser = None
self.maxThreads = 5
self.outputdir = "None"
self.verbose = args['verbose']
self.log = console.log
self.wrapper = Wrapper(self.log,section_name)
self.accountId = None
self.wrapper.account = None
self.dfs = {}
self.startDate = None
self.endDate = None
self.accountName = None
self.productMap = None
self.reportType = "as"
self.groupby = args['groupby']
signal.signal(signal.SIGINT, self.signal_handler)
def signal_handler(self,sig, frame):
self.clear_cache()
self.log.critical("Forced Exit... Bye!..")
sys.exit(0)
def _validateDate(self, date):
"""Returns False if input date does not follow YYYY-MM-DD.
Keyword arguments:
date
Return type:
Boolean
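Example: _validateDate("2024-01-31") -> True ; _validateDate("31/01/2024") -> False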
"""
try:
datetime.datetime.strptime(str(date), '%Y-%m-%d')
return True
except ValueError:
return False
# raise ValueError("Incorrect data format, should be YYYY-MM-DD")
def createFolder(self,directoryName):
"""Creates directores to store outputs, takes the directory name. This value most of the time will be the
account Name.
Keyword arguments:
directoryName
Return type:
None
"""
self.outputdir = 'Reports'
# Create Audit Folder
try:
os.stat(self.outputdir)
except:
os.mkdir(self.outputdir)
self.outputdir = self.outputdir+'/'+directoryName.replace(' ','_')+'/'
# Create Account Folder under Audit
try:
os.stat(self.outputdir)
except:
os.mkdir(self.outputdir)
self.outputdir = self.outputdir + str(datetime.datetime.utcfromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')).replace(' ','_').replace(':','.') + '-'
def _getProducts(self, contractId):
"""
Return the products within a contract as a comma-separated string.
Keyword arguments:
contractId
Return type:
str (comma-separated product names), or an empty list if none are found
"""
products = self.wrapper.getProducts(contractId)
productNames = []
if 'products' in products:
for product in products['products']['items']:
productNames.append(product['productName'])
new_row = {
'Product_ID':product['productId'],
'Product_Name':product['productName']
}
# join all product names; return an empty list when the contract has none
if len(productNames) >= 1:
return ",".join(productNames)
else:
return []
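# Illustrative note (product names are examples only): a contract holding
# 'Ion Standard' and 'Adaptive Media Delivery' yields the string
# 'Ion Standard,Adaptive Media Delivery'; a contract without products yields [].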
def getAccountDetails(self):
"""
Gets Account Name from ID, also saves the groups for later functions.
Keyword arguments:
None
Return type:
Boolean, but also stores dataframe in self.dfs
"""
if args['switchKey']:
self.accountId = args['switchKey']
self.wrapper.account = args['switchKey']
self.groups = self.wrapper.getGroups()
if 'incidentId' in self.groups:
self.log.error('Account Not Found or insufficient privileges to complete the operation. Try "--section sectionName" to change the edgerc section.')
return False
if not args['switchKey']:
self.accountId = self.groups['accountId'][4:]
# self.wrapper.account = self.groups['accountId'][4:]
self.log.info("Account ID: {0}".format(self.accountId))
self.accountName = self.groups['accountName']
self.log.info("Account Name: {0}".format(self.accountName))
self.createFolder(self.groups['accountName'])
columns = ["Account_Id", "Account_Name"]
df_acc= pd.DataFrame(columns=columns)
new_row = {
'Account_Id':self.groups['accountId'][4:],
'Account_Name':self.groups['accountName']
}
df_acc=df_acc.append(new_row, ignore_index=True)
self.dfs['account'] = df_acc
self._readProductMap()
return True
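# Hedged example of the dataframe stored above (values are illustrative):
#
#   self.dfs['account']
#      Account_Id   Account_Name
#   0  1-ABC123     Example Account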
def accountSummary(self):
"""
Main function for AS report type, orchestrates function execution.
Keyword arguments:
None
Return type:
None
"""
self.log.info("Creating Contract summary table")
self.printContracts()
self.log.info("Creating Groups summary table")
self.printGroups()
self.log.info("Creating CP_Code summary table")
self.printCPcodes()
self.log.info("Creating edge host name summary table")
self.printEdgeHostNames()
self.log.info("Creating Application Security tables")
self.printAppSec()
if args['verbose']:
self.log.info("Creating Property summary. (It may take a while) ")
else:
self.log.info("Creating Property summary. (It may take a while, view more with '--verbose') ")
self.printPropertiesDetails()
self.log.info("Creating Certificate Table.")
self.getEnrollments()
self.log.info("Creating Summary by Hostname")
self.presentation()
self.log.info("Writing Files...")
self._writeFiles()
self.log.info("Report successfull, output can be found here:'Reports/{0}/'".format(self.accountName))
def printContracts(self):
"""
Gets Contracts within Account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
self.log.info("Creating Contracts table.")
columns = ["Contract_ID" , "Contract_Name", "Products"]
df_ctr= pd.DataFrame(columns=columns)
contracts = self.wrapper.getContractNames()
for contract in contracts['contracts']['items']:
products = self._getProducts(contract['contractId'])
new_row = {
'Contract_ID': contract['contractId'][4:],
'Contract_Name':contract['contractTypeName'],
'Products':products
}
df_ctr=df_ctr.append(new_row, ignore_index=True)
self.dfs['contracts'] = df_ctr
def printGroups(self):
"""
Gets Groups in account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
self.log.info("Creating Groups table.")
columns = ["Group_ID", "Group_Name","Parent"]
df_grp = pd.DataFrame(columns=columns)
for group in self.groups['groups']['items']:
grp_id = int(group['groupId'][4:])
grp_name = group['groupName']
grp_parent = None
if 'parentGroupId' in group:
grp_parent = int(group['parentGroupId'][4:])
new_row = {
'Group_ID': grp_id,
'Group_Name':grp_name,
'Parent':grp_parent
}
df_grp=df_grp.append(new_row, ignore_index=True)
self.dfs['groups'] = df_grp
def printEdgeHostNames(self):
"""
Gets EdgeHostnames in account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
lst_eh = []
columns = ["Group_ID", "Contract_ID", "Edge_Host_ID", "Edge_Host_Name", "Edge_Host_Domain_Suffix", "Secure", "IPVersion","Product_ID","Map","Slot"]
df_eh = pd.DataFrame(columns=columns)
contracts = []
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for group in self.groups['groups']['items']:
groupId = group['groupId']
executor.submit(self.GroupsWorker,'edgehost',group,lst_eh,contracts)
df_eh= df_eh.append(lst_eh, ignore_index=True)
self.dfs['edgehostnames'] = df_eh
def PropertyWorker(self,list_grp_configs,list_grp_behaviors,config_details):
"""
Gets property details for a single configuration.
Keyword arguments:
list_grp_configs
list_grp_behaviors
config_details
Return type:
None, but appends results to list_grp_configs and list_grp_behaviors
"""
args = ['Prod_Version','Staging_Version', 'Latest_Version']
if 'propertyName' in config_details:
self.log.debug("Importing data for property: '{0}'".format(config_details['propertyName']))
# Assign values to variables here for readability and will be used in rest of function.
groupId = config_details['groupId']
contractId = config_details['contractId']
propertyId = config_details['propertyId']
productionVersion = config_details['productionVersion']
stgVersion = config_details['stagingVersion']
latestVersion = config_details['latestVersion']
productId = None
new_row = {
'Config_Name': config_details['propertyName'],
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'Property_ID': int(propertyId[4:]),
'Prod_Version': productionVersion,
'Staging_Version': stgVersion,
'Latest_Version': latestVersion,
'Product': productId
}
if args:
for config_env in args:
config_version = new_row[config_env]
if config_version is not None:
get_version = self.wrapper.getVersionDetails(propertyId,groupId,contractId,str(config_version))
if 'versions' in get_version:
for item in get_version['versions']['items']:
new_row[config_env + '_Updated_User'] = item['updatedByUser']
new_row[config_env + '_Updated_Time'] = item['updatedDate']
if productId == None:
productId = item['productId'][4:]
else:
new_row[config_env + '_Updated_User'] = 'No_' + config_env
new_row[config_env + '_Updated_Time'] = 'No_' + config_env
new_row['Product'] = productId
version = new_row['Latest_Version']
if ('Prod_Version' in new_row) and (new_row['Prod_Version'] is not None):
version = new_row['Prod_Version']
else:
if ('Staging_Version' in new_row) and (new_row['Staging_Version'] is not None):
version = new_row['Staging_Version']
new_row['Hostnames'] = self.getPropertyHostDetails(new_row['Group_ID'],new_row['Contract_ID'],new_row['Property_ID'], str(version))
new_row['Origins'] = self.getPropertyOriginDetails(new_row['Group_ID'],new_row['Contract_ID'],new_row['Property_ID'], str(version))
new_row['Behaviors'] = self.getBehaviorDetails()
new_row['CP_Codes'] = '['+self.getCPCodeDetails()+']'
property_behaviors = new_row['Behaviors']
list_grp_configs.append(new_row)
if productionVersion is not None:
propertyVersion = productionVersion
elif stgVersion is not None:
propertyVersion = stgVersion
else :
propertyVersion = latestVersion
available_behaviors = self.wrapper.getavailableBehavior(propertyId, str(propertyVersion),contractId, groupId)
if 'behaviors' in available_behaviors:
for b in available_behaviors['behaviors']['items']:
enabled = False
if b['name'] in property_behaviors:
enabled = True
new_row = {
'Config_Name': config_details['propertyName'],
'Behaviors': b['name'],
'Enabled': enabled
}
list_grp_behaviors.append(new_row)
return
def GroupsWorker(self, workType,group,main_list=None,second_list=None):
"""
Thread worker for the property, CP code and edge hostname functions; needed because of the high number of
groups per account.
Keyword arguments:
workType <= type of work to execute ['properties', 'cpcodes', 'edgehost']
group <= dict describing a single account group
main_list <= list passed down by the main thread to append results to
second_list <= secondary list passed down by the main thread to append results to
Return type:
None
"""
groupId = group['groupId']
if 'contractIds' in group:
for contractId in group['contractIds']:
if workType == 'properties':
location_result = self.wrapper.getProperties(groupId, contractId)
if 'properties' in location_result:
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for config_details in location_result['properties']['items']:
executor.submit(self.PropertyWorker,main_list,second_list,config_details)
elif workType == 'cpcodes':
cpcodes = self.wrapper.getCPCodes(groupId, contractId)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for cp in cpcodes['cpcodes']['items']:
products = []
for product in cp['productIds']:
products.append(product[4:])
new_row = {
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'CP_Code_ID': int(cp['cpcodeId'][4:]),
'CP_Code_Name': cp['cpcodeName'],
'CP_Code_Products': "|".join(products)
}
if new_row not in main_list:
self.log.debug("Fetched data for CPcode: '{0}'".format(cp['cpcodeId'][4:]))
main_list.append(new_row)
elif workType == 'edgehost':
if 'contractIds' in group:
for contractId in group['contractIds']:
if contractId in second_list:
break
second_list.append(contractId)
edgeHostNames = self.wrapper.getEdgeHostNames(groupId, contractId,'hapi')
for edgeHostName in edgeHostNames['edgeHostnames']:
slot = None
if 'slotNumber' in edgeHostName:
slot = edgeHostName['slotNumber']
productID = None
if 'productId' in edgeHostName:
productID = edgeHostName['productId']
IPv = None
if 'ipVersionBehavior' in edgeHostName:
IPv = edgeHostName['ipVersionBehavior']
eMap = None
if 'map' in edgeHostName:
eMap = edgeHostName['map']
new_row = {
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'Edge_Host_ID': edgeHostName['edgeHostnameId'],
'Edge_Host_Name': edgeHostName['recordName']+'.'+edgeHostName['dnsZone'],
"Edge_Host_Domain_Suffix":edgeHostName['dnsZone'],
"Secure":edgeHostName['securityType'],
"IPVersion":IPv,
"Product_ID":productID,
"Map":eMap,
"Slot":slot
}
main_list.append(new_row)
self.log.debug("Fetched configs for group: '{0}'".format(groupId[4:]))
return None
def printCPcodes(self):
"""
Orchestrates multithreading by using the GroupsWorker function to populate CP code data.
Keyword arguments:
None
Return type:
None
"""
lst_cpcodes = []
columns = ["Group_ID", "Contract_ID", "CP_Code_ID", "CP_Code_Name", "CP_Code_Products"]
df_cpcodes = pd.DataFrame(columns=columns)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for group in self.groups['groups']['items']:
groupId = group['groupId']
executor.submit(self.GroupsWorker,'cpcodes',group,lst_cpcodes)
df_cpcodes= df_cpcodes.append(lst_cpcodes, ignore_index=True)
self.dfs['cpcodes'] = df_cpcodes
def printPropertiesDetails(self, *args):
"""
Orchestrates multithreading by using the GroupsWorker function to populate property data.
Return type:
None
"""
self.log.debug('Start time is {0}'.format(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
self.log.debug('generating config data.....')
columns = [
"Config_Name",
"Group_ID",
"Contract_ID",
"Property_ID",
"Prod_Version",
"Staging_Version",
"Latest_Version",
"Product",
"Prod_Version_Updated_User",
"Prod_Version_Updated_Time",
"Staging_Version_Updated_User",
"Staging_Version_Updated_Time",
"Latest_Version_Updated_User",
"Latest_Version_Updated_Time",
"Hostnames",
"Origins",
"Behaviors",
"CP_Codes"
]
list_properties = []
list_behavior = []
df_property = pd.DataFrame(columns=columns)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for group in self.groups['groups']['items']:
executor.submit(self.GroupsWorker,'properties',group,list_properties,list_behavior)
df_property= df_property.append(list_properties, ignore_index=True)
tmp = df_property[ ['Config_Name' ,
'Property_ID',
"Group_ID",
"Contract_ID",
"Product" ,
"Prod_Version",
"Prod_Version_Updated_User",
"Prod_Version_Updated_Time",
"Latest_Version",
"Latest_Version_Updated_User",
"Latest_Version_Updated_Time",
"Staging_Version" ,
"Staging_Version_Updated_User" ,
"Staging_Version_Updated_Time",
"Behaviors",
"CP_Codes"
]]
self.log.debug('properties.csv generated')
self.dfs['properties']=tmp
columns = ["Config_Name", "Behaviors", "Enabled"]
df_behaviors = pd.DataFrame(columns=columns)
df_behaviors= df_behaviors.append(list_behavior, ignore_index=True)
self.dfs['propertiesBehaviors']=df_behaviors
self.log.debug('properties_behaviors.csv generated')
self.log.debug('Now fetching origin details...')
columns = ["Config_Name","Property_ID", "Group_ID", "Contract_ID","Origin_Host_Name", "Origin_Type"]
df_origins = pd.DataFrame(columns=columns)
for index, row in df_property.iterrows():
for o in row['Origins']:
new_row = {
'Config_Name':row['Config_Name'],
'Property_ID':row['Property_ID'],
'Group_ID':row['Group_ID'],
'Contract_ID':row['Contract_ID'],
'Origin_Host_Name':o['hostname'],
'Origin_Type':o['originType']
}
df_origins = df_origins.append(new_row, ignore_index=True)
self.dfs['origins'] = df_origins
self.log.debug('origins.csv generated')
self.log.debug('Fetching Origin details is now complete')
self.printPropertyHostNames(df_property)
return
@functools.lru_cache()
def _resource_path(self,grp_id, grp_path=None):
"""
Builds a directory-like path of groups to visualize where a resource sits in the account.
Keyword arguments:
grp_id
grp_path
Return type:
grp_path <= Resource Path within Account
"""
grp_id = int(grp_id)
grp_parent = self.groups[self.groups['Group_ID']== grp_id]['Parent'].item()
if grp_path == None:
grp_path = self.groups[self.groups['Group_ID']== grp_id]['Group_Name'].item()
else:
grp_path = "{0} > {1}".format(self.groups[self.groups['Group_ID']== grp_id]['Group_Name'].item(),grp_path)
if grp_parent != "None" and grp_parent != None and not np.isnan(grp_parent):
grp_path = self._resource_path(grp_parent,grp_path)
return grp_path
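# Hedged example (group names are illustrative): for a group whose parent chain is
# 'Example Root' -> 'Web Team', _resource_path returns
#   'Example Root > Web Team > My Group'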
def printPropertyHostNames(self, df_property):
# now write the host name details
columns = ["Host_Name", "Defined_CNAMED", "Actual_CNAME"
, "Secure", "Akamaized","Slot","Config_Name","Property_ID", "Group_ID", "Contract_ID"]
df_hosts = pd.DataFrame(columns=columns)
for index, row in df_property.iterrows():
for host in row['Hostnames']:
new_row = {
'Host_Name':host['host'],
'Defined_CNAMED':host['cname_defined'],
'Actual_CNAME':host['cname_actual'],
'Secure':host["secure"],
'Akamaized':host["akamaized"],
'Slot':host['slot'],
'Config_Name':row['Config_Name'],
'Property_ID':int(row['Property_ID']),
'Group_ID':int(row['Group_ID']),
'Contract_ID':row['Contract_ID']
}
df_hosts = df_hosts.append(new_row, ignore_index=True)
self.dfs['hostnames']=df_hosts
def getPropertyHostDetails(self, groupId, contractId, propertyId, propertyVersion):
"""
For the property, get the hostnames, their defined CNAMEs, and whether the hostnames are CNAMEd to Akamai.
Keyword arguments:
groupId
contractId
propertyId
propertyVersion
Return type:
hostnames
"""
hostdetailsJSON = self.wrapper.getPropertyHostNames(propertyId, propertyVersion, groupId, contractId)
hostnames = []
if 'hostnames' in hostdetailsJSON:
for hostname in hostdetailsJSON['hostnames']['items']:
host = ""
cname_defined = ""
if 'cnameFrom' in hostname:
host = hostname['cnameFrom']
if 'cnameTo' in hostname:
cname_defined = hostname['cnameTo']
cname_actual = str(self.getCNAME(host))
slot = None
# TODO: Not working properly
if cname_actual == "None":
isAkamaized = "Unknown"
secureHostName = "Unknown"
# slot = "Unknown"
else:
isAkamaized = self._isAkamaized(cname_actual)
secureHostName = self._isESSL(cname_actual)
if secureHostName is None:
slot = "None"
secureHostName = False
else:
slot = self.checkSlot(host)
secureHostName = True
new_row = { 'host': host,
'cname_defined': cname_defined,
'cname_actual': cname_actual,
'secure' : secureHostName,
'slot': slot,
'akamaized': isAkamaized
}
hostnames.append(new_row)
return hostnames
def getPropertyOriginDetails(self, groupId, contractId, propertyId, propertyVersion):
"""
Finds origins in the property rule tree and classifies the origin type.
Return type:
origin_details
"""
self.rules = self.wrapper.getConfigRuleTree(propertyId, propertyVersion, groupId, contractId)
self.origin = Origin_Settings()
origin_details = self.origin.findOrigins(self.rules)
#replace origin for GTM with the word GTM
for origin in origin_details:
if origin['hostname'].endswith('akadns.net'):
origin['originType'] = 'GTM'
return origin_details
def getPropertyCPCodeDetails(self, groupId, contractId, propertyId, propertyVersion):
self.cpcodes = Origin_Settings()
origin_details = self.cpcodes.findOrigins(self.rules, 'cpCode')
# now get the property's product type
return origin_details
def getEnrollments(self):
"""
Gets the list of certificate enrollments for each contract using the CPS API and stores them in self.dfs['certs'].
"""
contracts = self.wrapper.getContractNames()
columns = ["Contract_ID", "Common_Name","Enrollment_ID" ,"Slots","ALT_names", "MustHave_Ciphers", "Preferred_Ciphers", "Deployment_Location", "Certifcate_Type" , "Certifcate_Authority"]
df_certs = pd.DataFrame(columns=columns)
#TODO: print ciphers
for contract in contracts['contracts']['items']:
enrollment_results = self.wrapper.getEnrollements(contract['contractId'][4:])
if enrollment_results is not None:
if 'enrollments' in enrollment_results:
if len(enrollment_results['enrollments']) >0:
for i in enrollment_results['enrollments']:
Enrollment_ID = str(i['location']).split('/')[4]
new_row = {
'Contract_ID':contract['contractId'][4:],
'Common_Name':i['csr']['cn'],
'Enrollment_ID':int(Enrollment_ID),
'Slots': self.getSlotId(Enrollment_ID),
'ALT_names':i['csr']['sans'],
'MustHave_Ciphers':i['networkConfiguration']['mustHaveCiphers'],
'Preferred_Ciphers':i['networkConfiguration']['preferredCiphers'],
'Deployment_Location':i['networkConfiguration'],
'Certificate_Authority':i['ra'],
'Certificate_Type':i['certificateType']
}
df_certs = df_certs.append(new_row, ignore_index=True)
self.dfs['certs'] = df_certs
def getSlotId(self,enrollementID):
Enrollment = self.wrapper.getEnrollmentHistory(enrollementID)
slots = None
for c in Enrollment['certificates']:
if c['deploymentStatus'] == 'active':
slots = int(str(c['slots']).replace('[', '').replace(']', ''))
break
return slots
def printMatchTargets(self,matchTargets):
columns = ["Target_ID", "Type", "Config_ID", "Config_Version", "Default_File", "File_Paths", "APIs","Hostnames","Security_Policy", "Sequence"]
df_secMatch = pd.DataFrame(columns=columns)
for mt in matchTargets:
for webTarget in mt['matchTargets']['websiteTargets']:
# pull optional keys, defaulting to None when a key is absent
mtype = webTarget.get('type')
hostnames = webTarget.get('hostnames')
configId = webTarget.get('configId')
configVersion = webTarget.get('configVersion')
defaultFile = webTarget.get('defaultFile')
filePaths = webTarget.get('filePaths')
targetId = webTarget.get('targetId')
securityPolicy = webTarget.get('securityPolicy')
sequence = webTarget.get('sequence')
new_row = {
"Target_ID":targetId,
"Type":mtype,
"Config_ID":configId,
"Config_Version":configVersion,
"Default_File":defaultFile,
"File_Paths":filePaths,
"APIs":None,
"Hostnames":hostnames,
"Security_Policy":securityPolicy,
"Sequence":sequence
}
df_secMatch = df_secMatch.append(new_row, ignore_index=True)
self.dfs['secMatch'] = df_secMatch
return None
def printAppSec(self):
secConfigs = self.getSecConfigs()
matchTargets = []
columns = ["AppSec_Config_Name", "AppSec_Config_ID", "AppSec_Type", "AppSec_Target_Product", "AppSec_Hostnames", "AppSec_Production_Version", "AppSec_Staging_Version"]
df_configs = pd.DataFrame(columns=columns)
for secConfig in secConfigs['configurations']:
version = secConfig['latestVersion']
stg_version = None
prod_version = None
lst_version = None
prodHostnames = None
if ('productionVersion' in secConfig) and (secConfig['productionVersion'] is not None):
version = secConfig['productionVersion']
else:
if ('stagingVersion' in secConfig) and (secConfig['stagingVersion'] is not None):
version = secConfig['stagingVersion']
stg_version = secConfig['stagingVersion']
if 'productionVersion' in secConfig:
prod_version = secConfig['productionVersion']
if 'stagingVersion' in secConfig:
stg_version = secConfig['stagingVersion']
if 'latestVersion' in secConfig:
lst_version = secConfig['latestVersion']
if 'productionHostnames' in secConfig:
prodHostnames = secConfig['productionHostnames']
matchTargets.append(self.getSecMatchTargets(secConfig['id'],version ))
name = None
if 'name' in secConfig:
name = secConfig['name']
new_row = {
'AppSec_Config_Name':name,
'AppSec_Config_ID':secConfig['id'],
'AppSec_Type':secConfig['fileType'],
'AppSec_Target_Product':secConfig["targetProduct"],
'AppSec_Hostnames':prodHostnames,
'AppSec_Production_Version':prod_version,
'AppSec_Staging_Version':stg_version
}
df_configs = df_configs.append(new_row, ignore_index=True)
self.dfs['secConfigs'] = df_configs
self.printMatchTargets(matchTargets)
columns = ["Host_Name","AppSec_Config_Name", "AppSec_Config_ID", "AppSec_Type",
"AppSec_Target_Product", "AppSec_Production_Version","AppSec_Policy"]
df_configByHost = pd.DataFrame(columns=columns)
for secConfig in secConfigs['configurations']:
if 'productionHostnames' in secConfig:
for host in secConfig["productionHostnames"]:
name = None
mtype = None
configId = None
configVersion = None
defaultFile = None
filePaths = []
targetId = []
securityPolicies = "Not Protected"
if 'name' in secConfig:
name = secConfig['name']
for mt in matchTargets:
for webTarget in mt['matchTargets']['websiteTargets']:
if secConfig['id'] != webTarget['configId']:
continue
if 'hostnames' in webTarget:
if host not in webTarget['hostnames']:
continue
if securityPolicies == "Not Protected":
for sp in webTarget['securityPolicy']:
securityPolicies = []
securityPolicies.append(webTarget['securityPolicy']['policyId'])
elif 'securityPolicy' in webTarget:
for sp in webTarget['securityPolicy']:
if webTarget['securityPolicy'] not in securityPolicies:
if securityPolicies == "Not Protected":
securityPolicies = []
securityPolicies.append(webTarget['securityPolicy']['policyId'])
new_row = {
'Host_Name':host,
'AppSec_Config_Name':name,
'AppSec_Config_ID':secConfig['id'],
'AppSec_Type':secConfig['fileType'],
'AppSec_Target_Product':secConfig["targetProduct"],
'AppSec_Production_Version':secConfig["productionVersion"],
'AppSec_Policy':securityPolicies
}
df_configByHost = df_configByHost.append(new_row, ignore_index=True)
self.dfs['secConfigByHost'] = df_configByHost
return
def presentation(self,path=None):
#TODO: FiX: change product from ID to name
if path:
self.outputdir = path
properties = self.dfs['properties']
self.groups = self.dfs['groups']
hostnames = self.dfs['hostnames']
secbyHost = self.dfs['secConfigByHost']
dat = hostnames.merge(self.groups , on='Group_ID').fillna("None")
dat = hostnames.merge(properties[['Config_Name', 'Product', 'Prod_Version','Staging_Version']], on='Config_Name',how='left').fillna("None")
dat = dat.merge(secbyHost,on='Host_Name',how='left').fillna('Not Protected')
dat['Resource_Path'] = dat['Group_ID'].apply(self._resource_path)
dat = dat.rename(columns={"Product": "Product_ID"})
dat['Product'] = dat['Product_ID'].apply(self._translateProductID)
dat = dat[['Host_Name','Defined_CNAMED', 'Actual_CNAME', 'Secure','Slot', 'Akamaized', 'Group_ID','Resource_Path', 'Contract_ID', 'Config_Name', 'Property_ID', 'Product_ID', 'Product', 'Prod_Version', 'Staging_Version', 'AppSec_Config_Name', 'AppSec_Config_ID', 'AppSec_Type', 'AppSec_Target_Product', 'AppSec_Production_Version', 'AppSec_Policy']]
self.dfs['ByHost'] = dat
def _readProductMap(self):
if self.productMap is None:
with open('Lib/GCS/productMap.json') as f:
self.productMap = json.load(f)
def mineHar(self,har,lst_firstparty):
columns = ['url','host','host-type','protocol','method','status','ext','cpcode','ttl','server',
'cdn-cache','cdn-cache-parent','cdn-cache-key','cdn-req-id','vary','appOrigin','content-type','content-length',
'content-length-origin','transfer-size','content-size','blocked','dns','ssl','connect','send','ttfb','receive',
'edgeTime','originTime']
dat_clean = | pd.DataFrame(columns=columns) | pandas.DataFrame |
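# Hedged sketch of the presumed entry point for this module (not shown in this excerpt;
# the class and function names are taken from the code above, the wiring is an assumption):
#
#   if __name__ == '__main__':
#       parser, args = ArgsParser()
#       console = ConsoleLogging()
#       aggregator = Aggregator(console, args, args['section'])
#       if aggregator.getAccountDetails():
#           aggregator.accountSummary()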
import streamlit as st
from ..global_data import Constants, load_data, load_pred
import pandas as pd
from pathlib import Path
import datetime
# from sklearn.preprocessing import MinMaxScaler
from covid_forecasting_joint_learning.pipeline import main as Pipeline, sird
from covid_forecasting_joint_learning.data import cols as DataCol
from matplotlib import pyplot as plt
from .eval import app as __app
from matplotlib.figure import Figure
from matplotlib.spines import Spines
from covid_forecasting_joint_learning.data.kabko import KabkoData
from covid_forecasting_joint_learning.pipeline.preprocessing import Group
from covid_forecasting_joint_learning.pipeline.clustering import Cluster
from covid_forecasting_joint_learning.model.general import DEFAULT_FUTURE_EXO_COLS, DEFAULT_PAST_COLS
def _app():
return __app(
title="# Forecast",
log_dir="logs/pred",
model_dir="model/pred",
trial_id=-2,
limit_data=False,
val=2,
early_stopping_kwargs={
"rise_patience": 25,
"still_patience": 25,
"both_patience": 75
},
show_loss=False,
show_epoch=True,
show_tb=False
)
@st.cache(
hash_funcs={
KabkoData: id,
Cluster: id,
Group: id,
type(KabkoData): id,
type(load_data): id
},
allow_output_mutation=True
)
def pred(target, model_dir_3):
data_torch = target.datasets_torch[0][0]
target.model.future_length = data_torch[4].size(1)
target.model.eval()
pred_vars = target.model(*data_torch[:5]).detach().numpy()
data_np = target.datasets[0][0]
indices = data_np[-1]
df_vars = pd.DataFrame(pred_vars[0], columns=DataCol.SIRD_VARS, index=indices)
prev = data_torch[5]
pred_final = target.model.rebuild(pred_vars, prev, target.population, sird.rebuild)
indices = data_np[-1]
df_final = pd.DataFrame(pred_final[0], columns=DataCol.IRD, index=indices)
Path(model_dir_3).mkdir(parents=True, exist_ok=True)
df = pd.concat([df_vars, df_final], axis=1)
df.to_excel(f"{model_dir_3}/pred.xlsx", sheet_name="pred")
return df
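# Hedged usage note: the call below returns a dataframe indexed by forecast date whose
# columns are the SIRD variables followed by the rebuilt I/R/D series; the directory
# layout mirrors the one used in app() below and is otherwise an assumption.
#
#   df = pred(target, f"{model_dir}/{trial_id}/{group.id}/{target.cluster.id}/{target.name}")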
@st.cache(
hash_funcs={
KabkoData: id,
Cluster: id,
Group: id,
type(KabkoData): id,
type(load_data): id
},
allow_output_mutation=True
)
def save_combined_pred(preds, model_dir_2):
df = pd.concat(preds)
df.to_excel(f"{model_dir_2}/pred.xlsx", sheet_name="pred")
label_name = [
(DataCol.IRD, "pred_final"),
(DataCol.SIRD_VARS, "pred_vars")
]
fig_names = [ln[1] for ln in label_name] + DataCol.IRD + DataCol.SIRD_VARS
def plot_etc(fig, ax, name, model_dir_3):
ax.legend(loc='best')
ax.title.set_text(name)
# ax.tick_params(labelrotation=90)
ax.grid(which="both", alpha=0.3)
fig.savefig(f"{model_dir_3}/{name}.jpg", bbox_inches="tight")
@st.cache(hash_funcs={Figure: hash, Spines: hash}, allow_output_mutation=True)
def plot_many(df, labels, name, model_dir_3):
fig, ax = plt.subplots(1, 1)
for k in labels:
ax.plot(df[k], label=k)
plot_etc(fig, ax, name, model_dir_3)
return fig
@st.cache(hash_funcs={Figure: hash, Spines: hash}, allow_output_mutation=True)
def plot_single(df, k, model_dir_3):
fig, ax = plt.subplots(1, 1)
ax.plot(df[k], label=k)
plot_etc(fig, ax, k, model_dir_3)
return fig
@st.cache(hash_funcs={Figure: hash, Spines: hash}, allow_output_mutation=True)
def make_figs(df, model_dir_3):
fig_dict_1 = {}
plt.close('all')
for labels, name in label_name:
fig = plot_many(df, labels, name, model_dir_3)
fig_dict_1[name] = fig
for k in DataCol.IRD + DataCol.SIRD_VARS:
fig = plot_single(df, k, model_dir_3)
fig_dict_1[k] = fig
return fig_dict_1
@st.cache(
hash_funcs={
KabkoData: id,
Cluster: id,
Group: id,
type(KabkoData): id,
type(load_data): id
},
allow_output_mutation=True
)
def preprocess_pred(targets, end_date, past_size, past_cols, future_exo_cols):
Pipeline.preprocessing_7(
targets,
end_date=end_date,
past_size=past_size,
past_cols=past_cols,
future_exo_cols=future_exo_cols
)
def app():
groups, hparams, model_dir, trial_id, target_names = _app()
groups = [g.copy() for g in groups]
group = groups[0]
model_dir_2 = f"{model_dir}/{trial_id}/{group.id}/"
Path(model_dir_2).mkdir(parents=True, exist_ok=True)
pred_expander = st.expander(label='Forecast', expanded=True)
with pred_expander:
st.markdown("## Forecast")
target_col, pred_col = st.columns(2)
target_name = target_col.selectbox(
'Kabupaten/Kota',
target_names
)
fig_name = st.multiselect(
'Label',
fig_names,
DataCol.IRD
)
# pred_date = "2021-12-31"
# pred_date = datetime.date.today()
target = [t for t in group.targets if t.name == target_name][0]
pred_date = target.data.last_valid_index()
pred_date = pred_date + datetime.timedelta(days=14)
pred_date = pred_col.date_input("Forecast until date", pd.to_datetime(pred_date))
past_cols = hparams["past_cols"]
past_cols = DEFAULT_PAST_COLS[past_cols] if isinstance(past_cols, int) else past_cols
future_exo_cols = hparams["future_exo_cols"]
future_exo_cols = DEFAULT_FUTURE_EXO_COLS[future_exo_cols] if isinstance(future_exo_cols, int) else future_exo_cols
preprocess_pred(
group.targets,
end_date=pred_date,
past_size=30 + hparams["additional_past_length"],
past_cols=past_cols,
future_exo_cols=future_exo_cols
)
pred_dict = {}
fig_dict = {}
for cluster in group.clusters:
for target in cluster.targets:
model_dir_3 = f"{model_dir_2}/{target.cluster.id}/{target.name}"
df = pred(target, model_dir_3)
pred_dict[target.name] = df
df["kabko"] = | pd.Series(target.name, index=df.index) | pandas.Series |
import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
import folium
import base64
import xlsxwriter
from xlsxwriter import Workbook
from geopy.distance import great_circle
from io import BytesIO
from collections import Counter
from PIL import Image
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
pd.set_option('display.float_format', lambda x: '%.3f' % x)
st.set_page_config(layout='wide')
image=Image.open('images/HR.png')
st.sidebar.image(image,use_column_width=True,caption='House Rocket Company')
menu = st.sidebar.radio('Select one of the project pages:',
('Data Overview','Insights','Business Solution'))
st.sidebar.write('For more information about the project, visit: '"[GitHub](https://github.com/RaulBelarmino/house-rocket-insights/)")
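# Hedged sketch (the dataset path is an assumption): the selected radio option is expected
# to route to the page functions defined below, along the lines of
#
#   if menu == 'Data Overview':
#       data_overview(get_data('datasets/kc_house_data.csv'))
#   elif menu == 'Insights':
#       ...  # insights page built from get_data_clean()
#   else:
#       ...  # business solution page built from get_data_solution()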
def get_data(path):
data = pd.read_csv(path)
return data
def get_data_clean():
data = pd.read_csv('datasets/data_clean.csv')
return data
def get_data_solution():
data = pd.read_csv('datasets/kc_houses_solution.csv')
return data
def data_overview(data):
st.markdown(
"<h1 style='text-align: center; color: #565656; background: #FADBD8'> Data Overview </h1>",
unsafe_allow_html=True)
st.write(data.head(100))
# Overview map
df1 = data.copy()
# Base map
density_map = folium.Map(location=[df1['lat'].mean(), df1['long'].mean()],
default_zoom_start=15)
make_cluster = MarkerCluster().add_to(density_map)
for name, row in df1.iterrows():
folium.Marker([row['lat'], row['long']],
popup='Price R${0} on: {1}. Sqft: {2} \n\nId: {3} '
'Bedrooms: {4} Bathrooms: {5} '
'Year Built: {6}'.format(row['price'],
row['date'],
row['sqft_lot'],
row['id'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(make_cluster)
folium_static(density_map, width=865, height=400)
# descriptive statistics
df = data.copy()
df['id'] = df['id'].astype(str)
c1, c2 = st.beta_columns((1, 1))
# central tendency metrics
attributes_num = df.select_dtypes(include=['int64', 'float64'])
df_mean = pd.DataFrame(attributes_num.apply(np.mean))
df_median = pd.DataFrame(attributes_num.apply(np.median))
# measures of dispersion
df_min = pd.DataFrame(attributes_num.apply(np.min))
df_max = pd.DataFrame(attributes_num.apply(np.max))
df_std = pd.DataFrame(attributes_num.apply(np.std))
statics = pd.concat([df_mean, df_median, df_min, df_max, df_std], axis=1).reset_index()
statics.columns = ['attributes', 'mean', 'median', 'min', 'max', 'std']
statics = statics.iloc[
[True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False,
True, True], :]
c1.header('Statistcs Descriptive')
c1.dataframe(statics, height=1000)
# Average Metrics
df['sqm_living'] = df['sqft_living'] / 10.764
df['sqm_lot'] = df['sqft_lot'] / 10.764
df['price_sqm'] = df['price'] / df['sqm_living']
df1 = df[['id', 'zipcode']].groupby('zipcode').count().reset_index()
df2 = df[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
df3 = df[['sqm_living', 'zipcode']].groupby('zipcode').mean().reset_index()
df4 = df[['price_sqm', 'zipcode']].groupby('zipcode').mean().reset_index()
# Merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = | pd.merge(m1, df3, on='zipcode', how='inner') | pandas.merge |
import unittest
import itertools
import os
import pandas as pd
import platform
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains)
from hpat.hiframes.rolling import supported_rolling_funcs
LONG_TEST = (int(os.environ['HPAT_LONG_ROLLING_TEST']) != 0
if 'HPAT_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
class TestRolling(unittest.TestCase):
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
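# Hedged illustration: for func_name == 'mean', the exec() above builds a function
# equivalent to
#
#   def test_impl(df, w, c):
#       return df.rolling(w, center=c).mean()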
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = hpat.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = hpat.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = hpat.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = hpat.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for n in sizes:
time = | pd.date_range(start='1/1/2018', periods=n, freq='s') | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
import requests
import pandas as pd
import json
from tqdm import tqdm
PATH = '../../'
PATH_STATS = "../../data/france/stats/"
# Download data from Santé publique France and export it to local files
def download_data_hosp_fra_clage():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/08c18e08-6780-452d-9b8c-ae244ad529b3")
with open(PATH + 'data/france/donnees-hosp-fra-clage.csv', 'wb') as f:
f.write(data.content)
def download_data_opencovid():
data = requests.get("https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv")
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data.content)
def download_data_vue_ensemble():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71")
with open(PATH + 'data/france/synthese-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_variants():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/848debc4-0e42-4e3b-a176-afc285ed5401") #https://www.data.gouv.fr/fr/datasets/r/c43d7f3f-c9f5-436b-9b26-728f80e0fd52
data_reg = requests.get("https://www.data.gouv.fr/fr/datasets/r/5ff0cad6-f150-47ea-a4e0-57e354c1b2a4") #https://www.data.gouv.fr/fr/datasets/r/73e8851a-d851-43f8-89e4-6178b35b7127
with open(PATH + 'data/france/donnees-variants.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnees-variants-reg.csv', 'wb') as f:
f.write(data_reg.content)
def download_data_variants_deps():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4d3e5a8b-9649-4c41-86ec-5420eb6b530c") #https://www.data.gouv.fr/fr/datasets/r/16f4fd03-797f-4616-bca9-78ff212d06e8
with open(PATH + 'data/france/donnees-variants-deps.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_fra():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/efe23314-67c4-45d3-89a2-3faef82fae90")
with open(PATH + 'data/france/donnees-vacsi-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_reg():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/735b0df8-51b4-4dd2-8a2d-8e46d77d60d8")
with open(PATH + 'data/france/donnees-vacsi-reg.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_dep():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4f39ec91-80d7-4602-befb-4b522804c0af")
with open(PATH + 'data/france/donnees-vacsi-dep.csv', 'wb') as f:
f.write(data.content)
def download_data_obepine():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/031b79a4-5ee1-4f40-a804-b8abec3e99a6") #https://www.data.gouv.fr/fr/datasets/r/ba71be57-5932-4298-81ea-aff3a12a440c
with open(PATH + 'data/france/donnees_obepine_regions.csv', 'wb') as f:
f.write(data.content)
def download_data_donnees_vaccination_par_pathologie():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-pathologie/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', 'wb') as f:
f.write(data.content)
def import_data_donnees_vaccination_par_pathologie():
df = pd.read_csv(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', sep=None)
return df
def download_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-tranche-dage-type-de-vaccin-et-departement/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-tranche-dage-departement.csv', 'wb') as f:
f.write(data.content)
def import_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
df = pd.read_csv(PATH + 'data/france/donnees-tranche-dage-departement.csv', sep=None)
return df
def import_data_obepine():
df = pd.read_csv(PATH + 'data/france/donnees_obepine_regions.csv', sep=None)
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df = df.merge(right=df_reg_pop, left_on="Code_Region", right_on="code")
return df
def import_data_metropoles():
df_metro = pd.read_csv(PATH + 'data/france/donnes-incidence-metropoles.csv', sep=",")
epci = pd.read_csv(PATH + 'data/france/metropole-epci.csv', sep=";", encoding="windows-1252")
df_metro = df_metro.merge(epci, left_on='epci2020', right_on='EPCI').drop(['EPCI'], axis=1)
return df_metro
def import_data_hosp_clage():
df_hosp = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', sep=";")
df_hosp = df_hosp.groupby(["reg", "jour", "cl_age90"]).first().reset_index()
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_hosp = df_hosp.merge(df_reg_pop, left_on="reg", right_on="code")
return df_hosp
def import_data_tests_viros():
df = pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";")
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_dep_reg = pd.read_csv(PATH + 'data/france/departments_regions_france_2016.csv', sep=",")
df["dep"] = df["dep"].astype(str)
df["dep"] = df["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_dep_reg["departmentCode.astype"] = df_dep_reg.departmentCode.astype(str)
df = df.merge(df_dep_reg, left_on="dep", right_on="departmentCode", how="left")
df = df.merge(df_reg_pop, left_on="regionCode", right_on="code", how="left")
return df
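# Hedged illustration of the zero-padding regex used above (values are examples):
#
#   pd.Series(['1', '13', '2A']).str.replace(r"^([1-9])$", lambda m: "0" + m.group(0), regex=True)
#   # -> ['01', '13', '2A']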
def import_data_hosp_ad_age():
df = pd.read_csv('https://www.data.gouv.fr/fr/datasets/r/dc7663c7-5da9-4765-a98b-ba4bc9de9079', sep=";")
return df
def import_data_new():
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
return df_new
def import_data_df():
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
return df
def import_data_variants():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_deps():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-deps.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_regs():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-regs.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
df_variants = df_variants[df_variants.cl_age90==0]
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_variants = df_variants.merge(df_reg_pop, left_on="reg", right_on="code")
return df_variants
def import_data_tests_sexe():
df = pd.read_csv(PATH + 'data/france/tests_viro-fra-covid19.csv', sep=";")
return df
def import_data_vue_ensemble():
df = pd.read_csv(PATH + 'data/france/synthese-fra.csv', sep=",")
df = df.sort_values(["date"])
with open(PATH_STATS + 'vue-ensemble.json', 'w') as outfile:
dict_data = {"cas": int(df["total_cas_confirmes"].diff().values[-1]), "update": df.date.values[-1][-2:] + "/" + df.date.values[-1][-5:-3]}
json.dump(dict_data, outfile)
return df
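# Hedged example of the JSON written above (numbers are illustrative; the date is rendered as DD/MM):
#
#   vue-ensemble.json -> {"cas": 12345, "update": "15/03"}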
def import_data_opencovid():
df = pd.read_csv(PATH + 'data/france/donnees-opencovid.csv', sep=",")
"""with open(PATH_STATS + 'opencovid.json', 'w') as outfile:
dict_data = {"cas": int(df["cas_confirmes"].values[-1]), "update": df.index.values[-1][-2:] + "/" + df.index.values[-1][-5:-3]}
json.dump(dict_data, outfile)"""
return df
def import_data_vacsi_a_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-fra.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-reg.csv', sep=";")
return df
def import_data_vacsi_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-dep.csv', sep=";")
return df
def import_data_vacsi_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-fra.csv', sep=";")
return df
def import_data_vacsi_a_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-reg.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_a_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-dep.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_hosp_fra_clage():
df = pd.read_csv(PATH + 'data/france/donnees-hosp-fra-clage.csv', sep=";").groupby(["cl_age90", "jour"]).sum().reset_index()
df = df[df.cl_age90 != 0]
return df
def download_data():
pbar = tqdm(total=8)
download_data_vacsi_fra()
download_data_vacsi_reg()
download_data_vacsi_dep()
url_metadata = "https://www.data.gouv.fr/fr/organizations/sante-publique-france/datasets-resources.csv"
url_geojson = "https://raw.githubusercontent.com/gregoiredavid/france-geojson/master/departements.geojson"
url_deconf = "https://www.data.gouv.fr/fr/datasets/r/f2d0f955-f9c4-43a8-b588-a03733a38921"
url_opencovid = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv"
url_vacsi_a_fra = "https://www.data.gouv.fr/fr/datasets/r/54dd5f8d-1e2e-4ccb-8fb8-eac68245befd"
url_vacsi_a_reg = "https://www.data.gouv.fr/fr/datasets/r/c3ccc72a-a945-494b-b98d-09f48aa25337"
url_vacsi_a_dep = "https://www.data.gouv.fr/fr/datasets/r/83cbbdb9-23cb-455e-8231-69fc25d58111"
pbar.update(1)
metadata = requests.get(url_metadata)
pbar.update(2)
geojson = requests.get(url_geojson)
pbar.update(3)
with open(PATH + 'data/france/metadata.csv', 'wb') as f:
f.write(metadata.content)
pbar.update(4)
with open(PATH + 'data/france/dep.geojson', 'wb') as f:
f.write(geojson.content)
pbar.update(5)
df_metadata = pd.read_csv(PATH + 'data/france/metadata.csv', sep=";")
url_data = "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-covid19")]["url"].values[0] #donnees-hospitalieres-classe-age-covid19-2020-10-14-19h00.csv
url_data_new = "https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-nouveaux")]["url"].values[0]
url_tests = df_metadata[df_metadata['url'].str.contains("/donnees-tests-covid19-labo-quotidien")]["url"].values[0]
url_metropoles = "https://www.data.gouv.fr/fr/datasets/r/61533034-0f2f-4b16-9a6d-28ffabb33a02" #df_metadata[df_metadata['url'].str.contains("/sg-metro-opendata")]["url"].max()
url_incidence = df_metadata[df_metadata['url'].str.contains("/sp-pe-tb-quot")]["url"].values[0]
url_tests_viro = df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-dep")]["url"].values[0]
url_sursaud = df_metadata[df_metadata['url'].str.contains("sursaud.*quot.*dep")]["url"].values[0]
url_data_clage = df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-classe-age-covid19")]["url"].values[0]
url_data_sexe = "https://www.data.gouv.fr/fr/datasets/r/dd0de5d9-b5a5-4503-930a-7b08dc0adc7c" #df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-fra")]["url"].values[0]
pbar.update(6)
data = requests.get(url_data)
data_new = requests.get(url_data_new)
data_tests = requests.get(url_tests)
data_metropoles = requests.get(url_metropoles)
data_deconf = requests.get(url_deconf)
data_sursaud = requests.get(url_sursaud)
data_incidence = requests.get(url_incidence)
data_opencovid = requests.get(url_opencovid)
data_vacsi_a_fra = requests.get(url_vacsi_a_fra)
data_vacsi_a_reg = requests.get(url_vacsi_a_reg)
data_vacsi_a_dep = requests.get(url_vacsi_a_dep)
data_tests_viro = requests.get(url_tests_viro)
data_clage = requests.get(url_data_clage)
data_sexe = requests.get(url_data_sexe)
pbar.update(7)
with open(PATH + 'data/france/donnes-hospitalieres-covid19.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', 'wb') as f:
f.write(data_new.content)
with open(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', 'wb') as f:
f.write(data_tests.content)
with open(PATH + 'data/france/donnes-incidence-metropoles.csv', 'wb') as f:
f.write(data_metropoles.content)
with open(PATH + 'data/france/indicateurs-deconf.csv', 'wb') as f:
f.write(data_deconf.content)
with open(PATH + 'data/france/sursaud-covid19-departement.csv', 'wb') as f:
f.write(data_sursaud.content)
with open(PATH + 'data/france/taux-incidence-dep-quot.csv', 'wb') as f:
f.write(data_incidence.content)
with open(PATH + 'data/france/tests_viro-dep-quot.csv', 'wb') as f:
f.write(data_tests_viro.content)
with open(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', 'wb') as f:
f.write(data_clage.content)
with open(PATH + 'data/france/tests_viro-fra-covid19.csv', 'wb') as f:
f.write(data_sexe.content)
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data_opencovid.content)
with open(PATH + 'data/france/donnees-vacsi-a-fra.csv', 'wb') as f:
f.write(data_vacsi_a_fra.content)
with open(PATH + 'data/france/donnees-vacsi-a-reg.csv', 'wb') as f:
f.write(data_vacsi_a_reg.content)
with open(PATH + 'data/france/donnees-vacsi-a-dep.csv', 'wb') as f:
f.write(data_vacsi_a_dep.content)
pbar.update(8)
# Import data from previously exported files to dataframes
def import_data():
pbar = tqdm(total=8)
pbar.update(1)
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
df.dep = df.dep.astype(str)
df_sursaud = pd.read_csv(PATH + 'data/france/sursaud-covid19-departement.csv', sep=";")
df_sursaud["dep"] = df_sursaud["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
df_tests = pd.read_csv(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', sep=";")
df_deconf = pd.read_csv(PATH + 'data/france/indicateurs-deconf.csv', sep=",")
df_incid = pd.read_csv(PATH + 'data/france/taux-incidence-dep-quot.csv', sep=";")
df_incid["dep"] = df_incid["dep"].astype('str')
df_incid["dep"] = df_incid["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_tests_viro = pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";")
df_tests_viro["dep"] = df_tests_viro["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
pbar.update(2)
df_tests_viro["dep"] = df_tests_viro["dep"].astype('str')
pop_df_incid = df_incid["pop"]
lits_reas = pd.read_csv(PATH + 'data/france/lits_rea.csv', sep=",")
df_regions = pd.read_csv(PATH + 'data/france/departments_regions_france_2016.csv', sep=",")
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_dep_pop = pd.read_csv(PATH + 'data/france/dep-pop.csv', sep=";")
###
df = df.merge(df_regions, left_on='dep', right_on='departmentCode')
df = df.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df = df.merge(df_dep_pop, left_on='dep', right_on='dep')
df = df[df["sexe"] == 0]
df['hosp_nonrea'] = df['hosp'] - df['rea']
df = df.merge(lits_reas, left_on="departmentName", right_on="nom_dpt")
#df_tests_viro = df_tests_viro[df_tests_viro["cl_age90"] == 0]
df_incid = df_incid.merge(df_regions, left_on='dep', right_on='departmentCode')
if "pop" in df_tests_viro.columns:
df_incid = df_incid.merge(df_tests_viro[df_tests_viro["cl_age90"] == 0].drop("pop", axis=1).drop("P", axis=1).drop("cl_age90", axis=1), left_on=['jour', 'dep'], right_on=['jour', 'dep'])
else:
df_incid = df_incid.merge(df_tests_viro[df_tests_viro["cl_age90"] == 0].drop("P", axis=1).drop("cl_age90", axis=1), left_on=['jour', 'dep'], right_on=['jour', 'dep'])
df_new = df_new.merge(df_regions, left_on='dep', right_on='departmentCode')
df_new = df_new.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df_new = df_new.merge(df_dep_pop, left_on='dep', right_on='dep')
df_new['incid_hosp_nonrea'] = df_new['incid_hosp'] - df_new['incid_rea']
df_sursaud = df_sursaud.merge(df_regions, left_on='dep', right_on='departmentCode')
df_sursaud = df_sursaud.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df_sursaud = df_sursaud.merge(df_dep_pop, left_on='dep', right_on='dep')
df_sursaud = df_sursaud[df_sursaud["sursaud_cl_age_corona"] == "0"]
df_sursaud["taux_covid"] = df_sursaud["nbre_pass_corona"] / df_sursaud["nbre_pass_tot"]
pbar.update(3)
df['rea_pop'] = df['rea']/df['regionPopulation']*100000
df['rea_deppop'] = df['rea']/df['departmentPopulation']*100000
df['rad_pop'] = df['rad']/df['regionPopulation']*100000
df['dc_pop'] = df['dc']/df['regionPopulation']*100000
df['dc_deppop'] = df['dc']/df['departmentPopulation']*100000
df['hosp_pop'] = df['hosp']/df['regionPopulation']*100000
df['hosp_deppop'] = df['hosp']/df['departmentPopulation']*100000
df['hosp_nonrea_pop'] = df['hosp_nonrea']/df['regionPopulation']*100000
pbar.update(4)
df_confirmed = | pd.read_csv(PATH + 'data/data_confirmed.csv') | pandas.read_csv |
import unittest
import koleksyon.mcmc as mcmc
import pandas as pd
import numpy as np
import datetime
def artist_costs():
#Artist-Album costs (0.0 represents that you don't buy an album, .99 represents you buy just a song)
MichaelJackson = np.array([0.0,0.99,8.64,8.69,12.33,12.96,38.99,30.12,13.99,17.25])
LadyGaga = np.array([0.0,0.99,14.28,11.20,11.25,14.98,13.69,9.99,18.95])
Eminem = np.array([0.0,0.99,15.99,9.33,21.61,22.37,12.80,10.75,11.70])
JustinBieber = np.array([0.0,0.99,10.70,9.49,14.65,29.18,21.93,15.95,19.90,37.98])
FreddieMercury = np.array([0.0,0.99,14.74,11.50,18.99,12.49,14.54,10.99,11.89,16.53,11.70,9.71,12.39])
MileyCyrus = np.array([0.0,0.99,11.18,6.98,9.21,9.95,9.49])
TaylorSwift = np.array([0.0,0.99,13.98,16.99,13.51,8.97,15.02,7.00,13.97,8.97,6.86])
LilWayne = np.array([0.0,0.99,11.55,16.00,29.47,13.41,9.68,15.95,11.99,16.63])
SelenaGomez = np.array([0.0,0.99,12.59,10.91,36.57,16.52])
Rihanna = np.array([0.0,0.99,13.98,10.25,22.90,6.32,9.19])
ArtistAlbums = {}
ArtistAlbums["<NAME>"] = MichaelJackson
ArtistAlbums["<NAME>"] = LadyGaga
ArtistAlbums["Eminem"] = Eminem
ArtistAlbums["<NAME>"] = JustinBieber
ArtistAlbums["<NAME>"] = FreddieMercury
ArtistAlbums["<NAME>"] = MileyCyrus
ArtistAlbums["<NAME>"] = TaylorSwift
ArtistAlbums["<NAME>"] = LilWayne
ArtistAlbums["<NAME>"] = SelenaGomez
ArtistAlbums["Rihanna"] = Rihanna
return ArtistAlbums
def ppurchase(ArtistAlbums):
purchase_probability = {}
for k,v in ArtistAlbums.items():
#print(k)
#print(v)
proba = []
proba.append(0.70) #30% purchases, 70% not purchases
proba.append(0.24)
r = 0.06 / (len(v) - 2)
for i in range(0,len(v)-2):
proba.append(r)
proba = np.array(proba)
#print(proba)
purchase_probability[k] = proba
return purchase_probability
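# --- Editor's sanity-check sketch (added for illustration; not part of the original test suite) ---
# It only uses artist_costs() and ppurchase() defined above and checks that each probability
# vector matches its cost array in length and sums to ~1.0
# (0.70 no-purchase + 0.24 single-song + the remaining 0.06 split across the albums).
def _check_purchase_probabilities():
    costs = artist_costs()
    probs = ppurchase(costs)
    for artist, cost_array in costs.items():
        p = probs[artist]
        assert len(p) == len(cost_array)      # one probability per purchase option
        assert abs(p.sum() - 1.0) < 1e-9      # probabilities form a valid distribution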
class TestMCMC(unittest.TestCase):
def setUp(self):
self.prepData()
#@classmethod
#def setUpClass(cls):
# cls.prepData(None) #note the strange way we need to interact with the class on setup!
    #simple function that preps the data for ALL the tests in this file... runs once... note, random seed set to make reproducible
def prepData(self):
print("Preparing the data so we can run tests...")
np.random.seed(42)
print("Parsing count data and setting up probability arrays")
self.ArtistAlbums = artist_costs()
self.purchase_probability = ppurchase(self.ArtistAlbums)
self.dfa = | pd.read_csv("../data/artist_wiki_page_views-20200101-20201231.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import math
import random
import pickle
from typing import List, Tuple
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMRegressor
from progress.bar import Bar
from prismx.utils import read_gmt, load_correlation, load_feature
from prismx.feature import load_features
def create_training_data(workdir: str, gmt_file: str, false_sample_count: int=50000) -> List:
correlation_files = os.listdir(workdir+"/correlation")
correlation = load_correlation(workdir, 0)
background_genes = list(correlation.columns)
library, rev_library, ugenes = read_gmt(gmt_file, background_genes)
df_true = pd.DataFrame()
lk = list(range(0, len(correlation_files)-1))
lk.append("global")
bar = Bar('Retrieve training data', max=2*len(lk))
for i in lk:
feature = load_feature(workdir, i)
features = []
keys = list(feature.columns)
setname = []
genename = []
for se in keys:
vals = library[se]
for val in vals:
setname.append(val)
genename.append(se)
features.append(feature.loc[val.encode('UTF-8'), se])
df_true.loc[:,i] = features
bar.next()
df_true2 = pd.concat([pd.DataFrame(genename), pd.DataFrame(setname),df_true, pd.DataFrame(np.ones(len(setname)))], axis=1)
samp_set = []
samp_gene = []
npw = np.array(df_true2.iloc[:, 0])
false_gene_count = math.ceil(false_sample_count/len(background_genes))
for i in background_genes:
rkey = random.sample(keys,1)[0]
ww = np.where(npw == rkey)[0]
for j in range(0, false_gene_count):
rgene = random.sample(background_genes,1)[0]
if rgene not in df_true2.iloc[ww, 1]:
samp_set.append(rkey)
samp_gene.append(rgene)
df_false = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module of methods to sample variables of a single data type.
"""
import warnings
from typing import List, Optional, Tuple
import networkx as nx
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.gaussian_process.kernels import Kernel
from causalnex.structure.data_generators import (
generate_structure,
nonlinear_sem_generator,
sem_generator,
)
from causalnex.structure.structuremodel import StructureModel
def generate_continuous_data(
sm: nx.DiGraph,
n_samples: int,
distribution: str = "gaussian",
noise_scale: float = 1.0,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> np.ndarray:
"""
Simulate samples from SEM with specified type of noise.
The order of the columns on the returned array is the one provided by `sm.nodes`
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
distribution: The type of distribution to use for the noise
of a variable. Options: 'gaussian'/'normal' (alias), 'student-t',
'exponential', 'gumbel'.
noise_scale: The standard deviation of the noise.
intercept: Whether to use an intercept for each feature.
seed: Random state
Returns:
x_mat: [n_samples,d_nodes] sample matrix
Raises:
ValueError: if distribution isn't gaussian/normal/student-t/exponential/gumbel
"""
if kernel is None:
df = sem_generator(
graph=sm,
default_type="continuous",
n_samples=n_samples,
distributions={"continuous": distribution},
noise_std=noise_scale,
intercept=intercept,
seed=seed,
)
else:
df = nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type="continuous",
n_samples=n_samples,
distributions={"continuous": distribution},
noise_std=noise_scale,
seed=seed,
)
return df[list(sm.nodes())].values
def generate_binary_data(
sm: nx.DiGraph,
n_samples: int,
distribution: str = "logit",
noise_scale: float = 1.0,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> np.ndarray:
"""
Simulate samples from SEM with specified type of noise.
The order of the columns on the returned array is the one provided by `sm.nodes`
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
distribution: The type of distribution to use for the noise
of a variable. Options: 'probit'/'normal' (alias),
'logit' (default).
noise_scale: The standard deviation of the noise. The binary and
categorical features are created using a latent variable approach.
The noise standard deviation determines how much weight the "mean"
estimate has on the feature value.
intercept: Whether to use an intercept for the latent variable of each feature.
seed: Random state
Returns:
x_mat: [n_samples,d_nodes] sample matrix
Raises:
ValueError: if distribution isn't 'probit', 'normal', 'logit'
"""
if kernel is None:
df = sem_generator(
graph=sm,
default_type="binary",
n_samples=n_samples,
distributions={"binary": distribution},
noise_std=noise_scale,
intercept=intercept,
seed=seed,
)
else:
df = nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type="binary",
n_samples=n_samples,
distributions={"binary": distribution},
noise_std=noise_scale,
seed=seed,
)
return df[list(sm.nodes())].values
def generate_continuous_dataframe(
sm: nx.DiGraph,
n_samples: int,
distribution: str = "gaussian",
noise_scale: float = 1.0,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> pd.DataFrame:
"""
Generates a dataframe with samples from SEM with specified type of noise.
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
distribution: The type of distribution to use for the noise
of a variable. Options: 'gaussian'/'normal' (alias), 'student-t',
'exponential', 'gumbel'.
noise_scale: The standard deviation of the noise.
intercept: Whether to use an intercept for each feature.
seed: Random state
Returns:
Dataframe with the node names as column names
Raises:
ValueError: if distribution is not 'gaussian', 'normal', 'student-t',
'exponential', 'gumbel'
"""
if kernel is None:
return sem_generator(
graph=sm,
default_type="continuous",
n_samples=n_samples,
distributions={"continuous": distribution},
noise_std=noise_scale,
intercept=intercept,
seed=seed,
)
return nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type="continuous",
n_samples=n_samples,
distributions={"continuous": distribution},
noise_std=noise_scale,
seed=seed,
)
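# --- Editor's usage sketch (not part of the original causalnex module; argument values are
# illustrative assumptions only). It samples continuous data from a random 5-node DAG using
# generate_structure, which is imported above.
def _demo_generate_continuous_dataframe() -> pd.DataFrame:
    sm = generate_structure(num_nodes=5, degree=2, graph_type="erdos-renyi")
    # 100 rows, one column per node, Gaussian noise with the default scale
    return generate_continuous_dataframe(sm, n_samples=100, distribution="gaussian", seed=7)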
def generate_binary_dataframe(
sm: nx.DiGraph,
n_samples: int,
distribution: str = "logit",
noise_scale: float = 1.0,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> pd.DataFrame:
"""
Generates a dataframe with samples from SEM with specified type of noise.
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
distribution: The type of distribution to use for the noise
of a variable. Options: 'probit'/'normal' (alias),
'logit' (default).
noise_scale: The standard deviation of the noise. The binary and
categorical features are created using a latent variable approach.
The noise standard deviation determines how much weight the "mean"
estimate has on the feature value.
intercept: Whether to use an intercept for the latent variable of each feature.
seed: Random state
Returns:
x_mat: [n_samples,d_nodes] sample matrix
Raises:
ValueError: if distribution is not 'probit', 'normal', 'logit'
"""
if kernel is None:
return sem_generator(
graph=sm,
default_type="binary",
n_samples=n_samples,
distributions={"binary": distribution},
noise_std=noise_scale,
intercept=intercept,
seed=seed,
)
return nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type="binary",
n_samples=n_samples,
distributions={"binary": distribution},
noise_std=noise_scale,
seed=seed,
)
def generate_count_dataframe(
sm: nx.DiGraph,
n_samples: int,
zero_inflation_factor: float = 0.1,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> pd.DataFrame:
"""
Generates a dataframe with samples from SEM with specified type of noise.
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
zero_inflation_factor: The probability of zero inflation for count data.
intercept: Whether to use an intercept for the latent variable of each feature.
seed: Random state
Returns:
x_mat: [n_samples, d_nodes] sample matrix
Raises:
ValueError: if ``zero_inflation_factor`` is not a float in [0, 1].
"""
if kernel is None:
return sem_generator(
graph=sm,
default_type="count",
n_samples=n_samples,
distributions={"count": zero_inflation_factor},
noise_std=1, # not used for poisson
intercept=intercept,
seed=seed,
)
return nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type="count",
n_samples=n_samples,
distributions={"count": zero_inflation_factor},
noise_std=1, # not used for poisson
seed=seed,
)
def generate_categorical_dataframe(
sm: nx.DiGraph,
n_samples: int,
distribution: str = "logit",
n_categories: int = 3,
noise_scale: float = 1.0,
intercept: bool = False,
seed: int = None,
kernel: Optional[Kernel] = None,
) -> pd.DataFrame:
"""
Generates a dataframe with samples from SEM with specified type of noise.
Args:
sm: A DAG in form of a networkx or StructureModel. Does not require weights.
n_samples: The number of rows/observations to sample.
kernel: A kernel from sklearn.gaussian_process.kernels like RBF(1) or
Matern(1) or any combinations thereof. The kernels are used to
create the latent variable for the binary / categorical variables
and are directly used for continuous variables.
distribution: The type of distribution to use for the noise
of a variable. Options: 'probit'/'normal' (alias),
"logit"/"gumbel" (alias). Logit is default.
n_categories: Number of categories per variable/node.
noise_scale: The standard deviation of the noise. The categorical features
are created using a latent variable approach. The noise standard
deviation determines how much weight the "mean" estimate has on
the feature value.
intercept: Whether to use an intercept for the latent variable of each feature.
seed: Random state
Returns:
x_mat: [n_samples, d_nodes] sample matrix
Raises:
ValueError: if distribution is not 'probit', 'normal', 'logit', 'gumbel'
"""
if kernel is None:
return sem_generator(
graph=sm,
default_type=f"categorical:{n_categories}",
n_samples=n_samples,
distributions={"categorical": distribution},
noise_std=noise_scale,
intercept=intercept,
seed=seed,
)
return nonlinear_sem_generator(
graph=sm,
kernel=kernel,
default_type=f"categorical:{n_categories}",
n_samples=n_samples,
distributions={"categorical": distribution},
noise_std=noise_scale,
seed=seed,
)
def generate_structure_dynamic( # pylint: disable=too-many-arguments
num_nodes: int,
p: int,
degree_intra: float,
degree_inter: float,
graph_type_intra: str = "erdos-renyi",
graph_type_inter: str = "erdos-renyi",
w_min_intra: float = 0.5,
w_max_intra: float = 0.5,
w_min_inter: float = 0.5,
w_max_inter: float = 0.5,
w_decay: float = 1.0,
) -> StructureModel:
"""
Generates a dynamic DAG at random.
Args:
num_nodes: Number of nodes
p: maximum lag to be considered in the structure
degree_intra: expected degree on nodes from the current state
degree_inter: expected degree on nodes from the lagged nodes
graph_type_intra:
- erdos-renyi: constructs a graph such that the probability of any given edge is degree / (num_nodes - 1)
- barabasi-albert: constructs a scale-free graph from an initial connected graph of (degree / 2) nodes
- full: constructs a fully-connected graph - degree has no effect
graph_type_inter:
- erdos-renyi: constructs a graph such that the probability of any given edge is degree / (num_nodes - 1)
- full: connect all past nodes to all present nodes
w_min_intra: minimum weight for intra-slice nodes
w_max_intra: maximum weight for intra-slice nodes
w_min_inter: minimum weight for inter-slice nodes
w_max_inter: maximum weight for inter-slice nodes
w_decay: exponent of weights decay for slices that are farther apart. Default is 1.0, which implies no decay
Raises:
ValueError: if graph type unknown or `num_nodes < 2`
Returns:
StructureModel containing all simulated nodes and edges (intra- and inter-slice)
"""
sm_intra = generate_structure(
num_nodes=num_nodes,
degree=degree_intra,
graph_type=graph_type_intra,
w_min=w_min_intra,
w_max=w_max_intra,
)
sm_inter = _generate_inter_structure(
num_nodes=num_nodes,
p=p,
degree=degree_inter,
graph_type=graph_type_inter,
w_min=w_min_inter,
w_max=w_max_inter,
w_decay=w_decay,
)
res = StructureModel()
res.add_nodes_from(sm_inter.nodes)
res.add_nodes_from([f"{u}_lag0" for u in sm_intra.nodes])
res.add_weighted_edges_from(sm_inter.edges.data("weight"))
res.add_weighted_edges_from(
[(f"{u}_lag0", f"{v}_lag0", w) for u, v, w in sm_intra.edges.data("weight")]
)
return res
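# --- Editor's usage sketch (not part of the original module; parameter values are arbitrary). ---
# Builds a small dynamic DAG with 3 nodes and 2 lags; nodes are named "<variable>_lag<lag>",
# e.g. "0_lag0" for a current-slice node and "2_lag1" for a lagged node.
def _demo_generate_structure_dynamic() -> StructureModel:
    return generate_structure_dynamic(
        num_nodes=3,
        p=2,
        degree_intra=1,
        degree_inter=2,
    )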
def _generate_inter_structure(
num_nodes: int,
p: int,
degree: float,
graph_type: str,
w_min: float,
w_max: float,
w_decay: float = 1.0,
neg: float = 0.5,
) -> StructureModel:
"""Simulate random DAG between two time slices.
Args:
num_nodes: number of nodes per slice
p: number of slices that influence current slice
degree: expected in-degree of current time slice
graph_type: {'erdos-renyi' 'full'}
w_min: minimum weight for inter-slice nodes
w_max: maximum weight for inter-slice nodes
w_decay: exponent of weights decay for slices that are farther apart. Default is 1.0, which implies no decay
neg: the proportion of edge weights expected to be negative. By default, 50% of the edges are expected
to be negative weight (`neg == 0.5`).
Returns:
G_inter: weighted, bipartite DAG for inter-slice connections
Raises:
ValueError: if graph type not known
"""
if w_min > w_max:
raise ValueError(
"Absolute minimum weight must be less than or equal to maximum weight: "
f"{w_min} > {w_max}"
)
if graph_type == "erdos-renyi":
prob = degree / num_nodes
b = (np.random.rand(p * num_nodes, num_nodes) < prob).astype(float)
elif graph_type == "full": # ignore degree, only for experimental use
b = np.ones([p * num_nodes, num_nodes])
else:
raise ValueError(
f"Unknown inter-slice graph type `{graph_type}`. "
"Valid types are 'erdos-renyi' and 'full'"
)
u = []
for i in range(p):
u_i = np.random.uniform(low=w_min, high=w_max, size=[num_nodes, num_nodes]) / (
w_decay ** i
)
u_i[np.random.rand(num_nodes, num_nodes) < neg] *= -1
u.append(u_i)
u = np.concatenate(u, axis=0) if u else np.empty(b.shape)
a = (b != 0).astype(float) * u
df = pd.DataFrame(
a,
index=[
f"{var}_lag{l_val}" for l_val in range(1, p + 1) for var in range(num_nodes)
],
columns=[f"{var}_lag0" for var in range(num_nodes)],
)
idxs, cols = list(df.index), list(df.columns)
for i in idxs:
df[i] = 0
for i in cols:
df.loc[i, :] = 0
g_inter = StructureModel(df)
return g_inter
def generate_dataframe_dynamic( # pylint: disable=R0914
g: StructureModel,
n_samples: int = 1000,
burn_in: int = 100,
sem_type: str = "linear-gauss",
noise_scale: float = 1.0,
drift: np.ndarray = None,
) -> pd.DataFrame:
"""Simulate samples from dynamic SEM with specified type of noise.
Args:
g: Dynamic DAG
n_samples: number of samples
burn_in: number of samples to discard
sem_type: {linear-gauss,linear-exp,linear-gumbel}
noise_scale: scale parameter of noise distribution in linear SEM
drift: array of drift terms for each node, if None then the drift is 0
Returns:
X: [n,d] sample matrix, row t is X_t
Y: [n,d*p] sample matrix, row t is [X_{t-1}, ..., X_{t-p}]
Raises:
ValueError: if sem_type isn't linear-gauss/linear_exp/linear-gumbel
"""
s_types = ("linear-gauss", "linear-exp", "linear-gumbel")
if sem_type not in s_types:
raise ValueError(f"unknown sem type {sem_type}. Available types are: {s_types}")
intra_nodes = sorted([el for el in g.nodes if "_lag0" in el], key=lambda t: t.split('_lag')[1])
inter_nodes = sorted([el for el in g.nodes if "_lag0" not in el], key=lambda t: t.split('_lag')[1])
w_mat = nx.to_numpy_array(g, nodelist=intra_nodes)
a_mat = nx.to_numpy_array(g, nodelist=intra_nodes + inter_nodes)[
len(intra_nodes) :, : len(intra_nodes)
]
g_intra = nx.DiGraph(w_mat)
g_inter = nx.bipartite.from_biadjacency_matrix(
csr_matrix(a_mat), create_using=nx.DiGraph
)
d = w_mat.shape[0]
p = a_mat.shape[0] // d
total_length = n_samples + burn_in
X = np.zeros([total_length, d])
Xlags = np.zeros([total_length, p * d])
ordered_vertices = list(nx.topological_sort(g_intra))
if drift is None:
drift = np.zeros(d)
for t in range(total_length):
for j in ordered_vertices:
parents = list(g_intra.predecessors(j))
parents_prev = list(g_inter.predecessors(j + p * d))
X[t, j] = (
drift[j]
+ X[t, parents].dot(w_mat[parents, j])
+ Xlags[t, parents_prev].dot(a_mat[parents_prev, j])
)
if sem_type == "linear-gauss":
X[t, j] = X[t, j] + np.random.normal(scale=noise_scale)
elif sem_type == "linear-exp":
X[t, j] = X[t, j] + np.random.exponential(scale=noise_scale)
elif sem_type == "linear-gumbel":
X[t, j] = X[t, j] + np.random.gumbel(scale=noise_scale)
if (t + 1) < total_length:
Xlags[t + 1, :] = np.concatenate([X[t, :], Xlags[t, :]])[: d * p]
return pd.concat(
[
| pd.DataFrame(X[-n_samples:], columns=intra_nodes) | pandas.DataFrame |
import pandas as pd
import numpy as np
from .cleanning import delFromVardict
# # removed from version 0.0.8, replaced by calculating woe directly inside bitable
# def calcWOE(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt):
#
# woe = np.log((eachGoodCnt / eachBadCnt) / (allGoodCnt / allBadCnt))
#
# return woe
# # removed from version 0.0.8, replaced by calculating iv directly inside bitable
# def calcIV(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt):
# # calcIV(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt, label='DEFAULT')
# woe = calcWOE(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt)
# ivcolumn = (eachGoodCnt / allGoodCnt - eachBadCnt / allBadCnt) * woe
# iv = sum(ivcolumn)
#
# return ivcolumn, iv
def bivariate(df, col, label, withIV=True, missingvalue='missing', dealMissing=True):
df = df.replace(np.nan, missingvalue)
gb = df.groupby(col, as_index=False)
total = df.shape[0]
all = gb.count()
bad = gb.sum()[label]
good = (all[label] - bad)
bitable = | pd.DataFrame({col: all[col], 'total': good + bad, 'good': good, 'bad': bad}) | pandas.DataFrame |
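# --- Editor's sketch of the WOE/IV step described by the commented-out helpers above. ---
# This is an assumption about how the bitable is meant to be used downstream, not code from
# the original module; the column names ('good', 'bad') follow the bitable built above.
def _woe_iv_from_bitable(bitable: pd.DataFrame):
    all_good, all_bad = bitable['good'].sum(), bitable['bad'].sum()
    woe = np.log((bitable['good'] / bitable['bad']) / (all_good / all_bad))
    iv = ((bitable['good'] / all_good - bitable['bad'] / all_bad) * woe).sum()
    return woe, iv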
#!/usr/bin/env python
# coding: utf-8
"""
In this script, the results of the friction tests are visualised.
All visualisations are stored in /figures/
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, TU Delft Biomechanical Design"
__credits__ = ["<NAME>, <NAME>, <NAME>"]
__license__ = "CC0-1.0 License"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Imports
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statistics import mean
# Global variables
# The diameter (in m) of the pneumatic cylinder
d_25 = 25 / 1000
# The radius (in m) of the pneumatic cylinder
r_25 = d_25 / 2
# The surface area (in m^2) of the pneumatic cylinder
area = math.pi * r_25**2
# The diameter (in m) of the pneumatic cylinder for the X-ring and corresponding O-ring
d_257 = 25.7 / 1000
# The radius (in m) of the pneumatic cylinder for the X-ring and corresponding O-ring
r_257 = d_257 / 2
# The surface area (in m^2) of the pneumatic cylinder for the X-ring and corresponding O-ring
large_area = math.pi * r_257**2
# The models with different sealing mechanism used in this test
rings = ['O-ring','NAPN','NAP310','PK','KDN','O-ring257','X-ring257']
# The models with different cross-sectional shape used in this test
shapes = ['Circle','Stadium','Kidney','Stadium_lc','Kidney_lc']
# Remove first 15 data points to avoid deviating starting values
drop_amount = 15
# # Friction force test
# Define a dictionary to store all data from the friction force tests
# For each model all variables are stored in this nested dictionary
friction_force = {}
# For each ring type
for ring in rings:
friction_force[ring] = {}
for bar in [1,3,5,7]:
        # Load the data of the corresponding results in .CSV and drop unnecessary columns
ring_df = pd.read_csv(f'./data/friction/{ring}_{bar}bar.csv',delimiter='\s+',header=None,names=(['Time','A','B','C','Laser(mm)','Pressure(bar)','Force(N)']))
ring_df.drop(columns=['A','B','C'],index=ring_df.index[range(drop_amount)],axis=1,inplace=True)
# Store the data in our larger dictionary
friction_force[ring][bar] = {}
# Set the time (in s) and laser (in mm)
friction_force[ring][bar]['Time'] = ring_df['Time']/1000
friction_force[ring][bar]['Laser(mm)'] = ring_df['Laser(mm)']
# Set the pressure (in MPa) and force (in N)
friction_force[ring][bar]['Pressure(bar)'] = ring_df['Pressure(bar)']/10
friction_force[ring][bar]['Force(N)'] = ring_df['Force(N)']
# Calculate force Fp based on the measured pressure (see equation 2 in the report)
# The 25.7 mm rings have a different and larger surface area
if '257' in ring:
Fp = ring_df['Pressure(bar)'] * 10**5 * large_area
else:
Fp = ring_df['Pressure(bar)'] * 10**5 * area
# Calculate the friction force by substracting the measured force with Fp (see equation 3 in the report)
FF = ring_df['Force(N)'] - Fp
friction_force[ring][bar]['FrictionForce'] = FF
friction_force[ring][bar]['FrictionFrom'] = FF[FF>FF.mean()].mean()
friction_force[ring][bar]['FrictionTo'] = FF[FF<FF.mean()].mean()
# For each shape type
for shape in shapes:
friction_force[shape] = {}
for bar in [1,2,3,4,5,6,7]:
# Some shapes extrude at higher pressure, no data is available for them
if bar > 3 and shape not in ['Stadium_lc','Kidney_lc','Kidney', 'Circle']:
break
if bar > 4 and shape not in ['Stadium_lc', 'Kidney_lc', 'Circle']:
break
if bar > 5 and shape not in ['Kidney_lc', 'Circle']:
break
        # Load the data of the corresponding results in .CSV and drop unnecessary columns
shape_df = pd.read_csv(f'./data/friction/{shape}_{bar}bar.csv',delimiter='\s+',header=None,names=(['Time','A','B','C','Laser(mm)','Pressure(bar)','Force(N)']))
shape_df.drop(columns=['A','B','C'],index=shape_df.index[range(drop_amount)],axis=1,inplace=True)
# Store the data in our larger dictionary
friction_force[shape][bar] = {}
# Set the time (in s) and laser (in mm)
friction_force[shape][bar]['Time'] = shape_df['Time']/1000
friction_force[shape][bar]['Laser(mm)'] = shape_df['Laser(mm)']
# Set the pressure (in MPa) and force (in N)
friction_force[shape][bar]['Pressure(bar)'] = shape_df['Pressure(bar)']/10
friction_force[shape][bar]['Force(N)'] = shape_df['Force(N)']
# Calculate force Fp based on the measured pressure (see equation 2 in the report)
Fp = shape_df['Pressure(bar)'] * 10**5 * area
# Calculate the friction force by substracting the measured force with Fp (see equation 3 in the report)
FF = shape_df['Force(N)'] - Fp
friction_force[shape][bar]['FrictionForce'] = FF
friction_force[shape][bar]['FrictionFrom'] = FF[FF>FF.mean()].mean()
friction_force[shape][bar]['FrictionTo'] = FF[FF<FF.mean()].mean()
# #### Friction force range definition plot - visual for the methodology section
plt.annotate(text='',xy=(12,friction_force['O-ring'][1]['FrictionFrom']), xytext=(12,friction_force['O-ring'][1]['FrictionTo']), arrowprops=dict(arrowstyle='<->', lw=2))
plt.hlines(xmin=0, xmax=70,y=friction_force['O-ring'][1]['FrictionFrom'], linestyles='dashed', colors='0', lw=2)
plt.hlines(xmin=0, xmax=70,y=friction_force['O-ring'][1]['FrictionTo'], linestyles='dashed', colors='0', lw=2)
plt.plot(friction_force['O-ring'][1]['Time'],friction_force['O-ring'][1]['FrictionForce'],'tab:blue',label='O-ring')
plt.plot(friction_force['NAPN'][1]['Time'],friction_force['NAPN'][1]['FrictionForce'],'tab:orange',alpha=0.25,label='NAPN')
plt.plot(friction_force['NAP310'][1]['Time'],friction_force['NAP310'][1]['FrictionForce'],'tab:green',alpha=0.25,label='NAP310')
plt.plot(friction_force['PK'][1]['Time'],friction_force['PK'][1]['FrictionForce'],'tab:red',alpha=0.25,label='PK')
plt.plot(friction_force['KDN'][1]['Time'],friction_force['KDN'][1]['FrictionForce'],'tab:purple', alpha=0.25,label='KDN')
plt.xlim([5,15])
plt.xlabel('Time (s)')
plt.ylabel('Force (N)')
plt.legend(loc='lower center',bbox_to_anchor=(0.5,-0.3),ncol=5)
plt.savefig('./figures/method_frictionforce_1bar_zoom.pdf',bbox_inches = 'tight')
plt.clf()
# #### Standard deviation & Standard error
# Function to calculate standard error for a specific test
def calculate_se(friction_force,model,bar):
# Calculate the mean to define retracting and extending parts
frictionforce_mean = friction_force[model][bar]['FrictionForce'].mean()
# Variable to store the friction force
frictionforce = list(friction_force[model][bar]['FrictionForce'])
# Variables for results and counter
frictionforce_se_means = []
i = 0
# Loop through the data and break them up into separate tests
while i < len(frictionforce) - 1:
# Lists for retracting and extending parts of a single test
retracting = []
extending = []
# First the retracting part of a test is done
# Get all values above the mean
while len(retracting) < 100 or frictionforce[i] > frictionforce_mean:
retracting.append(frictionforce[i])
i += 1
# Break if it gets below the mean
if i > len(frictionforce) - 1:
break
# Secondly the extending part of a test is done
# Get all values below the mean
while len(extending) < 100 or frictionforce[i] < frictionforce_mean:
extending.append(frictionforce[i])
i += 1
# Break if it gets above the mean
if i > len(frictionforce) - 1:
break
# The friction force range is defined as the difference between the mean friction force of the retracting and extending strokes
frictionforce_se_means.append(mean(retracting)-mean(extending))
# Standard error is calculated by the standard deviation of the means
# Also return the mean of the friction force ranges across the tests
# Finally return the last test to determine the standard deviation of one extending and retracting stroke
return mean(frictionforce_se_means),np.std(frictionforce_se_means),extending,retracting
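# --- Editor's sanity check for calculate_se (illustrative only; not part of the original script). ---
# A synthetic +/-1 N square wave (150 samples per stroke, 4 cycles) should give a friction force
# range of ~2 N with ~0 standard error; the nested dict mimics the friction_force structure above.
def _demo_calculate_se():
    square_wave = pd.Series(([1.0] * 150 + [-1.0] * 150) * 4)
    fake = {'demo': {1: {'FrictionForce': square_wave}}}
    mean_range, se_range, extending, retracting = calculate_se(fake, 'demo', 1)
    print(round(mean_range, 3), round(se_range, 3))  # expected: 2.0 0.0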
# For each model use the calculate_se() function to acquire the friction force range and the standard error
# Additionally for each of the rings and shapes the standard deviation of a single test is saved
std_single_test_rings = pd.DataFrame(columns=['Bar']+rings)
std_single_test_rings = std_single_test_rings.set_index('Bar')
for ring in rings:
for bar in [1,3,5,7]:
mean_ff,se_ff,extending,retracting = calculate_se(friction_force,ring,bar)
friction_force[ring][bar]['SE_FrictionForce'] = se_ff
friction_force[ring][bar]['Mean_FrictionForce'] = mean_ff
# For each retracting and extending test, check if the index already exists
if str(bar)+'_bar_retracting' not in list(std_single_test_rings.index):
std_single_test_rings = std_single_test_rings.append(pd.Series(name= str(bar)+'_bar_retracting'))
if str(bar)+'_bar_extending' not in list(std_single_test_rings.index):
std_single_test_rings = std_single_test_rings.append(pd.Series(name= str(bar)+'_bar_extending'))
# For each individual test save the average and standard deviation
std_single_test_rings.loc[str(bar)+'_bar_retracting'][ring] = f'{str(round(mean(retracting),2))} $\pm$ {round(np.std(retracting),2)}'
std_single_test_rings.loc[str(bar)+'_bar_extending'][ring] = f'{str(round(mean(extending),2))} $\pm$ {round(np.std(extending),2)}'
# Again define a dataframe to store the standard deviations of each single test
std_single_test_shapes = pd.DataFrame(columns=['Bar']+shapes)
std_single_test_shapes = std_single_test_shapes.set_index('Bar')
for shape in shapes:
for bar in [1,2,3,4,5,6,7]:
try:
mean_ff,se_ff,extending,retracting = calculate_se(friction_force,shape,bar)
friction_force[shape][bar]['SE_FrictionForce'] = se_ff
friction_force[shape][bar]['Mean_FrictionForce'] = mean_ff
# For each retracting and extending test, check if the index already exists
if str(bar)+'_bar_retracting' not in list(std_single_test_shapes.index):
std_single_test_shapes = std_single_test_shapes.append(pd.Series(name= str(bar)+'_bar_retracting'))
if str(bar)+'_bar_extending' not in list(std_single_test_shapes.index):
std_single_test_shapes = std_single_test_shapes.append(pd.Series(name= str(bar)+'_bar_extending'))
# For each test save the average and standard deviation
std_single_test_shapes.loc[str(bar)+'_bar_retracting'][shape] = f'{str(round(mean(retracting),2))} $\pm$ {round(np.std(retracting),2)}'
std_single_test_shapes.loc[str(bar)+'_bar_extending'][shape] = f'{str(round(mean(extending),2))} $\pm$ {round(np.std(extending),2)}'
except Exception as e:
            print(f'No data for {shape} - {bar} bar due to extrusion of the O-ring ({e})')
print(std_single_test_rings)
# print(std_single_test_rings.to_latex(escape=False))
print(std_single_test_shapes)
# print(std_single_test_shapes.to_latex(escape=False))
# #### Friction force range plot 25mm
# Variables to make plotting of friction force range with standard error more clear
fr = {'Pressure': [.1,.3,.5,.7],
'O_ring': [friction_force['O-ring'][i]['Mean_FrictionForce'] for i in friction_force['O-ring']],
'NAPN': [friction_force['NAPN'][i]['Mean_FrictionForce'] for i in friction_force['NAPN']],
'NAP310': [friction_force['NAP310'][i]['Mean_FrictionForce'] for i in friction_force['NAP310']],
'PK': [friction_force['PK'][i]['Mean_FrictionForce'] for i in friction_force['PK']],
'KDN': [friction_force['KDN'][i]['Mean_FrictionForce'] for i in friction_force['KDN']],
'O_ring257': [friction_force['O-ring257'][i]['Mean_FrictionForce'] for i in friction_force['O-ring257']],
'X_ring257': [friction_force['X-ring257'][i]['Mean_FrictionForce'] for i in friction_force['X-ring257']],
}
fr = pd.DataFrame(data=fr)
se = {'Pressure': [.1,.3,.5,.7],
'O_ring': [friction_force['O-ring'][i]['SE_FrictionForce'] for i in friction_force['O-ring']],
'NAPN': [friction_force['NAPN'][i]['SE_FrictionForce'] for i in friction_force['NAPN']],
'NAP310': [friction_force['NAP310'][i]['SE_FrictionForce'] for i in friction_force['NAP310']],
'PK': [friction_force['PK'][i]['SE_FrictionForce'] for i in friction_force['PK']],
'KDN': [friction_force['KDN'][i]['SE_FrictionForce'] for i in friction_force['KDN']],
'O_ring257': [friction_force['O-ring257'][i]['SE_FrictionForce'] for i in friction_force['O-ring257']],
'X_ring257': [friction_force['X-ring257'][i]['SE_FrictionForce'] for i in friction_force['X-ring257']],
}
se = pd.DataFrame(data=se)
# Visualize the friction force range - 25 mm cylinder
plt.errorbar(fr.Pressure,fr.O_ring257,se.O_ring257,color='tab:blue',alpha=0.25, linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.X_ring257,se.X_ring257,color='tab:brown',alpha=0.25,linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.O_ring,se.O_ring,color='tab:blue',label='O-ring', linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.NAPN,se.NAPN,color='tab:orange',label='NAPN',linestyle='dashdot',capsize=2)
plt.errorbar(fr.Pressure,fr.NAP310,se.NAP310,color='tab:green',label='NAP310', linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.PK,se.PK,color='tab:red',label='PK',linestyle='dashed',capsize=2)
plt.errorbar(fr.Pressure,fr.KDN,se.KDN,color='tab:purple',label='KDN',linewidth=1,capsize=2)
plt.xlabel('Pressure (MPa)')
plt.ylabel('Dynamic friction force range (N)')
plt.legend()
plt.savefig('./figures/result_frictionforcerange_25mm.pdf',bbox_inches = 'tight')
plt.clf()
# #### Friction force range plot 25.7mm
# Visualize the friction force range - 25.7 mm cylinder
plt.errorbar(fr.Pressure,fr.O_ring,se.O_ring,color='tab:blue',alpha=0.25, linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.NAPN,se.NAPN,color='tab:orange',alpha=0.25,linestyle='dashdot',capsize=2)
plt.errorbar(fr.Pressure,fr.NAP310,se.NAP310,color='tab:green',alpha=0.25, linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.PK,se.PK,color='tab:red',alpha=0.25,linestyle='dashed',capsize=2)
plt.errorbar(fr.Pressure,fr.KDN,se.KDN,color='tab:purple',alpha=0.25,linewidth=1,capsize=2)
plt.errorbar(fr.Pressure,fr.O_ring257,se.O_ring257,color='tab:blue',label='O-ring', linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.X_ring257,se.X_ring257,color='tab:brown',label='X-ring',linestyle=(0,(5,2,2)),capsize=2)
plt.xlabel('Pressure (MPa)')
plt.ylabel('Dynamic friction force range (N)')
plt.legend()
plt.savefig('./figures/result_frictionforcerange_257mm.pdf',bbox_inches = 'tight')
plt.clf()
# #### Friction force range plot different shapes
# Again variables to make plotting of friction force range with standard error more clear
fr_s = {'Pressure': [.1,.2,.3],
'Stadium': [friction_force['Stadium'][i]['Mean_FrictionForce'] for i in friction_force['Stadium']],
}
fr_s = pd.DataFrame(data=fr_s)
se_s = {'Pressure': [.1,.2,.3],
'Stadium': [friction_force['Stadium'][i]['SE_FrictionForce'] for i in friction_force['Stadium']],
}
se_s = pd.DataFrame(data=se_s)
fr_ck = {'Pressure': [.1,.2,.3,.4],
'Circle': [friction_force['Circle'][i]['Mean_FrictionForce'] for i in friction_force['Circle']][:4],
'Kidney': [friction_force['Kidney'][i]['Mean_FrictionForce'] for i in friction_force['Kidney']],
}
fr_ck = pd.DataFrame(data=fr_ck)
se_ck = {'Pressure': [.1,.2,.3,.4],
'Circle': [friction_force['Circle'][i]['SE_FrictionForce'] for i in friction_force['Circle']][:4],
'Kidney': [friction_force['Kidney'][i]['SE_FrictionForce'] for i in friction_force['Kidney']],
}
se_ck = pd.DataFrame(data=se_ck)
# Visualize the friction force range - different shapes
plt.errorbar(fr_ck.Pressure,fr_ck.Circle,se_ck.Circle,color='0.8',label='Circle',linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr_s.Pressure,fr_s.Stadium,se_s.Stadium,color='tab:olive', label='Stadium',linestyle='dashdot',capsize=2)
plt.errorbar(fr_ck.Pressure,fr_ck.Kidney,se_ck.Kidney,color='tab:cyan', label='Kidney',capsize=2)
plt.xlabel('Pressure (MPa)')
plt.ylabel('Dynamic friction force range (N)')
plt.legend(loc='lower center',bbox_to_anchor=(0.5,-0.3),ncol=3)
plt.savefig('./figures/result_frictionforcerange_shape.pdf',bbox_inches = 'tight')
plt.clf()
# #### Friction force range plot different shapes with lower clearance
# Again variables to make plotting of friction force range with standard error more clear
fr_s_lc = {'Pressure': [.1,.2,.3,.4,.5],
'Stadium_lc': [friction_force['Stadium_lc'][i]['Mean_FrictionForce'] for i in friction_force['Stadium_lc']],
}
fr_s_lc = | pd.DataFrame(data=fr_s_lc) | pandas.DataFrame |
from __future__ import division
from itertools import combinations
import numpy as np
import pandas as pd
import scipy.integrate
from statsmodels.tools.tools import ECDF
from sklearn import preprocessing
import seaborn as sns
class BaseSample(object):
def __init__(self, data_frame, number_arms=2):
self.integrate = None
self.data = | pd.DataFrame.copy(data_frame) | pandas.DataFrame.copy |
import pandas as pd
import numpy as np
import csv
import os
import matplotlib.pyplot as plt
## Written by <NAME>
def topspin_to_pd(input_filename):
###row_dict was written by <NAME> ###
Rows = dict()
with open(input_filename) as p:
reader = csv.reader(p, delimiter=" ")
for row in reader:
row = [x for x in row if x]
if "#" in row or not row:
continue
else:
try:
Rows[row[0]] = [row[3],row[4]]
except:
pass
HSQC_Data_df = pd.DataFrame.from_dict(Rows, orient='index',columns = ['1H','13C']).astype('float')
HSQC_Data_df = HSQC_Data_df.sort_values(by=['1H'],ascending = True).round({'1H':2,'13C':1})
return HSQC_Data_df
def hsqc_to_np(input_filename,C_scale=100,H_scale=100, output_numpy=None): # input_filename is the csv filename, e.g. flavonoid.csv
qc = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 18:27:38 2021
@author: sergiomarconi
"""
import pandas as pd
from sklearn.preprocessing import normalize
from src.functions_brdf import *
def kld_transform(hsi_cube, kld_out):
#brick = brick.values
kld_groups = | pd.read_csv(kld_out, header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 17:41:26 2020
@author: <NAME>
"""
#Import packages and functions
import pickle
import pandas as pd
import numpy as np
import joblib
from functions import create_ABseries, standardize_data
#Set random seed for replicability
np.random.seed(78937)
#Create 96,000 data series for training models
a_values = [0,0.2]
t_values = [0,30]
constant_values = [4,10]
pointsa_values = [3,5]
pointsb_values = [5,10]
smd_values = [0,0,0,0,0,1,2,3,4,5]
dataset = []
for i in range(300):
for a in a_values:
for t in t_values:
for constant in constant_values:
for points_a in pointsa_values:
for points_b in pointsb_values:
for smd in smd_values:
dataseries = create_ABseries(a, t, constant, points_a,
points_b, smd)
if smd == 0:
dataset.append([dataseries, 0])
else:
dataset.append([dataseries, 1])
#Randomize order of data
shuffled_order = np.random.choice(range(96000), 96000, replace = False)
shuffled_dataset = []
for i in shuffled_order:
shuffled_dataset.append(dataset[i])
#Organize and standardize training data (96,000 graphs)
x = np.empty((0,8))
y = np.empty((0,))
for i in range(len(shuffled_dataset)):
series = shuffled_dataset[i][0]
features = standardize_data(series).reshape(1,-1)
x = np.vstack((x, features))
y = np.hstack((y, shuffled_dataset[i][1]))
#Train SGD with code from Lanovaz et al. (2020)
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
x_train, x_valid ,y_train, y_valid =\
train_test_split(x, y, test_size = 0.50, random_state = 48151)
def trainSGD(x_train, y_train, x_valid, y_valid):
lr = [0.00001, 0.0001, 0.001, 0.01]
iterations = np.arange(5,1000,5)
SGDresults = []
best_acc = 0
for i in iterations:
for n in lr:
sgd = SGDClassifier(loss = "hinge", penalty = 'elasticnet', alpha = n,
max_iter = i, class_weight = {0:1, 1:0.20},
random_state=48151)
sgd.fit(x_train, y_train)
current_acc = sgd.score(x_valid, y_valid)
SGDresults.append([n, i, current_acc])
if current_acc > best_acc:
best_acc = current_acc
filename = 'best_modelsgd.sav'
joblib.dump(sgd, filename)
return joblib.load('best_modelsgd.sav')
sgd = trainSGD(x_train, y_train, x_valid, y_valid)
#Predictions of SGD on test data (1,024 graphs)
f = open("dataset.txt","rb")
test_data = pickle.load(f)
sgd_results = np.empty((0,))
for i in range(len(test_data)):
series = test_data[i][0]
features = standardize_data(series).reshape(1,-1)
sgd_results = np.hstack((sgd_results, sgd.predict(features).flatten()))
sgd_results = pd.DataFrame(sgd_results)
sgd_results.to_csv('sgd_values.csv', header = False, index = False)
#Train SVC with code from Lanovaz et al. (2020)
from sklearn.svm import SVC
def trainSVC(train_x, train_y, valid_x, valid_y):
svc = SVC(class_weight = {0:1, 1:0.5})
gamma = [0.00001, 0.0001, 0.001, 0.01, 0.1]
C = [1,10,100]
SVCresults = []
best_acc = 0
for c in C:
for n in gamma:
svc.set_params(gamma =n, C = c)
svc.fit(train_x, train_y)
current_acc = svc.score(valid_x, valid_y)
SVCresults.append([n, c, current_acc])
if current_acc > best_acc:
best_acc = current_acc
filename = 'best_modelsvc.sav'
joblib.dump(svc, filename)
return joblib.load('best_modelsvc.sav')
svc = trainSVC(x_train, y_train, x_valid, y_valid)
#Prediction of SVC on test data (1,024 graphs)
svc_results = np.empty((0,))
for i in range(len(test_data)):
series = test_data[i][0]
features = standardize_data(series).reshape(1,-1)
svc_results = np.hstack((svc_results, svc.predict(features).flatten()))
svc_results = | pd.DataFrame(svc_results) | pandas.DataFrame |
import copy
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from autogluon.tabular.utils.features import AbstractFeatureGenerator
class GeneratorHelper:
@staticmethod
def fit_transform_assert(input_data: DataFrame, generator: AbstractFeatureGenerator, expected_feature_metadata_in_full: dict = None, expected_feature_metadata_full: dict = None):
# Given
original_input_data = copy.deepcopy(input_data)
# Raise exception
with pytest.raises(AssertionError):
# Can't call transform before fit_transform
generator.transform(input_data)
if len(input_data.columns) > 0:
# Raise exception
with pytest.raises(AssertionError):
input_data_with_duplicate_columns = pd.concat([input_data, input_data], axis=1)
# Can't call fit_transform with duplicate column names
generator.fit_transform(input_data_with_duplicate_columns)
assert not generator.is_fit()
output_data = generator.fit_transform(input_data)
assert generator.is_fit()
with pytest.raises(AssertionError):
# Can't call fit_transform after fit
generator.fit_transform(input_data)
# Ensure input_data is not altered inplace
assert input_data.equals(original_input_data)
# Ensure unchanged row count
assert len(input_data) == len(output_data)
# Ensure transform and fit_transform output are the same for training data
output_data_transform = generator.transform(input_data)
assert output_data.equals(output_data_transform)
# Ensure transform will be the same if unnecessary columns are removed from input
output_data_transform = generator.transform(input_data[generator.features_in])
assert output_data.equals(output_data_transform)
# Ensure transform will be the same if input feature order is not the same as generator.features_in
reversed_column_names = list(input_data.columns)
reversed_column_names.reverse()
output_data_transform = generator.transform(input_data[reversed_column_names])
assert output_data.equals(output_data_transform)
# Ensure output feature order is correct
assert generator.features_out == list(output_data.columns)
if generator.features_in:
with pytest.raises(KeyError):
# Error if missing input feature
generator.transform(input_data.drop(generator.features_in[0]))
with pytest.raises(KeyError):
# Error if missing all input features
generator.transform(pd.DataFrame())
# Ensure unknown input columns don't affect output
input_data_with_extra = copy.deepcopy(input_data)
input_data_with_extra['__UNKNOWN_COLUMN__'] = 0
output_data_transform = generator.transform(input_data_with_extra)
assert output_data.equals(output_data_transform)
# Ensure feature_metadata_in is as expected
if expected_feature_metadata_in_full is not None:
assert expected_feature_metadata_in_full == generator.feature_metadata_in.to_dict(inverse=True)
# Ensure feature_metadata is as expected
if expected_feature_metadata_full is not None:
assert expected_feature_metadata_full == generator.feature_metadata.to_dict(inverse=True)
return output_data
class DataHelper:
@staticmethod
def generate_empty() -> DataFrame:
return DataFrame(index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
@staticmethod
def generate_obj_feature() -> Series:
return | Series(['a', 'b', 'a', 'd', 'd', 'd', 'c', np.nan, np.nan]) | pandas.Series |
from PyQt5.QtWidgets import QWidget,QGridLayout, QTableWidget, QTableWidgetItem, QHeaderView, QAbstractItemView, QLabel, QPushButton, QMessageBox
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtCore import Qt
import pandas as pd
import numpy as np
class CoreStrategy(QWidget):
def __init__(self):
super(CoreStrategy, self).__init__()
myfont = QFont('Arial', 12, QFont.Bold)
self.setFont(myfont)
        # First, load the data
        # Load the ETFs that currently exist
try:
self.etfs = ['^FCHI']#np.load('./Data/etfs.npy').tolist()
except FileNotFoundError:
self.etfs = ['^FCHI']#['SPY', 'QQQ', 'TLT', 'GLD', 'IWM', 'EFA', 'HYG', 'XLV']
self.supply_demande_zones_rownames = ['SZ Distal', 'SZ Proximal', 'DZ Proximal', 'DZ Distal']
self.proposed_trade_rownames = ['Trade Type', 'Entry Proximal', 'Entry Distal', 'Exit Proximal', 'Risk', 'Reward', 'Ratio']
self.odds_enhancers_rownames = ['Freshness(2)', 'Profit Zone(2)', 'Strength(2)', 'Time(1)', 'Curve(1)', 'Trend(2)', 'Total Score', 'Trade Stastus']
# 读取表格数据,如果其中某个etf的数据不在self.etfs中的,将其删除
# 在self.etfs中而不在表格中的,需要添加一行
try:
self.supply_demande_zones_data = pd.read_csv('./Data/zones.csv', index_col=0)
except FileNotFoundError:
self.supply_demande_zones_data = pd.DataFrame(index=self.etfs, columns=self.supply_demande_zones_rownames)
try:
self.proposed_trade_data = pd.read_csv('./Data/proposed_trades.csv', index_col=0)
except FileNotFoundError:
self.proposed_trade_data = | pd.DataFrame(index=self.etfs, columns=self.proposed_trade_rownames) | pandas.DataFrame |
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.engine.pathfinder import sparse_los_from_nx_graph
from syspy.assignment import raw as raw_assignment
from tqdm import tqdm
def jam_time(links, ref_time='time', flow='load', alpha=0.15, beta=4, capacity=1500):
alpha = links['alpha'] if 'alpha' in links.columns else alpha
beta = links['beta'] if 'beta' in links.columns else beta
capacity = links['capacity'] if 'capacity' in links.columns else capacity
return links[ref_time] * (1 + alpha * np.power((links[flow] / capacity), beta))
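# --- Editor's illustration (not part of the original quetzal module; the two-link table is made up). ---
# jam_time implements a BPR-style volume-delay function:
#     congested_time = free_flow_time * (1 + alpha * (flow / capacity) ** beta)
def _demo_jam_time() -> pd.Series:
    toy_links = pd.DataFrame({
        'time': [60.0, 120.0],    # free-flow times
        'load': [750.0, 3000.0],  # assigned flows
    })
    # with the defaults alpha=0.15, beta=4, capacity=1500:
    #   link 0 -> 60 * (1 + 0.15 * 0.5**4) ~ 60.6
    #   link 1 -> 120 * (1 + 0.15 * 2**4)  = 408.0
    return jam_time(toy_links)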
def z_prime(row, phi):
delta = row['aux_flow'] - row['former_flow']
return (delta * row['time'] * (row['former_flow'] + phi * delta)).sum()
def find_phi(links, inf=0, sup=1, tolerance=1e-6):
if z_prime(links, inf) > 0:
        print('done: ', inf)
return inf
if z_prime(links, sup) < 0:
return sup
m = (inf + sup) / 2
if (sup - inf) < tolerance:
return m
z_prime_m = z_prime(links, m)
if z_prime_m == 0:
return m
elif z_prime_m < 0:
inf = m
elif z_prime_m > 0:
sup = m
return find_phi(links, inf, sup, tolerance)
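# --- Editor's illustration of the bisection line search (toy numbers, not from the original source). ---
# With one link losing flow and one gaining it, z_prime is linear in phi here and find_phi
# converges to the optimal step of ~0.5.
def _demo_find_phi() -> float:
    toy = pd.DataFrame({
        'time': [1.0, 1.0],
        'former_flow': [100.0, 0.0],
        'aux_flow': [0.0, 100.0],
    })
    return find_phi(toy)  # ~0.5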
class RoadPathFinder:
def __init__(self, model):
self.zones = model.zones.copy()
self.road_links = model.road_links.copy()
self.zone_to_road = model.zone_to_road.copy()
try:
self.volumes = model.volumes.copy()
except AttributeError:
pass
def aon_road_pathfinder(
self,
time='time',
ntleg_penalty=1e9,
cutoff=np.inf,
access_time='time',
**kwargs,
):
road_links = self.road_links
road_links['index'] = road_links.index
indexed = road_links.set_index(['a', 'b']).sort_index()
ab_indexed_dict = indexed['index'].to_dict()
road_graph = nx.DiGraph()
road_graph.add_weighted_edges_from(
self.road_links[['a', 'b', time]].values.tolist()
)
zone_to_road = self.zone_to_road.copy()
zone_to_road['time'] = zone_to_road[access_time]
zone_to_road = zone_to_road[['a', 'b', 'direction', 'time']]
zone_to_road.loc[zone_to_road['direction'] == 'access', 'time'] += ntleg_penalty
road_graph.add_weighted_edges_from(
zone_to_road[['a', 'b', 'time']].values.tolist()
)
def node_path_to_link_path(road_node_list, ab_indexed_dict):
tuples = list(zip(road_node_list[:-1], road_node_list[1:]))
road_link_list = [ab_indexed_dict[t] for t in tuples]
return road_link_list
def path_to_ntlegs(path):
try:
return [(path[0], path[1]), (path[-2], path[-1])]
except IndexError:
return []
los = sparse_los_from_nx_graph(
road_graph,
pole_set=set(self.zones.index),
cutoff=cutoff + ntleg_penalty,
**kwargs
)
los['node_path'] = los['path'].apply(lambda p: p[1:-1])
los['link_path'] = los['node_path'].apply(
lambda p: node_path_to_link_path(p, ab_indexed_dict)
)
los['ntlegs'] = los['path'].apply(path_to_ntlegs)
los.loc[los['origin'] != los['destination'], 'gtime'] -= ntleg_penalty
self.car_los = los.rename(columns={'gtime': 'time'})
def frank_wolfe_step(
self,
iteration=0,
log=False,
speedup=True,
volume_column='volume_car'
):
links = self.road_links # not a copy
# a
links['eq_jam_time'] = links['jam_time'].copy()
links['jam_time'] = jam_time(links, ref_time='time', flow='flow')
# b
self.aon_road_pathfinder(time='jam_time')
merged = pd.merge(
self.volumes,
self.car_los,
on=['origin', 'destination']
)
auxiliary_flows = raw_assignment.assign(
merged[volume_column],
merged['link_path']
)
auxiliary_flows.columns = ['flow']
links['aux_flow'] = auxiliary_flows['flow']
links['aux_flow'].fillna(0, inplace=True)
links['former_flow'] = links['flow'].copy()
# c
phi = 2 / (iteration + 2)
if iteration > 0 and speedup:
phi = find_phi(links)
if phi == 0:
return True
if log:
print('step: %i ' % iteration, 'moved = %.1f %%' % (phi * 100))
self.car_los['iteration'] = iteration
self.car_los['phi'] = phi
links['flow'] = (1 - phi) * links['flow'] + phi * links['aux_flow']
links['flow'].fillna(0, inplace=True)
        return False  # end of the algorithm
def process_car_los(self, car_los_list):
df = pd.concat(car_los_list).sort_values('iteration')
phi_series = df.groupby('iteration')['phi'].first()
phi_series = phi_series.loc[phi_series > 0]
        # will not work if road_links.index has mixed types
groupby = df.groupby(df['link_path'].apply(lambda l: tuple(l)))
iterations = groupby['iteration'].apply(lambda s: tuple(s))
los = groupby.first()
los['iterations'] = iterations
def path_weight(iterations):
w = 0
for i in phi_series.index:
phi = phi_series[i]
if i in iterations:
w = w + (1 - w) * phi
else:
w = w * (1 - phi)
return w
combinations = {
i: path_weight(i)
for i in set(los['iterations'].apply(lambda l: tuple(l)))
}
# weight
los['weight'] = los['iterations'].apply(lambda l: combinations[l])
# ntleg_time
time_dict = self.zone_to_road.set_index(['a', 'b'])['time'].to_dict()
los['ntleg_time'] = los['ntlegs'].apply(lambda p: sum([time_dict[l] for l in p]))
# equilibrium_jam_time
time_dict = self.road_links['eq_jam_time'].to_dict()
los['link_eq_time'] = los['link_path'].apply(lambda p: sum([time_dict[l] for l in p]))
los['eq_time'] = los['link_eq_time'] + los['ntleg_time']
# actual_time
time_dict = self.road_links['jam_time'].to_dict()
los['link_actual_time'] = los['link_path'].apply(lambda p: sum([time_dict[l] for l in p]))
los['actual_time'] = los['link_actual_time'] + los['ntleg_time']
# free_time
time_dict = self.road_links['time'].to_dict()
los['link_free_time'] = los['link_path'].apply(lambda p: sum([time_dict[l] for l in p]))
los['free_time'] = los['link_free_time'] + los['ntleg_time']
return los.reset_index(drop=True)
def get_relgap(self, car_los):
los = car_los.copy()
los = pd.merge(los, self.volumes, on=['origin', 'destination'])
min_time = los.groupby(['origin', 'destination'], as_index=False)['actual_time'].min()
los = | pd.merge(los, min_time, on=['origin', 'destination'], suffixes=['', '_minimum']) | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 16:59:12 2021
@author: <NAME>
"""
#IMPORT LIBRARIES------------------------------------------------------------->
import pandas as pd
#LOAD DATA-------------------------------------------------------------------->
"""The data can be located at:
https://www.kaggle.com/nasa/astronaut-yearbook
"""
dataset = | pd.read_csv("astronauts.csv") | pandas.read_csv |
'''
Utility scripts
'''
import argparse
import copy
import logging
import sys
import typing
import pandas as pd
_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def time_granularity_value_to_stringfy_time_format(granularity_int: int) -> str:
try:
granularity_int = int(granularity_int)
except ValueError:
raise ValueError("The given granularity is not int format!")
granularity_dict = {
14: "%Y-%m-%d %H:%M:%S",
13: "%Y-%m-%d %H:%M",
12: "%Y-%m-%d %H",
11: "%Y-%m-%d",
10: "%Y-%m",
9: "%Y"
}
if granularity_int in granularity_dict:
return granularity_dict[granularity_int]
else:
_logger.warning("Unknown time granularity value as {}! Will use second level.".format(str(granularity_int)))
return granularity_dict[14]
def get_time_granularity(time_column: pd.DataFrame) -> str:
if "datetime" not in time_column.dtype.name:
try:
time_column = pd.to_datetime(time_column)
except:
raise ValueError("Can't parse given time column!")
if len(time_column.unique()) == 1:
allow_duplicate_amount = 0
else:
allow_duplicate_amount = 1
time_granularity = 'second'
if any(time_column.dt.minute != 0) and len(time_column.dt.minute.unique()) > allow_duplicate_amount:
time_granularity = 'minute'
elif any(time_column.dt.hour != 0) and len(time_column.dt.hour.unique()) > allow_duplicate_amount:
time_granularity = 'hour'
elif any(time_column.dt.day != 0) and len(time_column.dt.day.unique()) > allow_duplicate_amount:
# it is also possible weekly data
is_weekly_data = True
time_column_sorted = time_column.sort_values()
temp1 = time_column_sorted.iloc[0]
        for i in range(1, len(time_column_sorted)):
            temp2 = time_column_sorted.iloc[i]
            if (temp2 - temp1).days != 7:
                is_weekly_data = False
                break
            temp1 = temp2
if is_weekly_data:
time_granularity = 'week'
else:
time_granularity = 'day'
elif any(time_column.dt.month != 0) and len(time_column.dt.month.unique()) > allow_duplicate_amount:
time_granularity = 'month'
elif any(time_column.dt.year != 0) and len(time_column.dt.year.unique()) > allow_duplicate_amount:
time_granularity = 'year'
else:
_logger.error("Can't guess the time granularity for this dataset! Will use as second")
return time_granularity
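# --- Editor's usage sketch (illustrative only; not part of the original utilities). ---
# A daily date range is detected as 'day', and granularity code 11 maps to a date-only format.
def _demo_time_granularity():
    daily = pd.Series(pd.date_range("2020-01-01", periods=10, freq="D"))
    print(get_time_granularity(daily))                         # 'day'
    print(time_granularity_value_to_stringfy_time_format(11))  # '%Y-%m-%d'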
def join_datasets_by_files(files: typing.List[typing.Union[str, pd.DataFrame]], how: str = "left") -> pd.DataFrame:
"""
:param how: the method to join the dataframe, {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘left’
How to handle the operation of the two objects.
left: use calling frame’s index (or column if on is specified)
right: use other’s index.
outer: form union of calling frame’s index (or column if on is specified) with other’s index, and sort it. lexicographically.
inner: form intersection of calling frame’s index (or column if on is specified) with other’s index, preserving the order of the calling’s one.
:param files: either a path to csv or a DataFrame directly
:return: a joined DataFrame object
"""
if not isinstance(files, list):
raise ValueError("Input must be a list of files")
if len(files) < 2:
raise ValueError("Input files amount must be larger than 2")
_logger.info("Totally {} files.".format(str(len(files))))
necessary_column_names = {"region_wikidata", "precision", "time"}
ignore_column_names = {"region_wikidata", "precision", "time", "variable_name", "variable", "region_Label", "calendar",
"productLabel", "qualityLabel"}
loaded_dataframes = []
loaded_filenames = []
for i, each in enumerate(files):
if isinstance(each, str):
try:
temp_loaded_df = pd.read_csv(each)
except Exception as e:
_logger.warning("Failed on loading dataframe No.{}".format(str(i)))
_logger.error(str(e))
continue
elif isinstance(each, pd.DataFrame):
temp_loaded_df = each
else:
_logger.warning("Unsupported format '{}' on No.{} input, will ignore.".format(str(type(each)), str(i)))
continue
if len(set(temp_loaded_df.columns.tolist()).intersection(necessary_column_names)) != len(necessary_column_names):
_logger.error("Following columns {} are necessary to be exists".format(str(necessary_column_names)))
raise ValueError("Not all columns found on given No.{} datasets {}.".format(str(i), each))
loaded_dataframes.append(temp_loaded_df)
loaded_filenames.append(each)
# use first input df as base df
output_df = copy.deepcopy(loaded_dataframes[0])
# drop_columns = []
# for col_name in ["productLabel", "qualityLabel"]:
# if col_name in output_df:
# drop_columns.append(col_name)
# if drop_columns:
# output_df = output_df.drop(drop_columns, axis=1)
possible_name = []
for each_col_name in output_df.columns:
if each_col_name not in ignore_column_names and "label" not in each_col_name.lower():
possible_name.append(each_col_name)
if len(possible_name) != 1:
_logger.error("get multiple possible name???")
_logger.error(str(output_df.columns))
source_precision = output_df['precision'].iloc[0]
output_df = output_df[["region_wikidata", "time", possible_name[0]]]
output_df = output_df.dropna()
output_df = output_df.drop_duplicates()
# transfer the datetime format to ensure format match
time_stringfy_format = time_granularity_value_to_stringfy_time_format(source_precision)
output_df['time'] = | pd.to_datetime(output_df['time']) | pandas.to_datetime |
from itertools import product
import numpy as np
import pytest
from pandas.core.dtypes.common import is_interval_dtype
import pandas as pd
import pandas._testing as tm
class TestSeriesConvertDtypes:
    # The answerdict has keys that are tuples of 4 tuples, corresponding to the arguments
# infer_objects, convert_string, convert_integer, convert_boolean
# This allows all 16 possible combinations to be tested. Since common
# combinations expect the same answer, this provides an easy way to list
# all the possibilities
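    # For example, the key ((True, False), (True, False), (True,), (True, False)) covers every
    # combination where convert_integer is True, regardless of infer_objects, convert_string
    # and convert_boolean -- all of those combinations expect the same resulting dtype.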
@pytest.mark.parametrize(
"data, maindtype, answerdict",
[
(
[1, 2, 3],
np.dtype("int32"),
{
((True, False), (True, False), (True,), (True, False)): "Int32",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"int32"
),
},
),
(
[1, 2, 3],
np.dtype("int64"),
{
((True, False), (True, False), (True,), (True, False)): "Int64",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"int64"
),
},
),
(
["x", "y", "z"],
np.dtype("O"),
{
(
(True, False),
(True,),
(True, False),
(True, False),
): pd.StringDtype(),
((True, False), (False,), (True, False), (True, False)): np.dtype(
"O"
),
},
),
(
[True, False, np.nan],
np.dtype("O"),
{
(
(True, False),
(True, False),
(True, False),
(True,),
): pd.BooleanDtype(),
((True, False), (True, False), (True, False), (False,)): np.dtype(
"O"
),
},
),
(
["h", "i", np.nan],
np.dtype("O"),
{
(
(True, False),
(True,),
(True, False),
(True, False),
): pd.StringDtype(),
((True, False), (False,), (True, False), (True, False)): np.dtype(
"O"
),
},
),
( # GH32117
["h", "i", 1],
np.dtype("O"),
{
(
(True, False),
(True, False),
(True, False),
(True, False),
): np.dtype("O"),
},
),
(
[10, np.nan, 20],
np.dtype("float"),
{
((True, False), (True, False), (True,), (True, False)): "Int64",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"float"
),
},
),
(
[np.nan, 100.5, 200],
np.dtype("float"),
{
(
(True, False),
(True, False),
(True, False),
(True, False),
): np.dtype("float"),
},
),
(
[3, 4, 5],
"Int8",
{((True, False), (True, False), (True, False), (True, False)): "Int8"},
),
(
[[1, 2], [3, 4], [5]],
None,
{
(
(True, False),
(True, False),
(True, False),
(True, False),
): np.dtype("O"),
},
),
(
[4, 5, 6],
np.dtype("uint32"),
{
((True, False), (True, False), (True,), (True, False)): "UInt32",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"uint32"
),
},
),
(
[-10, 12, 13],
np.dtype("i1"),
{
((True, False), (True, False), (True,), (True, False)): "Int8",
((True, False), (True, False), (False,), (True, False)): np.dtype(
"i1"
),
},
),
(
[1, 2.0],
object,
{
((True,), (True, False), (True,), (True, False)): "Int64",
((True,), (True, False), (False,), (True, False)): np.dtype(
"float"
),
((False,), (True, False), (True, False), (True, False)): np.dtype(
"object"
),
},
),
(
[1, 2.5],
object,
{
((True,), (True, False), (True, False), (True, False)): np.dtype(
"float"
),
((False,), (True, False), (True, False), (True, False)): np.dtype(
"object"
),
},
),
(
["a", "b"],
pd.CategoricalDtype(),
{
(
(True, False),
(True, False),
(True, False),
(True, False),
): pd.CategoricalDtype(),
},
),
(
| pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]) | pandas.to_datetime |
## SETUP ##
## dependencies
import pandas as pd
## logging
sys.stdout = open(snakemake.log[0], 'w')
sys.stderr = open(snakemake.log[0], 'w')
## input files
input_dict = {
'taxlist' : snakemake.input['taxlist'],
'slvmap' : snakemake.input['slvmap'],
'dups' : snakemake.input['dups'],
}
## output files
output_dict = {
'kraknames_S' : snakemake.output['kraknames_S'],
'kraknodes_S' : snakemake.output['kraknodes_S'],
'krakseq2tax_S' : snakemake.output['krakseq2tax_S'],
'kraknames_G' : snakemake.output['kraknames_G'],
'kraknodes_G' : snakemake.output['kraknodes_G'],
'krakseq2tax_G' : snakemake.output['krakseq2tax_G'],
'kronataxtab_S' : snakemake.output['kronataxtab_S'],
'kronataxlist_S' : snakemake.output['kronataxlist_S'],
'kronaseq2tax_S' : snakemake.output['kronaseq2tax_S'],
'kronataxtab_G' : snakemake.output['kronataxtab_G'],
'kronataxlist_G' : snakemake.output['kronataxlist_G'],
'kronaseq2tax_G' : snakemake.output['kronaseq2tax_G'],
'qiimetax_S' : snakemake.output['qiimetax_S'],
'qiimetax_G' : snakemake.output['qiimetax_G']
}
# input_dict = {
# 'slvmap' : "METADATA/Reference_Sequences/silva/slvmap.txt",
# 'taxlist' : "METADATA/Reference_Sequences/silva/taxlist.txt",
# 'dups' : "METADATA/Reference_Sequences/silva/reference.dups"
# }
# output_dict = {
# 'kraknames_S' : "METADATA/Reference_Sequences/silva/kraken2/species/taxonomy/names.dmp",
# 'kraknodes_S' : "METADATA/Reference_Sequences/silva/kraken2/species/taxonomy/nodes.dmp",
# 'krakseq2tax_S' : "METADATA/Reference_Sequences/silva/kraken2/species/seqid2taxid.map",
# 'kraknames_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/taxonomy/names.dmp",
# 'kraknodes_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/taxonomy/nodes.dmp",
# 'krakseq2tax_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/seqid2taxid.map",
# 'kronataxtab_S' : "METADATA/Reference_Sequences/silva/krona/species/taxonomy.tab",
# 'kronataxtab_G' : "METADATA/Reference_Sequences/silva/krona/genus/taxonomy.tab",
# 'kronataxlist_S' : "METADATA/Reference_Sequences/silva/krona/species/taxlist.txt",
# 'kronataxlist_G' : "METADATA/Reference_Sequences/silva/krona/genus/taxlist.txt",
# 'kronaseq2tax_S' : "METADATA/Reference_Sequences/silva/krona/species/seqid2taxid.map",
# 'qiimetax_G' : "METADATA/Reference_Sequences/silva/qiime/genus/taxonomy.tsv",
# 'qiimetax_S' : "METADATA/Reference_Sequences/silva/qiime/species/taxonomy.tsv"
# }
# import os
# krak_dir_S = "/".join(output_dict['krakseq2tax_S'].split("/")[:-1])
# krak_dir_G = "/".join(output_dict['krakseq2tax_G'].split("/")[:-1])
# krona_dir_S = "/".join(output_dict['kronataxtab_S'].split("/")[:-1])
# krona_dir_G = "/".join(output_dict['kronataxtab_G'].split("/")[:-1])
# qiime_dir_S = "/".join(output_dict['qiimetax_S'].split("/")[:-1])
# qiime_dir_G = "/".join(output_dict['qiimetax_G'].split("/")[:-1])
# os.makedirs( krak_dir_S + "/taxonomy" )
# os.makedirs( krak_dir_G + "/taxonomy" )
# os.makedirs( krona_dir_S )
# os.makedirs( krona_dir_G )
# os.makedirs( qiime_dir_S )
# os.makedirs( qiime_dir_G )
## LOAD DATA ##
## load taxID list
df_taxlist = pd.read_csv(input_dict['taxlist'], sep='\t',
names=['pathname','taxID','rank','remark','release'],
usecols=['pathname','taxID','rank'])
## load SILVA taxIDs w/o species classification
df_accmap = pd.read_csv(input_dict['slvmap'], sep='\t',
skiprows=1,
names=['accID','start','end','path','name','taxID'],
usecols=['accID','start','end','name','taxID'])
## PER ACCID ##
## the concept of this paragraph was adapted from <NAME>, Little Rock, AR, <EMAIL>
## add path to accmap
df_accmap = pd.merge(df_accmap, df_taxlist.loc[:,['pathname','taxID']], how='left', on='taxID').rename(columns={'pathname' : 'path', 'taxID' : 'taxID_old'})
## get long accession ID with start and stop
df_accmap['accIDstartend'] = df_accmap['accID'].map(str) + "." + df_accmap['start'].map(str) + "." + df_accmap['end'].map(str)
df_accmap.drop(['accID','start','end'], axis=1, inplace=True)
## set accIDstartend as index
df_accmap.set_index('accIDstartend',inplace=True)
## replace any unwanted characters in the species name and split the name at whitespace
name_fix = df_accmap['name'].str.replace('[^\w\-\[\]\(\)\\/. ]', '').str.split()
#!# concatenate the first two words (or fewer) of the species name back together
df_accmap['name'] = name_fix.str[0].str.cat( name_fix.str[1], sep=" ", na_rep='' ).str.strip()
## get data frame with names from path
df_pathname = df_accmap['path'].str.split(';', expand=True)
#!# remove any unwanted columns
df_pathname.drop(columns=range(7,len(df_pathname.columns)),inplace=True)
## add species name to data frame of names
df_pathname[6] = df_accmap['name']
## replace None with empty string
df_pathname.fillna("", inplace=True)
## add dummy string to other missing taxa
df_pathname.loc[df_pathname[1]=="",1] = "unknown_phylum"
df_pathname.loc[df_pathname[2]=="",2] = "unknown_class"
df_pathname.loc[df_pathname[3]=="",3] = "unknown_order"
df_pathname.loc[df_pathname[4]=="",4] = "unknown_family"
df_pathname.loc[df_pathname[5]=="",5] = "unknown_genus"
## add semicolon to names for path formation
df_pathname = df_pathname + ";"
## combine names to paths
for n in range(1,len(df_pathname.columns)):
df_pathname[n] = df_pathname[n-1] + df_pathname[n]
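## illustrative example (hypothetical values): after this loop every column holds the cumulative path,
## e.g. column 0 = "Bacteria;", column 1 = "Bacteria;Proteobacteria;", ..., column 6 ends with the species name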
## PER TAXID ##
## set pathname to index of taxlist
df_taxlist.set_index('pathname',inplace=True)
## loop over ranks to:
## create new taxIDs for new taxa
## add new taxa to taxlist (including species)
for n in range(0,len(df_pathname.columns)):
## get unknown taxa from path (should only be the "unknown_" ones we just created and all species taxa)
missing_pathname = set(df_pathname[n]) - set(df_taxlist.index)
## skip this rank if no missing taxa are detected
if missing_pathname == set():
continue
## create new taxIDs above the current max taxID
new_taxID = list(range( df_taxlist['taxID'].max() + 1, df_taxlist['taxID'].max() + len(missing_pathname) + 1 ))
## rank will be set to no rank, except for species
current_rank = "species" if (n == 6) else "no rank"
## add new taxa to taxlist
df_taxlist = df_taxlist.append( pd.DataFrame({ 'taxID' : new_taxID, 'rank' : current_rank }, index = pd.Index( missing_pathname, name = 'pathname' )) )
## add target taxIDs to taxlist (temporarily sets targetID of domains to their taxID, see next step)
df_taxlist['targetID'] = df_taxlist.loc[df_taxlist.index.str.rsplit(";",n=2).str[0] + ";",'taxID'].to_list()
## set target of domains to root (taxID 1)
df_taxlist.loc[ df_taxlist['taxID'] == df_taxlist['targetID'], 'targetID' ] = 1
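## illustrative example (hypothetical path): for pathname "Bacteria;Proteobacteria;" the rsplit above
## yields the parent path "Bacteria;", whose taxID becomes the targetID; a domain would point to itself,
## which the preceding line redirects to root (taxID 1)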
## add new taxIDs to accmap
df_accmap['taxID'] = df_taxlist.loc[df_pathname[6],'taxID'].to_list()
## set pathname back to column of taxlist
df_taxlist.reset_index(inplace=True)
## add depth to taxlist
df_taxlist['depth'] = df_taxlist['pathname'].str.split(";").str.len()-1
## add name to taxlist
df_taxlist['name'] = df_taxlist['pathname'].str.split(";").str[-2]
## DUPLICATES ##
## set pathname to index of taxlist
df_taxlist.set_index('pathname',inplace=True)
## add column to mark duplicates for removal
df_accmap['dups_del'] = True
## open duplication taxids for reading
dups_r = open(input_dict['dups'], "r")
## loop over lines with duplicate accIDs
for dups in dups_r:
## convert to array
dups_ids = dups.split(";")
del dups_ids[-1]
## keep first (or only) accID
df_accmap.loc[dups_ids[0],'dups_del'] = False
## if all accIDs come from same taxID (or is not a duplicate at all), continue with next accID
if df_accmap.loc[dups_ids,'taxID'].unique().size == 1:
continue
## otherwise, iterate over higher ranks to find lowest common taxon
for rnk in reversed(range(0,6)): ## not 7, since species identity was covered above
## get current "unique" taxon
rnk_tax = df_pathname.loc[dups_ids,rnk].unique()
## if current taxon is unique, set taxID of retained accID to the higher rank taxID
if rnk_tax.size == 1:
df_accmap.loc[dups_ids[0],'taxID'] = df_taxlist.loc[rnk_tax,'taxID'][0]
break
## closing duplication taxids
dups_r.close()
## remove duplicate accIDs
df_accmap = df_accmap.loc[~df_accmap['dups_del'],:] ## Note: '~' means not
## also remove from df_pathname
df_pathname = df_pathname.loc[df_accmap.index,:]
## check for species taxa without associated accIDs after deduplication
df_taxlist = df_taxlist.loc[~((~df_taxlist.loc[:,'taxID'].isin( df_accmap.loc[:,'taxID'] )) & (df_taxlist.loc[:,'rank'] == "species")), : ]
## set accIDstartend back to column of accmap
df_accmap.reset_index(inplace=True)
## set pathname back to column of taxlist
df_taxlist.reset_index(inplace=True)
## EXPORT ACCID-TAXID ASSOCIATION ##
## write sequence taxon association FOR HIGHER TAXA
df_tmp = df_accmap.loc[:,['accIDstartend','taxID_old']] ## use original association to higher taxa
df_tmp.to_csv(
## save
output_dict['krakseq2tax_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
df_tmp.to_csv(
## save
output_dict['kronaseq2tax_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
del df_tmp
## write sequence taxon association INCLUDING SPECIES
df_tmp = df_accmap.loc[:,['accIDstartend','taxID']] ## use new taxIDs specific for species
df_tmp.to_csv(
## save
output_dict['krakseq2tax_S'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
df_tmp.to_csv(
## save
output_dict['kronaseq2tax_S'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
del df_tmp
## EXPORT TAXLIST ##
## write taxlist-like file (without last columns) FOR HIGHER TAXA
pd.concat([
## include root
pd.DataFrame([[";",1,"root"]],columns=['pathname','taxID','rank']),
## include higher ranks
df_taxlist.loc[ df_taxlist['rank']!="species", ['pathname','taxID','rank'] ] ## exclude species
]).to_csv(
## save
output_dict['kronataxlist_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## write taxlist-like file (without last columns) INCLUDING SPECIES
pd.concat([
## include root
pd.DataFrame([[";",1,"root"]],columns=['pathname','taxID','rank']),
## include higher ranks and species
df_taxlist.loc[:,['pathname','taxID','rank']]
]).to_csv(
## save
output_dict['kronataxlist_S'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## EXPORT KRONA ##
## write krona silva reference taxonomy FOR HIGHER TAXA
pd.concat([
## include root
pd.DataFrame([[1,0,1,"no rank","root"]],columns=['taxID','depth','targetID','rank','name']),
## include higher ranks
df_taxlist.loc[ df_taxlist['rank']!="species", ['taxID','depth','targetID','rank','name'] ]
]).to_csv(
## save
output_dict['kronataxtab_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## write krona silva reference taxonomy INCLUDING SPECIES
pd.concat([
## include root
pd.DataFrame([[1,0,1,"no rank","root"]],columns=['taxID','depth','targetID','rank','name']),
## include higher ranks and species
df_taxlist.loc[:,['taxID','depth','targetID','rank','name']]
]).to_csv(
## save
output_dict['kronataxtab_S'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## EXPORT KRAKEN2 ##
## kraken2 expects NCBI-style dmp files, so add the missing delimiter/placeholder columns
df_taxlist = df_taxlist.assign(
dummy1 = "|",
dummy2 = "-",
dummy3 = "scientific name"
)
## write kraken2 taxonomy names FOR HIGHER TAXA
pd.concat([
## include root
pd.DataFrame([[1,"|","root","|","-","|","scientific name","|"]],columns=['taxID','dummy1','name','dummy1','dummy2','dummy1','dummy3','dummy1']),
## include higher ranks
df_taxlist.loc[ df_taxlist['rank']!="species", ['taxID','dummy1','name','dummy1','dummy2','dummy1','dummy3','dummy1'] ]
]).to_csv(
## save
output_dict['kraknames_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## write kraken2 taxonomy names INCLUDING SPECIES
pd.concat([
## include root
pd.DataFrame([[1,"|","root","|","-","|","scientific name","|"]],columns=['taxID','dummy1','name','dummy1','dummy2','dummy1','dummy3','dummy1']),
## include higher ranks and species
df_taxlist.loc[:,['taxID','dummy1','name','dummy1','dummy2','dummy1','dummy3','dummy1']]
]).to_csv(
## save
output_dict['kraknames_S'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## write kraken2 taxonomy nodes FOR HIGHER TAXA
pd.concat([
## include root
pd.DataFrame([[1,"|",1,"|","no rank","|","-","|"]],columns=['taxID','dummy1','targetID','dummy1','rank','dummy1','dummy2','dummy1']),
## include higher ranks
df_taxlist.loc[ df_taxlist['rank']!="species", ['taxID','dummy1','targetID','dummy1','rank','dummy1','dummy2','dummy1'] ]
]).to_csv(
## save
output_dict['kraknodes_G'], mode='w', sep='\t', header=False, index=False, quoting=0, float_format="%g"
)
## write kraken2 taxonomy nodes INCLUDING SPECIES
pd.concat([
## include root
| pd.DataFrame([[1,"|",1,"|","no rank","|","-","|"]],columns=['taxID','dummy1','targetID','dummy1','rank','dummy1','dummy2','dummy1']) | pandas.DataFrame |
"""Module for data preprocessing.
You can consolidate data with `data_consolidation` and optimize it for example for machine learning models.
Then you can preprocess the data to be able to achieve even better results.
There are many small functions that you can use separately, but there is main function `preprocess_data` that
call all the functions based on input params for you. For inverse preprocessing use `preprocess_data_inverse`
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Generic, Any, cast, Union
from dataclasses import dataclass, astuple
import warnings
import importlib.util
from typing_extensions import Literal
import numpy as np
import pandas as pd
import mylogging
from .custom_types import DataFrameOrArrayGeneric
if TYPE_CHECKING:
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
ScalerType = Union[MinMaxScaler, RobustScaler, StandardScaler]
# Lazy load
# import scipy.signal
# import scipy.stats
# from sklearn import preprocessing
def data_consolidation(
data: pd.DataFrame | np.ndarray,
predicted_column: int | str = None,
other_columns: int = 1,
datalength: int = 0,
datetime_column: str | int | None = "",
resample_freq: str | None = None,
resample_function: Literal[None, "sum", "mean"] = "sum",
embedding: Literal[None, "label", "one-hot"] = "label",
unique_threshold: float = 0.6,
remove_nans_threshold: float = 0.85,
remove_nans_or_replace: str | float = "interpolate",
dtype: str | np.dtype | pd.DataFrame | list = "float32",
) -> pd.DataFrame:
"""Transform input data in various formats and shapes into data in defined shape optimal for machine learning models, that other functions rely on.
If you have data in other format than dataframe, use `load_data` first.
Note:
        This function returns only numeric data. All string columns will be removed (use embedding if you need them).
        The predicted column is moved to index 0!
Args:
data (pd.DataFrame | np.ndarray): Input data in well standardized format.
        predicted_column (int | str, optional): Predicted column name or index. It is moved to the first column and checked to be numeric.
            If None, it's ignored. Defaults to None.
        other_columns (int, optional): Whether to use other columns or only the predicted one. Defaults to 1.
datalength (int, optional): Data length after resampling. Defaults to 0.
datetime_column (str | int | None, optional): Name or index of datetime column. Defaults to None.
resample_freq (str | None, optional): Frequency of resampled data. Defaults to None.
        resample_function (Literal[None, 'sum', 'mean'], optional): 'sum' or 'mean'. Whether to sum resampled values or average them. Defaults to 'sum'.
        embedding (Literal[None, "label", "one-hot"], optional): 'label' or 'one-hot'. Categorical encoding that creates numbers from strings. 'label' gives each
            category (unique string) a concrete number, so the result keeps the same number of columns. 'one-hot' creates a new column for every
            category. Only columns where strings repeat often enough (see unique_threshold) will be used. Defaults to 'label'.
        unique_threshold (float, optional): Remove string columns that have too many categories. E.g. 0.9 defines that if a
            column contains more than 90% non-unique values, it is deleted. Min is 0, max is 1. This removes ids,
            hashes etc. Defaults to 0.6.
        remove_nans_threshold (float, optional): From 0 to 1. Requires that fraction of non-NaN numeric values for a column not to be deleted.
            E.g. if the value is 0.9 for a column with 10 values, 90% must be numeric, which implies at most 1 np.nan can be present;
            otherwise the column will be deleted. Defaults to 0.85.
        remove_nans_or_replace (str | float, optional): 'interpolate', 'remove', 'neighbor', 'mean' or a value. Remove or replace
            remaining NaN values. If you want to keep NaN, set the value to np.nan. If you want a concrete value, use a float or
            int. Defaults to 'interpolate'.
dtype (str | np.dtype | pd.DataFrame | list, optional): Output dtype. For possible inputs check pandas function `astype`. Defaults to 'float32'.
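    Example:
        Illustrative only (hypothetical column names):

            data = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=4, freq="D"), "y": [1.0, 2.0, 3.0, 4.0]})
            consolidated = data_consolidation(data, predicted_column="y", datetime_column="date")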
Raises:
KeyError, TypeError: May happen if wrong params. E.g. if predicted column name not found in dataframe.
Returns:
        np.ndarray, pd.DataFrame, str: Data in standardized form. Data array for prediction - predicted column at index 0,
            and columns for plotting as a pandas dataframe. Data has the same type as the input.
"""
if not isinstance(data, pd.DataFrame):
try:
data = pd.DataFrame(data)
except Exception as err:
raise (
RuntimeError(
mylogging.return_str(
"Check configuration file for supported formats. It can be path of file (csv, json, parquet...) or it "
"can be data in python format (numpy array, pandas dataframe or series, dict or list, ). It can also be other "
"format, but then it have to work with pd.DataFrame(your_data)."
f"\n\n Detailed error: \n\n {err}",
caption="Data load failed",
)
)
)
data_for_predictions_df = data.copy()
if data_for_predictions_df.shape[0] < data_for_predictions_df.shape[1]:
mylogging.info(
"Input data must be in shape (n_samples, n_features) that means (rows, columns) Your shape is "
f" {data.shape}. It's unusual to have more features than samples. Probably wrong shape.",
caption="Data transposed warning!!!",
)
data_for_predictions_df = data_for_predictions_df.T
if predicted_column or predicted_column == 0:
if isinstance(predicted_column, str):
predicted_column_name = predicted_column
if predicted_column_name not in data_for_predictions_df.columns:
raise KeyError(
mylogging.return_str(
f"Predicted column name - '{predicted_column}' not found in data. Change 'predicted_column' in config"
f". Available columns: {list(data_for_predictions_df.columns)}",
caption="Column not found error",
)
)
elif isinstance(predicted_column, int) and isinstance(
data_for_predictions_df.columns[predicted_column], str
):
predicted_column_name = data_for_predictions_df.columns[predicted_column]
else:
predicted_column_name = "Predicted column"
data_for_predictions_df.rename(
columns={data_for_predictions_df.columns[predicted_column]: predicted_column_name},
inplace=True,
)
# Make predicted column index 0
data_for_predictions_df.insert(
0, predicted_column_name, data_for_predictions_df.pop(predicted_column_name)
)
else:
predicted_column_name = None
reset_index = False
if datetime_column not in [None, False, ""]:
try:
if isinstance(datetime_column, str):
data_for_predictions_df.set_index(datetime_column, drop=True, inplace=True)
else:
data_for_predictions_df.set_index(
data_for_predictions_df.columns[datetime_column], drop=True, inplace=True,
)
data_for_predictions_df.index = | pd.to_datetime(data_for_predictions_df.index) | pandas.to_datetime |
import unittest
import data_profiler as dp
import numpy as np
import pandas as pd
class TestUnstructuredDataLabeler(unittest.TestCase):
# simple test for new default TF model + predict()
def test_fit_with_default_model(self):
data = [
['this is my test sentence.',
{'entities': [
(5 , 7, 'ADDRESS'),
(11, 20, 'INTEGER_BIG'),
(20, 22, 'ADDRESS'),
(22, 24, 'INTEGER_BIG')]}],
['How nice.',
{'entities': [
(0, 2, 'ADDRESS'),
(4, 5, 'INTEGER_BIG'),
(6, 8, 'INTEGER_BIG')]}]
]
data = | pd.DataFrame(data * 50) | pandas.DataFrame |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from mars import opcodes
from mars.config import options, option_context
from mars.core import OutputType, tile
from mars.core.operand import OperandStage
from mars.dataframe import eval as mars_eval, cut, to_numeric
from mars.dataframe.base import to_gpu, to_cpu, astype
from mars.dataframe.core import DATAFRAME_TYPE, SERIES_TYPE, SERIES_CHUNK_TYPE, \
INDEX_TYPE, CATEGORICAL_TYPE, CATEGORICAL_CHUNK_TYPE
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.datasource.index import from_pandas as from_pandas_index
from mars.tensor.core import TENSOR_TYPE
def test_to_gpu():
# test dataframe
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
cdf = to_gpu(df)
assert df.index_value == cdf.index_value
assert df.columns_value == cdf.columns_value
assert cdf.op.gpu is True
pd.testing.assert_series_equal(df.dtypes, cdf.dtypes)
df, cdf = tile(df, cdf)
assert df.nsplits == cdf.nsplits
assert df.chunks[0].index_value == cdf.chunks[0].index_value
assert df.chunks[0].columns_value == cdf.chunks[0].columns_value
assert cdf.chunks[0].op.gpu is True
pd.testing.assert_series_equal(df.chunks[0].dtypes, cdf.chunks[0].dtypes)
assert cdf is to_gpu(cdf)
# test series
sdata = data.iloc[:, 0]
series = from_pandas_series(sdata)
cseries = to_gpu(series)
assert series.index_value == cseries.index_value
assert cseries.op.gpu is True
series, cseries = tile(series, cseries)
assert series.nsplits == cseries.nsplits
assert series.chunks[0].index_value == cseries.chunks[0].index_value
assert cseries.chunks[0].op.gpu is True
assert cseries is to_gpu(cseries)
def test_to_cpu():
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
cdf = to_gpu(df)
df2 = to_cpu(cdf)
assert df.index_value == df2.index_value
assert df.columns_value == df2.columns_value
assert df2.op.gpu is False
pd.testing.assert_series_equal(df.dtypes, df2.dtypes)
df, df2 = tile(df, df2)
assert df.nsplits == df2.nsplits
assert df.chunks[0].index_value == df2.chunks[0].index_value
assert df.chunks[0].columns_value == df2.chunks[0].columns_value
assert df2.chunks[0].op.gpu is False
pd.testing.assert_series_equal(df.chunks[0].dtypes, df2.chunks[0].dtypes)
assert df2 is to_cpu(df2)
def test_rechunk():
raw = pd.DataFrame(np.random.rand(10, 10))
df = from_pandas_df(raw, chunk_size=3)
df2 = tile(df.rechunk(4))
assert df2.shape == (10, 10)
assert len(df2.chunks) == 9
assert df2.chunks[0].shape == (4, 4)
pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_index_equal(df2.chunks[0].columns_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_series_equal(df2.chunks[0].dtypes, raw.dtypes[:4])
assert df2.chunks[2].shape == (4, 2)
pd.testing.assert_index_equal(df2.chunks[2].index_value.to_pandas(), pd.RangeIndex(4))
pd.testing.assert_index_equal(df2.chunks[2].columns_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_series_equal(df2.chunks[2].dtypes, raw.dtypes[-2:])
assert df2.chunks[-1].shape == (2, 2)
pd.testing.assert_index_equal(df2.chunks[-1].index_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_index_equal(df2.chunks[-1].columns_value.to_pandas(), pd.RangeIndex(8, 10))
pd.testing.assert_series_equal(df2.chunks[-1].dtypes, raw.dtypes[-2:])
for c in df2.chunks:
assert c.shape[1] == len(c.dtypes)
assert len(c.columns_value.to_pandas()) == len(c.dtypes)
columns = [np.random.bytes(10) for _ in range(10)]
index = np.random.randint(-100, 100, size=(4,))
raw = pd.DataFrame(np.random.rand(4, 10), index=index, columns=columns)
df = from_pandas_df(raw, chunk_size=3)
df2 = tile(df.rechunk(6))
assert df2.shape == (4, 10)
assert len(df2.chunks) == 2
assert df2.chunks[0].shape == (4, 6)
pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), df.index_value.to_pandas())
pd.testing.assert_index_equal(df2.chunks[0].columns_value.to_pandas(), pd.Index(columns[:6]))
pd.testing.assert_series_equal(df2.chunks[0].dtypes, raw.dtypes[:6])
assert df2.chunks[1].shape == (4, 4)
pd.testing.assert_index_equal(df2.chunks[1].index_value.to_pandas(), df.index_value.to_pandas())
pd.testing.assert_index_equal(df2.chunks[1].columns_value.to_pandas(), pd.Index(columns[6:]))
pd.testing.assert_series_equal(df2.chunks[1].dtypes, raw.dtypes[-4:])
for c in df2.chunks:
assert c.shape[1] == len(c.dtypes)
assert len(c.columns_value.to_pandas()) == len(c.dtypes)
# test Series rechunk
series = from_pandas_series(pd.Series(np.random.rand(10,)), chunk_size=3)
series2 = tile(series.rechunk(4))
assert series2.shape == (10,)
assert len(series2.chunks) == 3
pd.testing.assert_index_equal(series2.index_value.to_pandas(), pd.RangeIndex(10))
assert series2.chunk_shape == (3,)
assert series2.nsplits == ((4, 4, 2), )
assert series2.chunks[0].shape == (4,)
pd.testing.assert_index_equal(series2.chunks[0].index_value.to_pandas(), pd.RangeIndex(4))
assert series2.chunks[1].shape == (4,)
pd.testing.assert_index_equal(series2.chunks[1].index_value.to_pandas(), pd.RangeIndex(4, 8))
assert series2.chunks[2].shape == (2,)
pd.testing.assert_index_equal(series2.chunks[2].index_value.to_pandas(), pd.RangeIndex(8, 10))
series2 = tile(series.rechunk(1))
assert series2.shape == (10,)
assert len(series2.chunks) == 10
pd.testing.assert_index_equal(series2.index_value.to_pandas(), pd.RangeIndex(10))
assert series2.chunk_shape == (10,)
assert series2.nsplits == ((1,) * 10, )
assert series2.chunks[0].shape == (1,)
pd.testing.assert_index_equal(series2.chunks[0].index_value.to_pandas(), pd.RangeIndex(1))
# no need to rechunk
series2 = tile(series.rechunk(3))
series = tile(series)
assert series2.chunk_shape == series.chunk_shape
assert series2.nsplits == series.nsplits
def test_data_frame_apply():
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
def df_func_with_err(v):
assert len(v) > 2
return v.sort_values()
with pytest.raises(TypeError):
df.apply(df_func_with_err)
r = df.apply(df_func_with_err, output_type='dataframe',
dtypes=df_raw.dtypes)
assert r.shape == (np.nan, df.shape[-1])
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.dataframe
assert r.op.elementwise is False
r = df.apply('ffill')
assert r.op._op_type_ == opcodes.FILL_NA
r = tile(df.apply(np.sqrt))
assert all(v == np.dtype('float64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.dataframe
assert r.op.elementwise is True
r = tile(df.apply(lambda x: pd.Series([1, 2])))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1])
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(np.sum, axis='index'))
assert np.dtype('int64') == r.dtype
assert r.shape == (df.shape[1],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[0],)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(np.sum, axis='columns'))
assert np.dtype('int64') == r.dtype
assert r.shape == (df.shape[0],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[1],)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: [1, 2], axis=1, result_type='expand'))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: list(range(10)), axis=1, result_type='reduce'))
assert np.dtype('object') == r.dtype
assert r.shape == (df.shape[0],)
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[1],)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
r = tile(df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast'))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
assert r.op.elementwise is False
finally:
options.chunk_store_limit = old_chunk_store_limit
raw = pd.DataFrame({'a': [np.array([1, 2, 3]), np.array([4, 5, 6])]})
df = from_pandas_df(raw)
df2 = df.apply(lambda x: x['a'].astype(pd.Series), axis=1,
output_type='dataframe', dtypes=pd.Series([np.dtype(float)] * 3))
assert df2.ndim == 2
def test_series_apply():
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = tile(series.apply('add', args=(1,)))
assert r.op._op_type_ == opcodes.ADD
r = tile(series.apply(np.sqrt))
assert np.dtype('float64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
r = tile(series.apply('sqrt'))
assert np.dtype('float64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
r = tile(series.apply(lambda x: [x, x + 1], convert_dtype=False))
assert np.dtype('object') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.APPLY
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
r = series.apply(np.sum)
assert r.dtype == np.dtype(object)
r = series.apply(lambda x: pd.Series([1]), output_type='dataframe')
expected = s_raw2.apply(lambda x: pd.Series([1]))
pd.testing.assert_series_equal(r.dtypes, expected.dtypes)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
assert r.ndim == 2
pd.testing.assert_series_equal(r.dtypes, dtypes)
assert r.shape == (2, 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes, index=pd.RangeIndex(2))
assert r.ndim == 2
pd.testing.assert_series_equal(r.dtypes, dtypes)
assert r.shape == (2, 3)
with pytest.raises(AttributeError, match='abc'):
series.apply('abc')
with pytest.raises(TypeError):
# dtypes not provided
series.apply(lambda x: x.tolist(), output_type='dataframe')
def test_transform():
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
df = from_pandas_df(df_raw, chunk_size=5)
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
# test transform with infer failure
def transform_df_with_err(v):
assert len(v) > 2
return v.sort_values()
with pytest.raises(TypeError):
df.transform(transform_df_with_err)
r = tile(df.transform(transform_df_with_err, dtypes=df_raw.dtypes))
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test transform scenarios on data frames
r = tile(df.transform(lambda x: list(range(len(x)))))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: list(range(len(x))), axis=1))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], df.shape[1])
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(['cumsum', 'cummax', lambda x: x + 1]))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], df.shape[1] * 3)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0] * 3)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform({'A': 'cumsum', 'D': ['cumsum', 'cummax'], 'F': lambda x: x + 1}))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test agg scenarios on series
r = tile(df.transform(lambda x: x.iloc[:-1], _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1])
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (2, np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = tile(df.transform(fn_list, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1] * 2)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 2)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.sum(), _call_agg=True))
assert r.dtype == np.dtype('int64')
assert r.shape == (df.shape[1],)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[0],)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_dict = {
'A': rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
'D': [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)],
'F': lambda x: x.iloc[:-1].reset_index(drop=True),
}
r = tile(df.transform(fn_dict, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# SERIES CASES
# test transform scenarios on series
r = tile(series.transform(lambda x: x + 1))
assert np.dtype('int64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_string_method():
s = pd.Series(['a', 'b', 'c'], name='s')
series = from_pandas_series(s, chunk_size=2)
with pytest.raises(AttributeError):
_ = series.str.non_exist
r = series.str.contains('c')
assert r.dtype == np.bool_
assert r.name == s.name
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
assert r.shape == s.shape
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i,)
assert c.dtype == np.bool_
assert c.name == s.name
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
assert c.shape == (2,) if i == 0 else (1,)
r = series.str.split(',', expand=True, n=1)
assert r.op.output_types[0] == OutputType.dataframe
assert r.shape == (3, 2)
pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
pd.testing.assert_index_equal(r.columns_value.to_pandas(), pd.RangeIndex(2))
r = tile(r)
for i, c in enumerate(r.chunks):
assert c.index == (i, 0)
pd.testing.assert_index_equal(c.index_value.to_pandas(),
s.index[i * 2: (i + 1) * 2])
pd.testing.assert_index_equal(c.columns_value.to_pandas(), | pd.RangeIndex(2) | pandas.RangeIndex |
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Please note: this version is a hack (it's super hacky); never call it for normal use.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
import numpy as np
from mpi4py import MPI
import sys
sys.path.insert(0, '../compress_gradient')
from compress_gradient import compress
from utils import err_simulation
LAYER_DIGITS= int(1e+3)
TIMEOUT_THRESHOLD_=10
# only use for maj vote
#SEED_=428
#torch.manual_seed(SEED_)
def generate_tag(layer_tag, step_token):
'''
    Tag components: [current-step-token (which helps to recognize stale gradients)
    + layer-tag]
    We only limit the digits for the layer tag here, since the step token can be
    extremely large, e.g. 10k steps.
    :param layer_tag: per-layer tag offset
    :param step_token: current training step
    :return: combined integer tag
'''
tag = step_token * LAYER_DIGITS \
+ layer_tag
tag = int(tag)
return tag
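# Illustrative example (not part of the original module): with LAYER_DIGITS == 1e+3,
# step token 12 and layer tag 93 are packed into a single MPI tag.
# >>> generate_tag(layer_tag=93, step_token=12)
# 12093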
class BasicBlockSplit(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockSplit, self).__init__()
self.full_modules = []
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.full_modules.append(self.conv1)
self.bn1 = nn.BatchNorm2d(planes)
self.full_modules.append(self.bn1)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.full_modules.append(self.conv2)
self.bn2 = nn.BatchNorm2d(planes)
self.full_modules.append(self.bn2)
self.relu = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
self.full_modules.append(self.shortcut[0])
self.full_modules.append(self.shortcut[1])
def forward(self, x, input_list, output_list):
'''
        the input_list and output_list here are similar to self.input/self.output in the ResNetSplit class
'''
# we skip the detach and append operation on the very first x here
# since that's done outside of this function
out = self.conv1(x)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.bn1(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.relu(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.conv2(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.bn2(out)
output_list.append(out)
        # TODO(hwang): figure out if this part also needs the hack
out += self.shortcut(x)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.relu(out)
output_list.append(out)
return out, input_list, output_list
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.full_modules = []
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.full_modules.append(self.conv1)
self.bn1 = nn.BatchNorm2d(planes)
self.full_modules.append(self.bn1)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.full_modules.append(self.conv2)
self.bn2 = nn.BatchNorm2d(planes)
self.full_modules.append(self.bn2)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.full_modules.append(self.conv3)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.full_modules.append(self.bn3)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
self.full_modules.append(self.shortcut[0])
self.full_modules.append(self.shortcut[1])
def forward(self, x, input_list, output_list):
# we skip the detach operation on the very first x here since that's done outside of this function
#out = F.relu(self.bn1(self.conv1(x)))
#out = F.relu(self.bn2(self.conv2(out)))
#out = self.bn3(self.conv3(out))
#out += self.shortcut(x)
#out = F.relu(out)
#return out
out = self.conv1(x)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.bn1(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.relu(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.conv2(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.bn2(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.relu(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.conv3(out)
output_list.append(out)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.bn3(out)
output_list.append(out)
        # TODO(hwang): figure out if this part also needs the hack
out += self.shortcut(x)
out = Variable(out.data, requires_grad=True)
input_list.append(out)
out = self.relu(out)
output_list.append(out)
return out, input_list, output_list
class ResNetSplit(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNetSplit, self).__init__()
global TIMEOUT_THRESHOLD_
self.in_planes = 64
self.full_modules = []
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.full_modules.append(self.conv1)
self.bn1 = nn.BatchNorm2d(64)
self.full_modules.append(self.bn1)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.full_modules.append(self.linear)
self.relu = nn.ReLU()
self.avg_pool2d = nn.AvgPool2d(kernel_size=4)
self._init_channel_index = self.count_channel_index()
@property
def fetch_init_channel_index(self):
return self._init_channel_index
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
block_layers = block(self.in_planes, planes, stride)
layers.append(block_layers)
for m in block_layers.full_modules:
self.full_modules.append(m)
self.in_planes = planes * block.expansion
layers_split = nn.ModuleList(layers)
return layers_split
def forward(self, x):
# use these containers to save intermediate variables
self.output = []
self.input = []
        # start the forward process right here; apply the following logic to every intermediate var:
# detach from previous history
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.conv1(x)
# add to list of outputs
self.output.append(x)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.bn1(x)
self.output.append(x)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.relu(x)
self.output.append(x)
# start to handle blocks
for layer in self.layer1:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer2:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer3:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer4:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.avg_pool2d(x)
self.output.append(x)
x = x.view(x.size(0), -1)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.linear(x)
self.output.append(x)
return x
def count_channel_index(self):
channel_index_ = 0
for k, v in self.state_dict().items():
if "running_mean" in k or "running_var" in k:
continue
else:
channel_index_ += 1
return channel_index_
def backward(self, g, communicator, req_send_check, cur_step):
mod_avail_index = len(self.full_modules)-1
channel_index = self._init_channel_index-2
mod_counters_ = [0]*len(self.full_modules)
for i, output in reversed(list(enumerate(self.output))):
            # send this layer's gradient only after the previous send has completed
req_send_check[-1].wait()
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad_weight):
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
continue
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
else:
continue
# handle the remaining gradients here to send to parameter server
while channel_index >= 0:
req_send_check[-1].wait()
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
return req_send_check
def backward_normal(self, g, communicator, req_send_check, cur_step, fail_workers, err_mode, compress_grad):
mod_avail_index = len(self.full_modules)-1
channel_index = self._init_channel_index-2
mod_counters_ = [0]*len(self.full_modules)
for i, output in reversed(list(enumerate(self.output))):
# send layer only after the last layer is received
req_send_check[-1].wait()
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if | pd.isnull(self.full_modules[mod_avail_index].bias) | pandas.isnull |
import sys
from comet_ml import Experiment
from sklearn.metrics import classification_report
from datetime import datetime
from typing import Dict
import pandas as pd
import pickle
import json
import os
import shutil
import module_results
#____________________________________________________________________________________________________________________________________
DATA_DIR_KEY = 'data'
MODEL_DIR_KEY = 'model'
CV_DIR_KEY = 'cv'
PLOTS_DIR_KEY = 'plots'
PICKLE_DIR_KEY = 'pickle'
JSON_DIR_KEY = 'json'
TEST_DIR_KEY = 'test'
#____________________________________________________________________________________________________________________________________
def clear_vars_packages() -> None:
"""Clear all loaded variables"""
for name in dir():
if not name.startswith('_'):
del globals()[name]
#____________________________________________________________________________________________________________________________________
def get_workflow_dirs(which_pc: str = 'mine',
data_dir: str = None,
model_id: str = None,
num_iter: int = 0) -> Dict:
""" Create a dictionary of directories which are needed for the ML workflow.
Keys: 'data' - the data dir.
'model' - the model dir.
sub: 'cv' - the cross validation results.
sub: 'plots' - the plots for the model.
sub: 'pickle' - the serializing directory.
:param which_pc: the pc used
:param data_dir: the data directory name
:param model_id: short model description, i.e. its ID
:param num_iter: number of iterations of the hyperparameter search
:return: dict with the keys described and the appropriate dirs as their values
"""
# get data and project root dir based on pc
if which_pc.startswith('pano'):
root_dir = f"C:\\Users\\Inno\\Documents\\IV\\GLYCO"
data_dir = f"C:\\Users\\Inno\\Documents\\IV\\PREPROCESSED_DATA\\{data_dir}"
else:
root_dir = f"C:\\Users\\ilija\\Pycharm Projects\\GLYCO"
data_dir = f"{root_dir}\\_DATA_PREPROCESSED\\{data_dir}"
# define the model directory (if less than 10 iter it is only a debugging/test try)
if num_iter < 10: model_id ='TRY_____' + model_id
model_dir = f'{root_dir}\\RESULTS\\September\\{str(datetime.now().date())}\\' \
f'{model_id}_{num_iter}_{str(datetime.now().hour)}h{str(datetime.now().minute)}m'
# create the workflow directories dictionary with global constants
# that will be used throughout the module
workflow_dirs_dict = {
DATA_DIR_KEY: data_dir,
MODEL_DIR_KEY: model_dir,
CV_DIR_KEY: f'{model_dir}\\CV',
PLOTS_DIR_KEY: f'{model_dir}\\Plots',
PICKLE_DIR_KEY: f'{model_dir}\\Pickled_Models',
JSON_DIR_KEY: f'{model_dir}\\JSON',
TEST_DIR_KEY: f'{model_dir}\\Test',
}
# if debugging or prototyping, delete old dir with same name
# that is created not more than a minute ago
if num_iter < 10 and os.path.exists(model_dir):
shutil.rmtree(model_dir)
# create the directories
os.makedirs(model_dir)
print(f'Created dir: {model_dir}')
os.makedirs(workflow_dirs_dict[CV_DIR_KEY])
print(f'Created dir: {workflow_dirs_dict[CV_DIR_KEY]}')
os.makedirs(workflow_dirs_dict[PLOTS_DIR_KEY])
print(f'Created dir: {workflow_dirs_dict[PLOTS_DIR_KEY]}')
os.makedirs(workflow_dirs_dict[PICKLE_DIR_KEY])
print(f'Created dir: {workflow_dirs_dict[PICKLE_DIR_KEY]}')
os.makedirs(workflow_dirs_dict[JSON_DIR_KEY])
print(f'Created dir: {workflow_dirs_dict[JSON_DIR_KEY]}')
os.makedirs(workflow_dirs_dict[TEST_DIR_KEY])
print(f'Created dir: {workflow_dirs_dict[TEST_DIR_KEY]}')
return workflow_dirs_dict
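#____________________________________________________________________________________________________________________________________
# Illustrative sketch (not part of the original module): how the dictionary returned by
# get_workflow_dirs() is typically consumed. The data_dir name, model_id and file names below are
# hypothetical placeholders.
def _example_workflow_dirs_usage():
    dirs = get_workflow_dirs(which_pc='mine',
                             data_dir='GLYCO_FEATURES',
                             model_id='rf_baseline',
                             num_iter=50)
    # training data is read from the data directory ...
    df = pd.read_csv(os.path.join(dirs[DATA_DIR_KEY], 'train.csv'))
    # ... and every artefact produced later in the workflow is written under the model directories
    df.describe().to_csv(os.path.join(dirs[CV_DIR_KEY], 'data_summary.csv'))
    return dirs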
#____________________________________________________________________________________________________________________________________
def train_and_save_sklearn(hyperparameters_optimizer,
train_test_sets:Dict = None,
submodel_id_dict: Dict = None,
workflow_dirs: Dict = None,
comet_dict: Dict = None,
):
X_train = train_test_sets['X_train']
y_train = train_test_sets['y_train']
# train model and save training time
start_time = datetime.now()
hyperparameters_optimizer.fit(X_train, y_train)
end_time = datetime.now()
training_time = end_time - start_time
# save serialized model
pickle.dump(hyperparameters_optimizer.best_estimator_,
open(f'{workflow_dirs[PICKLE_DIR_KEY]}\\PICKLED_{submodel_id_dict["full_string"]}.sav', 'wb'))
# save cross validation results
| pd.DataFrame(hyperparameters_optimizer.cv_results_) | pandas.DataFrame |
import requests
import re
import pandas as pd
import json
def get_webtoon_genre_list():
url = "https://webtoon.p.rapidapi.com/originals/genres/list"
querystring = {"language":"en"}
headers = {
'x-rapidapi-host': "webtoon.p.rapidapi.com",
'x-rapidapi-key': "200898dbd8msh7effe9f4aca8119p1f02a4jsn9f53b70ac5e8"
}
response_gen = requests.request("GET", url, headers=headers, params=querystring)
webtoon_gen_json = response_gen.json()
webtoon_json_gen_df = pd.DataFrame(webtoon_gen_json['message']['result']['genreList']['genres'])
print(webtoon_json_gen_df['name'].tolist())
def get_webtoon_list_ranking(genre):
url = "https://webtoon.p.rapidapi.com/originals/titles/list-by-rank"
querystring = {"count":"30","language":"en"}
headers = {
'x-rapidapi-host': "webtoon.p.rapidapi.com",
'x-rapidapi-key': "200898dbd8msh7effe9f4aca8119p1f02a4jsn9f53b70ac5e8"
}
response_rank = requests.request("GET", url, headers=headers, params=querystring)
webtoon_rank_json = response_rank.json()
webtoon_json_rank_df = | pd.DataFrame(webtoon_rank_json['message']['result']['titleNoListByTabCode']) | pandas.DataFrame |
from abc import ABC, abstractproperty
from collections import namedtuple
from pathlib import Path
import typing as t
import dill
import numpy as np
import pandas as pd
from loguru import logger
from sklearn.pipeline import Pipeline
SentimentType = t.NamedTuple(
"Sentiment",
[
("sentiment", str),
("positive_probability", float),
("negative_probability", float),
],
)
Sentiment = namedtuple(
"Sentiment", ["sentiment", "positive_probability", "negative_probability"]
)
@logger.catch
def persist_model(name: str, clf: Pipeline = None, method: str = "load") -> None:
"""Persist Model
Function use to save or load model
Arguments:
name {str} -- name of the saved model
Keyword Arguments:
clf {trained model} -- required only during save (default: {None})
method {str} -- [takes in 'load' or 'save' argument to load or save models] (default: {'load'})
Raises:
ValueError: [raised when the arguments are not correct]
"""
if method == "load":
with open(name, "rb") as f:
return dill.load(f)
elif method == "save":
logger.info(f"[+] Persisting {name} ...")
if clf is None:
raise ValueError("Pass Model/Pipeline/Transformation")
with open(name, "wb") as f:
dill.dump(clf, f)
logger.info(f"[+] Persistence Complete. Model {name} is saved")
else:
raise ValueError("Wrong arguments")
MODEL_PATH = Path(__file__).parent / "models/base_model.pkl"
PRE_LOAD_MODEL = persist_model(f"{MODEL_PATH}", method="load")
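# Illustrative sketch (not part of the original module): a save/load round trip with persist_model.
# The file path below is a hypothetical placeholder.
def _example_persist_model_roundtrip(trained_pipeline: Pipeline, path: str = "models/sentiment_v2.pkl") -> Pipeline:
    # save the fitted pipeline to disk ...
    persist_model(path, clf=trained_pipeline, method="save")
    # ... and load it back, e.g. for scoring in another process
    return persist_model(path, method="load")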
class HisiaLoad(ABC):
def __init__(self, model_path: str = None):
"""Factory Class
This is used to ensure a single model loading instance
and a abstract property sentiment that is overiden in child classes
Keyword Arguments:
model_path {str} -- path to the trained model (default: {None})
"""
if model_path is None:
self.model = PRE_LOAD_MODEL
else:
self.model = persist_model(model_path, method="load")
def __repr__(self):
return f"{self.__class__.__name__}(Model=Logistic Regression)"
@abstractproperty
def sentiment(self):
pass
class Hisia(HisiaLoad):
"""Hisia
Keyword Arguments:
text {str} -- text to analyze
model_path {str} -- path to the trained model (default: {None})
...
Attributes
----------
text : str
a text to analyze
model : Pipeline
a loaded model as a scikit-learn pipeline with both features transformers and classifier
Property
-------
sentiment
returns the sentiment of text
explain
returns a dictionary of sentiment score explanation
calculation: decision = W1(word1) + W2(word2) + ... + intercept
Usage:
```python
from hisia import Hisia
positive_gro = Hisia('det var super deligt')
print(positive_gro.sentiment)
print(positive_gro.explain)
```
"""
def __init__(self, text: str, model_path: str = None):
super().__init__(model_path)
self.text = text
self.sentiment
def __repr__(self):
return (
f"Sentiment(sentiment={self.sentiment.sentiment}, "
f"positive_probability={self.sentiment.positive_probability}, "
f"negative_probability={self.sentiment.negative_probability})"
)
@property
def sentiment(self) -> SentimentType:
if isinstance(self.text, str):
self.X = [self.text]
else:
self.X = self.text
response = self.model.predict_proba(self.X)
response = pd.DataFrame(response)
response.columns = ["negative_probability", "positive_probability"]
response["sentiment"] = np.where(
response["negative_probability"] > 0.5, "negative", "positive"
)
self.results = Sentiment(**response.round(3).to_dict(orient="index")[0])
return self.results
@property
def explain(self) -> t.Dict[str, float]:
feature_names = self.model.named_steps[
"count_verctorizer"
].get_feature_names_out()
best_features = [
feature_names[i]
for i in self.model.named_steps["feature_selector"].get_support(
indices=True
)
]
coefficients = self.model.named_steps["logistic_regression"].coef_[0]
index_range = range(len(best_features))
look_table = {
index: (token, coef)
for index, coef, token in zip(index_range, coefficients, best_features)
}
v = self.model.named_steps["count_verctorizer"].transform(self.X)
v = self.model.named_steps["feature_selector"].transform(v)
v = | pd.DataFrame.sparse.from_spmatrix(v) | pandas.DataFrame.sparse.from_spmatrix |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append( | DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') | pandas.DatetimeIndex |
"""
TO-DO:
1. Get all features data sets [X]
2. Add labels to the data sets[X]
3. Obtain results of the training ( of all three algos)[X]
4. Obtain the plots and confusion matrix[]
"""
from pathlib import Path
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
from sklearn import metrics
from sklearn.metrics import roc_auc_score,confusion_matrix, precision_score,f1_score,recall_score,accuracy_score,RocCurveDisplay
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from matplotlib import font_manager
from sklearn.model_selection import KFold
def label_dist(label):
one_n = np.count_nonzero(label==1)
zero_n = np.count_nonzero(label==0)
return print(f"Unos:{one_n}\t Ceros:{zero_n}")
kf = KFold(n_splits=5)
font_dirs = ['../otros/lmr']
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
for font_file in font_files:
font_manager.fontManager.addfont(font_file)
plt.rcParams['font.family'] = 'Latin Modern Roman'
def plot_roc_curve(y_test,y_pred,fname):
with plt.style.context("seaborn-paper"):
fig,ax = plt.subplots(figsize = (5,5))
roc = RocCurveDisplay.from_predictions(y_test,y_pred)
roc.plot(ax = ax)
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
fig.savefig(fname,dpi = 300)
def gen_score(y_test,y_pred,feature_name,algo_name,panda_index = 0):
accuracy = accuracy_score(y_test,y_pred)
precission = precision_score(y_test,y_pred)
f1 = f1_score(y_test,y_pred)
recall = recall_score(y_test,y_pred)
try:
roc_score = roc_auc_score(y_test,y_pred)
except:
roc_score =0
results = pd.DataFrame({"feature_name":feature_name,"algo":algo_name,"accuracy":accuracy,"precission":precission,
"f1":f1,"recall":recall,"roc_score":roc_score},index = [panda_index])
return results
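# Illustrative sketch (not part of the original script): gen_score returns a one-row DataFrame, so
# per-fold results can simply be concatenated. The fold loop below is a hypothetical example and
# assumes X and y are numpy arrays.
def _example_collect_fold_scores(model, X, y, feature_name="conv_auto", algo_name="forest"):
    fold_results = []
    for fold_idx, (train_idx, test_idx) in enumerate(kf.split(X)):
        # fit on the training fold and score on the held-out fold
        model.fit(X[train_idx], y[train_idx])
        y_pred = model.predict(X[test_idx])
        fold_results.append(gen_score(y[test_idx], y_pred, feature_name, algo_name, panda_index=fold_idx))
    # one row per fold, ready to be averaged or saved
    return pd.concat(fold_results, ignore_index=True)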
RANDOM_STATE = 42
kneigh = KNeighborsClassifier()
dtree = DecisionTreeClassifier(random_state=RANDOM_STATE)
forest = RandomForestClassifier(random_state=RANDOM_STATE)
algos = {"kneigh":kneigh,"dtree":dtree,"forest":forest}
scaler = MinMaxScaler()
input_labels ={"pca":Path("../data/labels/labels_pca"),
"lin_auto":Path("../data/labels/labels_lineal"),
"conv_auto":Path("../data/labels/labels_conv")}
input_path = Path("../data/feature_data")
output_path = Path("../data/con_auto_classification_results")
output_rocplots_path = output_path/"pca_roc_plots"
if not output_path.exists():
output_path.mkdir()
if not output_rocplots_path.exists():
output_rocplots_path.mkdir()
# 1. Loading the features
features_paths = []
for i in input_path.glob("*"):
if i.is_dir():
for j in i.glob("*.pkl"):
features_paths.append(j)
data1 = []
data2 = []
data3 = []
for data in features_paths:
if "1" in re.findall(r"\d+",str(data)):
data1.append(data)
elif "2" in re.findall(r"\d+",str(data)):
data2.append(data)
else:
data3.append(data)
data1.sort()
data2.sort()
data3.sort()
labels_paths = [i for i in input_labels["conv_auto"].glob("*.pkl")]
labels_paths.sort()
labels = [pd.read_pickle(i) for i in labels_paths]
#labels_paths_lin= [i for i in input_labels["lin_auto"].glob("*.pkl")]
#labels_paths_conv = [i for i in input_labels["conv_auto"].glob("*.pkl")]
# 2. Add labels to the dataset and train
outer_index = 0
for index, dir_paths in enumerate([data1,data2,data3]):
training_results = | pd.DataFrame() | pandas.DataFrame |
import os
from multiprocessing import Pool
import pandas as pd
import numpy as np
import vcf
from pysam import AlignmentFile
class Extract:
"""
Class for extracting genotype information from alignment file using
the user supplied VCF file.
"""
def __init__(self, args):
self.db = args.database
self.threads = args.threads
self.min_mapping_quality = args.min_mapping_quality
self.min_base_quality = args.min_base_quality
self.default_genotype = args.default_genotype
self.vcf = args.vcf
self.bed = args.bed
self.fafile = args.fafile
self.overwrite = args.overwrite
self.min_coverage = args.min_coverage
self.min_homozygous_thresh = args.min_homozygous_thresh
self.sites = []
self.regions = None
self._parse_vcf()
self._parse_bed_file()
def _parse_vcf(self):
if self.vcf is None:
return
for record in vcf.Reader(open(self.vcf, 'r')):
self.sites.append({
'chrom': record.CHROM,
'start': record.POS-1,
'end': record.POS,
'ref_allele': str(record.REF),
'alt_allele': str(record.ALT[0])
})
def _parse_bed_file(self):
if self.bed is None:
return
self.regions = pd.read_csv(self.bed, sep='\t', header=None)
# only keep Y chrom regions
self.regions = self.regions[self.regions[0].isin(['Y', 'chrY'])]
if len(self.regions) == 0:
print('There are no Y chromosome regions. Cannot determine if there is a sex mismatch.')
self.regions.columns = range(self.regions.shape[1])
def _extract_regions(self, sample):
"""
Code to extract the coverage information for the regions listed
in the BED file.
"""
if self.regions is None:
return sample
# get the pileup
bam = AlignmentFile(sample.sample_bam)
region_counts = []
for i in self.regions.index:
chrom = self.regions.at[i, 0]
start = int(self.regions.at[i, 1])
end = int(self.regions.at[i, 2])
count = bam.count(chrom, start, end)
region_counts.append({
'chrom': chrom,
'start': start,
'end': end,
'count': count})
if len(region_counts) > 0:
region_counts = | pd.DataFrame(region_counts) | pandas.DataFrame |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def plot_rec_results(self, metric_name='recall'):
""" self is an instance of Experiment or ExperimentResult """
ir = pd.DataFrame(self.item_rec).T
ur = | pd.DataFrame(self.user_rec) | pandas.DataFrame |
#!/usr/bin/env python
#
# Script for 5' assignment of 5'P-Seq data
# input is BAM file must contain NH tag
# reads with the tag NH:i:1 only included
# output 1: raw counts in *_iv.h5 - single indexed
# output 2: normalised RPM in _idx_iv.h5 - double indexed
#
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__version__ = "0.1.2"
__email__ = "<EMAIL>"
__status__ = "beta"
import re
import sys
import pysam
import argparse
import pandas as pd
from collections import Counter
parser = argparse.ArgumentParser(description="five prime assignment of 5'P-Seq data")
parser.add_argument('-i', type=str, help='aligned_sorted.bam')
args = parser.parse_args()
sys.stderr.write("\n\
-i input : {}\n\n".format(args.i))
usage = 'python fivePassignment.py -i aligned_sorted.bam"'
if args.i==None:
sys.exit("\n usage:\n\t{}\n".format(usage))
raw_out = False # bool
# output file name from infilename
f = re.sub(r'_sorted.bam', '', re.sub(r'.*\/', '', args.i))
outf_raw_hdf = "{}_raw_iv.h5".format(f)
outf_rpm_hdf = "{}_rpm_iv.h5".format(f)
outf_idx_hdf = "{}_idx_iv.h5".format(f)
def yeastChr():
# Ordered yeast Chr list short names from ensembl
return ['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','Mito']
def update_df(df, Chr, strand):
df.fillna(0, inplace=True)
columns = list(df.columns)
columns = ["Chr", "Position", "Strand"] + columns
df["Chr"] = Chr
df["Strand"] = strand
df["Position"] = df.index
return df[columns]
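# Illustrative sketch (not part of the original script): update_df expects a per-position count
# DataFrame indexed by position; the toy chromosome and counts below are hypothetical.
def _example_update_df():
    toy_counts = pd.DataFrame({"raw": [3, None, 7]}, index=[100, 101, 102])
    # fills missing counts with 0 and prepends the Chr/Position/Strand columns
    return update_df(toy_counts, Chr="I", strand="+")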
def restructurate_hd5(infile, outfile, close_outfile=True):
""" infile.h5 keys - "/For_raw", "/Rev_raw", ...
outfile.h5 keys - "/For_raw/I", "/For_raw/II", ... etc
"Position" is set to index
:param infile:
:param outfile:
:return: reindexed 2 level hdf
"""
# open inp_HDF
inp__h5 = | pd.HDFStore(infile, "r") | pandas.HDFStore |
#! python3
"""Process data acquired from the Malvern Mastersizer 2000. The csv output contains lots of factor information with the numeric data towards the end. A common feature of the classes and modules is to split thse datasets into associate 'head' and 'data' subsets so that the numerical data can be processed independantly."""
import os
from os.path import join
import sys
import numpy as np
import pandas as pd
from ..database.csv import MetaData
from .psa import Test as PSA
def csv_to_df(path):
"""Load an exported Mastersizer 2000 csv into a pandas dataframe. Two headers exist and dates should be parsed."""
df = pd.read_csv(path, header = [0,1], parse_dates = [5,6])
return(df)
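# Illustrative sketch (not part of the original module): load an exported CSV and wrap a single
# measurement row as an MS2000 object. The file path below is a hypothetical placeholder.
def _example_load_first_measurement(path="exports/mastersizer_export.csv"):
    df = csv_to_df(path)
    # each row of the export is one measurement; wrap the first one
    return MS2000(df.iloc[0])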
class MS2000(PSA):
"""Create an object from a row of Mastersizer 2000 CSV."""
def __init__(self, row):
""""""
m, d = self._split_row(row)
self._meta_to_self(m)
self.dist = self._get_distribution(d)
self.precision = 6
def _split_row(self, row):
"""Return the metadata from a row."""
h2 = row.index.get_level_values(1)
hix, dix = [], []
for n, i in enumerate(h2):
try:
float(i)
dix += [n]
except:
hix += [n]
h = row[row.index[(hix)]]
d = row[row.index[(dix)]]
h.index = h.index.droplevel(1)
d.index = d.index.droplevel(0)
return(h, d)
def _meta_to_self(self, meta):
"""Add metadata as attributes of the current object."""
row = meta
for i in meta.keys():
v = getattr(row, i)
k = i.replace('[4, 3] - ','').replace('[3, 2] - ','')
k = k.replace('(0.1)','10').replace('(0.5)','50').replace('(0.9)','90')
k = k.rstrip()
k = k.replace(' ','_')
k = k.lower()
setattr(self, k, v)
def _get_distribution(self, dat):
"""Return data as a dataframe of size limits and frequency"""
df = dat.reset_index()
df['index'] = df['index'].astype('float') / 1000
df.columns = ['mm', 'frequency']
df['frequency'] = [round(i,6) for i in df['frequency']]
df.dropna(inplace=True)
return(df)
class CSV(MS2000):
""""""
def __init__(self, path):
"""Import a Mastersizer 2000 exported CSV and split into data and metadata."""
df = self._load_csv(path)
self.df = df
def _load_csv(self, path):
""""""
df = csv_to_df(path)
return(df)
def _splitdf(self, df):
"""Split the raw dataframe into separate data and metadata dataframes"""
h2 = df.columns.get_level_values(1)
hix, dix = [], []
for n, i in enumerate(h2):
try:
float(i)
dix += [n]
except:
hix += [n]
h = df[df.columns[(hix)]]
d = df[df.columns[(dix)]]
h.columns = h.columns.droplevel(level = 1)
d.columns = d.columns.droplevel(level = 0)
return(h, d)
def idx_from_sample_name(self, name):
"""Return a row from """
def idx_to_object(self, idx):
"""Return an MS2000 object from an idx"""
ms2 = MS2000(self.df.iloc[idx])
return(ms2)
class Wee:
def row_to_object(self, idx):
"""Convert a row of Mastersizer 2000 metadata to an object."""
ms2 = type('MS2000', (PSA,), {})()
row = self.metadata.iloc[idx]
for i in row.keys():
v = getattr(row, i)
k = i.replace('[4, 3] - ','').replace('[3, 2] - ','')
k = k.replace('(0.1)','10').replace('(0.5)','50').replace('(0.9)','90')
k = k.rstrip()
k = k.replace(' ','_')
k = k.lower()
setattr(ms2, k, v)
df = pd.DataFrame(self.data.iloc[idx]).reset_index()
df.columns = ['um', 'frequency']
df.dropna(inplace=True)
setattr(ms2, 'distribution', df)
return(ms2)
def sample_names(self):
"""Return unique sample names."""
names = self.meta['Sample Name'].unique().tolist()
return(names)
def subset_metadata(self, df):
"""Define the metadata to process by a subsetted pandas dataframe. The pandas dataframe should be subsetted externally as required."""
self.metadata = df.copy()
print(df)
class MasterSizer2000(PSA):
def __init__(self, df):
"""Useful operations for mastersizer 2000 data"""
self.original_metadata, self.data = self._splitdf(df)
self.metadata = self.original_metadata.copy()
self.psa = [self.row_to_object(i) for i in self.metadata.index]
def _splitdf(self, df):
"""Split the raw dataframe into separate data and metadata dataframes"""
h2 = df.columns.get_level_values(1)
hix, dix = [], []
for n, i in enumerate(h2):
try:
float(i)
dix += [n]
except:
hix += [n]
h = df[df.columns[(hix)]]
d = df[df.columns[(dix)]]
h.columns = h.columns.droplevel(level = 1)
d.columns = d.columns.droplevel(level = 0)
return(h, d)
def row_to_object(self, idx):
"""Convert a row of Mastersizer 2000 metadata to an object."""
ms2 = type('MS2000', (PSA,), {})()
row = self.metadata.iloc[idx]
for i in row.keys():
v = getattr(row, i)
k = i.replace('[4, 3] - ','').replace('[3, 2] - ','')
k = k.replace('(0.1)','10').replace('(0.5)','50').replace('(0.9)','90')
k = k.rstrip()
k = k.replace(' ','_')
k = k.lower()
setattr(ms2, k, v)
df = pd.DataFrame(self.data.iloc[idx]).reset_index()
df.columns = ['um', 'frequency']
df.dropna(inplace=True)
setattr(ms2, 'distribution', df)
return(ms2)
def sample_names(self):
"""Return unique sample names."""
names = self.meta['Sample Name'].unique().tolist()
return(names)
def subset_metadata(self, df):
"""Define the metadata to process by a subsetted pandas dataframe. The pandas dataframe should be subsetted externally as required."""
self.metadata = df.copy()
print(df)
def pkeys(self):
"""Return primary keys as a list of tuples"""
m = self.meta
df = pd.concat([m['Sample Name'], pd.to_datetime(m['Measurement date and time'].dt.strftime('%Y-%m-%d'))], axis=1).drop_duplicates()
tuples = [tuple(i) for i in df.values]
return(tuples)
def idx(self, pkey):
"""Return indices for a given primary key"""
m = self.meta
idx = m[m['Sample Name']==pkey[0]].index
return(idx)
def replicates(self, indices):
"""Return a list of replicate series from given dataframe indices"""
d = self.data
lst = [self.row2series(i) for i in indices]
return(lst)
def row2series(self, idx):
"""Return a pandas series of a given replicate"""
# retrieve dataframe row
d = self.data
# drop first column of NaN values
d = d.drop('0.010000', axis=1)
s = d.iloc[idx]
# convert index from microns to mm and drop maximum size; this converts the size boundaries to lower limits to conform with traditional numerical methods
s.index = self.sizes_mm()[0:100]
return(s)
def sizes_mm(self):
"""Return a list of floats of the sizes in mm"""
d = self.data
mm = [round(float(i) * 10**-3, 9) for i in d.columns]
return(mm)
class SampleMetaData(MetaData):
"""Create a csv to establish groups of samples."""
def __init__(self, folder, filename='metadata.csv', columns=[], lab_id=[], groups=['group']):
"""
Attributes:
datdir <str>: A filesystem path to the folder to place the output csv;
filename <str>: Name of a CSV file containing, or to contain, segment coordinate data;
columns <list>: List of field names to override the default.
"""
self.csv_path = join(folder, filename)
MetaData.__init__(self, self.csv_path, self._columns(columns, groups))
if self.metadata_is_empty():
if lab_id:
self.append(lab_id)
print('WARNING: No metadata exists.')
def _columns(self, columns, groups):
"""Return a list of column names."""
lst = [
'id', # unique integer identifier
'sample_name', # unique name of sample
] + groups + [
'note'
]
c = columns if columns else lst
return(c)
#######################################################################
#########################################
def splitdf(df):
"""Split the raw dataframe into seperate data and metadata dataframes"""
h2 = df.columns.get_level_values(1)
hix, dix = [], []
for n, i in enumerate(h2):
try:
float(i)
dix += [n]
except:
hix += [n]
h = df[df.columns[hix]]
d = df[df.columns[dix]]
h.columns = h.columns.droplevel(level = 1)
d.columns = d.columns.droplevel(level = 0)
return(h, d)
def replicatesafvafvafv(df, pkeys = None):
"""Return a dictionary of data frame indices corresponding to unique sample names and measurement dates; unique groups of replicates are assumed here to be represented by a unique sample name and a unique date.
Attributes:
df <pd.DataFrame>: A pandas data frame including the columns 'Sample Name' and 'Measurement date and time'
"""
# convert dataframe timestamp to date
if not pkeys:
dfk = pd.concat([df['Sample Name'], pd.to_datetime(df['Measurement date and time'].dt.strftime('%Y-%m-%d'))], axis=1).drop_duplicates()
pkeys = [tuple(i) for i in dfk.values]
else:
pkeys = [(i[0], pd.to_datetime(i[1])) for i in pkeys]
print(df2pkeys(df))
#df = df.drop_duplicates()
#if not pkeys:
# pkeys = df
#print(df[['Sample Name','Measurement date and time']])
#print(df['Sample Name', 'Measurement date and time'])
#g = df.groupby(['Sample Name', 'Measurement date and time'])
#return(g.groups)
def keycols():
"""Return a list of column names defining the primary keys of Mastersizer 2000 data"""
lst = ['Sample Name', 'Measurement date and time']
return(lst)
def df2pkeys(df, cols = keycols()):
"""Return a list of tuples of primary keys for Mastersizer data"""
subset = pd.concat([df[cols[0]], pd.to_datetime(df[cols[1]].dt.strftime('%Y-%m-%d'))], axis=1).drop_duplicates()
pkeys = [tuple(i) for i in subset.values]
return(pkeys)
################# REVISE ALL BELOW ###########################
def create_index(df):
"""Create dataframe index"""
class data(object):
"""A class to handleMastersizer 2000 data
Attributes:
paths <string or list>: a single string or list of strings
"""
def __init__(self, csvpath):
"""Load a mastersizer 2000 csv"""
try:
raw = csv2df(csvpath)
print(raw)
df = formatIndices(raw)
h, d = splitdf(df)
d = cols2lower(d)
self.head = h
self.data = d
except Exception as e:
print(e)
def formatIndices(df):
"""Set the indices of a Mastersizer dataset to primary keys"""
fmt = pkeys2MultiIndex(df)
fmt.columns = df.columns
fmt.index.set_names(pkcols(), inplace = True)
return(fmt)
def pkeys2MultiIndex(df):
"""Return a Mastersizer 2000 dataframe with primary keys set as indices"""
pk = get_pkeys(df)
pknames = ['pk1', 'pk2']
pk.columns = pknames
df = pd.concat([df, pk], axis = 1)
df = df.set_index(pknames, drop = True)
return(df)
def pkcols():
"""Return the names of the primary key columns"""
lst = ['Sample Name', 'Measurement date and time']
return(lst)
def get_pkeys(df):
"""Return formatted primary key columns from head dataset"""
colnames = pkcols()
tmp = df[colnames]
tmp.columns = colnames
col1 = tmp[colnames[0]]
col2 = timestamp2day(tmp[colnames[1]])
pk = | pd.concat([col1, col2], axis=1) | pandas.concat |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
class BaseSetitemTests(BaseExtensionTests):
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[0] = data[1]
assert data[0] == data[1]
def test_setitem_sequence(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[[0, 1]] = [data[1], data[0]]
assert data[0] == original[1]
assert data[1] == original[0]
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
original = ser.copy()
value = [data[0]]
if as_array:
value = data._from_sequence(value)
xpr = "cannot set using a {} indexer with a different length"
with pytest.raises(ValueError, match=xpr.format("list-like")):
ser[[0, 1]] = value
# Ensure no modifications made before the exception
self.assert_series_equal(ser, original)
with pytest.raises(ValueError, match=xpr.format("slice")):
ser[slice(3)] = value
self.assert_series_equal(ser, original)
def test_setitem_empty_indxer(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[np.array([], dtype=int)] = []
self.assert_equal(data, original)
def test_setitem_sequence_broadcasts(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[[0, 1]] = data[2]
assert data[0] == data[2]
assert data[1] == data[2]
@pytest.mark.parametrize("setter", ["loc", "iloc"])
def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
setter = getattr(arr, setter)
operator.setitem(setter, 0, data[1])
assert arr[0] == data[1]
def test_setitem_loc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.loc[0, "B"] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_loc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.iloc[0, 1] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_iloc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.iloc[10, 0] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.iloc[10, 1] = data[1]
assert df.loc[10, "B"] == data[1]
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series):
arr = data[:5].copy()
expected = arr.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[mask] = data[0]
self.assert_equal(expected, arr)
def test_setitem_mask_raises(self, data, box_in_series):
# wrong length
mask = np.array([True, False])
if box_in_series:
data = pd.Series(data)
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:3] = True
mask[3:5] = pd.NA
if box_in_series:
data = | pd.Series(data) | pandas.Series |
import anonypy
import pandas as pd
data = [
[6, "1", "test1", "x", 20],
[6, "1", "test1", "x", 30],
[8, "2", "test2", "x", 50],
[8, "2", "test3", "w", 45],
[8, "1", "test2", "y", 35],
[4, "2", "test3", "y", 20],
[4, "1", "test3", "y", 20],
[2, "1", "test3", "z", 22],
[2, "2", "test3", "y", 32],
]
columns = ["col1", "col2", "col3", "col4", "col5"]
categorical = set(("col2", "col3", "col4"))
def test_k_anonymity():
df = pd.DataFrame(data=data, columns=columns)
print(df)
for name in categorical:
df[name] = df[name].astype("category")
feature_columns = ["col1", "col2", "col3"]
sensitive_column = "col4"
p = anonypy.Preserver(df, feature_columns, sensitive_column)
rows = p.anonymize_k_anonymity(k=2)
dfn = | pd.DataFrame(rows) | pandas.DataFrame |
"""
Function to do speed tests easily.
"""
import numpy as np
import pandas as pd
from timeit import default_timer
def speedtest(speed_inputs, speed_input_labels, funcs):
"""
Runs speed tests, and asserts outputs are all the same. Runs the first test before timing anything to make sure
numba functions are initialized properly.
:param speed_inputs: list of tuples of (args, kwargs) where args is the *args and kwargs is the **kwargs
:param speed_input_labels: names to use for each speed_input test
:param funcs: the functions to test
"""
func_times, outputs = [], []
# Initialize numba
for func in funcs:
func(*speed_inputs[0][0], **speed_inputs[0][1])
# Run timing tests
for func in funcs:
times = []
these_outputs = []
for args, kwargs in speed_inputs:
t = default_timer()
these_outputs.append(func(*args, **kwargs))
times.append(default_timer() - t)
func_times.append(times)
outputs.append(these_outputs)
for i in range(1, len(outputs)):
for j in range(len(outputs[0])):
np.testing.assert_array_almost_equal(outputs[i][j], outputs[i - 1][j])
print("Timings:\n")
print( | pd.DataFrame(func_times, columns=speed_input_labels, index=[f.__name__ for f in funcs]) | pandas.DataFrame |
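# Illustrative sketch (not part of the original module): comparing two equivalent implementations
# with speedtest. The functions, input sizes and labels below are hypothetical.
def _example_speedtest():
    def square_loop(values):
        # element-wise square with a plain Python loop
        return np.array([v * v for v in values])

    def square_numpy(values):
        # element-wise square with a vectorised numpy expression
        return values ** 2

    # one (args, kwargs) tuple per test case
    inputs = [((np.arange(n, dtype=float),), {}) for n in (10_000, 100_000)]
    labels = ["n=10k", "n=100k"]
    speedtest(inputs, labels, [square_loop, square_numpy])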
# Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from probatus.utils import preprocess_labels, get_single_scorer, preprocess_data, \
BaseFitComputePlotClass
from probatus.utils.shap_helpers import shap_calc, calculate_shap_importance
from sklearn.inspection import permutation_importance
import matplotlib.pyplot as plt
import shap
import warnings
class BaseResemblanceModel(BaseFitComputePlotClass):
"""
This model checks for similarity of two samples. A possible use case is analysing whether a train sample differs
from a test sample due to e.g. non-stationarity.
This is a base class and needs to be extended by a fit() method, which implements how data is split, how model is
trained and evaluated. Further, inheriting classes need to implement how feature importance should be indicated.
"""
def __init__(self, clf, scoring='roc_auc', test_prc=0.25, n_jobs=1, verbose=0, random_state=None):
"""
Initializes the class.
Args:
clf (model object):
Binary classification model or pipeline.
scoring (string or probatus.utils.Scorer, optional):
Metric for which the model performance is calculated. It can be either a metric name aligned with
predefined [classification scorers names in sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html).
Another option is using probatus.utils.Scorer to define a custom metric. Recommended option for this
class is 'roc_auc'.
test_prc (float, optional):
Percentage of data used to test the model. By default 0.25 is set.
n_jobs (int, optional):
Number of parallel executions. If -1 use all available cores. By default 1.
verbose (int, optional):
Controls verbosity of the output:
- 0 - neither prints nor warnings are shown
- 1 - 50 - only most important warnings
- 51 - 100 - shows other warnings and prints
- above 100 - presents all prints and all warnings (including SHAP warnings).
random_state (int, optional):
Random state set at each round of feature elimination. If it is None, the results will not be
reproducible and in random search at each iteration different hyperparameters might be tested. For
reproducible results set it to integer.
"""
self.clf = clf
self.test_prc = test_prc
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.scorer = get_single_scorer(scoring)
def _init_output_variables(self):
"""
Initializes variables that will be filled in during fit() method, and are used as output
"""
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.train_score = None
self.test_score = None
self.report = None
def fit(self, X1, X2, column_names=None, class_names=None):
"""
Base fit functionality that should be executed before each fit.
Args:
X1 (np.ndarray or pd.DataFrame):
First sample to be compared. It needs to have the same number of columns as X2.
X2 (np.ndarray or pd.DataFrame):
Second sample to be compared. It needs to have the same number of columns as X1.
column_names (list of str, optional):
List of feature names of the provided samples. If provided it will be used to overwrite the existing
feature names. If not provided the existing feature names are used or default feature names are
generated.
class_names (None, or list of str, optional):
List of class names assigned, in this case provided samples e.g. ['sample1', 'sample2']. If none, the
default ['First Sample', 'Second Sample'] are used.
Returns:
(BaseResemblanceModel):
Fitted object
"""
# Set seed for results reproducibility
if self.random_state is not None:
np.random.seed(self.random_state)
# Set class names
self.class_names = class_names
if self.class_names is None:
self.class_names = ['First Sample', 'Second Sample']
# Ensure inputs are correct
self.X1, self.column_names = preprocess_data(X1, X_name='X1', column_names=column_names, verbose=self.verbose)
self.X2, _ = preprocess_data(X2, X_name='X2', column_names=column_names, verbose=self.verbose)
# Prepare dataset for modelling
self.X = pd.DataFrame(pd.concat([
self.X1,
self.X2
], axis=0), columns = self.column_names).reset_index(drop=True)
self.y = pd.Series(np.concatenate([
np.zeros(self.X1.shape[0]),
np.ones(self.X2.shape[0]),
])).reset_index(drop=True)
# Assure the type and number of classes for the variable
self.X, _ = preprocess_data(self.X, X_name='X', column_names=self.column_names, verbose=self.verbose)
self.y = preprocess_labels(self.y, y_name='y', index=self.X.index, verbose=self.verbose)
# Reinitialize variables in case of multiple times being fit
self._init_output_variables()
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_prc,
random_state=self.random_state,
shuffle=True, stratify=self.y)
self.clf.fit(self.X_train, self.y_train)
self.train_score = np.round(self.scorer.score(self.clf, self.X_train, self.y_train), 3)
self.test_score = np.round(self.scorer.score(self.clf, self.X_test, self.y_test), 3)
self.results_text = f'Train {self.scorer.metric_name}: {np.round(self.train_score, 3)},\n' \
f'Test {self.scorer.metric_name}: {np.round(self.test_score, 3)}.'
if self.verbose > 50:
print(f'Finished model training: \n{self.results_text}')
if self.verbose > 0:
if self.train_score > self.test_score:
warnings.warn(f'Train {self.scorer.metric_name} > Test {self.scorer.metric_name}, which might indicate '
f'an overfit. \n Strong overfit might lead to misleading conclusions when analysing '
f'feature importance. Consider retraining with more regularization applied to the model.')
self.fitted = True
return self
def get_data_splits(self):
"""
Returns the data splits used to train the Resemblance model.
Returns:
(pd.DataFrame, pd.DataFrame, pd.Series, pd.Series):
X_train, X_test, y_train, y_test.
"""
self._check_if_fitted()
return self.X_train, self.X_test, self.y_train, self.y_test
def compute(self, return_scores=False):
"""
Checks if fit() method has been run and computes the output variables.
Args:
return_scores (bool, optional):
Flag indicating whether the method should return a tuple (feature importances, train score,
test score), or feature importances. By default the second option is selected.
Returns:
(tuple(pd.DataFrame, float, float) or pd.DataFrame):
Depending on the value of return_scores, either returns a tuple (feature importances, train score, test score) or
feature importances.
"""
self._check_if_fitted()
if return_scores:
return self.report, self.train_score, self.test_score
else:
return self.report
def fit_compute(self, X1, X2, column_names=None, class_names=None, return_scores=False, **fit_kwargs):
"""
Fits the resemblance model and computes the report regarding feature importance.
Args:
X1 (np.ndarray or pd.DataFrame):
First sample to be compared. It needs to have the same number of columns as X2.
X2 (np.ndarray or pd.DataFrame):
Second sample to be compared. It needs to have the same number of columns as X1.
column_names (list of str, optional):
List of feature names of the provided samples. If provided it will be used to overwrite the existing
feature names. If not provided the existing feature names are used or default feature names are
generated.
class_names (None, or list of str, optional):
List of class names assigned, in this case provided samples e.g. ['sample1', 'sample2']. If none, the
default ['First Sample', 'Second Sample'] are used.
return_scores (bool, optional):
Flag indicating whether the method should return a tuple (feature importances, train score,
test score), or feature importances. By default the second option is selected.
**fit_kwargs:
In case any other arguments are accepted by fit() method, they can be passed as keyword arguments
Returns:
(tuple of (pd.DataFrame, float, float) or pd.DataFrame):
Depending on the value of return_scores, either returns a tuple (feature importances, train score, test score) or
feature importances.
"""
self.fit(X1, X2, column_names=column_names, class_names=class_names, **fit_kwargs)
return self.compute(return_scores=return_scores)
def plot(self):
raise(NotImplementedError('Plot method has not been implemented.'))
class PermutationImportanceResemblance(BaseResemblanceModel):
"""
This model checks for similarity of two samples. A possible use case is analysing whether a train sample differs
from a test sample due to e.g. non-stationarity.
It assigns labels to each sample: 0 to the first sample, 1 to the second. Then, it randomly selects a portion of
data to train on. The resulting model tries to distinguish which sample a given test row comes from. This
provides insights on how distinguishable these samples are and which features contribute to that. The feature
importance is calculated using permutation importance.
If the model achieves test AUC significantly different than 0.5, it indicates that it is possible to distinguish
the samples, and therefore, the samples differ. Features with high permutation importance contribute to that
effect the most. Thus, their distribution might differ between two samples.
Examples:
```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from probatus.sample_similarity import PermutationImportanceResemblance
X1, _ = make_classification(n_samples=100, n_features=5)
X2, _ = make_classification(n_samples=100, n_features=5, shift=0.5)
clf = RandomForestClassifier(max_depth=2)
perm = PermutationImportanceResemblance(clf)
feature_importance = perm.fit_compute(X1, X2)
perm.plot()
```
<img src="../img/sample_similarity_permutation_importance.png" width="500" />
"""
def __init__(self, clf, iterations=100, scoring='roc_auc', test_prc=0.25, n_jobs=1, verbose=0, random_state=None):
"""
Initializes the class.
Args:
clf (model object):
Binary classification model or pipeline.
iterations (int, optional):
Number of iterations performed to calculate permutation importance. By default 100 iterations per
feature are done.
scoring (string or probatus.utils.Scorer, optional):
Metric for which the model performance is calculated. It can be either a metric name aligned with
predefined [classification scorers names in sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html).
Another option is using probatus.utils.Scorer to define a custom metric. Recommended option for this
class is 'roc_auc'.
test_prc (float, optional):
Percentage of data used to test the model. By default 0.25 is set.
n_jobs (int, optional):
Number of parallel executions. If -1 use all available cores. By default 1.
verbose (int, optional):
Controls verbosity of the output:
- 0 - neither prints nor warnings are shown
- 1 - 50 - only most important warnings
- 51 - 100 - shows other warnings and prints
- above 100 - presents all prints and all warnings (including SHAP warnings).
random_state (int, optional):
Random state set at each round of feature elimination. If it is None, the results will not be
reproducible and in random search at each iteration different hyperparameters might be tested. For
reproducible results set it to integer.
"""
super().__init__(clf=clf, scoring=scoring, test_prc=test_prc, n_jobs=n_jobs, verbose=verbose,
random_state=random_state)
self.iterations = iterations
self.iterations_columns = ['feature', 'importance']
self.iterations_results = | pd.DataFrame(columns=self.iterations_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Covid-19 in São Paulo
Generates charts to track the Covid-19 pandemic
in the city and state of São Paulo.
@author: https://github.com/DaviSRodrigues
"""
from datetime import datetime, timedelta
from io import StringIO
import locale
import math
from tableauscraper import TableauScraper
import traceback
import unicodedata
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots
import requests
def main():
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
print('Carregando dados...')
hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = carrega_dados_cidade()
dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes = carrega_dados_estado()
print('\nLimpando e enriquecendo dos dados...')
dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes = pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
evolucao_cidade, evolucao_estado = gera_dados_evolucao_pandemia(dados_munic, dados_estado, isolamento, dados_vacinacao, internacoes)
evolucao_cidade, evolucao_estado = gera_dados_semana(evolucao_cidade, evolucao_estado, leitos_estaduais, isolamento, internacoes)
print('\nGerando gráficos e tabelas...')
gera_graficos(dados_munic, dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, evolucao_cidade, evolucao_estado, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes)
print('\nAtualizando serviceWorker.js...')
atualiza_service_worker(dados_estado)
print('\nFim')
def carrega_dados_cidade():
hospitais_campanha = pd.read_csv('dados/hospitais_campanha_sp.csv', sep=',')
leitos_municipais = pd.read_csv('dados/leitos_municipais.csv', sep=',')
leitos_municipais_privados = pd.read_csv('dados/leitos_municipais_privados.csv', sep=',')
leitos_municipais_total = pd.read_csv('dados/leitos_municipais_total.csv', sep=',')
return hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total
def carrega_dados_estado():
hoje = data_processamento
ano = hoje.strftime('%Y')
mes = hoje.strftime('%m')
data = hoje.strftime('%Y%m%d')
try:
print('\tAtualizando dados dos municípios...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv'
dados_munic = pd.read_csv(URL, sep=';', decimal=',')
opcoes_zip = dict(method='zip', archive_name='dados_munic.csv')
dados_munic.to_csv('dados/dados_munic.zip', sep=';', decimal=',', index=False, compression=opcoes_zip)
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_covid_sp.csv do GitHub: lendo arquivo local.\n')
dados_munic = pd.read_csv('dados/dados_munic.zip', sep=';', decimal=',')
try:
print('\tAtualizando dados estaduais...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/sp.csv'
dados_estado = pd.read_csv(URL, sep=';')
dados_estado.to_csv('dados/dados_estado_sp.csv', sep=';')
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_estado_sp.csv do GitHub: lendo arquivo local.\n')
dados_estado = pd.read_csv('dados/dados_estado_sp.csv', sep=';', decimal=',', encoding='latin-1', index_col=0)
try:
print('\tCarregando dados de isolamento social...')
isolamento = pd.read_csv('dados/isolamento_social.csv', sep=',')
except Exception as e:
print(f'\tErro ao buscar isolamento_social.csv\n\t{e}')
try:
print('\tAtualizando dados de internações...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/plano_sp_leitos_internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', decimal=',', thousands='.')
internacoes.to_csv('dados/internacoes.csv', sep=';', decimal=',')
except Exception as e:
try:
print(f'\tErro ao buscar internacoes.csv do GitHub: lendo arquivo da Seade.\n\t{e}')
URL = (f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/Leitos-e-Internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', encoding='latin-1', decimal=',', thousands='.', engine='python',
skipfooter=2)
except Exception as e:
print(f'\tErro ao buscar internacoes.csv da Seade: lendo arquivo local.\n\t{e}')
internacoes = pd.read_csv('dados/internacoes.csv', sep=';', decimal=',', thousands='.', index_col=0)
try:
print('\tAtualizando dados de doenças preexistentes...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/casos_obitos_doencas_preexistentes.csv.zip')
doencas = pd.read_csv(URL, sep=';')
if len(doencas.asma.unique()) == 3:
opcoes_zip = dict(method='zip', archive_name='doencas_preexistentes.csv')
doencas.to_csv('dados/doencas_preexistentes.zip', sep=';', compression=opcoes_zip)
else:
global processa_doencas
processa_doencas = False
            raise Exception('O arquivo de doenças preexistentes não possui registros SIM/NÃO/IGNORADO para todas as doenças.')
except Exception as e:
try:
print(f'\tErro ao buscar doencas_preexistentes.csv do GitHub: lendo arquivo local.\n\t{e}')
doencas = pd.read_csv('dados/doencas_preexistentes.zip', sep=';', index_col=0)
except Exception as e:
print(f'\tErro ao buscar doencas_preexistentes.csv localmente: lendo arquivo da Seade.\n\t{e}')
URL = f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/casos_obitos_doencas_preexistentes.csv'
doencas = pd.read_csv(URL, sep=';', encoding='latin-1')
try:
print('\tAtualizando dados de casos/óbitos por raça e cor...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/casos_obitos_raca_cor.csv.zip')
dados_raciais = pd.read_csv(URL, sep=';')
opcoes_zip = dict(method='zip', archive_name='dados_raciais.csv')
dados_raciais.to_csv('dados/dados_raciais.zip', sep=';', compression=opcoes_zip)
except Exception as e:
print(f'\tErro ao buscar dados_raciais.csv do GitHub: lendo arquivo local.\n\t{e}')
dados_raciais = pd.read_csv('dados/dados_raciais.zip', sep=';', index_col=0)
print('\tAtualizando dados da campanha de vacinação...')
headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/88.0.4324.182 '
'Safari/537.36 '
'Edg/88.0.705.74'}
try:
print('\t\tDoses aplicadas por município...')
URL = f'https://www.saopaulo.sp.gov.br/wp-content/uploads/{ano}/{mes}/{data}_vacinometro.csv'
req = requests.get(URL, headers=headers, stream=True)
req.encoding = req.apparent_encoding
doses_aplicadas = pd.read_csv(StringIO(req.text), sep=';', encoding='utf-8-sig')
except Exception as e:
print(f'\t\tErro ao buscar {data}_vacinometro.csv da Seade: {e}')
doses_aplicadas = None
try:
print('\t\tDoses recebidas por cada município...')
URL = f'https://www.saopaulo.sp.gov.br/wp-content/uploads/{ano}/{mes}/{data}_painel_distribuicao_doses.csv'
req = requests.get(URL, headers=headers, stream=True)
req.encoding = req.apparent_encoding
doses_recebidas = pd.read_csv(StringIO(req.text), sep=';', encoding='utf-8-sig')
except Exception as e:
print(f'\t\tErro ao buscar {data}_painel_distribuicao_doses.csv da Seade: {e}')
doses_recebidas = None
try:
print('\t\tAtualizando doses aplicadas por vacina...')
url = 'https://www2.simi.sp.gov.br/views/PaineldeEstatsticasGerais_14_09_2021_16316423974680/PaineldeEstatsticasGerais'
scraper = TableauScraper()
scraper.loads(url)
sheet = scraper.getWorkbook().getWorksheet('donuts imunibiológico')
atualizacao_imunizantes = sheet.data.copy()
atualizacao_imunizantes['data'] = data_processamento
atualizacao_imunizantes = atualizacao_imunizantes[['data', 'Imunibiológico_Ajustado-alias', 'CNT(Imunibiológico_Ajustado)-alias']]
atualizacao_imunizantes.columns = ['data', 'vacina', 'aplicadas']
atualizacao_imunizantes.sort_values(by='vacina', inplace=True)
except Exception as e:
print(f'\t\tErro ao buscar dados de vacinas do Tableau: {e}')
traceback.print_exception(type(e), e, e.__traceback__)
atualizacao_imunizantes = None
leitos_estaduais = pd.read_csv('dados/leitos_estaduais.csv', index_col=0)
dados_vacinacao = pd.read_csv('dados/dados_vacinacao.zip')
dados_imunizantes = pd.read_csv('dados/dados_imunizantes.csv')
return dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes
def pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes):
print('\tDados municipais...')
dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = pre_processamento_cidade(dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total)
print('\tDados estaduais...')
dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_munic, dados_imunizantes = pre_processamento_estado(dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
return dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes
def pre_processamento_cidade(dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total):
dados_cidade = dados_munic.loc[dados_munic.nome_munic == 'São Paulo', ['datahora', 'casos', 'casos_novos', 'obitos', 'obitos_novos', 'letalidade']]
dados_cidade.columns = ['data', 'confirmados', 'casos_dia', 'óbitos', 'óbitos_dia', 'letalidade']
dados_cidade['letalidade'] = dados_cidade.letalidade * 100
dados_cidade['data'] = pd.to_datetime(dados_cidade.data)
dados_cidade['dia'] = dados_cidade.data.apply(lambda d: d.strftime('%d %b %y'))
hospitais_campanha['data'] = pd.to_datetime(hospitais_campanha.data, format='%d/%m/%Y')
hospitais_campanha['dia'] = hospitais_campanha.data.apply(lambda d: d.strftime('%d %b %y'))
leitos_municipais['data'] = pd.to_datetime(leitos_municipais.data, format='%d/%m/%Y')
leitos_municipais['dia'] = leitos_municipais.data.apply(lambda d: d.strftime('%d %b %y'))
leitos_municipais_privados['data'] = pd.to_datetime(leitos_municipais_privados.data, format='%d/%m/%Y')
leitos_municipais_privados['dia'] = leitos_municipais_privados.data.apply(lambda d: d.strftime('%d %b %y'))
leitos_municipais_total['data'] = pd.to_datetime(leitos_municipais_total.data, format='%d/%m/%Y')
leitos_municipais_total['dia'] = leitos_municipais_total.data.apply(lambda d: d.strftime('%d %b %y'))
return dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total
def formata_municipio(m):
return m.title() \
.replace(' Da ', ' da ') \
.replace(' De ', ' de ') \
.replace(' Do ', ' do ') \
.replace(' Das ', ' das ') \
.replace(' Dos ', ' dos ')
def pre_processamento_estado(dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes):
dados_estado.columns = ['data', 'total_casos', 'total_obitos']
dados_estado['data'] = pd.to_datetime(dados_estado.data)
dados_estado['dia'] = dados_estado.data.apply(lambda d: d.strftime('%d %b %y'))
dados_munic['datahora'] = pd.to_datetime(dados_munic.datahora)
isolamento['data'] = | pd.to_datetime(isolamento.data) | pandas.to_datetime |
# coding: utf-8
# Copyright (c) <NAME>.
# Distributed under the terms of the MIT License.
"""
This module implements utility functions for other modules in the package.
"""
import string
from io import StringIO
import os
import re
import math
import sys
from typing import List, Dict, Union, Tuple, Optional, Any
from typing_extensions import Final
import numpy as np
import pandas as pd
from pymatgen.core import Molecule
from pymatgen.io.lammps.data import CombinedData
from MDAnalysis import Universe
from MDAnalysis.core.groups import Atom, Residue, AtomGroup
from mdgo.volume import molecular_volume
__author__ = "<NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 9, 2021"
MM_of_Elements: Final[Dict[str, float]] = {
"H": 1.00794,
"He": 4.002602,
"Li": 6.941,
"Be": 9.012182,
"B": 10.811,
"C": 12.0107,
"N": 14.0067,
"O": 15.9994,
"F": 18.9984032,
"Ne": 20.1797,
"Na": 22.98976928,
"Mg": 24.305,
"Al": 26.9815386,
"Si": 28.0855,
"P": 30.973762,
"S": 32.065,
"Cl": 35.453,
"Ar": 39.948,
"K": 39.0983,
"Ca": 40.078,
"Sc": 44.955912,
"Ti": 47.867,
"V": 50.9415,
"Cr": 51.9961,
"Mn": 54.938045,
"Fe": 55.845,
"Co": 58.933195,
"Ni": 58.6934,
"Cu": 63.546,
"Zn": 65.409,
"Ga": 69.723,
"Ge": 72.64,
"As": 74.9216,
"Se": 78.96,
"Br": 79.904,
"Kr": 83.798,
"Rb": 85.4678,
"Sr": 87.62,
"Y": 88.90585,
"Zr": 91.224,
"Nb": 92.90638,
"Mo": 95.94,
"Tc": 98.9063,
"Ru": 101.07,
"Rh": 102.9055,
"Pd": 106.42,
"Ag": 107.8682,
"Cd": 112.411,
"In": 114.818,
"Sn": 118.71,
"Sb": 121.760,
"Te": 127.6,
"I": 126.90447,
"Xe": 131.293,
"Cs": 132.9054519,
"Ba": 137.327,
"La": 138.90547,
"Ce": 140.116,
"Pr": 140.90465,
"Nd": 144.242,
"Pm": 146.9151,
"Sm": 150.36,
"Eu": 151.964,
"Gd": 157.25,
"Tb": 158.92535,
"Dy": 162.5,
"Ho": 164.93032,
"Er": 167.259,
"Tm": 168.93421,
"Yb": 173.04,
"Lu": 174.967,
"Hf": 178.49,
"Ta": 180.9479,
"W": 183.84,
"Re": 186.207,
"Os": 190.23,
"Ir": 192.217,
"Pt": 195.084,
"Au": 196.966569,
"Hg": 200.59,
"Tl": 204.3833,
"Pb": 207.2,
"Bi": 208.9804,
"Po": 208.9824,
"At": 209.9871,
"Rn": 222.0176,
"Fr": 223.0197,
"Ra": 226.0254,
"Ac": 227.0278,
"Th": 232.03806,
"Pa": 231.03588,
"U": 238.02891,
"Np": 237.0482,
"Pu": 244.0642,
"Am": 243.0614,
"Cm": 247.0703,
"Bk": 247.0703,
"Cf": 251.0796,
"Es": 252.0829,
"Fm": 257.0951,
"Md": 258.0951,
"No": 259.1009,
"Lr": 262,
"Rf": 267,
"Db": 268,
"Sg": 271,
"Bh": 270,
"Hs": 269,
"Mt": 278,
"Ds": 281,
"Rg": 281,
"Cn": 285,
"Nh": 284,
"Fl": 289,
"Mc": 289,
"Lv": 292,
"Ts": 294,
"Og": 294,
"ZERO": 0,
}
SECTION_SORTER: Final[Dict[str, Dict[str, Any]]] = {
"atoms": {
"in_kw": None,
"in_header": ["atom", "charge", "sigma", "epsilon"],
"sec_number": None,
"desired_split": None,
"desired_cols": None,
"out_kw": None,
"ff_header": ["epsilon", "sigma"],
"topo_header": ["mol-id", "type", "charge", "x", "y", "z"],
},
"bonds": {
"in_kw": "Stretch",
"in_header": ["atom1", "atom2", "k", "r0"],
"sec_number": 5,
"desired_split": 2,
"desired_cols": 4,
"out_kw": ["Bond Coeffs", "Bonds"],
"ff_header": ["k", "r0"],
"topo_header": ["type", "atom1", "atom2"],
},
"angles": {
"in_kw": "Bending",
"in_header": ["atom1", "atom2", "atom3", "k", "theta0"],
"sec_number": 6,
"desired_split": 1,
"desired_cols": 5,
"out_kw": ["Angle Coeffs", "Angles"],
"ff_header": ["k", "theta0"],
"topo_header": ["type", "atom1", "atom2", "atom3"],
},
"dihedrals": {
"in_kw": "proper Torsion",
"in_header": ["atom1", "atom2", "atom3", "atom4", "v1", "v2", "v3", "v4"],
"sec_number": 7,
"desired_split": 1,
"desired_cols": 8,
"out_kw": ["Dihedral Coeffs", "Dihedrals"],
"ff_header": ["v1", "v2", "v3", "v4"],
"topo_header": ["type", "atom1", "atom2", "atom3", "atom4"],
},
"impropers": {
"in_kw": "improper Torsion",
"in_header": ["atom1", "atom2", "atom3", "atom4", "v2"],
"sec_number": 8,
"desired_split": 1,
"desired_cols": 5,
"out_kw": ["Improper Coeffs", "Impropers"],
"ff_header": ["v1", "v2", "v3"],
"topo_header": ["type", "atom1", "atom2", "atom3", "atom4"],
},
}
BOX: Final[
str
] = """{0:6f} {1:6f} xlo xhi
{0:6f} {1:6f} ylo yhi
{0:6f} {1:6f} zlo zhi"""
MOLAR_VOLUME: Final[Dict[str, float]] = {"lipf6": 18, "litfsi": 100} # empirical value
ALIAS: Final[Dict[str, str]] = {
"ethylene carbonate": "ec",
"ec": "ec",
"propylene carbonate": "pc",
"pc": "pc",
"dimethyl carbonate": "dmc",
"dmc": "dmc",
"diethyl carbonate": "dec",
"dec": "dec",
"ethyl methyl carbonate": "emc",
"emc": "emc",
"fluoroethylene carbonate": "fec",
"fec": "fec",
"vinyl carbonate": "vc",
"vinylene carbonate": "vc",
"vc": "vc",
"1,3-dioxolane": "dol",
"dioxolane": "dol",
"dol": "dol",
"ethylene glycol monomethyl ether": "egme",
"2-methoxyethanol": "egme",
"egme": "egme",
"dme": "dme",
"1,2-dimethoxyethane": "dme",
"glyme": "dme",
"monoglyme": "dme",
"2-methoxyethyl ether": "diglyme",
"diglyme": "diglyme",
"triglyme": "triglyme",
"tetraglyme": "tetraglyme",
"acetonitrile": "acn",
"acn": "acn",
"water": "water",
"h2o": "water",
}
# From PubChem
MOLAR_MASS: Final[Dict[str, float]] = {
"ec": 88.06,
"pc": 102.09,
"dec": 118.13,
"dmc": 90.08,
"emc": 104.05,
"fec": 106.05,
"vc": 86.05,
"dol": 74.08,
"egme": 76.09,
"dme": 90.12,
"diglyme": 134.17,
"triglyme": 178.23,
"tetraglyme": 222.28,
"acn": 41.05,
"water": 18.01528,
}
# from Sigma-Aldrich
DENSITY: Final[Dict[str, float]] = {
"ec": 1.321,
"pc": 1.204,
"dec": 0.975,
"dmc": 1.069,
"emc": 1.006,
"fec": 1.454, # from qm-ht.com
"vc": 1.355,
"dol": 1.06,
"dme": 0.867,
"egme": 0.965,
"diglyme": 0.939,
"triglyme": 0.986,
"tetraglyme": 1.009,
"acn": 0.786,
"water": 0.99707,
}
def atom_vec(atom1: Atom, atom2: Atom, dimension: np.ndarray) -> np.ndarray:
"""
Calculate the vector of the positions from atom2 to atom1.
Args:
atom1: Atom obj 1.
atom2: Atom obj 2.
dimension: box dimension.
Return:
The obtained vector
"""
vec = [0, 0, 0]
for i in range(3):
diff = atom1.position[i] - atom2.position[i]
if diff > dimension[i] / 2:
vec[i] = diff - dimension[i]
elif diff < -dimension[i] / 2:
vec[i] = diff + dimension[i]
else:
vec[i] = diff
return np.array(vec)
def position_vec(
pos1: Union[List[float], np.ndarray],
pos2: Union[List[float], np.ndarray],
dimension: Union[List[float], np.ndarray],
) -> np.ndarray:
"""
    Calculate the vector from pos2 to pos1.
Args:
pos1: Array of 3d coordinates 1.
pos2: Array of 3d coordinates 2.
dimension: box dimension.
Return:
The obtained vector.
"""
vec: List[Union[int, float, np.floating]] = [0, 0, 0]
for i in range(3):
diff = pos1[i] - pos2[i]
if diff > dimension[i] / 2:
vec[i] = diff - dimension[i]
elif diff < -dimension[i] / 2:
vec[i] = diff + dimension[i]
else:
vec[i] = diff
return np.array(vec)
def angle(a: np.ndarray, b: np.ndarray, c: np.ndarray) -> np.floating:
"""
Calculate the angle between three atoms.
Args:
a: Coordinates of atom A.
b: Coordinates of atom B.
c: Coordinates of atom C.
Returns:
The degree A-B-C.
"""
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
cosine_angle = np.clip(cosine_angle, -1.0, 1.0)
angle_in_radian = np.arccos(cosine_angle)
return np.degrees(angle_in_radian)
def mass_to_name(masses: np.ndarray) -> np.ndarray:
"""
    Map atom masses to element names.
Args:
masses: The masses array of atoms in an ``Universe``.
Return:
The element name array.
"""
names = []
for mass in masses:
for item in MM_of_Elements.items():
if math.isclose(mass, item[1], abs_tol=0.1):
names.append(item[0])
assert len(masses) == len(names), "Invalid mass found."
return np.array(names)
def lmp_mass_to_name(df: pd.DataFrame) -> Dict[int, str]:
"""
Create a dict for mapping atom type id to element from the mass information.
Args:
df: The masses attribute from LammpsData object
Return:
The element dict.
"""
atoms = {}
for row in df.index:
for item in MM_of_Elements.items():
if math.isclose(df["mass"][row], item[1], abs_tol=0.01):
atoms[int(row)] = item[0]
return atoms
def assign_name(u: Universe, names: np.ndarray):
"""
    Assign element names to the atoms in a MDAnalysis.universe object.
    Args:
        u: The universe object to assign atom names to.
names: The element name array.
"""
u.add_TopologyAttr("name", values=names)
def assign_resname(u: Universe, res_dict: Dict[str, str]):
"""
Assign resnames to residues in a MDAnalysis.universe object. The function will not overwrite existing resnames.
Args:
u: The universe object to assign resnames to.
res_dict: A dictionary of resnames, where each resname is a key
and the corresponding values are the selection language.
"""
u.add_TopologyAttr("resname")
for key, val in res_dict.items():
res_group = u.select_atoms(val)
res_names = res_group.residues.resnames
res_names[res_names == ""] = key
res_group.residues.resnames = res_names
def res_dict_from_select_dict(u: Universe, select_dict: Dict[str, str]) -> Dict[str, str]:
"""
Infer res_dict (residue selection) from select_dict (atom selection) in a MDAnalysis.universe object.
Args:
u: The universe object to assign resnames to.
select_dict: A dictionary of atom species, where each atom species name is a key
and the corresponding values are the selection language.
return:
A dictionary of resnames.
"""
saved_select = []
res_dict = {}
for key, val in select_dict.items():
res_select = "same resid as (" + val + ")"
res_group = u.select_atoms(res_select)
if key in ["cation", "anion"] or res_group not in saved_select:
saved_select.append(res_group)
res_dict[key] = res_select
if (
"cation" in res_dict
and "anion" in res_dict
and u.select_atoms(res_dict.get("cation")) == u.select_atoms(res_dict.get("anion"))
):
res_dict.pop("anion")
res_dict["salt"] = res_dict.pop("cation")
return res_dict
def res_dict_from_datafile(filename: str) -> Dict[str, str]:
"""
Infer res_dict (residue selection) from a LAMMPS data file.
Args:
filename: Path to the data file. The data file must be generated by a CombinedData object.
return:
A dictionary of resnames.
"""
res_dict = {}
with open(filename, "r") as f:
lines = f.readlines()
if lines[0] == "Generated by pymatgen.io.lammps.data.LammpsData\n" and lines[1].startswith("#"):
elyte_info = re.findall(r"\w+", lines[1])
it = iter(elyte_info)
idx = 1
for num in it:
name = next(it)
if name.isnumeric():
frag = int(name)
name = next(it)
names = [name + c for c in string.ascii_lowercase[0:frag]]
start = idx
idx += int(num) * frag
for i, n in enumerate(names):
res_dict[n] = "same mass as resid " + str(start + i)
else:
start = idx
idx += int(num)
end = idx
res_dict[name] = "resid " + str(start) + "-" + str(end - 1)
return res_dict
raise ValueError("The LAMMPS data file should be generated by pymatgen.io.lammps.data.")
def res_dict_from_lammpsdata(lammps_data: CombinedData) -> Dict[str, str]:
"""
Infer res_dict (residue selection) from a LAMMPS data file.
Args:
lammps_data: A CombinedData object.
return:
A dictionary of resnames.
"""
assert isinstance(lammps_data, CombinedData)
idx = 1
res_dict = {}
if hasattr(lammps_data, "frags"):
for name, num, frag in zip(lammps_data.names, lammps_data.nums, lammps_data.frags):
if frag == 1:
start = idx
idx += num
end = idx
res_dict[name] = "resid " + str(start) + "-" + str(end - 1)
else:
names = [name + c for c in string.ascii_lowercase[0:frag]]
start = idx
idx += int(num) * frag
for i, n in enumerate(names):
res_dict[n] = "same mass as resid " + str(start + i)
else:
for name, num in zip(lammps_data.names, lammps_data.nums):
start = idx
idx += num
end = idx
res_dict[name] = "resid " + str(start) + "-" + str(end - 1)
return res_dict
def select_dict_from_resname(u: Universe) -> Dict[str, str]:
"""
Infer select_dict (possibly interested atom species selection) from resnames in a MDAnalysis.universe object.
The resname must be pre-assigned already.
Args:
u: The universe object to work with.
return:
A dictionary of atom species.
"""
select_dict: Dict[str, str] = {}
resnames = np.unique(u.residues.resnames)
for resname in resnames:
if resname == "":
continue
residue = u.select_atoms("resname " + resname).residues[0]
if np.isclose(residue.charge, 0, atol=1e-5): # np.sum(residue.atoms.charges)
if len(residue.atoms.fragments) == 2:
for i, frag in enumerate(residue.atoms.fragments):
charge = np.sum(frag.charges)
if charge > 0.001:
extract_atom_from_ion(True, frag, select_dict)
elif charge < -0.001:
extract_atom_from_ion(False, frag, select_dict)
else:
extract_atom_from_molecule(resname, frag, select_dict, number=i + 1)
elif len(residue.atoms.fragments) >= 2:
cation_number = 1
anion_number = 1
molecule_number = 1
for frag in residue.atoms.fragments:
charge = np.sum(frag.charges)
if charge > 0.001:
extract_atom_from_ion(True, frag, select_dict, cation_number)
cation_number += 1
elif charge < -0.001:
extract_atom_from_ion(False, frag, select_dict, anion_number)
anion_number += 1
else:
extract_atom_from_molecule(resname, frag, select_dict, molecule_number)
molecule_number += 1
else:
extract_atom_from_molecule(resname, residue, select_dict)
elif residue.charge > 0:
extract_atom_from_ion(True, residue, select_dict)
else:
extract_atom_from_ion(False, residue, select_dict)
return select_dict
def extract_atom_from_ion(positive: bool, ion: Union[Residue, AtomGroup], select_dict: Dict[str, str], number: int = 0):
"""
    Assign the most charged atom and/or one unique atom in the ion to select_dict.
Args:
positive: Whether the charge of ion is positive. Otherwise negative. Default to True.
ion: Residue or AtomGroup
select_dict: A dictionary of atom species, where each atom species name is a key
and the corresponding values are the selection language.
number: The serial number of the ion.
"""
if positive:
if number == 0:
cation_name = "cation"
else:
cation_name = "cation_" + str(number)
if len(ion.atoms.types) == 1:
select_dict[cation_name] = "type " + ion.atoms.types[0]
else:
# The most positively charged atom in the cation
pos_center = ion.atoms[np.argmax(ion.atoms.charges)]
unique_types = np.unique(ion.atoms.types, return_counts=True)
# One unique atom in the cation
uni_center = unique_types[0][np.argmin(unique_types[1])]
if pos_center.type == uni_center:
select_dict[cation_name] = "type " + uni_center
else:
select_dict[cation_name + "_" + pos_center.name + pos_center.type] = "type " + pos_center.type
select_dict[cation_name] = "type " + uni_center
else:
if number == 0:
anion_name = "anion"
else:
anion_name = "anion_" + str(number)
if len(ion.atoms.types) == 1:
select_dict[anion_name] = "type " + ion.atoms.types[0]
else:
# The most negatively charged atom in the anion
neg_center = ion.atoms[np.argmin(ion.atoms.charges)]
unique_types = np.unique(ion.atoms.types, return_counts=True)
# One unique atom in the anion
uni_center = unique_types[0][np.argmin(unique_types[1])]
if neg_center.type == uni_center:
select_dict[anion_name] = "type " + uni_center
else:
select_dict[anion_name + "_" + neg_center.name + neg_center.type] = "type " + neg_center.type
select_dict[anion_name] = "type " + uni_center
def extract_atom_from_molecule(
resname: str, molecule: Union[Residue, AtomGroup], select_dict: Dict[str, str], number: int = 0
):
"""
Assign the most negatively charged atom in the molecule into select_dict
Args:
resname: The name of the molecule
molecule: The Residue or AtomGroup obj of the molecule.
select_dict: A dictionary of atom species, where each atom species name is a key
and the corresponding values are the selection language.
number: The serial number of the molecule under the name of resname.
"""
# neg_center = residue.atoms[np.argmin(residue.atoms.charges)]
# select_dict[resname + "-" + neg_center.name + neg_center.type] = "type " + neg_center.type
# pos_center = residue.atoms[np.argmax(residue.atoms.charges)]
# select_dict[resname + "+" + pos_center.name + pos_center.type] = "type " + pos_center.type
# The most negatively charged atom in the anion
if number > 0:
resname = resname + "_" + str(number)
neg_center = molecule.atoms[np.argmin(molecule.atoms.charges)]
select_dict[resname] = "type " + neg_center.type
def ff_parser(ff_dir: str, xyz_dir: str) -> str:
"""
    A parser to convert a force field file from Maestro format
to LAMMPS data format.
Args:
ff_dir: The path to the Maestro force field file.
xyz_dir: The path to the xyz structure file.
Return:
The output LAMMPS data string.
"""
with open(xyz_dir, "r") as f_xyz:
molecule = | pd.read_table(f_xyz, skiprows=2, delim_whitespace=True, names=["atom", "x", "y", "z"]) | pandas.read_table |
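# Illustrative note (an added comment, based on the read_table call above): the xyz
# structure file is expected to begin with an atom-count line and a comment line
# (skipped via skiprows=2), followed by whitespace-separated "atom x y z" rows, e.g.
#   5
#   methane
#   C  0.000  0.000  0.000
#   H  0.629  0.629  0.629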
import pandas as pd
import numpy as np
# import tensorflow as tf
import tensorflow.compat.v1 as tf
def data_prepare():
tf.disable_v2_behavior()
ratings_df = | pd.read_csv('./ml-latest-small/ratings.csv') | pandas.read_csv |
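# Illustrative sketch of one common next step (an assumption, not the original
# data_prepare body): pivot MovieLens-style ratings into a user-by-movie matrix.
# The tiny inline frame below stands in for ratings.csv (userId, movieId, rating).
import pandas as pd
toy_ratings = pd.DataFrame({'userId': [1, 1, 2],
                            'movieId': [10, 20, 10],
                            'rating': [4.0, 3.5, 5.0]})
rating_matrix = toy_ratings.pivot_table(index='userId', columns='movieId',
                                        values='rating').fillna(0).values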
import logging, os, time, multiprocessing, sys, signal
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import gym
import pybullet, pybullet_envs, pybullet_data
import numpy as np
import pandas as pd
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines.clac.policies import MlpPolicy as CLAC_MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import SAC, CLAC
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#ENVIRONMENT_NAMES = [Walker2DBulletEnv-v0, Robots/AntBulletEnv-v0 , "HopperBulletEnv-v0" , "HumanoidBulletEnv-v0", "HalfCheetahBulletEnv-v0"]
#FOLDERS = [ "Robots/AntBulletEnv" , "Robots/HopperBulletEnv" , "Robots/HumanoidBulletEnv", "Robots/HumanoidFlagrunBulletEnv"]
#physicsClient = pybullet.connect(pybullet.GUI) #or p.DIRECT for non-graphical version
#pybullet.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
# Robots
# RobotsGen
# RobotsExtremeGen
FOLDER = "Robots/AntBulletEnv"
# Create target Directory if don't exist
if not os.path.exists(FOLDER):
os.mkdir(FOLDER)
if not os.path.exists(FOLDER + "/models"):
os.mkdir(FOLDER + "/models")
if not os.path.exists(FOLDER + "/results"):
os.mkdir(FOLDER + "/results")
if not os.path.exists(FOLDER + "/features"):
os.mkdir(FOLDER + "/features")
NUM_RESAMPLES = 0
NUM_TRAINING_STEPS = 1000000
ENVIRONMENT_NAME = "AntBulletEnv-v0"
RANDOMIZATION_LEVEL = "None"
#RANDOMIZATION_LEVEL = "Test"
#RANDOMIZATION_LEVEL = "Normal"
#RANDOMIZATION_LEVEL = "Extreme"
CLAC_COEFS = [2.0]
agent_step = 9
for coef_index in range(len(CLAC_COEFS)):
mut_coef = CLAC_COEFS[coef_index]
if(agent_step == 1):
print(mut_coef, " ", NUM_TRAINING_STEPS, " ", ENVIRONMENT_NAME, " ", FOLDER)
features = | pd.DataFrame() | pandas.DataFrame |
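# Illustrative sketch of the training step these settings typically feed into
# (an assumed outline written as comments, not the original loop body):
#   env = DummyVecEnv([lambda: gym.make(ENVIRONMENT_NAME)])
#   model = SAC(MlpPolicy, env, verbose=0)
#   model.learn(total_timesteps=NUM_TRAINING_STEPS)
#   model.save(FOLDER + "/models/SAC_" + str(agent_step))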
#!/usr/bin/python3
import json
import sys
import subprocess
import pandas
import os
import shutil
import pprint
from deepdiff import DeepDiff
current_report = "/opt/ptx/trivy/reports_raw/current_report.json"
last_known = "/opt/ptx/trivy/last_known/last_output.json"
severity = "HIGH,CRITICAL"
def run_cmd(cmd):
cmd = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
cmd = cmd.stdout.read()
return cmd
docker_images_l = run_cmd("docker image ls -q").split()
docker_images_l = [ image.decode("utf-8") for image in docker_images_l ]
js_output= []
for image_id in docker_images_l:
cmd = run_cmd("docker image inspect " + image_id)
get_docker_image_info = json.loads(cmd)
name = "".join(get_docker_image_info[0]["RepoTags"])
tempd = { "image_name": name, "image_id": image_id }
### perform the Trivy scan on the image
try:
report = run_cmd("trivy -f json -q --severity " + severity + " " + name)
except Exception as e:
        print(str(e))
        continue
tempd["report"] = json.loads(report)
    ### We may encounter multiple vulnerability lists from different subpackages
all_vuln = []
for result in tempd["report"]["Results"]:
all_vuln.append(result["Vulnerabilities"])
tempd["report"] = [i for l in all_vuln for i in l]
js_output.append(tempd)
current_report_content = js_output
def analyze_report(vulnerabilities):
temp = | pandas.DataFrame(vulnerabilities) | pandas.DataFrame |
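# Illustrative sketch of a follow-up analysis (an assumption, not the original
# analyze_report body): de-duplicate findings by CVE id and count them per severity.
# Field names VulnerabilityID/Severity follow the usual Trivy JSON report layout.
import pandas
toy_vulns = pandas.DataFrame([
    {"VulnerabilityID": "CVE-2021-0001", "Severity": "HIGH"},
    {"VulnerabilityID": "CVE-2021-0002", "Severity": "CRITICAL"},
    {"VulnerabilityID": "CVE-2021-0001", "Severity": "HIGH"},
])
severity_counts = toy_vulns.drop_duplicates(subset="VulnerabilityID")["Severity"].value_counts()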
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
import datetime
import operator
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_120
from cudf.testing import _utils as utils
from cudf.testing._utils import assert_eq, assert_exceptions_equal
_TIMEDELTA_DATA = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[
136457654736252,
134736784364431,
245345345545332,
223432411,
2343241,
3634548734,
23234,
],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
_TIMEDELTA_DATA_NON_OVERFLOW = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_create(data, dtype):
    if dtype not in ("timedelta64[ns]",):
        pytest.skip(
            "Bug in pandas: https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize("cast_dtype", ["int64", "category"])
def test_timedelta_from_typecast(data, dtype, cast_dtype):
    if dtype not in ("timedelta64[ns]",):
        pytest.skip(
            "Bug in pandas: https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
if cast_dtype == "int64":
assert_eq(psr.values.view(cast_dtype), gsr.astype(cast_dtype).values)
else:
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("cast_dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_to_typecast(data, cast_dtype):
psr = pd.Series(cp.asnumpy(data) if isinstance(data, cp.ndarray) else data)
gsr = cudf.Series(data)
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_from_pandas(data, dtype):
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.from_pandas(psr)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_numpy(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = expected[~np.isnan(expected)]
actual = gsr.dropna().to_numpy()
np.testing.assert_array_equal(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_pandas(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = pd.Series(expected)
actual = gsr.to_pandas()
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data,other",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
[
"eq",
"ne",
"lt",
"gt",
"le",
"ge",
"add",
"radd",
"sub",
"rsub",
"floordiv",
"truediv",
"mod",
],
)
def test_timedelta_ops_misc_inputs(data, other, dtype, ops):
gsr = cudf.Series(data, dtype=dtype)
other_gsr = cudf.Series(other, dtype=dtype)
psr = gsr.to_pandas()
other_psr = other_gsr.to_pandas()
expected = getattr(psr, ops)(other_psr)
actual = getattr(gsr, ops)(other_gsr)
if ops in ("eq", "lt", "gt", "le", "ge"):
actual = actual.fillna(False)
elif ops == "ne":
actual = actual.fillna(True)
if ops == "floordiv":
expected[actual.isna().to_pandas()] = np.nan
assert_eq(expected, actual)
@pytest.mark.parametrize(
"datetime_data,timedelta_data",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
(
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
],
)
@pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES)
@pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
["add", "sub"],
)
def test_timedelta_ops_datetime_inputs(
datetime_data, timedelta_data, datetime_dtype, timedelta_dtype, ops
):
gsr_datetime = cudf.Series(datetime_data, dtype=datetime_dtype)
gsr_timedelta = cudf.Series(timedelta_data, dtype=timedelta_dtype)
psr_datetime = gsr_datetime.to_pandas()
psr_timedelta = gsr_timedelta.to_pandas()
expected = getattr(psr_datetime, ops)(psr_timedelta)
actual = getattr(gsr_datetime, ops)(gsr_timedelta)
assert_eq(expected, actual)
if ops == "add":
expected = getattr(psr_timedelta, ops)(psr_datetime)
actual = getattr(gsr_timedelta, ops)(gsr_datetime)
assert_eq(expected, actual)
elif ops == "sub":
assert_exceptions_equal(
lfunc=operator.sub,
rfunc=operator.sub,
lfunc_args_and_kwargs=([psr_timedelta, psr_datetime],),
rfunc_args_and_kwargs=([gsr_timedelta, gsr_datetime],),
compare_error_message=False,
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(
{
"A": pd.Series(pd.date_range("2012-1-1", periods=3, freq="D")),
"B": pd.Series([ | pd.Timedelta(days=i) | pandas.Timedelta |
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from collections import Counter
import csv
import itertools as IT
import operator
def preprocessing():
train = pd.read_csv('original_dataset.csv')
newdict = {'status':[],'o':[],'c':[],'e':[],'a':[],'n':[]}
    # sort so that rows from the same author are adjacent before their statuses are merged
    train = train.sort_values(by="#AUTHID")
authid = ''
for row in train.itertuples():
if row[1]==authid:
newdict['status'][-1] += ' ' + row[2]
else:
newdict['status'].append(row[2])
newdict['o'].append((row[12]))
newdict['c'].append((row[11]))
newdict['e'].append((row[8]))
newdict['a'].append((row[10]))
newdict['n'].append((row[9]))
authid = row[1]
df= | pd.DataFrame(newdict) | pandas.DataFrame |
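# Illustrative sketch (an assumed follow-up, not part of the original preprocessing()):
# how the NLTK helpers imported above would typically be applied to each status text.
def clean_status(text):
    stemmer = PorterStemmer()
    stop_words = set(stopwords.words('english'))
    tokens = word_tokenize(text.lower())
    return [stemmer.stem(t) for t in tokens if t.isalpha() and t not in stop_words]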
import numpy as np
import pandas as pd
from decisionengine.framework.modules import Source
PRODUCES = ["provisioner_resources"]
class ProvisionerResourceList(Source.Source):
def __init__(self, *args, **kwargs):
pass
def produces(self, schema_id_list):
return PRODUCES
# The DataBlock given to the source is t=0
def acquire(self):
resource_list = [
{"ResourceName": "AWS1", "ResourceCpus": 2,
"ResourceMemory": 8, "EC2Type": "m4.large"},
{"ResourceName": "AWS2", "ResourceCpus": 4,
"ResourceMemory": 16, "EC2Type": "m4.xlarge"},
{"ResourceName": "AWS3", "ResourceCpus": 2,
"ResourceMemory": 7.5, "EC2Type": "m3.large"},
{"ResourceName": "AWS4", "ResourceCpus": 4,
"ResourceMemory": 15, "EC2Type": "m3.xlarge"},
{"ResourceName": "AWS5", "ResourceCpus": 4,
"ResourceMemory": 7.5, "EC2Type": "c4.xlarge"}
]
resource_keys = resource_list[0].keys()
pandas_data = {}
for key in resource_keys:
pandas_data[key] = pd.Series([d[key] for d in resource_list])
return {"provisioner_resources": | pd.DataFrame(pandas_data) | pandas.DataFrame |
"""
Updated on Thursday December 10th, 2020
@author: <NAME>
The object of this script is to perform the following tasks:
1. Grab the current List of S&P 500 Company Tickers
2. Using the Yahoo Finance API, Download all the data for a given time period
and save them to a csv. (open, high, low, close,volume, dividends, stock splits)
3. Update the data when called. In this case the update period will be calculated.
If the data has not been updated in greater than 3 months, the data should be
refreshed using the original grab function, as opposed to the update function.
4. Using any csv list of stock tickers, download or update the data accordingly.
5. Read a CSV with stock tickers and import them into a csv file for use as above.
Notes:
Yahoo Finance acceptable time periods:
valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend last 60 days
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#imports
import bs4 as bs
import requests
import os
import time
import yfinance as yf
import pandas as pd
from datetime import datetime
import glob as g
import sys
'''
Function Name: save_sp_500_tickers(data_directory)
Function Purpose: To get the current list of S&P500 ticker Symbols from wikipedia
and save them to a file tickers.csv
Arguments: data_directory: A string representing the data directory where csv files containing tickers are stored
Output: sp500tickers.csv
'''
def save_sp_500_tickers(data_directory):
resp=requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup=bs.BeautifulSoup(resp.text, 'lxml')
table=soup.find('table',{'class': 'wikitable sortable'})
tickers=[]
tickers.append("Ticker")
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text.replace('.','-')
ticker = ticker[:-1]
tickers.append(ticker)
if os.path.exists(data_directory+'sp500tickers.csv'):
os.remove(data_directory+'sp500tickers.csv')
with open(data_directory+'sp500tickers.csv',"a",newline='') as f:
tickersDf= | pd.DataFrame(tickers) | pandas.DataFrame |
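# Illustrative sketch of the download step described in the header (a hedged example,
# not the original implementation; the ticker "MSFT" and output file name are
# assumptions for illustration):
import yfinance as yf
msft_history = yf.Ticker("MSFT").history(period="1y", interval="1d")
msft_history.to_csv("MSFT.csv")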
# %load training_functions.py
import pandas as pd
import os
import numpy as np
from datetime import datetime
import json
from os import listdir
from os.path import isfile, join
def pdf(data):
return pd.DataFrame(data)
def read_csv_power_file(file_path, filename):
csv_path = os.path.join(file_path, filename)
df = | pd.read_csv(csv_path) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from copy import deepcopy
from sklearn.utils import shuffle
from tqdm import tqdm
############ Make test networks ############
def make_triangonal_net():
"""
Make a triangonal network.
"""
dict_nodes = {'x': [1,3,2],
'y': [2,2,1],
'a': [1,0,0],
'b': [0,1,0],
'c': [0,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,0]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_trigonal_net():
"""
Make a trigonal network.
"""
dict_nodes = {'x': [1,3,2,0,4,2],
'y': [2,2,1,3,3,0],
'a': [1,0,0,1,0,0],
'b': [0,1,0,0,1,0],
'c': [0,0,1,0,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,0],
[0,3],
[1,4],
[2,5]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_P_net():
"""
Make a P-shaped network.
"""
dict_nodes = {'x': [0,0,0,0,1,1],
'y': [0,1,2,3,3,2],
'a': [1,0,0,0,0,0],
'b': [0,0,0,0,1,0],
'c': [0,1,1,1,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,3],
[3,4],
[4,5],
[5,2]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_high_assort_net():
"""
Make a highly assortative network.
"""
dict_nodes = {'x': np.arange(12).astype(int),
'y': np.zeros(12).astype(int),
'a': [1] * 4 + [0] * 8,
'b': [0] * 4 + [1] * 4 + [0] * 4,
'c': [0] * 8 + [1] * 4}
nodes = pd.DataFrame.from_dict(dict_nodes)
edges_block = np.vstack((np.arange(3), np.arange(3) +1)).T
data_edges = np.vstack((edges_block, edges_block + 4, edges_block + 8))
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_high_disassort_net():
"""
Make a highly dissassortative network.
"""
dict_nodes = {'x': [1,2,3,4,4,4,3,2,1,0,0,0],
'y': [0,0,0,1,2,3,4,4,4,3,2,1],
'a': [1,0,0] * 4,
'b': [0,1,0] * 4,
'c': [0,0,1] * 4}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = np.vstack((np.arange(12), np.roll(np.arange(12), -1))).T
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_random_graph_2libs(nb_nodes=100, p_connect=0.1, attributes=['a', 'b', 'c'], multi_mod=False):
import networkx as nx
# initialize the network
G = nx.fast_gnp_random_graph(nb_nodes, p_connect, directed=False)
pos = nx.kamada_kawai_layout(G)
nodes = pd.DataFrame.from_dict(pos, orient='index', columns=['x','y'])
edges = pd.DataFrame(list(G.edges), columns=['source', 'target'])
# set attributes
if multi_mod:
nodes_class = np.random.randint(0, 2, size=(nb_nodes, len(attributes))).astype(bool)
nodes = nodes.join(pd.DataFrame(nodes_class, index=nodes.index, columns=attributes))
else:
nodes_class = np.random.choice(attributes, nb_nodes)
nodes = nodes.join(pd.DataFrame(nodes_class, index=nodes.index, columns=['nodes_class']))
nodes = nodes.join(pd.get_dummies(nodes['nodes_class']))
if multi_mod:
for col in attributes:
# nx.set_node_attributes(G, df_nodes[col].to_dict(), col.replace('+','AND')) # only for glm extension file
nx.set_node_attributes(G, nodes[col].to_dict(), col)
else:
nx.set_node_attributes(G, nodes['nodes_class'].to_dict(), 'nodes_class')
return nodes, edges, G
############ Assortativity ############
def count_edges_undirected(nodes, edges, attributes):
"""Compute the count of edges whose end nodes correspond to given attributes.
Parameters
----------
nodes : dataframe
Attributes of all nodes
edges : dataframe
Edges between nodes given by their index
attributes: list
The attributes of nodes whose edges are selected
Returns
-------
count : int
Count of edges
"""
pairs = np.logical_or(np.logical_and(nodes.loc[edges['source'], attributes[0]].values, nodes.loc[edges['target'], attributes[1]].values),
np.logical_and(nodes.loc[edges['target'], attributes[0]].values, nodes.loc[edges['source'], attributes[1]].values))
count = pairs.sum()
return count
def count_edges_directed(nodes, edges, attributes):
"""Compute the count of edges whose end nodes correspond to given attributes.
Parameters
----------
nodes : dataframe
Attributes of all nodes
edges : dataframe
Edges between nodes given by their index
attributes: list
The attributes of nodes whose edges are selected
Returns
-------
count : int
Count of edges
"""
pairs = np.logical_and(nodes.loc[edges['source'], attributes[0]].values, nodes.loc[edges['target'], attributes[1]].values)
count = pairs.sum()
return count
def mixing_matrix(nodes, edges, attributes, normalized=True, double_diag=True):
"""Compute the mixing matrix of a network described by its `nodes` and `edges`.
Parameters
----------
nodes : dataframe
Attributes of all nodes
edges : dataframe
Edges between nodes given by their index
attributes: list
Categorical attributes considered in the mixing matrix
normalized : bool (default=True)
Return counts if False or probabilities if True.
double_diag : bool (default=True)
If True elements of the diagonal are doubled like in NetworkX or iGraph
Returns
-------
mixmat : array
Mixing matrix
"""
mixmat = np.zeros((len(attributes), len(attributes)))
for i in range(len(attributes)):
for j in range(i+1):
mixmat[i, j] = count_edges_undirected(nodes, edges, attributes=[attributes[i],attributes[j]])
mixmat[j, i] = mixmat[i, j]
if double_diag:
for i in range(len(attributes)):
mixmat[i, i] += mixmat[i, i]
if normalized:
mixmat = mixmat / mixmat.sum()
return mixmat
# NetworkX code:
def attribute_ac(M):
"""Compute assortativity for attribute matrix M.
Parameters
----------
M : numpy array or matrix
Attribute mixing matrix.
Notes
-----
This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] <NAME>, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
try:
import numpy
except ImportError:
raise ImportError(
"attribute_assortativity requires NumPy: http://scipy.org/ ")
if M.sum() != 1.0:
M = M / float(M.sum())
M = numpy.asmatrix(M)
s = (M * M).sum()
t = M.trace()
r = (t - s) / (1 - s)
return float(r)
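# Quick illustrative check (an added example, not part of the original module): with
# the toy networks defined above, the block-structured network is perfectly assortative
# while the alternating ring is disassortative under this coefficient.
def _demo_attribute_assortativity():
    nodes, edges = make_high_assort_net()
    r_high = attribute_ac(mixing_matrix(nodes, edges, ['a', 'b', 'c']))  # ~1.0
    nodes, edges = make_high_disassort_net()
    r_low = attribute_ac(mixing_matrix(nodes, edges, ['a', 'b', 'c']))   # ~-0.5
    return r_high, r_low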
def mixmat_to_df(mixmat, attributes):
"""
Make a dataframe of a mixing matrix.
"""
return pd.DataFrame(mixmat, columns=attributes, index=attributes)
def mixmat_to_columns(mixmat):
"""
Flattens a mixing matrix taking only elements of the lower triangle and diagonal.
To revert this use `series_to_mixmat`.
"""
N = mixmat.shape[0]
val = []
for i in range(N):
for j in range(i+1):
val.append(mixmat[i,j])
return val
def series_to_mixmat(series, medfix=' - ', discard=' Z'):
"""
Convert a 1D pandas series into a 2D dataframe.
To revert this use `mixmat_to_columns`.
"""
N = series.size
combi = [[x.split(medfix)[0].replace(discard, ''), x.split(medfix)[1].replace(discard, '')] for x in series.index]
# get unique elements of the list of mists
from itertools import chain
uniq = [*{*chain.from_iterable(combi)}]
mat = | pd.DataFrame(data=None, index=uniq, columns=uniq) | pandas.DataFrame |
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QTabWidget
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QTableWidget
from PyQt5 import QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
class LearnerTabs(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("Learning Analytics Dashboard")
tabwidget = QTabWidget()
tabwidget.addTab(SupportTab(), "Support")
tabwidget.addTab(AnalyticsTab(), "Analytics")
tabwidget.addTab(TrackerTab(), "Tracking")
vboxLayout = QVBoxLayout()
vboxLayout.addWidget(tabwidget)
self.setLayout(vboxLayout)
class SupportTab(QWidget):
def __init__(self):
super().__init__()
filenameLabel = QLabel("Name:")
fileNameEdit = QLineEdit()
dob = QLabel("Birth Date:")
dobedit = QLineEdit()
age = QLabel("Age:")
ageedit = QLineEdit()
PhoneNu = QLabel("Phone:")
phonedit = QLineEdit()
ftablayout = QVBoxLayout()
ftablayout.addWidget(filenameLabel)
ftablayout.addWidget(fileNameEdit)
ftablayout.addWidget(dob)
ftablayout.addWidget(dobedit)
ftablayout.addWidget(age)
ftablayout.addWidget(ageedit)
ftablayout.addWidget(PhoneNu)
ftablayout.addWidget(phonedit)
self.setLayout(ftablayout)
class AnalyticsTab(QWidget):
def __init__(self, parent=None):
super(AnalyticsTab, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
self.adjustSize()
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
self.module_idx = 0
self.tracker_idx = 0
self.comboBox = QComboBox()
self.comboBox.addItem('Registration Tracking Board')
self.comboBox.addItem('Learning Progress Tracking Board')
self.comboBox.addItem('Performance Tracking Board')
self.comboBox.addItem('Learning Behavior Tracking Board')
self.comboBox.currentIndexChanged.connect(self.select_tracker)
course_modules = ['AAA','BBB','CCC','DDD','EEE','FFF','GGG']
self.moduleBox = QComboBox()
for module in course_modules:
self.moduleBox.addItem(f'Course Module - {module}')
self.moduleBox.currentIndexChanged.connect(self.select_module)
# Just some button connected to `plot` method
self.button = QPushButton('Plot')
self.button.clicked.connect(self.plot)
# set the layout
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.comboBox)
layout.addWidget(self.moduleBox)
layout.addWidget(self.button)
self.setLayout(layout)
def select_module(self, i):
self.module_idx = i
def select_tracker(self, i):
self.tracker_idx = i
def regi_hist(self,course):
df_student_regi = pd.read_csv("../../data/studentRegistration.csv")
group = df_student_regi.groupby(['code_module']).get_group(course)
ax = group['date_registration'].hist(cumulative=True, histtype='bar')
ax.set_xlabel('registration date (relative to day 0)')
ax.set_ylabel('learners (cumulative)')
ax.set_title(f'Course Module - {course}')
def progress_plot(self,course):
df_stu_assess = | pd.read_csv('../../data/studentAssessment.csv') | pandas.read_csv |
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
"""
This is a Basal-Bolus Controller that is typically practiced by a Type-1
Diabetes patient. The performance of this controller can serve as a
baseline when developing a more advanced controller.
"""
def __init__(self, target=140):
self.quest = | pd.read_csv(CONTROL_QUEST) | pandas.read_csv |
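# Illustrative sketch of the standard basal-bolus rule such a controller applies
# (a hedged outline, not the exact simglucose policy): deliver the patient's basal
# rate plus a meal bolus CHO/CR plus a correction (BG - target)/CF when BG is high.
def _bb_dose_sketch(basal, cho, bg, cr, cf, target=140):
    correction = max(bg - target, 0) / cf
    return basal + cho / cr + correction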
import datetime as dt
import pandas as pd
import numpy as np
def OpenFace(openface_features, PID, EXP):
"""
Tidy up OpenFace features in pandas data.frame to be stored in sqlite
database:
- Participant and experiment identifiers are added as columns
- Underscores in column names are removed, because sqlite does not like
underscores in column names.
- Only column names with 'AU' in the column name are considered relevant
features and kept, the rest is removed.
Parameters
----------
    openface_features : pandas.core.frame.DataFrame
Data frame with columns participant_id (str), experiment_id (str),
timestamp (datetime64), and columns for the OpenFace derived
video features:
AU01r (float64), AU02r (float64), AU01c (float64), AU02c (float64)
PID : str
Participant identifier
EXP : str
Experiment identifier.
Returns
-------
    openface_features : pandas.core.frame.DataFrame
New data frame
"""
# tidy up data frame:
filter_col = [col for col in openface_features if col.startswith('AU')]
filter_col.insert(0,'time')
filter_col.insert(0,'participant_id')
filter_col.insert(0,'experiment_id')
openface_features['participant_id'] = PID
openface_features['experiment_id'] = EXP
openface_features = openface_features[filter_col]
openface_features.columns = openface_features.columns.str.replace('_', '')
openface_features = openface_features.rename(columns = {'experimentid':'experiment_id'})
openface_features = openface_features.rename(columns = {'participantid':'participant_id'})
return openface_features
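# A small usage sketch for OpenFace() (illustrative only; the toy column names
# below are assumptions, not values from the original pipeline).
def _openface_example():
    toy = pd.DataFrame({
        'time': [0.0, 0.033],
        'AU01_r': [0.4, 0.6],
        'AU02_c': [0.0, 1.0],
        'pose_Tx': [12.1, 12.3],  # dropped: does not start with 'AU'
    })
    # resulting columns: experiment_id, participant_id, time, AU01r, AU02c
    return OpenFace(toy, PID='p001', EXP='exp01')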
def Librosa(librosa_features, PID, EXP):
"""
Tidy up Librosa features in pandas data.frame to be stored in sqlite
database:
- Participant and experiment identifiers are added as columns
- Underscores in column names are removed, because sqlite does not like
underscores in column names.
- Column zrc is renamed as zcrate
- Only column names 'participant_id','experiment_id','time','pitch',
      'rmse', 'zcrate' are kept.
Parameters
----------
    librosa_features : pandas.core.frame.DataFrame
Data frame with columns participant_id (str), experiment_id (str),
timestamp (datetime64), and columns for the librosa derived
audio features:
pitch (float64), rmse (float32), zcrate (float64)
PID : str
Participant identifier
EXP : str
Experiment identifier.
Returns
-------
    librosa_features : pandas.core.frame.DataFrame
New data frame.
"""
librosa_features['participant_id'] = PID
librosa_features['experiment_id'] = EXP
librosa_features = librosa_features.rename(columns={'zrc': 'zcrate'})
librosa_features.drop(['timestamp'], axis = 1, inplace = True, errors = 'ignore')
librosa_features = librosa_features[['participant_id','experiment_id','time','pitch','rmse','zcrate']]
return librosa_features
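# A matching usage sketch for Librosa() (illustrative only; toy values are
# assumptions).
def _librosa_example():
    toy = pd.DataFrame({
        'time': [0.0, 0.02],
        'pitch': [210.5, 212.0],
        'rmse': [0.015, 0.017],
        'zrc': [0.11, 0.09],       # renamed to 'zcrate'
        'timestamp': [0.0, 0.02],  # dropped
    })
    # resulting columns: participant_id, experiment_id, time, pitch, rmse, zcrate
    return Librosa(toy, PID='p001', EXP='exp01')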
def downsample(time_series,res = '0.2S'):
"""
Downsamples time_series pandas data frame
Parameters
----------
time_series: pandas.core.frame.DataFrame
A pandas data.frame for which there is a column 'timestamps'
with numeric timestamps.
res: str
Desired resolution in time after downsampling used by 'resample'
function.
Returns
-------
time_series: pandas.core.frame.DataFrame
New resampled data frame.
"""
Nvalues = len(time_series.index)
samplerate = 1/ ((time_series.timestamp[Nvalues-1] - time_series.timestamp[0]) / Nvalues)
timestart = dt.datetime(1970, 1, 1, 0, 0, 0, 0) #dt.datetime.now()
start = pd.Timestamp(timestart)
end = pd.Timestamp(timestart + dt.timedelta(seconds=Nvalues/samplerate))
t = np.linspace(start.value, end.value, Nvalues)
t = | pd.to_datetime(t) | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
from xgboost import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
# read data sets
train = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\train.csv")
test = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\test.csv")
campaign_data = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\campaign_data.csv")
coupon_item_mapping = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\coupon_item_mapping.csv")
customer_demographics = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\customer_demographics.csv")
customer_transaction_data = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\customer_transaction_data.csv")
item_data = | pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\item_data.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Scenario
#
# As analysts for the OilyGiant mining company, our task is to find the best place for a new well.
#
# We will use several techniques, including machine learning and bootstrapping, to select the region with the highest profit margin.
#
# Machine learning prediction question: What is the predicted volume of reserves in thousand barrels for each region?
#
# Target (response): product (volume of reserves in thousand barrels)
#
# Useful Features (predictor variables): f0, f1, f2 unknown features important to analysis
#
# Datasets: geo_data_0.csv, geo_data_1.csv, geo_data_2.csv
#
# Analysis done December 2021
# In[1]:
# import libraries
# sklearn used for basic machine learning
from sklearn.linear_model import LinearRegression
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from sklearn import metrics
import pandas as pd
import numpy as np
import math
from scipy import stats as st
from numpy.random import RandomState
state = RandomState(12345)
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# # Step 1: Download and prepare the data
# In[2]:
# load the data for region0
try:
region0 = pd.read_csv('/datasets/geo_data_0.csv')
except:
print('ERROR: Unable to find or access file.')
# load the data for region1
try:
region1 = pd.read_csv('/datasets/geo_data_1.csv')
except:
print('ERROR: Unable to find or access file.')
# load the data for region2
try:
    region2 = pd.read_csv('/datasets/geo_data_2.csv')
except:
    print('ERROR: Unable to find or access file.')
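# A minimal sketch of the bootstrapping step described in the scenario above.
# Assumption: illustrative only; the 500-well sample size, 200 selected wells,
# 100M USD budget and 4500 USD revenue per unit are placeholders rather than
# values taken from the original notebook. It reuses the module-level
# RandomState `state` defined earlier.
def bootstrap_profit(predictions, targets, n_samples=1000, n_picked=200,
                     revenue_per_unit=4500, budget=100_000_000):
    profits = []
    for _ in range(n_samples):
        # draw a bootstrap sample of predicted reserves
        sampled = predictions.sample(n=500, replace=True, random_state=state)
        # pick the wells with the highest predicted reserves ...
        best_idx = sampled.sort_values(ascending=False).head(n_picked).index
        # ... and value them at their actual (target) reserves
        profits.append(targets.loc[best_idx].sum() * revenue_per_unit - budget)
    return pd.Series(profits)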
__author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import os
from matplotlib import pyplot as plt
import numpy as np
from scipy.stats import linregress
import datetime
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
from delphin_6_automation.delphin_setup import damage_models
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Functions
def acronym_table(path):
table_ = pd.read_excel(path).dropna()
return table_
def load_results(folder, quantity, name, result_folder):
df = pd.DataFrame()
indices = 0
result_folder_ = f"{folder}/{result_folder}/results"
for file in os.listdir(result_folder_):
if quantity in file:
value_dict = delphin_parser.d6o_to_dict(result_folder_, file)[0]['result']
cell = list(value_dict.keys())[0]
values = value_dict[cell]
df[file.split('.')[0][-1]] = values[8760:]
indices += 1
    df.index = pd.date_range(start=datetime.datetime(2020, 1, 1),
                             freq='h', periods=len(values[8760:]))
df.columns = pd.MultiIndex.from_arrays([df.columns, [name, ] * indices],
names=['location', 'brick type'])
df = df.sort_index(axis=1)
return df
def abs_diff(x1, x2):
return x2 - x1
def rel_diff(x1, x2):
return np.absolute(x2 - x1) / np.absolute(x2) * 100
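# e.g. rel_diff(1.8, 2.0) == |2.0 - 1.8| / |2.0| * 100 = 10.0, i.e. the percent
# difference relative to the 2D reference value x2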
def compute_differences(data_frame):
for column in data_frame.columns.levels[0]:
data_frame[column, 'brick', 'rel_diff'] = rel_diff(data_frame[column, 'brick', 'out'],
data_frame[column, '2d', 'out'])
data_frame[column, 'brick', 'abs_diff'] = abs_diff(data_frame[column, 'brick', 'out'],
data_frame[column, '2d', 'out'])
data_frame[column, 'mortar', 'rel_diff'] = rel_diff(data_frame[column, 'mortar', 'out'],
data_frame[column, '2d', 'out'])
data_frame[column, 'mortar', 'abs_diff'] = abs_diff(data_frame[column, 'mortar', 'out'],
data_frame[column, '2d', 'out'])
data_frame = data_frame.sort_index(axis=1)
return data_frame
def save_to_hdf(data_frame, quantity, folder):
hdf_file = folder + f'/{quantity}.h5'
data_frame.to_hdf(hdf_file, key='data', append=True)
def process_results(excel_file, result_folder, out_folder):
table = acronym_table(excel_file)
quantities = ['heat loss', 'temperature', 'relative humidity', 'moisture content', 'moisture integral']
for quantity in quantities:
big_table = pd.DataFrame()
for index in range(table.shape[0]):
new_table = load_results(result_folder, quantity, table['Brick Type'][index], table['Result IDs'][index])
big_table = pd.concat([big_table, new_table], axis=1)
save_to_hdf(big_table, quantity, out_folder)
def create_totals(out_folder, quantity):
hdf_file = out_folder + '/' + quantity + '.h5'
store = pd.HDFStore(hdf_file)
store_names = {'1a': {'48': {'insulated': [], 'uninsulated': []},
'36': {'insulated': [], 'uninsulated': []},
'24': {'insulated': [], 'uninsulated': []}
},
'2a': {'48': {'insulated': [], 'uninsulated': []},
'36': {'insulated': [], 'uninsulated': []},
'24': {'insulated': [], 'uninsulated': []}
},
'3a': {'48': {'insulated': [], 'uninsulated': []},
'36': {'insulated': [], 'uninsulated': []},
'24': {'insulated': [], 'uninsulated': []}
},
'4a': {'48': {'insulated': [], 'uninsulated': []},
'36': {'insulated': [], 'uninsulated': []},
'24': {'insulated': [], 'uninsulated': []}
}}
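    # Bin the store groups by acronym ('1a'-'4a'), thickness ('48'/'36'/'24')
    # and insulation state; note that the loop below only collects '4a' groups.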
for group in store.groups():
group_name = group._v_name
if '_4a' in group_name:
if '_48_' in group_name:
if '_insulated_' in group_name:
store_names['4a']['48']['insulated'].append(group_name)
else:
store_names['4a']['48']['uninsulated'].append(group_name)
elif '_36_' in group_name:
if '_insulated_' in group_name:
store_names['4a']['36']['insulated'].append(group_name)
else:
store_names['4a']['36']['uninsulated'].append(group_name)
elif '_24_' in group_name:
if '_insulated_' in group_name:
store_names['4a']['24']['insulated'].append(group_name)
else:
store_names['4a']['24']['uninsulated'].append(group_name)
for key0 in store_names.keys():
for key1 in store_names[key0].keys():
for key2 in store_names[key0][key1].keys():
if store_names[key0][key1][key2]:
frame_list = []
for frame in store_names[key0][key1][key2]:
frame_list.append(store.select(frame))
store.append(value=pd.concat(frame_list), key=f'total_{key0}_{key1}_{key2}')
store.close()
def plot_linear_relation(data_frame, material, bounds, quantity, title):
quantities = ['heat loss', 'temperature', 'relative humidity', 'moisture content', 'moisture integral', 'damage',
'wood rot', 'mould index', 'frost']
units = ['W/m$^2$', '$^o$C', '%', 'kg/m$^3$', 'kg', '-', '%', '-', '-']
i = quantities.index(quantity.lower())
if quantity.lower() in ['heat loss', 'moisture integral']:
fig, axes = plt.subplots(figsize=(16, 8), )
fig.suptitle(f'{title} - {quantity}\n{material.title()}')
elif quantity.lower() in ['mould index', 'wood rot', 'frost']:
fig, axes = plt.subplots(ncols=2, nrows=1, sharex=True, sharey=True, figsize=(16, 8), )
fig.suptitle(f'{title} - {quantity}\n{material.title()}')
axes = axes.flatten()
elif 'Uninsulated' in title:
fig, axes = plt.subplots(ncols=3, nrows=2, sharex=True, sharey=True, figsize=(16, 8), )
fig.suptitle(f'{title} - {quantity}\n{material.title()}')
axes = axes.flatten()
else:
fig, axes = plt.subplots(ncols=4, nrows=2, sharex=True, sharey=True, figsize=(16, 8), )
fig.suptitle(f'{title} - {quantity}\n{material.title()}')
axes = axes.flatten()
for location in data_frame.columns.levels[0]:
if quantity.lower() in ['heat loss', 'moisture integral', ]:
ax = axes
else:
ax = axes[int(location)]
ax.set_title(f'Location {location}')
slope, intercept, r_value, p_value, std_err = linregress(data_frame[location, material, 'out'],
data_frame[location, '2d', 'out'])
ax.scatter(data_frame[location, material, 'out'], data_frame[location, '2d', 'out'], label='Data')
ax.plot(data_frame[location, material, 'out'], intercept + slope * data_frame[location, material, 'out'], 'r',
label=f'f(x) = {slope:.4f} * x + {intercept:.4f}\nR$^2$ = {r_value:.2f}')
ax.set_ylim(bounds[0], bounds[1])
ax.set_xlim(bounds[0], bounds[1])
ax.set_ylabel(f'2D Result - {units[i]}')
ax.set_xlabel(f'1D Result - {units[i]}')
ax.legend()
def abs_diff_boxplot(data_frame, bounds, quantity, title):
quantities = ['heat loss', 'temperature', 'relative humidity', 'moisture content', 'moisture integral', 'wood rot',
'mould index']
units = ['W/m$^2$', '$^o$C', '%', 'kg/m$^3$', 'kg', '%', '-']
i = quantities.index(quantity.lower())
abs_frame = data_frame.loc[:, pd.IndexSlice[:, :, 'abs_diff']]
abs_frame.columns = abs_frame.columns.droplevel(level=2)
plt.figure(figsize=(16, 8), tight_layout=True)
plt.title(f'Absolute Difference - {quantity}\n'
f'{title}')
abs_frame.boxplot(showfliers=False)
plt.ylim(bounds[0], bounds[1])
plt.ylabel(f'{quantity} - {units[i]}')
def rel_diff_boxplot(data_frame, bounds, quantity, title, log=False):
rel_frame = data_frame.loc[:, pd.IndexSlice[:, :, 'rel_diff']]
rel_frame.columns = rel_frame.columns.droplevel(level=2)
plt.figure(figsize=(16, 8), tight_layout=True)
plt.title(f'Relative Difference - {quantity}\n'
f'{title}')
rel_frame.boxplot(showfliers=False)
plt.ylim(bounds[0], bounds[1])
plt.ylabel(f'Relative Difference - %')
if log:
plt.gca().set_yscale('log')
def compute_damage_models(excel_file, folder):
table = acronym_table(excel_file)
acro_dict = match_2d_1d(table)
def frost(location):
damage_df[location, 'brick', 'out'] = damage_models.frost_risk(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, 'brick', 'out'],
pd.read_hdf(temperature_file, key=acro_key)[location, 'brick', 'out'])
damage_df[location, 'mortar', 'out'] = damage_models.frost_risk(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, 'mortar', 'out'],
pd.read_hdf(temperature_file, key=acro_key)[location, 'mortar', 'out'])
damage_df[location, '2d', 'out'] = damage_models.frost_risk(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, '2d', 'out'],
pd.read_hdf(temperature_file, key=acro_key)[location, '2d', 'out'])
def rot(location):
damage_df[location, 'brick', 'out'] = damage_models.wood_rot(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, 'brick', 'out'],
pd.read_hdf(temperature_file, key=acro_key)[location, 'brick', 'out'])[0]
damage_df[location, 'mortar', 'out'] = damage_models.wood_rot(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, 'mortar', 'out'],
pd.read_hdf(temperature_file, key=acro_key)[location, 'mortar', 'out'])[0]
damage_df[location, '2d', 'out'] = damage_models.wood_rot(
pd.read_hdf(relative_humidity_file, key=acro_key)[location, '2d', 'out'],
            pd.read_hdf(temperature_file, key=acro_key)[location, '2d', 'out'])[0]
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
        # left-only index uses right's index, oddly
expected.index = result.index
        # time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = | merge_asof(trades, quotes, on="time", by="ticker") | pandas.merge_asof |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = | DataFrame([["1"], [""]], columns=["foo"]) | pandas.core.frame.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
    # pandas supports indexes with mixed value types; cudf raises a
    # `NotImplementedError` instead (see the sketch after this test).
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
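# Illustrative sketch of the pandas-only behaviour mentioned in the comment
# above: a rename mapper may yield an index with mixed value types, which is
# exactly the case cudf rejects with NotImplementedError.
def test_pandas_mixed_value_index_rename_sketch():
    pdf = pd.DataFrame({"a": [1, 2, 3]})
    renamed = pdf.rename(mapper={1: "x", 2: "y"}, axis=0)
    # pandas happily mixes int and str labels in the resulting index
    assert list(renamed.index) == [0, "x", "y"]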
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct (see the bitmask-decoding sketch after this test)
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
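# Bitmask-decoding sketch (assumption: validity bitmasks are read LSB-first,
# one bit per row, which is what the masked-column test above relies on).
# Decoding 0b00101101 for six rows reproduces the `validids` used above.
def test_bitmask_decoding_sketch():
    mask_byte = 0b00101101
    valid_positions = [i for i in range(6) if (mask_byte >> i) & 1]
    assert valid_positions == [0, 2, 3, 5]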
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
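# Conceptual sketch of the invariant asserted above: the partition a row lands
# in must be a pure function of its key, so a given key can never appear in two
# partitions. The modulo hash below is an illustration only, not the hash cudf
# actually uses.
def test_hash_partition_invariant_sketch():
    keys = [0, 1, 2, 3, 4, 0, 1, 2]
    nparts = 3
    parts = [[] for _ in range(nparts)]
    for k in keys:
        parts[hash(k) % nparts].append(k)
    seen = set()
    for p in parts:
        unique_keys = set(p)
        # no key-value may occur in more than one partition
        assert not (unique_keys & seen)
        seen |= unique_keys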
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
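# Sketch of why check_dtype=False is needed above: a column built from an empty
# list does not carry an integer dtype in pandas, so concatenating it with an
# int64 column cannot be expected to preserve int64.
def test_pandas_empty_column_dtype_sketch():
    empty = pd.DataFrame({"a": []})
    filled = pd.DataFrame({"a": [1, 2]})
    assert empty["a"].dtype != filled["a"].dtype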
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and turns
    # nulls in non-float numerical columns into 0, so normalize the numeric
    # columns before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
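# Sketch of the first-byte comparison used above: for five elements with nulls
# at positions 1 and 3, the leading validity byte is 0b10101 (LSB-first), no
# matter how many padding bytes follow it.
def test_validity_first_byte_sketch():
    valid = [True, False, True, False, True]
    first_byte = 0
    for i, flag in enumerate(valid):
        if flag:
            first_byte |= 1 << i
    assert first_byte == 0b10101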
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
            pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name is randomized per process, which can
    # occasionally make enc_with_name_arr and enc_arr come out identical, and
    # there is no better way to force a stable hash. An integer name is used
    # instead so the hash always contributes the same constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
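# Minimal sketch of the range property asserted above: any hash-encode style
# mapping h(x) % num_features lands in [0, num_features). Illustration only;
# this is not cudf's hashing implementation.
def test_hash_encode_range_sketch():
    num_features = 1000
    encoded = [hash(v) % num_features for v in range(100)]
    assert min(encoded) >= 0
    assert max(encoded) < num_features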
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
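# Quick illustration of numpy's `right` flag, which the digitize test above
# mirrors: with right=False bins are half-open on the right, with right=True
# on the left. Uses the module-level numpy import.
def test_np_digitize_right_flag_sketch():
    bins = np.array([10, 20, 30])
    data = np.array([10, 20, 30])
    np.testing.assert_array_equal(
        np.digitize(data, bins, right=False), [1, 2, 3]
    )
    np.testing.assert_array_equal(
        np.digitize(data, bins, right=True), [0, 1, 2]
    )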
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Typecast the pandas expectation because a list of None produces
    # `object` dtype (see the sketch after this test)
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
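# Sketch of why the explicit dtype is needed above: a plain list of None gives
# an object-dtype pandas Series, while the expected result must be the float64
# all-null series that the cudf column compares equal to.
def test_none_series_dtype_sketch():
    assert pd.Series([None, None]).dtype == np.dtype("object")
    assert pd.Series([None, None], dtype="float64").isna().all()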
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
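# Sketch of the behaviour the TODO in test_dataframe_reindex_0 refers to:
# reindexing an int column onto labels that do not exist fills the new rows
# with NaN, which forces the column to float64 in pandas.
def test_pandas_reindex_int_to_float_sketch():
    pdf = pd.DataFrame({"b": [1, 2, 3]})
    reindexed = pdf.reindex([0, 1, 2, 3])
    assert reindexed["b"].dtype == np.float64
    assert np.isnan(reindexed["b"].iloc[-1])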
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas did not support ignore_index for sort_index before 1.0, so it is
    # not passed here; the reset_index calls below emulate it for the comparison
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
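    # the frame's reported size should be the index size plus the sum of the
    # individual column sizes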
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
    pds = pd.Series(pd.Series([], dtype="str"))
    assert_eq(pds, gds)
from .vcfwrapper import VCFWrapper, get_samples_from_vcf
from .annotation_parser import VEPAnnotation
from .cli import InputFile, error, warning, info
from .filters import VariantFilter, AnnotationFilter
from cyvcf2 import VCF
from scipy import stats, mean
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
import allel
import re
import math
import os
import csv
import time
# static factory method for returning a vcf object
def load_vcf(vcf_file):
if os.path.exists(vcf_file):
vcf = VCF(vcf_file)
return vcf
else:
return None
# static factory method for returning a cohort object representing
# family relations
def load_group(tsv_file):
pass
def order_chroms(chroms):
"""
static method for ordering a list of chromosome names correctly without
knowledge of the chromosome name format (e.g. "chr1", "chr-1", or "CHROM1")
@param chroms : Iterable returning chromosome identifiers used in
a vcf file.
@return ordered List of chromosomes
"""
gonosomes = ["X", "Y"]
mit_chrom = ["M", "MT"]
ordered = []
# find the chromosome-id prefix: i.e.the part that is shared among
# all chromosome-ids
chrom_prefix_re = re.compile(r"^[a-zA-Z_:\*\+\-]*")
chrom_prefix = chrom_prefix_re.match(chroms[0]).group(0)
    # order in which the chromosomes will be printed: numeric chromosomes
    # first, then the gonosomes X and Y, and finally the mitochondrial genome
ordered_chromnum = list(str(i) for i in range(1, 25)) \
+ gonosomes \
+ mit_chrom
for i in ordered_chromnum:
chrom = chrom_prefix + i
if chrom in chroms:
ordered.append(chrom)
else:
chrom = chrom_prefix + i.zfill(2)
if chrom in chroms:
ordered.append(chrom)
else:
# chromosome was not originally in the list -> skip it
continue
return ordered
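# Illustrative example (not from the original module): with the shared prefix
# "chr" detected from the first entry, order_chroms(["chr2", "chrX", "chr10",
# "chr1"]) should come back as ["chr1", "chr2", "chr10", "chrX"].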
def get_color_map(n, name="hsv"):
"""
static method for generating a color map. This can be used for plots
to create distinct colors.
@param n : Number of different colors to be returned
@return a color map of distinct colors
"""
return plt.cm.get_cmap(name, n)
class AnalysisControl:
"""
Class for setting up and building cohorts for running analyses from
command line input.
@param job : An argparse namespace obtained from cli.parse_args()
representing command line options controlling an
analysis task.
@param analysis : One of ["versus", "cohort", "vcfstats"] representing
the Anaylsis taks to be run.
"""
def __init__(self,
analysis,
job):
# analysis task to be run
self.analysis = analysis
if self.analysis == "vcfstats":
self.run = self.run_vcfstats
elif analysis == "cohort":
self.run = self.run_cohort_analysis
elif analysis == "versus":
self.run = self.run_versus_analysis
else:
error("Unexpected Subcommand: {}".format(analysis))
# output options
self.base_dir = os.getcwd()
self.interactive_report = job.interactive_report
self.report_dir = "DiscavarReports"
if job.report_dir:
self.report_dir = job.report_dir
if not os.path.exists(self.report_dir):
os.mkdir(self.report_dir)
else:
warning("Report outdir {} exists".format(self.report_dir))
if not self.analysis == "vcfstats":
self.vcf_dir = "Discavar"
if job.vcf_dir:
self.vcf_dir = job.vcf_dir
if not os.path.exists(self.vcf_dir):
os.mkdir(self.vcf_dir)
else:
warning("VCF outdir {} exists".format(self.vcf_dir))
os.chdir(self.vcf_dir)
# isec and subt options
self.call_rate = job.call_rate
self.call_rate2 = job.call_rate2
self.alt_ratio = job.alt_ratio
self.alt_ratio2 = job.alt_ratio2
self.cohorts = []
if job.input.is_tsv:
self.cohorts = Cohort.from_tsv(job.input.filepath,
build_cohort=True)
elif job.input.is_vcf:
self.cohorts = [Cohort.from_vcf("discavar-cohort",
job.input.filepath,
outfile=job.vcf_copy)]
else:
error("Invalid Input file ending.")
if self.cohorts:
# only apply filters when cohort analysis is selected
if self.analysis == "cohort":
info("Applying Filters")
# set up filters
var_filter = VariantFilter(
min_gq=job.filter_gq,
min_mq=job.filter_mq,
min_dp=job.filter_dp,
intervals=job.filter_regions,
excl_intervals=job.filter_excl_regions,
vtypes=job.filter_vtypes,
excl_vtypes=job.filter_excl_vtypes,
perc_pass=job.filter_perc_pass)
# set up vep filter
file = self.cohorts[0].vcf_file
vcf = VCF(file)
vep_parser = VEPAnnotation(list(vcf.header_iter()))
vep_filter = AnnotationFilter(
vep_parser,
IMPACT__in=job.vep_impact,
CLIN_SIG__in=job.vep_significance)
for c in self.cohorts:
c.add_filter(var_filter)
c.add_filter(vep_filter)
c.apply_filters()
else:
warning("No Cohorts or VCF specified.")
def run_vcfstats(self):
info("Gathering VCF Statistics")
for c in self.cohorts:
# Reporting
info("Writing reports for {}".format(c.cohort_id))
os.chdir(self.base_dir)
os.chdir(self.report_dir)
if not os.path.exists(c.cohort_id):
os.mkdir(c.cohort_id)
os.chdir(c.cohort_id)
c.write(write_vcf=False,
write_reports=True)
def run_cohort_analysis(self):
info("Starting Cohort Analysis")
for c in self.cohorts:
os.chdir(self.base_dir)
# Variant analysis
info("Running cohort variant operations on {}".format(c.cohort_id))
os.chdir(self.vcf_dir)
affected = c.get_affected_sample_ids()
unaffected = c.get_unaffected_sample_ids()
time_start = time.time()
if affected and unaffected:
c.healthy_vs_diseased(self.alt_ratio,
self.call_rate,
self.alt_ratio2,
self.call_rate2)
else:
c.intersect(self.alt_ratio, self.call_rate)
time_stop = time.time()
duration = (time_stop - time_start) / 60
info("cohort analysis of {} was completed in {} minutes".format(
c.cohort_id, duration))
c.write(write_vcf=True,
write_reports=False)
# Reporting
info("Writing reports for {}".format(c.cohort_id))
os.chdir(self.base_dir)
os.chdir(self.report_dir)
if not os.path.exists(c.cohort_id):
os.mkdir(c.cohort_id)
os.chdir(c.cohort_id)
c.write(write_vcf=False,
write_reports=True)
def run_versus_analysis(self):
#info("Starting Versus Analysis")
error("Versus Analysis not implemented")
class Report:
"""
Class for writing a report textfile with statistics gathered from a
cohort object.
@param cohort : A Cohort object for which a report is written
"""
def __init__(self,
cohort,
commandline="",
report_file=""):
self.cohort = cohort
self.report_file = report_file
if not report_file:
self.report_file = self.cohort.cohort_id + "_report.txt"
self.report = ""
def add_line(self, s="", line_prepend=""):
"""
Adds a line to the report.
@param s : a string representing the line that should be added.
@param line_prepend : a string representing an arbitrary set of
characters that is written before every line
"""
s = line_prepend + s.replace("\n", "\n"+line_prepend)
line = "{}\n".format(s)
self.report += line
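    # Illustrative example (not part of the original source):
    # add_line("a\nb", line_prepend="\t") appends "\ta\n\tb\n" to the report.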
def write(self):
"""
Write the report to a plain text file.
"""
with open(self.report_file, "w") as outfile:
outfile.write(self.report)
class CohortReport(Report):
def gather(self):
self.add_line("Cohort Report for {}".format(self.cohort.cohort_id))
self.add_line("{} Samples in this Cohort: {}".format(
len(self.cohort.get_sample_ids()),
self.cohort.get_sample_ids())
)
self.add_line()
# output gene List
genes = self.cohort.gene_list()
# Plotting
data = self.cohort.to_dataframe()
if data is not None and \
isinstance(data, pd.DataFrame) and \
not data.empty:
_ = self.cohort.manhattan(df=data)
_ = self.cohort.callrate_vs_qual(df=data)
_ = self.cohort.qual_dist()
dp_dist = self.cohort.dp_dist()
mq_dist = self.cohort.mq_dist()
vep_csq = self.cohort.vep_consequences()
vtypes = self.cohort.vtypes_dist()
# Todo: Order by Chromosomes
var_stats = data.describe()
#dp_stats = dp_dist.describe()
#mq_stats = mq_dist.describe()
else:
error("No Statistics on the Variants could be collected")
self.add_line("Variants with Cohort Relevance: {}".format(len(data)))
self.add_line("Callrates:")
self.add_line("\tMean:\t\t{}".format(var_stats["Callrate"]["mean"]))
self.add_line("\tMax:\t\t{}".format(var_stats["Callrate"]["max"]))
self.add_line("\tStd:\t\t{}".format(var_stats["Callrate"]["std"]))
self.add_line("\t25%:\t\t{}".format(var_stats["Callrate"]["25%"]))
self.add_line("\t50%:\t\t{}".format(var_stats["Callrate"]["50%"]))
self.add_line("\t75%:\t\t{}".format(var_stats["Callrate"]["75%"]))
self.add_line("")
self.add_line("Alternate Allele Counts:")
self.add_line("\tMean:\t\t{}".format(var_stats["AltAlleles"]["mean"]))
self.add_line("\tMax:\t\t{}".format(var_stats["AltAlleles"]["max"]))
self.add_line("\tStd:\t\t{}".format(var_stats["AltAlleles"]["std"]))
self.add_line("\t25%:\t\t{}".format(var_stats["AltAlleles"]["25%"]))
self.add_line("\t50%:\t\t{}".format(var_stats["AltAlleles"]["50%"]))
self.add_line("\t75%:\t\t{}".format(var_stats["AltAlleles"]["75%"]))
self.add_line()
self.add_line("Variant Quality:")
self.add_line("\tMean:\t\t{}".format(var_stats["QUAL"]["mean"]))
self.add_line("\tMax:\t\t{}".format(var_stats["QUAL"]["max"]))
self.add_line("\tStd:\t\t{}".format(var_stats["QUAL"]["std"]))
self.add_line("\t25%:\t\t{}".format(var_stats["QUAL"]["25%"]))
self.add_line("\t50%:\t\t{}".format(var_stats["QUAL"]["50%"]))
self.add_line("\t75%:\t\t{}".format(var_stats["QUAL"]["75%"]))
#self.add_line()
#self.add_line("Mapping Quality:")
#self.add_line(str(mq_stats))
#self.add_line()
#self.add_line("Read Depth:")
#self.add_line(str(dp_stats))
self.add_line()
self.add_line("Variant Scores")
self.add_line("\tMean:\t\t{}".format(var_stats["SCORE"]["mean"]))
self.add_line("\tMin:\t\t{}".format(min(data["SCORE"])))
self.add_line("\tMax:\t\t{}".format(var_stats["SCORE"]["max"]))
self.add_line("\tStd:\t\t{}".format(var_stats["SCORE"]["std"]))
self.add_line("\t25%:\t\t{}".format(var_stats["SCORE"]["25%"]))
self.add_line("\t50%:\t\t{}".format(var_stats["SCORE"]["50%"]))
self.add_line("\t75%:\t\t{}".format(var_stats["SCORE"]["75%"]))
self.add_line()
self.add_line("Variant Types")
self.add_line(str(vtypes), line_prepend="\t")
# TODO: Summary statistics
self.add_line("VEP Consequences")
self.add_line(str(vep_csq), line_prepend="\t")
self.add_line()
self.add_line("Genes Affected")
unknown_gene_rowname = "unknown"
num_affected = len(genes[genes["GENES"] != unknown_gene_rowname])
self.add_line("\tTotal number affected:\t\t{}".format(num_affected))
self.add_line("\tMost affected Genes:")
self.add_line(str(genes.head(n=10)), line_prepend="\t")
class VersusReport(Report):
def gather(self):
pass
class Sample:
"""
Class to represent a sample with a id, a vcf file, sex and disease
status.
"""
def __init__(self,
sample_id,
vcf_file,
sex,
affected):
self.sample_id = sample_id
self.vcf_file = vcf_file
self.sex = sex
self.affected = affected
class Cohort:
"""
Class to represent a related cohort of Variants from different samples
e.g. a family or healthy/diseased study.
input_file : cli.InputFile Representation of the user input
"""
def __init__(self,
cohort_id,
input_files=None,
filters=[],
regions=[],
info_rules=[],
use_database=False,
extra_threads=0,
build_cohort=False):
self.cohort_id = cohort_id
self.vcf_file = self.cohort_id + ".vcf.gz"
# and instance of a VCFWrapper
self.variants = None
self.samples = []
self.filters = filters
self.regions = regions
self.info_rules = info_rules
        self.build_cohort = build_cohort
self.use_database = use_database
self.extra_threads = extra_threads
@classmethod
def from_vcf(cls,
cohort_id,
file,
outfile="",
use_database=False):
"""
Instantiate a Cohort from a single multisample vcf file.
Assumes the vcf_file to contain a single cohort.
vcf_file : String representing the path to a vcf_file
"""
if not isinstance(file, InputFile):
file = InputFile(file)
if file.exists and file.is_vcf:
info("Loading VCF {}".format(file.filepath))
# new Cohort without VCFWrapper and Samples
cohort = cls(cohort_id=cohort_id)
# if an outfile is given
if outfile != "":
cohort.vcf_file = outfile
else:
cohort.vcf_file = file.filepath
# build VCFWrapper
variants = VCFWrapper.wrapper_factory(
files=[file.filepath],
cohort_filename=cohort.vcf_file,
use_database=use_database)
# extract sample ids from wrapper object
sample_ids = variants.get_sample_names()
# add VCFWrapper and Sample objects to the new cohort
cohort.variants = variants
for sid in sample_ids:
cohort.add_sample(Sample(sample_id=sid,
vcf_file=file.filepath,
sex="NA",
affected=False))
return cohort
else:
warning("Invalid VCF Input: " +
"File doesn't exist or not in VCF Format")
return None
@classmethod
def from_tsv(cls,
file,
build_cohort=True,
extra_threads=0,
use_database=False):
"""
Instantiate a Cohort from a tsv file.
tsv_file : String representing the path to a tsv file.
"""
        if not isinstance(file, InputFile):
            input_file = InputFile(file)
        else:
            input_file = file
cohorts = []
if input_file.exists and input_file.is_tsv:
info("Loading Cohorts from TSV file {}"
.format(input_file.filepath))
with open(input_file.filepath, "r") as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
# keep track of all used sample ids for duplicate check
used_sids = []
# fetch all data from the tsv
for line in reader:
# skip header lines
if line[0].startswith("#"):
continue
# unpack the line
sample_cohort = line[0]
sample_vcffile = os.path.abspath(line[3])
sample_status = line[1]
sample_sex = line[2]
if len(line) == 5:
# read sample ids
# the fifth column in the tsv is a comma separated list
# of ids.
sample_ids = [s.rstrip(" ").strip(" ")
for s in line[4].split(",")]
else:
# get Sample id from vcf file
if os.path.exists(sample_vcffile):
sample_ids = get_samples_from_vcf(sample_vcffile)
else:
error("VCF file listed in {} not found: {}"
.format(input_file.filepath,
sample_vcffile))
while sample_ids:
sid = sample_ids.pop()
unique_sid = cls.get_unique_sid(
sid, used_sids)
# reserve the sample_id that was constructed by adding
# it to the list of unique sample_ids for this cohort
used_sids.append(unique_sid)
# instatiate the sample
sample = Sample(vcf_file=sample_vcffile,
sample_id=unique_sid,
affected=sample_status,
sex=sample_sex)
# if the cohort exists, add the sample object to it
for c in cohorts:
if c.cohort_id == sample_cohort:
c.add_sample(sample)
break
# else create a new cohort, add the sample to it,
# and add the new cohort to the list of cohorts
else:
cohort = cls(sample_cohort,
build_cohort=build_cohort,
extra_threads=extra_threads)
cohort.add_sample(sample)
cohorts.append(cohort)
else:
warning("Invalid TSV input: " +
"File doesn't exist or not in .tsv format")
if build_cohort:
if extra_threads:
# build all cohorts at the same time
pass
else:
for cohort in cohorts:
time_build_start = time.time()
cohort.build()
time_build_stop = time.time()
duration = (time_build_stop - time_build_start) / 60
info("Cohort building of {} completed in {} min".format(
cohort.cohort_id, duration))
if cohorts:
info("{} Samples were loaded into {} Cohorts"
.format(len(used_sids), len(cohorts)))
else:
warning("No Cohorts were build!")
return cohorts
@classmethod
def get_unique_sid(cls, sid, used_sids):
"""
Finds and returns a unique sample id
"""
# find a unique sample ID for that cohort
# If a sample id already exists inside a cohort,
# a unique prefix of the form "<unique-int>:" is
# prepended.
# This mimicks the behavior of vcftools for dealing
# with duplicate ids.
unique_prefix = ""
unique_sid = unique_prefix + sid
# try all prefixes until one has not been used before
while (unique_sid in used_sids):
# construct a maybe unique prefix
# A: No prefix used yet
if unique_prefix == "":
unique_prefix = "2:"
# B: Or by incrementing the previous prefix
else:
unique_id = int(unique_prefix.strip(":")) + 1
unique_prefix = str(unique_id) + ":"
unique_sid = unique_prefix + sid
if sid != unique_sid:
warning("Sample ID '{}' was changed to the unique ID '{}'"
.format(sid, unique_sid))
return unique_sid
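    # Illustrative example (assumed behaviour, mirroring vcftools): registering
    # the id "sampleA" three times yields "sampleA", "2:sampleA" and "3:sampleA".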
def get_sample_ids(self):
"""
Return a list with the ids of all samples
"""
return [s.sample_id for s in self.samples]
def get_affected_sample_ids(self):
"""
Returns a list with the ids of all affected samples
"""
affected = []
for s in self.samples:
if s.affected:
                affected.append(s.sample_id)
return affected
def get_unaffected_sample_ids(self):
"""
Returns a list with the ids of all unaffected samples
"""
unaffected = []
for s in self.samples:
            if not s.affected:
                unaffected.append(s.sample_id)
        return unaffected
def add_sample(self, sample):
"""
Add sample object to representation of cohort
sample : Sample representing a sample that should be added
"""
self.samples.append(sample)
def add_filter(self, fltr):
"""
Add a filter of type Filter to the Cohort for building
"""
self.filters.append(fltr)
def add_filters(self, fltrs):
"""
Add a set of filters to the Cohort for building
"""
for fltr in fltrs:
self.filters.append(fltr)
def apply_filters(self, filters=[]):
"""
Apply all filters.
"""
if not filters:
filters = self.filters
time_start = time.time()
self.variants.apply(filters)
time_stop = time.time()
duration = (time_stop - time_start) / 60
info("Filtering of {} was completed in {} minutes".format(
self.cohort_id, duration))
def filter(self, filters=[]):
"""
Query based on all filters.
Returns an iterable of variants that passed the filter
"""
return self.variants.query(filters=filters)
def build(self):
"""
Initialize the underlying wrapper object
"""
filenames = [s.vcf_file for s in self.samples]
self.variants = VCFWrapper.wrapper_factory(
filenames,
self.vcf_file,
use_database=self.use_database)
self.apply_filters(filters=self.filters)
def intersect(self,
alt_ratio,
call_rate,
sample_groups=[]):
"""
Intersect all samples in this cohort
"""
if not sample_groups:
# create one sample group containing all samples
sample_groups = [
[s.sample_id for s in self.samples]
]
info("Computing intersection of {}".format(sample_groups))
self.variants.intersect(sample_groups, alt_ratio, call_rate)
def healthy_vs_diseased(self,
healthy_ar=1,
healthy_cr=1,
diseased_ar=0,
diseased_cr=0.5):
"""
Perform a healthy versus diseased analysis of the samples in this
cohort.
"""
        # healthy samples are the unaffected ones, diseased samples the affected ones
        healthy = list(filter(lambda s: not s.affected, self.samples))
        diseased = list(filter(lambda s: s.affected, self.samples))
self.variants.subtract(minuend=diseased,
subtrahend=healthy,
call_rate1=healthy_cr,
alt_ratio1=healthy_ar,
call_rate2=diseased_cr,
alt_ratio2=diseased_ar)
def versus(self,
cohort):
"""
Versus Analysis of two cohort objects
"""
pass
def gene_list(self,
outfile=""):
"""
Collects a list of genes and the number of variations that occur
in each gene.
Writes the List to a file
"""
# currently, VEP Annotations are used to determine the gene name
parser = self.variants.vep_parser
if not parser:
warning("Gene List can't be build without VEP Annotations")
return None
else:
genes = {}
for record in self.variants:
# get the annotation string from the record
ann_string = self.variants. \
get_var_info(record,
field=parser.INFO_COL_NAME)
# parse the string to obtain a dictionary representation
# for every variant in the record as a list
csq_annotations = parser.parse_annotation(ann_string)
# it is assumed that the gene name is the same for all
# variants, given that they all occur at the same
# chromosome and position
csq_ann = csq_annotations[0]
gene_name = csq_ann.get("Gene", "unknown")
count = genes.get(gene_name, 0) + 1
genes[gene_name] = count
df = pd.DataFrame(dict(GENES=list(genes.keys()),
VARIANTS=list(genes.values())))
df.sort_values(by="VARIANTS", inplace=True, ascending=False)
if not outfile:
outfile = self.cohort_id + "_genelist.tsv"
df.to_csv(outfile, sep="\t")
return df
def to_dataframe(self):
"""
Build a Dataframe from the underlying vcf.
This is more useful for plotting
"""
fields = ["variants/REF",
"variants/ALT",
"variants/CHROM",
"variants/POS",
"variants/QUAL",
"calldata/GT"]
data = allel.read_vcf(self.variants.cohort_filename,
fields=fields)
if data is None or \
not isinstance(data, dict):
warning("Missing Variant data: " +
"No or incomplete data could be read from {}"
.format(self.variants.cohort_filename))
return None
try:
df = pd.DataFrame(dict(CHROM=data["variants/CHROM"],
POS=data["variants/POS"],
QUAL=data["variants/QUAL"]))
g = allel.GenotypeArray(data["calldata/GT"])
        except KeyError as e:
            warning("Missing Variant data ({}): ".format(e) +
                    "Not all required data could be read from {}"
                    .format(self.variants.cohort_filename))
            return None
gn = g.to_n_alt()
df["AltAlleles"] = [sum(vars) for vars in gn]
df["Callrate"] = [(g.n_samples - list(vars).count(0)) / g.n_samples
for vars in gn]
# compute variant score as the log product of their Quality and Callrate
df["SCORE"] = [math.log2(qual * cr) for qual, cr in
zip(df["QUAL"], df["Callrate"])]
        # make the chromosome column an ordered categorical so variants can be
        # sorted genome-wide
        df["CHROM"] = df["CHROM"].astype("category")
        chromnames_ordered = order_chroms(df["CHROM"].unique())
        df["CHROM"] = df["CHROM"] \
            .cat.reorder_categories(chromnames_ordered, ordered=True)
        # sort by chromosome first, then by position within each chromosome
        df = df.sort_values(["CHROM", "POS"])
        # add an index column
        df["IDX"] = range(len(df))
return df
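    # The frame returned above has one row per record with the columns CHROM,
    # POS, QUAL, AltAlleles, Callrate, SCORE and IDX, which is what the
    # plotting helpers below rely on.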
def manhattan(self,
df=None,
outfile="",
only_data=False,
ax=None,
save_figure=True):
"""
Manhattan plot or dataframe for the cohort.
"""
if df is None or \
type(df) != pd.DataFrame or \
df.empty:
df = self.to_dataframe()
df_grouped = df.groupby("CHROM")
if not only_data:
# these colors will be repeated
colors = get_color_map(23)
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
save_figure = False
x_labels = []
x_labels_pos = []
for i, (name, group) in enumerate(df_grouped):
if not "Y" in str(name).upper():
group.plot(kind='scatter',
x='IDX',
y='SCORE',
color=colors(i),
ax=ax)
x_labels.append(name)
x_labels_pos.append((group.IDX.iloc[-1] -
(group.IDX.iloc[-1] -
group.IDX.iloc[0]) / 2))
ax.set_xticks(x_labels_pos)
ax.set_xticklabels(x_labels)
# roate the ticks, so each one is nicely readable
for label in ax.get_xticklabels():
label.set_rotation(50)
ax.set_xlim([0, len(df)])
ax.set_xlabel("Chromosome")
ax.set_ylabel("Score")
ax.set_title("Manhattan Distribution")
# widen the figure
fig.set_size_inches(15, 7)
# fix cutoff labels
fig.tight_layout()
if not outfile:
outfile = self.cohort_id + "_manhattan.png"
if save_figure:
fig.savefig(outfile)
return df
def vtypes_dist(self,
outfile="",
only_data=False,
ax=None,
save_figure=True):
"""
Manhattan plot.
"""
vtypes_hist = dict()
# chromosomes listed in the vcf
chroms = set()
# vep parser for parse the VEP Annotation in the INFO Column
parser = self.variants.vep_parser
vep_annotated = True if parser else False
for var in self.variants:
var_chrom = self.variants.get_var_chrom(var)
# save the chromosome the record references for plotting
chroms.add(var_chrom)
if vep_annotated:
# read the raw VEP annotation string from the record INFO
ann_string = self.variants. \
get_var_info(var,
field=parser.INFO_COL_NAME)
# parse the raw annotation string to obtain a dictionary
# of <vep-annotation-field>:<vep-annotation> for each
# variant in the record as a list.
record_annotations = parser.parse_annotation(ann_string)
for var_annotation in record_annotations:
var_type = var_annotation.get("VARIANT_CLASS", "unknown")
# count the variants number
chrom_hist = vtypes_hist.get(var_type, dict())
count = chrom_hist.get(var_chrom, 0) + 1
chrom_hist[var_chrom] = count
vtypes_hist[var_type] = chrom_hist
# read variant type from the VCFWrapper
else:
# supports only one consensus variant type
var_type = self.variants.get_var_type(var)
# count the variants number
chrom_hist = vtypes_hist.get(var_type, dict())
count = chrom_hist.get(var_chrom, 0)
chrom_hist[var_chrom] = count + 1
vtypes_hist[var_type] = chrom_hist
chroms = order_chroms(list(chroms))
if not only_data:
# plotting
if not outfile:
outfile = self.cohort_id + "_vartypes.png"
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
save_figure = False
idx = range(len(chroms))
bar_width = 0.8
bar_below = []
labels = []
for vtype in vtypes_hist.keys():
labels.append(vtype)
freqs = [vtypes_hist[vtype].get(c, 0) for c in chroms]
if bar_below:
ax.bar(idx,
freqs,
width=bar_width,
bottom=bar_below)
bar_below = [sum(x) for x in zip(freqs, bar_below)]
else:
ax.bar(idx, freqs, width=bar_width)
bar_below = freqs
ax.set_ylabel("Frequency")
ax.set_xlabel("Chromosome")
            ax.set_xticks(list(idx))
            ax.set_xticklabels(chroms, rotation=50)
ax.legend(labels)
ax.set_title("Variant Types by Chromosome")
# fix cutoff labels
fig.tight_layout()
if save_figure:
fig.savefig(outfile)
        return pd.DataFrame(vtypes_hist)
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 29 10:57:09 2021
@author: luis
"""
# Multiple linear regression in Spyder with Python
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
print("LIBRARIES IMPORTED")
# Import the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
print("DATASET IMPORTED")
# Encode categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.compose import ColumnTransformer
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])  # "3" is the index of the column to encode
onehotencoder = make_column_transformer((OneHotEncoder(), [3]), remainder = "passthrough")
#X = onehotencoder.fit_transform(X)  # original line; it does NOT cast the result to float64
X = np.array(onehotencoder.fit_transform(X), dtype=np.float64)
print("X ENCODED AS FLOAT64")
## Alternative statement to cast to float64
#ct = ColumnTransformer([('one_hot_encoder', OneHotEncoder(categories='auto'), [0])],remainder='passthrough')
#X = np.array(ct.fit_transform(X), dtype=np.float64)
#print("CONVERTED TO FLOAT64")
## ENCODE THE TARGET y
#labelencoder_y = LabelEncoder()
#y = labelencoder_y.fit_transform(y)
# Avoid the dummy variable trap
X = X[:, 1:]  # drop one of the dummy variables
print("DUMMY VARIABLES REMOVED")
# Split the dataset into a training set and a test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print("DATASET SPLIT FOR TRAINING AND TESTING")
# Feature scaling
#from sklearn.preprocessing import StandardScaler
#sc_X = StandardScaler()
#X_train = sc_X.fit_transform(X_train)
#X_test = sc_X.transform(X_test)
# Fit the multiple linear regression model to the training set
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(X_train, y_train)
print("REGRESSION MODEL READY!!!")
# Predict the results on the test set
y_pred = regression.predict(X_test)
print("TEST SET PREDICTIONS GENERATED")
# NEW: comparison of results between y_test and y_pred
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
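# Optional evaluation (illustrative sketch, not part of the original script);
# it only assumes scikit-learn, which is already used above.
from sklearn.metrics import r2_score, mean_absolute_error
print("R2 on the test set:", r2_score(y_test, y_pred))
print("MAE on the test set:", mean_absolute_error(y_test, y_pred))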
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents an period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
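    # Illustrative examples (not part of the original source): Period('4Q2005')
    # infers a quarterly frequency from the string, while
    # Period(year=2005, quarter=3, freq='Q') builds a quarterly period from its
    # components.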
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq :
how :
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
        base, mult = _gfc(self.freq)
        # assumed to mirror the year/month/qyear accessors above
        return lib.get_period_quarter(self.ordinal, base, mult)
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
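    # the "records" and "values" orients do not serialize the index, and
    # "values" additionally drops the column labels, so `expected` is adjusted
    # before comparing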
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = | pd.read_json(v12_json) | pandas.read_json |
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
    # if we are dropping 2 HWs, the right strategy is to keep only the
    # highest-scoring homework for each student: hw03 for A1 and hw01 for A2
# when
actual = gradebook.drop_lowest(2, within=homeworks)
# then
assert not actual.dropped.iloc[0, 2]
assert not actual.dropped.iloc[1, 0]
assert list(actual.dropped.sum(axis=1)) == [2, 2]
assert_gradebook_is_sound(actual)
def test_drop_lowest_counts_lates_as_zeros():
# given
columns = ["hw01", "hw02"]
p1 = pd.Series(data=[10, 5], index=columns, name="A1")
p2 = pd.Series(data=[10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.iloc[0, 0] = True
# since A1's perfect homework is late, it should count as zero and be
# dropped
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.iloc[0, 0]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_ignores_assignments_already_dropped():
# given
columns = ["hw01", "hw02", "hw03", "hw04"]
p1 = pd.Series(data=[9, 0, 7, 0], index=columns, name="A1")
p2 = pd.Series(data=[10, 10, 10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10, 10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw02"] = True
gradebook.dropped.loc["A1", "hw04"] = True
    # since A1's zero-score homeworks (hw02 and hw04) are already dropped, dropping
    # one more should drop the lowest remaining homework: this will be hw03
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.loc["A1", "hw04"]
assert actual.dropped.loc["A1", "hw02"]
assert actual.dropped.loc["A1", "hw03"]
assert list(actual.dropped.sum(axis=1)) == [3, 1]
assert_gradebook_is_sound(actual)
# give_equal_weights()
# -----------------------------------------------------------------------------
def test_give_equal_weights_on_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.give_equal_weights(within=homeworks)
# then
assert actual.maximums.loc["hw01"] == 1
assert actual.maximums.loc["hw02"] == 1
assert actual.maximums.loc["hw03"] == 1
assert actual.maximums.loc["lab01"] == 20
assert actual.points.loc["A1", "hw01"] == 1 / 2
assert actual.points.loc["A1", "hw02"] == 30 / 50
# score()
# -----------------------------------------------------------------------------
def test_score_on_simple_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [121 / 152, 24 / 152], atol=1e-6)
def test_score_counts_lates_as_zero():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.loc["A1", "hw01"] = True
gradebook.late.loc["A1", "hw03"] = True
gradebook.late.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [30 / 152, 9 / 152], atol=1e-6)
def test_score_ignores_dropped_assignments():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw01"] = True
gradebook.dropped.loc["A1", "hw03"] = True
gradebook.dropped.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [30 / 50, 9 / 52], atol=1e-6)
# total()
# -----------------------------------------------------------------------------
def test_total_on_simple_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [121, 24], atol=1e-6)
assert np.allclose(available.values, [152, 152], atol=1e-6)
def test_total_counts_lates_as_zero():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.loc["A1", "hw01"] = True
gradebook.late.loc["A1", "hw03"] = True
gradebook.late.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [30, 9], atol=1e-6)
assert np.allclose(available.values, [152, 152], atol=1e-6)
def test_total_ignores_dropped_assignments():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw01"] = True
gradebook.dropped.loc["A1", "hw03"] = True
gradebook.dropped.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [30, 9], atol=1e-6)
assert np.allclose(available.values, [50, 52], atol=1e-6)
# unify_assignments()
# -----------------------------------------------------------------------------
def test_unify_assignments():
"""test that points / maximums are added across unified assignments"""
# given
columns = ["hw01", "hw01 - programming", "hw02", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
HOMEWORK_01_PARTS = gradebook.assignments.starting_with("hw01")
# when
result = gradebook.unify_assignments({"hw01": HOMEWORK_01_PARTS})
# then
assert len(result.assignments) == 3
assert result.maximums["hw01"] == 52
assert result.points.loc["A1", "hw01"] == 31
assert result.maximums.shape[0] == 3
assert result.late.shape[1] == 3
assert result.dropped.shape[1] == 3
assert result.points.shape[1] == 3
def test_unify_assignments_with_multiple_in_dictionary():
"""test that points / maximums are added across unified assignments"""
# given
columns = ["hw01", "hw01 - programming", "hw02", "hw02 - testing"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
HOMEWORK_01_PARTS = gradebook.assignments.starting_with("hw01")
HOMEWORK_02_PARTS = gradebook.assignments.starting_with("hw02")
# when
result = gradebook.unify_assignments(
{"hw01": HOMEWORK_01_PARTS, "hw02": HOMEWORK_02_PARTS}
)
# then
assert len(result.assignments) == 2
assert result.maximums["hw01"] == 52
assert result.points.loc["A1", "hw01"] == 31
assert result.maximums["hw02"] == 120
assert result.points.loc["A1", "hw02"] == 110
assert result.maximums.shape[0] == 2
assert result.late.shape[1] == 2
assert result.dropped.shape[1] == 2
assert result.points.shape[1] == 2
def test_unify_assignments_with_callable():
"""test that points / maximums are added across unified assignments"""
# given
columns = ["hw01", "hw01 - programming", "hw02", "hw02 - testing"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
HOMEWORK_01_PARTS = gradebook.assignments.starting_with("hw01")
HOMEWORK_02_PARTS = gradebook.assignments.starting_with("hw02")
def assignment_to_key(s):
return s.split("-")[0].strip()
# when
result = gradebook.unify_assignments(assignment_to_key)
# then
assert len(result.assignments) == 2
assert result.maximums["hw01"] == 52
assert result.points.loc["A1", "hw01"] == 31
assert result.maximums["hw02"] == 120
assert result.points.loc["A1", "hw02"] == 110
assert result.maximums.shape[0] == 2
assert result.late.shape[1] == 2
assert result.dropped.shape[1] == 2
assert result.points.shape[1] == 2
def test_unify_considers_new_assignment_late_if_any_part_late():
# given
columns = ["hw01", "hw01 - programming", "hw02", "lab01"]
p1 = | pd.Series(data=[1, 30, 90, 20], index=columns, name="A1") | pandas.Series |
import pandas
from bokeh.plotting import figure, gridplot
from bokeh.embed import components
from bokeh.models import HoverTool, TapTool, OpenURL, WheelZoomTool
from bokeh.models import GMapPlot, GMapOptions
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
from cached_property import cached_property_with_ttl
import numpy as np
import math
import pyproj
x_range, y_range = ([-15187814, -6458032], [2505715, 6567666])
# function to convert GCS WGS84 coordinates to Web Mercator
def wgs84_to_web_mercator(df, lon="long", lat="lat"):
k = 6378137
df["x"] = df[lon] * (k * np.pi/180.0)
df["y"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k
return df
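def _demo_wgs84_to_web_mercator():
    # Usage sketch added for illustration (not part of the original module): San
    # Francisco and New York in WGS84 degrees become Web Mercator metres, the unit
    # used by the x_range/y_range tile extents above.
    demo = pandas.DataFrame({"long": [-122.4194, -74.0060], "lat": [37.7749, 40.7128]})
    wgs84_to_web_mercator(demo)
    return demo[["x", "y"]]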
def merc(lo, la):
    # The original call used pyproj with `google_projection`/`project_projection`
    # objects never defined in this module; a WGS84 -> Web Mercator transformer
    # (an assumption consistent with the tile extents above) keeps it self-contained.
    transformer = pyproj.Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)
    return transformer.transform(lo, la)
class Covid(object):
def __init__(self, db_engine):
self.db = db_engine
def update(self):
# invalidate the cache
if 'df' in self.__dict__.keys():
del self.__dict__['df']
# get all data
Confirmed = pandas.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
Deaths = pandas.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
Recovered = | pandas.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
csv_path = "./tweets.csv"
save_path = "./fixed_tweets.csv"
df = | pd.read_csv(csv_path, header=None) | pandas.read_csv |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
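def _list_dataset_names():
    # Illustrative helper (not part of the original module): all_datasets() discovers
    # every RegressionDataset subclass in this module via inspect, so newly added
    # dataset classes show up here without any registration step.
    return sorted(cls.__name__ for cls in all_datasets())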
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
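def _abalone_usage_example(root="datasets"):
    # Usage sketch (assumptions: network access and a writable local `root` directory;
    # not part of the original module). The data is downloaded on first use; .x and .y
    # hold the normalized features and targets of the requested split.
    train = Abalone(root, split=TRAIN)
    return train.x.shape, train.y.shape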
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
        y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
        df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x)  # comma decimals -> float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
    Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Auto+MPG).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
        self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names = ["symboling", "normalized-losses", "make", "fuel-type", " aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", " length", "width", " height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", " fuel-system", " bore", "stroke", " compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
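    @staticmethod
    def untransform_target(y):
        # Convenience helper added for illustration (not part of the original class):
        # targets above are stored as log(comment_count + 0.01), so this is the
        # inverse mapping back to raw comment counts.
        return np.exp(y) - 0.01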
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
        # data.txt is whitespace-delimited with no header; the original 'dteday' index column does not exist in this dataset
        df = pd.read_csv(file_path, sep='\s+', header=None)
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
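    @staticmethod
    def _patients_are_disjoint(df_a, df_b):
        # Illustrative check (not part of the original class): the patientId-based
        # split above keeps every patient entirely within one partition, which this
        # helper makes explicit for two raw partitions.
        return not (set(df_a.patientId) & set(df_b.patientId))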
class ForecastingOrders(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, making the number of pages less than the samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy in terms
of distributions of the features and targets. Training and validation splits are
also made to ensure that the same page is not in both splits. This makes the distributions
of features in training and validation splits vary to a relatively large extent, possible
because the number of pages are not that many, while the features are many.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class Facebookmetrics (RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+metrics).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class ForestFires(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'forestfires.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class GNFUV(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00452/GNFUV USV Dataset.zip'
download_unzip(url, dataset_path)
dfs = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
dfs.append(pd.read_csv(file_path, header=None))
class GNFUV_2(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data+Set+2).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00466/CNFUV_Datasets.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class Greenhouse_Gas_Observing_Network (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Greenhouse+Gas+Observing+Network).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00328/ghg_data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None, sep='\s+'))
class Hungarian_Chickenpox_Cases (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Hungarian+Chickenpox+Cases).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00580/hungary_chickenpox.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, index_col='Date', parse_dates=True))
class IIWA14_R820_Gazebo_Dataset_10Trajectories(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/IIWA14-R820-Gazebo-Dataset-10Trajectories).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00574/IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, header=None)
class Metro_Interstate_Traffic_Volume(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Metro_Interstate_Traffic_Volume.csv.gz'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Economy(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Microsoft(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Palestine(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Economy(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Microsoft(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_News_Final(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'News_Final.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Online_Video_Characteristics_and_Transcoding_Time(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Online+Video+Characteristics+and+Transcoding+Time+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00335/online_video_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'README.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class OnlineNews(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'OnlineNewsPopularity', 'OnlineNewsPopularity.csv')
df = pd.read_csv(file_path)
df.drop(columns=['url', ' timedelta'], inplace=True)
y_columns = [' shares']
df[y_columns[0]] = np.log(df[y_columns[0]])
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Parkinson(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/parkinsons).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path: str = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/' \
'parkinsons/telemonitoring/parkinsons_updrs.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path)
y_columns = ['motor_UPDRS', 'total_UPDRS']
df_train_valid = df[df['subject#'] <= 30]
df_test = deepcopy(df[df['subject#'] > 30])
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'subject#')
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res.drop(columns='subject#', inplace=True)
self.x, self.y = xy_split(df_res, y_columns)
class Physicochemical_Properties_of_Protein_Tertiary_Structure(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Physicochemical+Properties+of+Protein+Tertiary+Structure).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large a fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CASP.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
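# Minimal assumed completion of this constructor, mirroring the split_normalize_sequence
# pattern used by the OnlineNews class above; the target column name 'RMSD' is an
# assumption about the CASP.csv layout and is not stated in this module.
y_columns = ['RMSD']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)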
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
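# Run every test in this module serially; concurrent access to the same HDF5 files
# under pytest-xdist is not safe.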
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
msg = "unable to collapse Joint Filters"
# not implemented
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
"""A collection of Methods to support the Change History feature in DFCX."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
import numpy as np
import pandas as pd
import requests
from dfcx_scrapi.core.scrapi_base import ScrapiBase
# logging config
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class ChangeHistory(ScrapiBase):
"""Tools class that contains methods to support Change History feature."""
def __init__(
self,
creds_path: str = None,
creds_dict: Dict = None,
creds = None,
scope = False,
agent_id = None
):
super().__init__(
creds_path=creds_path,
creds_dict=creds_dict,
creds=creds,
scope=scope
)
if agent_id:
self.agent_id = agent_id
def get_change_history(self, agent_id: str = None):
"""Extract the Change History log for a single DFCX Agent.
Args:
agent_id, the formatted CX Agent ID
Returns:
logs, a List of logs from the Agent ID
"""
if not agent_id:
agent_id = self.agent_id
location = agent_id.split("/")[3]
if location != "global":
base_url = "https://{}-dialogflow.googleapis.com/v3alpha1".format(
location
)
else:
base_url = "https://dialogflow.googleapis.com/v3alpha1"
url = "{0}/{1}/changelogs".format(base_url, agent_id)
headers = {"Authorization": "Bearer {}".format(self.token)}
# Make REST call
results = requests.get(url, headers=headers)
results.raise_for_status()
res = results.json()
logs = []
for log in res["changelogs"]:
logs.append(log)
next_token = res.get("nextPageToken", None)
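# Page through any remaining results until the API stops returning a nextPageToken.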
while next_token is not None:
results = requests.get(
url, headers=headers, params={"page_token": next_token}
)
res = results.json()
for log in res["changelogs"]:
logs.append(log)
if "nextPageToken" in res:
next_token = res["nextPageToken"]
else:
next_token = None
print("All done!")
return logs
def change_history_to_dataframe(self, agent_id):
"""Format the output of get_change_history into a Pandas Dataframe.
Args:
agent_id, the formatted CX Agent ID
Returns:
final_dataframe, the final dataframe output of the formatted logs
"""
change_logs = self.get_change_history(agent_id)
final_dataframe = pd.DataFrame.from_records(data=change_logs)
return final_dataframe
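# Minimal usage sketch (not part of the original module): the credentials path and
# agent ID below are placeholders illustrating the expected formats.
if __name__ == "__main__":
    history = ChangeHistory(
        creds_path="/path/to/service_account_creds.json",
        agent_id="projects/my-project/locations/global/agents/my-agent-uuid",
    )
    changelog_df = history.change_history_to_dataframe(history.agent_id)
    print(changelog_df.head())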
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from eemeter.features import (
compute_occupancy_feature,
compute_temperature_features,
compute_temperature_bin_features,
compute_time_features,
compute_usage_per_day_feature,
estimate_hour_of_week_occupancy,
get_missing_hours_of_week_warning,
fit_temperature_bins,
merge_features,
)
from eemeter.segmentation import segment_time_series
def test_compute_temperature_features_no_freq_index(
il_electricity_cdd_hdd_billing_monthly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data.index.freq = None
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_no_meter_data_tz(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
meter_data.index = meter_data.index.tz_localize(None)
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_no_temp_data_tz(
il_electricity_cdd_hdd_billing_monthly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.tz_localize(None)
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_hourly_temp_mean(il_electricity_cdd_hdd_hourly):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(meter_data.index, temperature_data)
assert list(sorted(df.columns)) == [
"n_hours_dropped",
"n_hours_kept",
"temperature_mean",
]
assert df.shape == (2952, 3)
assert round(df.temperature_mean.mean()) == 62.0
def test_compute_temperature_features_hourly_hourly_degree_days(
il_electricity_cdd_hdd_hourly, snapshot
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
assert df.shape == (2952, 6)
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_hourly_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_hourly, snapshot
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (2952, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_hourly_daily_degree_days_fail(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="daily",
)
def test_compute_temperature_features_hourly_daily_missing_explicit_freq(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
meter_data.index.freq = None
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="daily",
)
def test_compute_temperature_features_hourly_bad_degree_days(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_hourly_data_quality(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (2952, 4)
assert list(sorted(df.columns)) == [
"n_hours_dropped",
"n_hours_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 1.0
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_daily_temp_mean(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (810, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_daily_daily_degree_days(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_daily_degree_days_use_mean_false(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_hourly_degree_days(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_bad_degree_days(
il_electricity_cdd_hdd_daily
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_daily_data_quality(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (810, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 23.99
assert round(df.temperature_null.mean(), 2) == 0.00
def test_compute_temperature_features_billing_monthly_temp_mean(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (27, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_monthly_daily_degree_days(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_daily_degree_days_use_mean_false(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_hourly_degree_days(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_bad_degree_day_method(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_billing_monthly_data_quality(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (27, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 729.23
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_billing_bimonthly_temp_mean(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (14, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_bimonthly_daily_degree_days(
il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (14, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_bimonthly_hourly_degree_days(
il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (14, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_bimonthly_bad_degree_days(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_billing_bimonthly_data_quality(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (14, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 1478.77
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_shorter_temperature_data(
il_electricity_cdd_hdd_daily
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
# drop some data
temperature_data = temperature_data[:-200]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (810, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 43958.0
def test_compute_temperature_features_shorter_meter_data(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
# drop some data
meter_data = meter_data[:-10]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (800, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 43904.0
# ensure last row is NaN'ed
assert pd.isnull(df.iloc[-1].n_days_kept)
def test_compute_temperature_features_with_duplicated_index(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
# these are specifically formed to give a less readable error if
# duplicates are not caught
meter_data = meter_data.append(meter_data).sort_index()
temperature_data = temperature_data.iloc[8000:]
with pytest.raises(ValueError) as excinfo:
compute_temperature_features(meter_data.index, temperature_data)
assert str(excinfo.value) == "Duplicates found in input meter trace index."
def test_compute_temperature_features_empty_temperature_data():
index = pd.DatetimeIndex([], tz="UTC", name="dt", freq="H")
temperature_data = pd.Series({"value": []}, index=index).astype(float)
result_index = temperature_data.resample("D").sum().index
meter_data_hack = pd.DataFrame({"value": 0}, index=result_index)
df = compute_temperature_features(
meter_data_hack.index,
temperature_data,
heating_balance_points=[65],
cooling_balance_points=[65],
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (0, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 0
def test_compute_temperature_features_empty_meter_data():
index = pd.DatetimeIndex([], tz="UTC", name="dt", freq="H")
temperature_data = pd.Series({"value": 0}, index=index)
result_index = temperature_data.resample("D").sum().index
meter_data_hack = pd.DataFrame({"value": []}, index=result_index)
meter_data_hack.index.freq = None
df = compute_temperature_features(
meter_data_hack.index,
temperature_data,
heating_balance_points=[65],
cooling_balance_points=[65],
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (0, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 0
def test_merge_features():
index = pd.date_range("2017-01-01", periods=100, freq="H", tz="UTC")
features = merge_features(
[
pd.Series(1, index=index, name="a"),
pd.DataFrame({"b": 2}, index=index),
pd.DataFrame({"c": 3, "d": 4}, index=index),
]
)
assert list(features.columns) == ["a", "b", "c", "d"]
assert features.shape == (100, 4)
assert features.sum().sum() == 1000
assert features.a.sum() == 100
assert features.b.sum() == 200
assert features.c.sum() == 300
assert features.d.sum() == 400
assert features.index[0] == index[0]
assert features.index[-1] == index[-1]
def test_merge_features_empty_raises():
with pytest.raises(ValueError):
features = merge_features([])
@pytest.fixture
def meter_data_hourly():
index = pd.date_range("2017-01-01", periods=100, freq="H", tz="UTC")
return pd.DataFrame({"value": 1}, index=index)
def test_compute_usage_per_day_feature_hourly(meter_data_hourly):
usage_per_day = compute_usage_per_day_feature(meter_data_hourly)
assert usage_per_day.name == "usage_per_day"
assert usage_per_day["2017-01-01T00:00:00Z"] == 24
assert usage_per_day.sum() == 2376.0
def test_compute_usage_per_day_feature_hourly_series_name(meter_data_hourly):
usage_per_day = compute_usage_per_day_feature(
meter_data_hourly, series_name="meter_value"
)
assert usage_per_day.name == "meter_value"
@pytest.fixture
def meter_data_daily():
index = pd.date_range("2017-01-01", periods=100, freq="D", tz="UTC")
return pd.DataFrame({"value": 1}, index=index)
def test_compute_usage_per_day_feature_daily(meter_data_daily):
usage_per_day = compute_usage_per_day_feature(meter_data_daily)
assert usage_per_day["2017-01-01T00:00:00Z"] == 1
assert usage_per_day.sum() == 99.0
@pytest.fixture
def meter_data_billing():
index = | pd.date_range("2017-01-01", periods=100, freq="MS", tz="UTC") | pandas.date_range |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import re
from qf_lib.common.enums.expiration_date_field import ExpirationDateField
from qf_lib.common.tickers.tickers import BloombergTicker
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.series.qf_series import QFSeries
class BloombergFutureTicker(FutureTicker, BloombergTicker):
"""Representation of a Future Ticker, designed to be used by the BloombergDataProvider.
Parameters
----------
name: str
Field which contains a name (or a short description) of the FutureTicker.
family_id: str
Used to verify if a specific BloombergTicker belongs to a certain futures family and to create the active
Ticker string, which can be further used by the data provider to download the chain of corresponding Tickers.
The family ID pattern - e.g. for Cotton, an exemplary ticker string is of the following
form: "CTZ9 Comdty". The "Z9" part denotes the month and year codes - this is the only variable part of the
ticker. Thus, in order to verify whether a ticker belongs to the cotton family, the family_id should be of the
form "CT{} Comdty". For all other ticker families, the family_id should likewise be the specific ticker with the
month and year codes replaced by the "{}" placeholder.
N: int
Used to identify which specific Ticker should be considered by the Backtester while using the general
Future Ticker class. For example, N set to 1 denotes the front futures contract.
days_before_exp_date: int
Number of days before the expiration date of each contract at which the “current” specific contract
should be substituted with the next consecutive one.
point_value: int
Used to define the size of the contract.
designated_contracts: str
A string which represents all month codes that are downloaded and stored
in the chain of future contracts. No specific order of letters is required. E.g. providing this
parameter value equal to "HMUZ" would restrict the future chain to only the contracts which expire in
March, June, September and December, even if contracts for any other months exist and are returned by the
BloombergDataProvider get_futures_chain_tickers function.
"""
def __init__(self, name: str, family_id: str, N: int, days_before_exp_date: int, point_value: int = 1,
designated_contracts: str = "FGHJKMNQUVXZ"):
self.designated_contracts = designated_contracts
if not len(designated_contracts) > 0:
raise ValueError("At least one month code should be provided.")
super().__init__(name, family_id, N, days_before_exp_date, point_value)
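# Illustrative usage sketch (not part of the original module; all values below are
# assumed examples, not defaults): constructing a front-month Cotton future ticker
# for the "CT{} Comdty" family described in the class docstring.
#
#     cotton_front = BloombergFutureTicker("Cotton", "CT{} Comdty", N=1,
#                                          days_before_exp_date=5, point_value=500,
#                                          designated_contracts="HMUZ")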
def get_active_ticker(self) -> BloombergTicker:
""" Returns the active ticker. """
specific_ticker_string = self.family_id.format("A")
return BloombergTicker.from_string(specific_ticker_string)
def _get_futures_chain_tickers(self):
"""
Downloads the expiration dates of the futures contracts so that the current futures tickers can be
returned afterwards. It uses the list of month codes of the designated contracts and filters out those
that should not be considered by the future ticker.
"""
futures_chain_tickers_df = self._data_provider.get_futures_chain_tickers(self,
ExpirationDateField.all_dates())[self]
# Take the earliest expiration date across the available date fields for each ticker
futures_chain_tickers = futures_chain_tickers_df.min(axis=1)
futures_chain_tickers = QFSeries(data=futures_chain_tickers.index, index=futures_chain_tickers.values)
futures_chain_tickers.index = | pd.to_datetime(futures_chain_tickers.index) | pandas.to_datetime |
import pandas as pd
from conftest import assert_frame_equal
import numpy as np
from numpy import dtype, nan
import pytest
from pvlib.iotools import crn
from conftest import DATA_DIR
@pytest.fixture
def columns():
return [
'WBANNO', 'UTC_DATE', 'UTC_TIME', 'LST_DATE', 'LST_TIME', 'CRX_VN',
'longitude', 'latitude', 'temp_air', 'PRECIPITATION', 'ghi',
'ghi_flag',
'SURFACE_TEMPERATURE', 'ST_TYPE', 'ST_FLAG', 'relative_humidity',
'relative_humidity_flag', 'SOIL_MOISTURE_5', 'SOIL_TEMPERATURE_5',
'WETNESS', 'WET_FLAG', 'wind_speed', 'wind_speed_flag']
@pytest.fixture
def dtypes():
return [
dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'),
dtype('int64'), dtype('O'), dtype('float64'), dtype('float64'),
dtype('float64'), dtype('float64'), dtype('float64'),
dtype('int64'), dtype('float64'), dtype('O'), dtype('int64'),
dtype('float64'), dtype('int64'), dtype('float64'),
dtype('float64'), dtype('int64'), dtype('int64'), dtype('float64'),
dtype('int64')]
@pytest.fixture
def testfile():
return DATA_DIR / 'CRNS0101-05-2019-AZ_Tucson_11_W.txt'
@pytest.fixture
def testfile_problems():
return DATA_DIR / 'CRN_with_problems.txt'
def test_read_crn(testfile, columns, dtypes):
index = pd.DatetimeIndex(['2019-01-01 16:10:00',
'2019-01-01 16:15:00',
'2019-01-01 16:20:00',
'2019-01-01 16:25:00'],
freq=None).tz_localize('UTC')
values = np.array([
[53131, 20190101, 1610, 20190101, 910, 3, -111.17, 32.24, nan,
0.0, 296.0, 0, 4.4, 'C', 0, 90.0, 0, nan, nan, 24, 0, 0.78, 0],
[53131, 20190101, 1615, 20190101, 915, 3, -111.17, 32.24, 3.3,
0.0, 183.0, 0, 4.0, 'C', 0, 87.0, 0, nan, nan, 1182, 0, 0.36, 0],
[53131, 20190101, 1620, 20190101, 920, 3, -111.17, 32.24, 3.5,
0.0, 340.0, 0, 4.3, 'C', 0, 83.0, 0, nan, nan, 1183, 0, 0.53, 0],
[53131, 20190101, 1625, 20190101, 925, 3, -111.17, 32.24, 4.0,
0.0, 393.0, 0, 4.8, 'C', 0, 81.0, 0, nan, nan, 1223, 0, 0.64, 0]])
expected = | pd.DataFrame(values, columns=columns, index=index) | pandas.DataFrame |
'''
This script extracts training variables from all TensorFlow
event files ("event*"), writes them to pandas
and finally stores them in long format to a CSV file including
all (readable) runs of the logging directory.
The magic "5" assumes there are only the following v.tags:
[lr, loss, acc, val_loss, val_acc]
'''
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from collections import defaultdict
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Get all event* runs from logging_dir subdirectories
# PATH = "C:/Users/egomez/Documents/Projectos/3D-PROTUCEL/IMAGE-PROCESSING/FULL-VIDEOS/mobilenet_lstm_experiment_decoder_5"
def smooth(y, box_pts):
box = np.ones(box_pts) / box_pts
y = np.array(y)
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
def tabulate_events(dpath):
events = ['train', 'validation']
summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in events]
tags = summary_iterators[0].Tags()['scalars']
# for it in summary_iterators:
# assert it.Tags()['scalars'] == tags
out = defaultdict(list)
steps = []
for tag in tags:
if not tag.__contains__('batch'):
steps = [e.step for e in summary_iterators[1].Scalars(tag)]
for event_group in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
assert len(set(e.step for e in event_group)) == 1
out[tag].append([e.value for e in event_group])
return out, steps
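# Hedged usage sketch (the directory path and tag name below are hypothetical): given a
# logging directory containing "train" and "validation" event subfolders, tabulate_events
# returns a dict of tag -> [[train_value, validation_value], ...] together with the steps.
#
#     scalars, steps = tabulate_events("./logs/mobilenet_lstm_run_01")
#     val_loss_smoothed = smooth([pair[1] for pair in scalars["epoch_loss"]], box_pts=5)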
def logs2dataframe(logging_dir):
data_loss = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
def load_and_process(path):
data = pd.read_csv(path)
newdf = (
pd.DataFrame(data)
.rename(columns={"alcohol": "Alc"}) #Abbreviating longer column names
.rename(columns={"free sulfur dioxide": "F.S.D."})
.rename(columns={"total sulfur dioxide": "T.S.D."})
.sort_values("quality", ascending=False) #Sorting by wine Quality to answer our research question
.reset_index(drop=True) #Adjusted Index to match our new sorted dataframe
)
return newdf
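# Example call (the CSV path is hypothetical); the file is expected to contain the standard
# wine-quality columns that are renamed and sorted above.
# wines = load_and_process("../data/raw/winequality-red.csv")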
# In[4]:
def load_and_process2(path):
df = (
| pd.read_csv(path) | pandas.read_csv |
"""
Trading environment class
data: 12/10/2017
author: Tau
"""
from ..datafeed import *
from ..spaces import *
from .utils import *
from ..utils import *
from ..core import Env
import os
import smtplib
from socket import gaierror
from datetime import datetime, timedelta, timezone
from decimal import localcontext, ROUND_UP, Decimal
from time import sleep
import pandas as pd
import empyrical as ec
import optunity as ot
from bokeh.layouts import column
from bokeh.palettes import inferno
from bokeh.plotting import figure, show
from bokeh.models import HoverTool, Legend, Span, Label
from ..exchange_api.poloniex import ExchangeError
# Environments
class TradingEnvironment(Env):
"""
Trading environment base class
"""
## Setup methods
def __init__(self, period, obs_steps, tapi, fiat="USDT", name="TradingEnvironment"):
assert isinstance(name, str), "Name must be a string"
self.name = name
# Data feed api
self.tapi = tapi
# Environment configuration
self.epsilon = dec_zero
self._obs_steps = None
self._period = None
self.pairs = []
self._crypto = []
self._fiat = None
self.tax = {}
# Dataframes
self.obs_df = pd.DataFrame()
self.portfolio_df = pd.DataFrame()
self.action_df = pd.DataFrame()
# Logging and debugging
self.status = {'OOD': False,
'Error': False,
'ValueError': False,
'ActionError': False,
'NotEnoughFiat': False}
if not os.path.exists('./logs'):
os.makedirs('./logs')
# self.logger = Logger(self.name, './logs/')
Logger.info("Trading Environment initialization",
"Trading Environment Initialized!")
# Setup
self.period = period
self.obs_steps = obs_steps
# Init attributes for key sharing
self.results = None
self.action_space = None
self.observation_space = None
self.init_balance = None
self._symbols = []
self.add_pairs(self.tapi.pairs)
self.fiat = fiat
self.set_action_space()
self.reset_benchmark()
self.setup()
## Env properties
@property
def obs_steps(self):
return self._obs_steps
@obs_steps.setter
def obs_steps(self, value):
assert isinstance(value, int), "Obs steps must be an integer."
assert value >= 3, "Obs steps must be >= 3. Value: %s" % str(value)
self._obs_steps = value
@property
def period(self):
return self._period
@period.setter
def period(self, value):
assert isinstance(value, int) and value >= 1,\
"Period must be an integer >= 1."
self._period = value
@property
def symbols(self):
if self._symbols:
return self._symbols
else:
symbols = []
for pair in self.pairs:
symbols.append(pair.split('_')[1])
symbols.append(self._fiat)
self._symbols = tuple(symbols)
return self._symbols
@property
def fiat(self):
try:
i = -1
fiat = self.portfolio_df.at[self.portfolio_df.index[i], self._fiat]
while not convert_to.decimal(fiat).is_finite():
i -= 1
fiat = self.portfolio_df.at[self.portfolio_df.index[i], self._fiat]
return fiat
except IndexError:
Logger.error(TradingEnvironment.fiat, "No valid value on portfolio dataframe.")
raise KeyError
except KeyError as e:
Logger.error(TradingEnvironment.fiat, "You must specify a fiat symbol first.")
raise e
except Exception as e:
Logger.error(TradingEnvironment.fiat, self.parse_error(e))
raise e
@fiat.setter
def fiat(self, value):
try:
if isinstance(value, str):
symbols = []
for pair in self.pairs:
symbols.append(pair.split('_')[1])
symbols.append(self.pairs[0].split('_')[0])
assert value in symbols, "Fiat not in symbols."
self._fiat = value
symbols.remove(self._fiat)
self._crypto = symbols
elif isinstance(value, Decimal) or isinstance(value, float) or isinstance(value, int):
self.portfolio_df.at[self.timestamp, self._fiat] = convert_to.decimal(value)
elif isinstance(value, dict):
try:
timestamp = value['timestamp']
except KeyError:
timestamp = self.timestamp
self.portfolio_df.at[timestamp, self._fiat] = convert_to.decimal(value[self._fiat])
except IndexError:
raise AssertionError('You must enter pairs before set fiat.')
except Exception as e:
Logger.error(TradingEnvironment.fiat, self.parse_error(e))
raise e
@property
def crypto(self):
try:
crypto = {}
for symbol in self._crypto:
crypto[symbol] = self.get_crypto(symbol)
return crypto
except KeyError as e:
Logger.error(TradingEnvironment.crypto, "No valid value on portfolio dataframe.")
raise e
except Exception as e:
Logger.error(TradingEnvironment.crypto, self.parse_error(e))
raise e
def get_crypto(self, symbol):
try:
i = -1
value = self.portfolio_df.at[self.portfolio_df.index[i], symbol]
while not convert_to.decimal(value).is_finite():
i -= 1
value = self.portfolio_df.at[self.portfolio_df.index[i], symbol]
return value
except IndexError:
Logger.error(TradingEnvironment.crypto, "No valid value on portfolio dataframe.")
raise KeyError
except KeyError as e:
Logger.error(TradingEnvironment.crypto, "No valid value on portfolio dataframe.")
raise e
except Exception as e:
Logger.error(TradingEnvironment.crypto, self.parse_error(e))
raise e
@crypto.setter
def crypto(self, values):
try:
# assert isinstance(values, dict), "Crypto value must be a dictionary containing the currencies balance."
try:
timestamp = values['timestamp']
except KeyError:
timestamp = self.timestamp
for symbol, value in values.items():
if symbol not in [self._fiat, 'timestamp']:
self.portfolio_df.at[timestamp, symbol] = convert_to.decimal(value)
except TypeError:
raise AssertionError("Crypto value must be a dictionary containing the currencies balance.")
except Exception as e:
Logger.error(TradingEnvironment.crypto, self.parse_error(e))
raise e
@property
def balance(self):
# return self.portfolio_df.ffill().loc[self.portfolio_df.index[-1], self.symbols].to_dict()
balance = self.crypto
balance.update({self._fiat: self.fiat})
return balance
@balance.setter
def balance(self, values):
try:
assert isinstance(values, dict), "Balance must be a dictionary containing the currencies amount."
try:
timestamp = values['timestamp']
except KeyError:
timestamp = self.timestamp
for symbol, value in values.items():
if symbol != 'timestamp':
self.portfolio_df.at[timestamp, symbol] = convert_to.decimal(value)
except Exception as e:
Logger.error(TradingEnvironment.balance, self.parse_error(e))
raise e
@property
def portval(self):
return self.calc_total_portval()
@portval.setter
def portval(self, value):
try:
self.portfolio_df.at[value['timestamp'], 'portval'] = convert_to.decimal(value['portval'])
except KeyError:
self.portfolio_df.at[self.timestamp, 'portval'] = convert_to.decimal(value['portval'])
except TypeError:
self.portfolio_df.at[self.timestamp, 'portval'] = convert_to.decimal(value)
except Exception as e:
Logger.error(TradingEnvironment.portval, self.parse_error(e))
raise e
@property
def benchmark(self):
return self._benchmark
@benchmark.setter
def benchmark(self, vector):
self._benchmark = self.assert_action(vector)
def reset_benchmark(self):
n_pairs = len(self.pairs)
self.benchmark = np.append(dec_vec_div(convert_to.decimal(np.ones(n_pairs, dtype=np.dtype(Decimal))),
dec_con.create_decimal(n_pairs)), [dec_zero])
def add_pairs(self, *args):
"""
Add pairs for tradeable symbol universe
:param args: str, list:
:return:
"""
universe = self.tapi.returnCurrencies()
for arg in args:
if isinstance(arg, str):
if set(arg.split('_')).issubset(universe):
self.pairs.append(arg)
else:
Logger.error(TradingEnvironment.add_pairs, "Symbol not found on exchange currencies.")
elif isinstance(arg, list):
for item in arg:
if set(item.split('_')).issubset(universe):
if isinstance(item, str):
self.pairs.append(item)
else:
Logger.error(TradingEnvironment.add_pairs, "Symbol name must be a string")
else:
Logger.error(TradingEnvironment.add_pairs, "Symbol name must be a string")
## Data feed methods
@property
def timestamp(self):
# return floor_datetime(datetime.now(timezone.utc) - timedelta(minutes=self.period), self.period)
# Poloniex returns utc timestamp delayed one full bar
return datetime.now(timezone.utc) - timedelta(minutes=self.period)
# Exchange data getters
def get_balance(self):
"""
Get last balance from exchange
:return: dict: Dict containing Decimal values for portfolio allocation
"""
try:
balance = self.tapi.returnBalances()
filtered_balance = {}
for symbol in self.symbols:
filtered_balance[symbol] = convert_to.decimal(balance[symbol])
return filtered_balance
except Exception as e:
try:
Logger.error(TradingEnvironment.get_balance, self.parse_error(e, balance))
except Exception:
Logger.error(TradingEnvironment.get_balance, self.parse_error(e))
raise e
def get_fee(self, symbol, fee_type='takerFee'):
"""
Return transaction fee value for desired symbol
:param symbol: str: Pair name
:param fee_type: str: Taker or Maker fee
:return: Decimal:
"""
# TODO MAKE IT UNIVERSAL
try:
fees = self.tapi.returnFeeInfo()
assert fee_type in ['takerFee', 'makerFee'], "fee_type must be whether 'takerFee' or 'makerFee'."
return dec_con.create_decimal(fees[fee_type])
except Exception as e:
Logger.error(TradingEnvironment.get_fee, self.parse_error(e))
raise e
# High frequency getter
# def get_pair_trades(self, pair, start=None, end=None):
# # TODO WRITE TEST
# # TODO FINISH THIS
# try:
# # Pool data from exchage
# if isinstance(end, float):
# data = self.tapi.marketTradeHist(pair, end=end)
# else:
# data = self.tapi.marketTradeHist(pair)
# df = pd.DataFrame.from_records(data)
#
# # Get more data from exchange until have enough to make obs_steps rows
# if isinstance(start, float):
# while datetime.fromtimestamp(start) < \
# datetime.strptime(df.date.iat[-1], "%Y-%m-%d %H:%M:%S"):
#
# market_data = self.tapi.marketTradeHist(pair, end=datetime.timestamp(
# datetime.strptime(df.date.iat[-1], "%Y-%m-%d %H:%M:%S")))
#
# df2 = pd.DataFrame.from_records(market_data).set_index('globalTradeID')
# appended = False
# i = 0
# while not appended:
# try:
# df = df.append(df2.iloc[i:], verify_integrity=True)
# appended = True
# except ValueError:
# i += 1
#
# else:
# while datetime.strptime(df.date.iat[0], "%Y-%m-%d %H:%M:%S") - \
# timedelta(minutes=self.period * self.obs_steps) < \
# datetime.strptime(df.date.iat[-1], "%Y-%m-%d %H:%M:%S"):
#
# market_data = self.tapi.marketTradeHist(pair, end=datetime.timestamp(
# datetime.strptime(df.date.iat[-1], "%Y-%m-%d %H:%M:%S")))
#
# df2 = pd.DataFrame.from_records(market_data).set_index('globalTradeID')
# appended = False
# i = 0
# while not appended:
# try:
# df = df.append(df2.iloc[i:], verify_integrity=True)
# appended = True
# except ValueError:
# i += 1
#
# return df
#
# except Exception as e:
# Logger.error(TradingEnvironment.get_pair_trades, self.parse_error(e))
# raise e
#
# def sample_trades(self, pair, start=None, end=None):
# # TODO WRITE TEST
# df = self.get_pair_trades(pair, start=start, end=end)
#
# period = "%dmin" % self.period
#
# # Sample the trades into OHLC data
# df['rate'] = df['rate'].ffill().apply(convert_to.decimal, raw=True)
# df['amount'] = df['amount'].apply(convert_to.decimal, raw=True)
# df.index = df.date.apply(pd.to_datetime, raw=True)
#
# # TODO REMOVE NANS
# index = df.resample(period).first().index
# out = pd.DataFrame(index=index)
#
# out['open'] = convert_and_clean(df['rate'].resample(period).first())
# out['high'] = convert_and_clean(df['rate'].resample(period).max())
# out['low'] = convert_and_clean(df['rate'].resample(period).min())
# out['close'] = convert_and_clean(df['rate'].resample(period).last())
# out['volume'] = convert_and_clean(df['amount'].resample(period).sum())
#
# return out
# Low frequency getter
def get_ohlc(self, symbol, index):
"""
Return OHLC data for desired pair
:param symbol: str: Pair symbol
:param index: pandas.DatetimeIndex: Time range for data retrieval
:return: pandas DataFrame: OHLC symbol data
"""
# Get range
start = index[0]
end = index[-1]
# Call for data
ohlc_df = pd.DataFrame.from_records(self.tapi.returnChartData(symbol,
period=self.period * 60,
start=datetime.timestamp(start),
end=datetime.timestamp(end)),
nrows=index.shape[0])
# TODO 1 FIND A BETTER WAY
# TODO: FIX TIMESTAMP
# Set index
ohlc_df.set_index(ohlc_df.date.transform(lambda x: datetime.fromtimestamp(x).astimezone(timezone.utc)),
inplace=True, drop=True)
# Get right values to fill nans
# TODO: FIND A BETTER PERFORMANCE METHOD
# last_close = ohlc_df.at[ohlc_df.close.last_valid_index(), 'close']
# Get last close value
i = -1
last_close = ohlc_df.at[ohlc_df.index[i], 'close']
while not dec_con.create_decimal(last_close).is_finite():
i -= 1
last_close = dec_con.create_decimal(ohlc_df.at[ohlc_df.index[i], 'close'])
# Replace missing values with last close
fill_dict = {col: last_close for col in ['open', 'high', 'low', 'close']}
fill_dict.update({'volume': '0E-16'})
# Reindex with desired time range and fill nans
ohlc_df = ohlc_df[['open','high','low','close',
'volume']].reindex(index).asfreq("%dT" % self.period).fillna(fill_dict)
return ohlc_df.astype(str)#.fillna('0.0')
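# Toy illustration of the reindex/fill behaviour above (made-up numbers): the missing
# bar at idx[2] becomes a flat candle at the last close ('12') with zero volume.
#
#     idx = pd.date_range("2017-01-01", periods=3, freq="30T", tz="UTC")
#     partial = pd.DataFrame({'open': ['10', '11'], 'close': ['11', '12'],
#                             'volume': ['1.0', '2.0']}, index=idx[:2])
#     filled = partial.reindex(idx).fillna({'open': '12', 'close': '12', 'volume': '0E-16'})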
# Observation maker
def get_history(self, start=None, end=None, portfolio_vector=False):
while True:
try:
obs_list = []
keys = []
# Make desired index
is_bounded = True
if not end:
end = self.timestamp
is_bounded = False
if not start:
start = end - timedelta(minutes=self.period * self.obs_steps)
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)[-self.obs_steps:]
is_bounded = False
else:
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)
if portfolio_vector:
# Get portfolio observation
port_vec = self.get_sampled_portfolio(index)
if port_vec.shape[0] == 0:
port_vec = self.get_sampled_portfolio().iloc[-1:]
port_vec.index = [index[0]]
# Update last observation so it can see possible inter step changes
last_balance = self.get_balance()
port_vec.at[port_vec.index[-1], list(last_balance.keys())] = list(last_balance.values())
# Get pairs history
for pair in self.pairs:
keys.append(pair)
history = self.get_ohlc(pair, index)
history = pd.concat([history, port_vec[pair.split('_')[1]]], axis=1)
obs_list.append(history)
# Get fiat history
keys.append(self._fiat)
obs_list.append(port_vec[self._fiat])
# Concatenate dataframes
obs = pd.concat(obs_list, keys=keys, axis=1)
# Fill missing portfolio observations
cols_to_bfill = [col for col in zip(self.pairs, self.symbols)] + [(self._fiat, self._fiat)]
obs = obs.fillna(obs[cols_to_bfill].ffill().bfill())
if not is_bounded:
assert obs.shape[0] >= self.obs_steps, "Dataframe is too small. Shape: %s" % str(obs.shape)
return obs.apply(convert_to.decimal, raw=True)
else:
# Get history
for pair in self.pairs:
keys.append(pair)
history = self.get_ohlc(pair, index)
obs_list.append(history)
# Concatenate
obs = pd.concat(obs_list, keys=keys, axis=1)
# Check size
if not is_bounded:
assert obs.shape[0] >= self.obs_steps, "Dataframe is too small. Shape: %s" % str(obs.shape)
return obs.apply(convert_to.decimal, raw=True)
except MaxRetriesException:
Logger.error(TradingEnvironment.get_history, "Retries exhausted. Waiting for connection...")
except Exception as e:
Logger.error(TradingEnvironment.get_history, self.parse_error(e))
raise e
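# Hedged sketch of the observation index built above (period=30 and obs_steps=10 are
# illustrative values, not defaults): a right-aligned window of obs_steps bars, ceiled
# to the bar period.
#
#     end = datetime.now(timezone.utc)
#     start = end - timedelta(minutes=30 * 10)
#     index = pd.date_range(start=start, end=end, freq="30T").ceil("30T")[-10:]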
def get_observation(self, portfolio_vector=False):
"""
Return observation df with prices and asset amounts
:param portfolio_vector: bool: whether to include or not asset amounts
:return: pandas DataFrame:
"""
try:
self.obs_df = self.get_history(portfolio_vector=portfolio_vector)
return self.obs_df
# except ExchangeError:
# sleep(1)
# self.obs_df = self.get_history(portfolio_vector=portfolio_vector)
# return self.obs_df
except Exception as e:
Logger.error(TradingEnvironment.get_observation, self.parse_error(e))
raise e
def get_sampled_portfolio(self, index=None):
"""
Return sampled portfolio df
:param index:
:return:
"""
if index is None:
start = self.portfolio_df.index[0]
end = self.portfolio_df.index[-1]
else:
start = index[0]
end = index[-1]
# TODO 1 FIND A BETTER WAY
if start != end:
return self.portfolio_df.loc[start:end].resample("%dmin" % self.period).last()
else:
return self.portfolio_df.loc[:end].resample("%dmin" % self.period).last()
def get_sampled_actions(self, index=None):
"""
Return sampled action df
:param index:
:return:
"""
if index is None:
start = self.action_df.index[0]
end = self.action_df.index[-1]
else:
start = index[0]
end = index[-1]
# TODO 1 FIND A BETTER WAY
if start != end:
return self.action_df.loc[start:end].resample("%dmin" % self.period).last()
else:
return self.action_df.loc[:end].resample("%dmin" % self.period).last()
## Trading methods
def get_open_price(self, symbol, timestamp=None):
"""
Get symbol open price
:param symbol: str: Pair name
:param timestamp:
:return: Decimal: Symbol open price
"""
if not timestamp:
timestamp = self.obs_df.index[-1]
return self.obs_df.at[timestamp, ("%s_%s" % (self._fiat, symbol), 'open')]
def calc_total_portval(self, timestamp=None):
"""
Return total portfolio value given optional timestamp
:param timestamp: datetime.datetime:
:return: Decimal: Portfolio value in fiat units
"""
portval = dec_zero
for symbol in self._crypto:
portval = self.get_crypto(symbol).fma(self.get_open_price(symbol, timestamp), portval)
portval = dec_con.add(self.fiat, portval)
return portval
def calc_posit(self, symbol, portval):
"""
Calculate current position vector
:param symbol: str: Symbol name
:param portval: Decimal: Portfolio value
:return:
"""
if symbol == self._fiat:
return safe_div(self.fiat, portval)
else:
return safe_div(dec_con.multiply(self.get_crypto(symbol), self.get_open_price(symbol)), portval)
def calc_portfolio_vector(self):
"""
Return portfolio position vector
:return: numpy array:
"""
portfolio = np.empty(len(self.symbols), dtype=Decimal)
portval = self.calc_total_portval()
for i, symbol in enumerate(self.symbols):
portfolio[i] = self.calc_posit(symbol, portval)
return portfolio
def assert_action(self, action):
"""
Assert that action vector is valid and have norm one
:param action: numpy array: Action array
:return: numpy array: Valid and normalized action vector
"""
# TODO WRITE TEST
try:
action = convert_to.decimal(action)
assert self.action_space.contains(action)
# normalize
if action.sum() != dec_one:
action = safe_div(action, action.sum())
action[-1] += dec_one - action.sum()
assert action.sum() - dec_one < dec_eps
return action
except AssertionError:
action = safe_div(action, action.sum())
action[-1] += dec_one - action.sum()
try:
assert action.sum() - dec_one < dec_eps
return action
except AssertionError:
action = safe_div(action, action.sum())
action[-1] += dec_one - action.sum()
assert action.sum() - dec_one < dec_eps
return action
except Exception as e:
Logger.error(TradingEnvironment.assert_action, self.parse_error(e))
raise e
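# Worked toy example of the normalization above (illustrative numbers): an action vector
# that does not sum to one is rescaled, and the residual rounding error is pushed into
# the last (fiat) position so the weights sum to exactly one.
#
#     raw = np.array([Decimal('0.5'), Decimal('0.3'), Decimal('0.4')])  # sums to 1.2
#     norm = safe_div(raw, raw.sum())                                   # ~[0.4167, 0.25, 0.3333]
#     norm[-1] += dec_one - norm.sum()                                  # exact unit norm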
def log_action(self, timestamp, symbol, value):
"""
Log action to action df
:param timestamp:
:param symbol:
:param value:
:return:
"""
if symbol == 'online':
self.action_df.at[timestamp, symbol] = value
else:
self.action_df.at[timestamp, symbol] = convert_to.decimal(value)
def log_action_vector(self, timestamp, vector, online):
"""
Log complete action vector to action df
:param timestamp:
:param vector:
:param online:
:return:
"""
for i, symbol in enumerate(self.symbols):
self.log_action(timestamp, symbol, vector[i])
self.log_action(timestamp, 'online', online)
def get_last_portval(self):
"""
Retrieve last valid portfolio value from portfolio dataframe
:return: Decimal
"""
try:
i = -1
portval = self.portfolio_df.at[self.portfolio_df.index[i], 'portval']
while not dec_con.create_decimal(portval).is_finite():
i -= 1
portval = self.portfolio_df.at[self.portfolio_df.index[i], 'portval']
return portval
except Exception as e:
Logger.error(TradingEnvironment.get_last_portval, self.parse_error(e))
raise e
def get_reward(self, previous_portval):
"""
Payoff loss function
Reference:
<NAME>.
Logarithmic Regret Algorithms for Online Convex ... - cs.Princeton
www.cs.princeton.edu/~ehazan/papers/log-journal.pdf
:param previous_portval: float: Previous portfolio value
:return: numpy float:
"""
# TODO TEST
# Price change
pr = self.obs_df.xs('open', level=1, axis=1).iloc[-2:].values
pr = np.append(safe_div(pr[-1], pr[-2]), [dec_one])
pr_max = pr.max()
# Divide after dot product
# pr = safe_div(pr, pr_max)
# No taxes this way
# port_log_return = rew_con.log10(np.dot(convert_to.decimal(self.action_df.iloc[-1].values[:-1]), pr))
# This way you get taxes from the next reward right after the step init
# try:
# port_change = safe_div(self.portfolio_df.get_value(self.portfolio_df.index[-1], 'portval'),
# self.portfolio_df.get_value(self.portfolio_df.index[-2], 'portval'))
# except IndexError:
# port_change = dec_one
# This way you get taxes from the currently action, after wait for the bar to close
try:
port_change = safe_div(self.calc_total_portval(), previous_portval)
except IndexError:
port_change = dec_one
# Portfolio log returns
port_log_return = rew_con.ln(safe_div(port_change, pr_max))
# Benchmark log returns
bench_log_return = rew_con.ln(safe_div(np.dot(self.benchmark, pr), pr_max))
# Return -regret (negative regret) = Payoff
return rew_con.subtract(port_log_return, bench_log_return).quantize(dec_qua)
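# Worked numeric sketch of the payoff above (all numbers are made up): with price relatives
# pr = [1.02, 0.99, 1.00] (last entry is fiat), pr_max = 1.02, a portfolio value change of
# 1.01 and benchmark weights [0.5, 0.5, 0.0]:
#
#     port_log_return  = ln(1.01 / 1.02)                   # ~ -0.00985
#     bench_log_return = ln((0.5*1.02 + 0.5*0.99) / 1.02)  # = ln(1.005 / 1.02) ~ -0.01481
#     payoff = port_log_return - bench_log_return          # ~ +0.00496, i.e. beat the benchmark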
def simulate_trade(self, action, timestamp):
"""
Simulates trade on exchange environment
:param action: np.array: Desired portfolio vector
:param timestamp: datetime.datetime: Trade time
:return: None
"""
# TODO: IMPLEMENT SLIPPAGE MODEL
try:
# Assert inputs
action = self.assert_action(action)
# Calculate position change given action
posit_change = dec_vec_sub(action, self.calc_portfolio_vector())[:-1]
# Get initial portval
portval = self.calc_total_portval()
# Sell assets first
for i, change in enumerate(posit_change):
if change < dec_zero:
symbol = self.symbols[i]
crypto_pool = safe_div(dec_con.multiply(portval, action[i]), self.get_open_price(symbol))
with localcontext() as ctx:
ctx.rounding = ROUND_UP
fee = ctx.multiply(dec_con.multiply(portval, change.copy_abs()), self.tax[symbol])
self.fiat = {self._fiat: dec_con.add(self.fiat, portval.fma(change.copy_abs(), -fee)), 'timestamp': timestamp}
self.crypto = {symbol: crypto_pool, 'timestamp': timestamp}
# Update prev portval with deducted taxes
portval = self.calc_total_portval()
# Then buy some goods
for i, change in enumerate(posit_change):
if change > dec_zero:
symbol = self.symbols[i]
self.fiat = {self._fiat: dec_con.subtract(self.fiat, dec_con.multiply(portval, change.copy_abs())),
'timestamp': timestamp}
# if fiat_pool is negative, deduct it from portval and clip
if self.fiat < dec_zero:
portval += self.fiat
self.fiat = {self._fiat: dec_zero, 'timestamp': timestamp}
with localcontext() as ctx:
ctx.rounding = ROUND_UP
fee = ctx.multiply(dec_con.multiply(portval, change.copy_abs()), self.tax[symbol])
crypto_pool = safe_div(portval.fma(action[i], -fee), self.get_open_price(symbol))
self.crypto = {symbol: crypto_pool, 'timestamp': timestamp}
# Log executed action and final balance
self.log_action_vector(self.timestamp, self.calc_portfolio_vector(), True)
# Update portfolio_df
final_balance = self.balance
final_balance['timestamp'] = timestamp
self.balance = final_balance
# Calculate new portval
self.portval = {'portval': self.calc_total_portval(),
'timestamp': timestamp}
return True
except Exception as e:
Logger.error(TradingEnvironment.simulate_trade, self.parse_error(e))
if hasattr(self, 'email'):
self.send_email("TradingEnvironment Error: %s at %s" % (e,
datetime.strftime(self.timestamp, "%Y-%m-%d %H:%M:%S")),
self.parse_error(e))
raise e
## Env methods
def set_observation_space(self):
"""
Set environment observation space
:return:
"""
# Observation space:
obs_space = []
# OPEN, HIGH, LOW, CLOSE
for _ in range(4):
obs_space.append(Box(0.0, 1e12, 1))
# VOLUME
obs_space.append(Box(0.0, 1e12, 1))
# POSITION
obs_space.append(Box(0.0, 1.0, 1))
self.observation_space = Tuple(obs_space)
def set_action_space(self):
"""
Set valid action space
:return:
"""
# Action space
self.action_space = Box(dec_zero, dec_one, (len(self.symbols),))
# Logger.info(TrainingEnvironment.set_action_space, "Setting environment with %d symbols." % (len(self.symbols)))
def reset_status(self):
self.status = {'OOD': False, 'Error': False, 'ValueError': False, 'ActionError': False,
'NotEnoughFiat': False}
def setup(self):
# Reset index
self.data_length = self.tapi.data_length
# Set spaces
self.set_observation_space()
self.set_action_space()
# Get fee values
for symbol in self.symbols:
self.tax[symbol] = convert_to.decimal(self.get_fee(symbol))
# Start balance
self.init_balance = self.get_balance()
# Set flag
self.initialized = True
def reset(self):
"""
Setup env with initial values
:return: pandas DataFrame: observation
"""
raise NotImplementedError()
## Analytics methods
def get_results(self, window=7, benchmark="crp"):
"""
Calculate metrics
:param window: int:
:param benchmark: str: crp for constant rebalance or bah for buy and hold
:return:
"""
# Sample portfolio df
self.results = self.get_sampled_portfolio().join(self.get_sampled_actions(), rsuffix='_posit')[1:].ffill()
# Get history
obs = self.get_history(self.results.index[0], self.results.index[-1])
# Init df
self.results['benchmark'] = dec_zero
self.results['returns'] = convert_to.decimal(np.nan)
self.results['benchmark_returns'] = convert_to.decimal(np.nan)
self.results['alpha'] = convert_to.decimal(np.nan)
self.results['beta'] = convert_to.decimal(np.nan)
self.results['drawdown'] = convert_to.decimal(np.nan)
self.results['sharpe'] = convert_to.decimal(np.nan)
## Calculate benchmark portfolio
# Calc init portval
init_portval = dec_zero
init_time = self.results.index[0]
for symbol in self._crypto:
init_portval += convert_to.decimal(self.init_balance[symbol]) * \
obs.at[init_time, (self._fiat + '_' + symbol, 'open')]
init_portval += convert_to.decimal(self.init_balance[self._fiat])
# # Buy and Hold initial equally distributed assets
with localcontext() as ctx:
ctx.rounding = ROUND_UP
for i, symbol in enumerate(self.pairs):
self.results[symbol+'_benchmark'] = (dec_one - self.tax[symbol.split('_')[1]]) * \
obs[symbol, 'open'] * init_portval / (obs.at[init_time,
(symbol, 'open')] * Decimal(self.action_space.shape[0] - 1))
if benchmark == 'bah':
self.results['benchmark'] = self.results['benchmark'] + self.results[symbol + '_benchmark']
# Best Constant Rebalance Portfolio without taxes
hindsight = obs.xs('open', level=1, axis=1).rolling(2,
min_periods=2).apply(lambda x: (safe_div(x[-1],
x[-2]))).fillna(dec_one).applymap(dec_con.create_decimal)
hindsight[self._fiat] = dec_one
# hindsight = hindsight.apply(lambda x: safe_div(x, x.max()), axis=1)
# Take first operation fee just to start at the same point as strategy
if benchmark == 'crp':
self.results['benchmark'] = np.dot(hindsight, self.benchmark).cumprod() * init_portval * \
(dec_one - self.tax[symbol.split('_')[1]])
# Calculate metrics
self.results['returns'] = pd.to_numeric(self.results.portval.rolling(2,
min_periods=2).apply(lambda x: (safe_div(x[-1],
x[-2]) - 1)).fillna(dec_zero))
self.results['benchmark_returns'] = pd.to_numeric(self.results.benchmark.rolling(2,
min_periods=2).apply(lambda x: (safe_div(x[-1],
x[-2]) - 1)).fillna(dec_zero))
self.results['alpha'] = ec.utils.roll(self.results.returns,
self.results.benchmark_returns,
function=ec.alpha_aligned,
window=window,
risk_free=0.001
)
self.results['beta'] = ec.utils.roll(self.results.returns,
self.results.benchmark_returns,
function=ec.beta_aligned,
window=window)
self.results['drawdown'] = ec.roll_max_drawdown(self.results.returns, window=int(window))
self.results['sharpe'] = ec.roll_sharpe_ratio(self.results.returns, window=int(window + 5), risk_free=0.001)
return self.results
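# Hedged usage sketch (the window and benchmark choice are illustrative):
#
#     results = env.get_results(window=7, benchmark='bah')   # buy-and-hold benchmark
#     print(results[['portval', 'benchmark', 'sharpe']].tail())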
def plot_results(self, window=14, benchmark='crp', subset=None):
def config_fig(fig):
fig.background_fill_color = "black"
fig.background_fill_alpha = 0.1
fig.border_fill_color = "#232323"
fig.outline_line_color = "#232323"
fig.title.text_color = "whitesmoke"
fig.xaxis.axis_label_text_color = "whitesmoke"
fig.yaxis.axis_label_text_color = "whitesmoke"
fig.yaxis.major_label_text_color = "whitesmoke"
fig.xaxis.major_label_orientation = np.pi / 4
fig.grid.grid_line_alpha = 0.1
fig.grid.grid_line_dash = [6, 4]
if subset:
df = self.get_results(window=window, benchmark=benchmark).astype(np.float64).iloc[subset[0]:subset[1]]
else:
df = self.get_results(window=window, benchmark=benchmark).astype(np.float64)
# Results figures
results = {}
# Position
pos_hover = HoverTool(
tooltips=[
('date', '<span style="color: #000000;">@x{%F, %H:%M}</span>'),
('position', '<span style="color: #000000;">@y{%f}</span>'),
],
formatters={
'x': 'datetime', # use 'datetime' formatter for 'date' field
'y': 'printf', # use 'printf' formatter for 'adj close' field
},
# display a tooltip whenever the cursor is vertically in line with a glyph
mode='vline'
)
p_pos = figure(title="Position over time",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='position',
plot_width=900, plot_height=400 + len(self.pairs) * 5,
tools=['crosshair','reset','xwheel_zoom','pan,box_zoom', pos_hover, 'save'],
toolbar_location="above"
)
config_fig(p_pos)
palettes = inferno(len(self.symbols))
legend = []
for i, symbol in enumerate(self.symbols):
results[symbol + '_posit'] = p_pos.line(df.index, df[symbol + '_posit'], color=palettes[i], line_width=1.2)#, muted_color=palettes[i], muted_alpha=0.2)
p_pos.legend.click_policy = "hide"
legend.append((str(symbol), [results[symbol + '_posit']]))
p_pos.add_layout(Legend(items=legend, location=(0, -31)), 'right')
p_pos.legend.click_policy = "hide"
# Portifolio and benchmark values
val_hover = HoverTool(
tooltips=[
('date', '<span style="color: #000000;">@x{%F, %H:%M}</span>'),
('val', '<span style="color: #000000;">$@y{%0.2f}</span>'),
],
formatters={
'x': 'datetime', # use 'datetime' formatter for 'date' field
'y': 'printf', # use 'printf' formatter for 'adj close' field
},
# display a tooltip whenever the cursor is vertically in line with a glyph
mode='vline'
)
p_val = figure(title="Portfolio / Benchmark Value",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='value',
plot_width=900, plot_height=400,
tools=['crosshair', 'reset', 'xwheel_zoom', 'pan,box_zoom', val_hover, 'save'],
toolbar_location="above"
)
config_fig(p_val)
results['benchmark'] = p_val.line(df.index, df.benchmark, color='red', line_width=1.2)
results['m_bench'] = p_val.line(df.index, df.benchmark.rolling(int(window * 10)).mean(), color='black', line_width=1.2, alpha=0.8)
results['portval'] = p_val.line(df.index, df.portval, color='green', line_width=1.2)
results['m_portval'] = p_val.line(df.index, df.portval.rolling(int(window * 10)).mean(), color='yellow', line_width=1.2, alpha=0.8)
p_val.add_layout(Legend(items=[("portval", [results['portval']]),
("benchmark", [results['benchmark']]),
("mean portval", [results['m_portval']]),
("mean bench", [results['m_bench']])
], location=(0, -31)), 'right')
p_val.legend.click_policy = "hide"
# Individual assets portval
p_pval = figure(title="Pair Performance",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='performance',
plot_width=900, plot_height=400 + len(self.pairs) * 5,
tools=['crosshair', 'reset', 'xwheel_zoom', 'pan,box_zoom', val_hover, 'save'],
toolbar_location="above"
)
config_fig(p_pval)
legend = []
for i, symbol in enumerate(self.pairs):
results[symbol+'_benchmark'] = p_pval.line(df.index, df[symbol+'_benchmark'], color=palettes[i], line_width=1.2)
legend.append((symbol,[results[symbol+'_benchmark']]))
p_pval.add_layout(Legend(items=legend, location=(0, -31)), 'right')
p_pval.legend.click_policy = "hide"
# Portifolio and benchmark returns
p_ret = figure(title="Portfolio / Benchmark Returns",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='Returns',
plot_width=900, plot_height=400,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_ret)
roll_mu = df.returns.rolling(int(df.index.shape[0] / 5)).mean()
roll_std = df.returns.rolling(int(df.index.shape[0] / 5)).var()
results['bench_ret'] = p_ret.line(df.index, df.benchmark_returns, color='red', line_width=1.2)
results['port_ret'] = p_ret.line(df.index, df.returns, color='green', line_width=1.2, alpha=0.6)
results['ret_mean'] = p_ret.line(df.index, roll_mu,
color='yellow', line_width=1.2, alpha=0.6)
results['ret_std_1'] = p_ret.line(df.index, roll_mu + roll_std,
color='blue', line_width=1.2, alpha=0.6)
results['ret_std_2'] = p_ret.line(df.index, roll_mu - roll_std,
color='blue', line_width=1.2, alpha=0.6)
p_ret.add_layout(Legend(items=[("bench returns", [results['bench_ret']]),
("port returns", [results['port_ret']]),
("returns_mean", [results['ret_mean']]),
("returns_std", [results['ret_std_1'], results['ret_std_2']])
], location=(0, -31),), 'right')
p_ret.legend.click_policy = "hide"
# Returns histogram
p_hist = figure(title="Portfolio Value Pct Change Distribution",
x_axis_label='Pct Change',
y_axis_label='frequency',
plot_width=900, plot_height=400,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_hist)
hist, edges = np.histogram(df.returns, density=True, bins=100)
p_hist.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649")
sigma = df.returns.std()
mu = df.returns.mean()
quantiles = (df.returns.quantile(0.05), df.returns.quantile(0.95))
results['mhist'] = Span(location=mu, dimension='height', line_color='red',
line_dash='dashed', line_width=2)
p_hist.add_layout(results['mhist'])
p_hist.add_layout(Label(x=mu, y=max(hist), x_offset=4,
y_offset=-5, text='%.06f' % mu,
text_color='red'))
p_hist.add_layout(Label(x=quantiles[0], y=0, text='%.06f' % quantiles[0], text_color='yellow', angle=45,))
p_hist.add_layout(Label(x=quantiles[1], y=0, text='%.06f' % quantiles[1], text_color='yellow', angle=45))
# PDF
# x = np.linspace(df.returns.min(), df.returns.max(), 1000)
# pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
#
# p_hist.line(x, pdf, line_color="#D95B43", line_width=1.8, alpha=0.7)
results['cihist'] = p_hist.line(np.linspace(quantiles[0], quantiles[1], 1000), 0, line_color='yellow',
line_width=3, alpha=0.7, line_dash='dashed')
p_hist.add_layout(Legend(items=[
("95% credible interval", [results['cihist']])
], location=(0, -31),), 'right')
# Portifolio rolling alpha
p_alpha = figure(title="Portfolio rolling alpha",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='alpha',
plot_width=900, plot_height=270,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_alpha)
mu = df.alpha.mean()
results['alpha'] = p_alpha.line(df.index, df.alpha, color='yellow', line_width=1.2)
p_alpha.add_layout(Span(location=0, dimension='width', line_color='black',
line_dash='dashed', line_width=1.5))
results['malpha'] = Span(location=mu, dimension='width', line_color='whitesmoke',
line_dash='dashed', line_width=1.5)
p_alpha.add_layout(results['malpha'])
p_alpha.add_layout(Label(x=df.index[window], y=mu, x_offset=10,
y_offset=1, text='mu: %.06f' % mu,
text_color='whitesmoke'))
p_alpha.add_layout(Legend(items=[("alpha", [results['alpha']])
], location=(0, -31),), 'right')
# Portifolio rolling beta
p_beta = figure(title="Portfolio rolling beta",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='beta',
plot_width=900, plot_height=270,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_beta)
mu = df.beta.mean()
results['beta'] = p_beta.line(df.index, df.beta, color='yellow', line_width=1.2)
p_beta.add_layout(Span(location=0, dimension='width', line_color='black',
line_dash='dashed', line_width=1.5))
results['mbeta'] = Span(location=mu, dimension='width', line_color='whitesmoke',
line_dash='dashed', line_width=1.5)
p_beta.add_layout(results['mbeta'])
p_beta.add_layout(Label(x=df.index[window], y=mu, x_offset=10,
y_offset=1, text='mu: %.06f' % mu,
text_color='whitesmoke'))
p_beta.add_layout(Legend(items=[("beta", [results['beta']])
], location=(0, -31),), 'right')
# Portifolio Sharpe ratio
p_sharpe = figure(title="Portfolio rolling Sharpe ratio",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='Sharpe ratio',
plot_width=900, plot_height=270,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_sharpe)
mu = df.sharpe.mean()
results['sharpe'] = p_sharpe.line(df.index, df.sharpe, color='yellow', line_width=1.2)
p_sharpe.add_layout(Span(location=0, dimension='width', line_color='black',
line_dash='dashed', line_width=1.5))
results['msharpe'] = Span(location=mu, dimension='width', line_color='whitesmoke',
line_dash='dashed', line_width=1.5)
p_sharpe.add_layout(results['msharpe'])
p_sharpe.add_layout(Label(x=df.index[window], y=mu, x_offset=10,
y_offset=1, text='mu: %.06f' % mu,
text_color='whitesmoke'))
p_sharpe.add_layout(Legend(items=[("sharpe", [results['sharpe']])
], location=(0, -31),), 'right')
# Rolling Drawdown
p_dd = figure(title="Portfolio rolling drawdown",
x_axis_type="datetime",
x_axis_label='timestep',
y_axis_label='drawdown',
plot_width=900, plot_height=270,
tools=['crosshair','reset','xwheel_zoom','pan', 'box_zoom', 'save'],
toolbar_location="above"
)
config_fig(p_dd)
md = df.drawdown.min()
results['drawdown'] = p_dd.line(df.index, df.drawdown, color='red', line_width=1.2)
results['mdrawdown'] = Span(location=md, dimension='width',
line_color='whitesmoke', line_dash='dashed', line_width=2)
p_dd.add_layout(results['mdrawdown'])
p_dd.add_layout(Label(x=df.index[window], y=md, x_offset=4,
y_offset=5, text='max dd: %.06f' % md,
text_color='whitesmoke'))
p_dd.add_layout(Legend(items=[("drawdown", [results['drawdown']])
], location=(0, -31),), 'right')
print("\n################### > Portfolio Performance Analysis < ###################\n")
print("Portfolio excess Sharpe: %f" % ec.excess_sharpe(df.returns, df.benchmark_returns))
print("Portfolio / Benchmark Sharpe ratio: %f / %f" % (ec.sharpe_ratio(df.returns),
ec.sharpe_ratio(df.benchmark_returns)))
print("Portfolio / Benchmark Omega ratio: %f / %f" % (ec.omega_ratio(df.returns),
ec.omega_ratio(df.benchmark_returns)))
print("Portfolio / Benchmark max drawdown: %f / %f" % (ec.max_drawdown(df.returns),
ec.max_drawdown(df.benchmark_returns)))
results['handle'] = show(column(p_val, p_pval, p_pos, p_ret, p_hist, p_sharpe, p_dd, p_alpha, p_beta),
notebook_handle=True)
return results
## Report methods
def parse_error(self, e, *args):
error_msg = '\n' + self.name + ' error -> ' + type(e).__name__ + ' in line ' + str(
e.__traceback__.tb_lineno) + ': ' + str(e)
for arg in args:
error_msg += "\n" + str(arg)
return error_msg
def set_email(self, email):
"""
Set Gmail address and password for log keeping
:param email: dict: dictionary with keys 'email' (Gmail address), 'psw' (account password)
and 'to' (recipient address or list of addresses)
:return:
"""
try:
assert isinstance(email, dict)
self.email = email
Logger.info(TradingEnvironment.set_email, "Email report address set to: %s" % (str([email[key] for key in email if key == 'to'])))
except Exception as e:
Logger.error(TradingEnvironment.set_email, self.parse_error(e))
def send_email(self, subject, body):
try:
assert isinstance(self.email, dict) and \
isinstance(subject, str) and isinstance(body, str)
for key in self.email:
if key == 'email':
gmail_user = self.email[key]
elif key == 'psw':
gmail_pwd = self.email[key]
elif key == 'to':
TO = self.email[key] if type(self.email[key]) is list else [self.email[key]]
FROM = gmail_user
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(gmail_user, gmail_pwd)
server.sendmail(FROM, TO, message)
server.close()
# If we have no internet, wait five seconds and retry
except gaierror:
try:
sleep(5)
self.send_email(subject, body)
except gaierror as e:
# If there is no internet yet, log error and move on
Logger.error(TradingEnvironment.send_email, self.parse_error(e))
except smtplib.SMTPServerDisconnected as e:
# If there is no internet yet, log error and move on
Logger.error(TradingEnvironment.send_email, self.parse_error(e))
except smtplib.SMTPSenderRefused as e:
# If there is no internet yet, log error and move on
Logger.error(TradingEnvironment.send_email, self.parse_error(e))
except Exception as e:
try:
Logger.error(TradingEnvironment.send_email, self.parse_error(e))
if hasattr(self, 'email'):
self.send_email("Error sending email: %s at %s" % (e,
datetime.strftime(self.timestamp, "%Y-%m-%d %H:%M:%S")),
self.parse_error(e))
except Exception as e:
Logger.error(TradingEnvironment.send_email, self.parse_error(e))
class BacktestEnvironment(TradingEnvironment):
"""
Backtest environment for financial strategies history testing
"""
def __init__(self, period, obs_steps, tapi, fiat, name):
assert isinstance(tapi, BacktestDataFeed), "Backtest tapi must be an instance of BacktestDataFeed."
super().__init__(period, obs_steps, tapi, fiat, name)
self.index = obs_steps
self.data_length = None
self.training = False
self.initialized = False
@property
def timestamp(self):
return datetime.fromtimestamp(self.tapi.ohlc_data[self.tapi.pairs[0]].index[self.index]).astimezone(timezone.utc)
def get_hindsight(self):
"""
Stay away from look ahead bias!
:return: pandas dataframe: Full history dataframe
"""
# Save env obs_steps
obs_steps = self.obs_steps
# Change it so you can recover all the data
self.obs_steps = self.data_length
self.index = self.obs_steps - 1
# Pull the entire data set
hindsight = self.get_observation()
# Change env obs_steps back
self.obs_steps = obs_steps
self.index = self.obs_steps
return hindsight
def optimize_benchmark(self, nb_steps, verbose=False):
# Init var
i = 0
## Acquire open price hindsight
hindsight = self.get_hindsight().xs('open', level=1,
axis=1).rolling(2, min_periods=2).apply(
lambda x: (safe_div(x[-1], x[-2]))).dropna().astype('f')
hindsight[self._fiat] = 1.0
# Scale it
hindsight = hindsight.apply(lambda x: safe_div(x, x.max()), axis=1)
# Calculate benchmark return
# Benchmark: Equally distributed constant rebalanced portfolio
ed_crp = array_normalize(np.append(np.ones(len(self.symbols) - 1), [0.0]))
ed_crp_returns = np.dot(hindsight, ed_crp)
initial_benchmark_returns = np.dot(hindsight, np.float64(self.benchmark))
initial_reward = np.log(initial_benchmark_returns).sum() - np.log(ed_crp_returns).sum()
## Define params
# Constraints declaration
# bench_constraints = [lambda **kwargs: sum([kwargs[key] for key in kwargs]) <= 1]
## Define benchmark optimization routine
# @ot.constraints.constrained(bench_constrains)
# @ot.constraints.violations_defaulted(-10)
def find_bench(**kwargs):
try:
# Init variables
nonlocal i, nb_steps, hindsight, ed_crp_returns
# Best constant rebalance portfolio
b_crp = array_normalize(np.array([kwargs[key] for key in kwargs]))
# Best constant rebalance portfolio returns
b_crp_returns = np.dot(hindsight, b_crp)
# Calculate log-return regret relative to the equally distributed CRP
reward = np.log(b_crp_returns).sum() - np.log(ed_crp_returns).sum()
# Increment counter
i += 1
# Update progress
if verbose and i % 10 == 0:
print("Benchmark optimization step {0}/{1}, step reward: {2}".format(i,
int(nb_steps),
float(reward)),
end="\r")
return reward
except KeyboardInterrupt:
raise ot.api.fun.MaximumEvaluationsException(0)
# Search space declaration
n_assets = len(self.symbols)
bench_search_space = {str(i): j for i, j in zip(np.arange(n_assets), [[0, 1] for _ in range(n_assets)])}
print("Optimizing benchmark...")
# Call optimizer to benchmark
BCR, info, _ = ot.maximize_structured(
find_bench,
num_evals=int(nb_steps),
search_space=bench_search_space
)
if float(info.optimum) > float(initial_reward):
self.benchmark = convert_to.decimal(array_normalize(np.array([BCR[key] for key in BCR])))
print("\nOptimum benchmark reward: %f" % info.optimum)
print("Best Constant Rebalance portfolio found in %d optimization rounds:\n" % i, self.benchmark.astype(float))
else:
print("Initial benchmark was already optimum. Reward: %s" % str(initial_reward))
print("Benchmark portfolio: %s" % str(np.float32(self.benchmark)))
return self.benchmark
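# Hedged usage sketch (the step count is arbitrary): search for a better constant-rebalance
# benchmark over the available hindsight before running a backtest.
#
#     best_crp = env.optimize_benchmark(nb_steps=500, verbose=True)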
def get_history(self, start=None, end=None, portfolio_vector=False):
while True:
try:
obs_list = []
keys = []
# Make desired index
is_bounded = True
if not end:
end = self.timestamp
is_bounded = False
if not start:
start = end - timedelta(minutes=self.period * self.obs_steps)
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)[-self.obs_steps:]
is_bounded = False
else:
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)
if portfolio_vector:
# Get portfolio observation
port_vec = self.get_sampled_portfolio(index)
if port_vec.shape[0] == 0:
port_vec = self.get_sampled_portfolio().iloc[-1:]
port_vec.index = [index[0]]
# Get pairs history
for pair in self.pairs:
keys.append(pair)
history = self.get_ohlc(pair, index)
history = pd.concat([history, port_vec[pair.split('_')[1]]], axis=1)
obs_list.append(history)
# Get fiat history
keys.append(self._fiat)
obs_list.append(port_vec[self._fiat])
# Concatenate dataframes
obs = pd.concat(obs_list, keys=keys, axis=1)
# Fill missing portfolio observations
cols_to_bfill = [col for col in zip(self.pairs, self.symbols)] + [(self._fiat, self._fiat)]
obs = obs.fillna(obs[cols_to_bfill].ffill().bfill())
if not is_bounded:
assert obs.shape[0] >= self.obs_steps, "Dataframe is too small. Shape: %s" % str(obs.shape)
return obs.apply(convert_to.decimal, raw=True)
else:
# Get history
for pair in self.pairs:
keys.append(pair)
history = self.get_ohlc(pair, index)
obs_list.append(history)
# Concatenate
obs = pd.concat(obs_list, keys=keys, axis=1)
# Check size
if not is_bounded:
assert obs.shape[0] >= self.obs_steps, "Dataframe is too small. Shape: %s" % str(obs.shape)
return obs.apply(convert_to.decimal, raw=True)
except MaxRetriesException:
Logger.error(TradingEnvironment.get_history, "Retries exhausted. Waiting for connection...")
except Exception as e:
Logger.error(TradingEnvironment.get_history, self.parse_error(e))
raise e
def get_ohlc(self, symbol, index):
# Get range
start = index[0]
end = index[-1]
# Call for data
ohlc_df = pd.DataFrame.from_records(self.tapi.returnChartData(symbol,
period=self.period * 60,
start=datetime.timestamp(start),
end=datetime.timestamp(end)),
nrows=index.shape[0])
# TODO 1 FIND A BETTER WAY
# TODO: FIX TIMESTAMP
# Set index
ohlc_df.set_index(ohlc_df.date.transform(lambda x: datetime.fromtimestamp(x).astimezone(timezone.utc)),
inplace=True, drop=True)
# Disabled fill on backtest for performance.
# We assume that backtest data feed will not return nan values
# Get right values to fill nans
# fill_dict = {col: ohlc_df.loc[ohlc_df.close.last_valid_index(), 'close'] for col in ['open', 'high', 'low', 'close']}
# fill_dict.update({'volume': '0E-8'})
# Reindex with desired time range and fill nans
ohlc_df = ohlc_df[['open','high','low','close',
'volume']].reindex(index).asfreq("%dT" % self.period)#.fillna(fill_dict)
return ohlc_df.astype(str)
def reset(self):
"""
Setup env with initial values
:return: pandas DataFrame: Initial observation
"""
try:
# If need setup, do it
if not self.initialized:
self.setup()
# Get start point
if self.training:
self.index = np.random.random_integers(self.obs_steps, self.data_length - 3)
else:
self.index = self.obs_steps
# Reset log dfs
self.obs_df = pd.DataFrame()
self.portfolio_df = pd.DataFrame(columns=list(self.symbols) + ['portval'])
# Reset balance
self.balance = self.init_balance
# Get new index
self.index += 1
# Get first observation
obs = self.get_observation(True)
# Reset portfolio value
self.portval = {'portval': self.calc_total_portval(self.obs_df.index[-1]),
'timestamp': self.portfolio_df.index[-1]}
# Clean actions
self.action_df = pd.DataFrame([list(self.calc_portfolio_vector()) + [False]],
columns=list(self.symbols) + ['online'],
index=[self.portfolio_df.index[-1]])
# Return first observation
return obs.astype(np.float64)
except IndexError:
print("Insufficient tapi data. You must choose a bigger time span or a lower period.")
raise IndexError
def step(self, action):
try:
# Get step timestamp
timestamp = self.timestamp
# Save portval for reward calculation
previous_portval = self.calc_total_portval()
            # Simulate portfolio rebalance
self.simulate_trade(action, timestamp)
# Check for end condition
if self.index >= self.data_length - 2:
done = True
self.status["OOD"] += 1
else:
done = False
# Get new index
self.index += 1
# Get new observation
new_obs = self.get_observation(True)
            # Get reward for the action taken
reward = self.get_reward(previous_portval)
# Return new observation, reward, done flag and status for debugging
return new_obs.astype(np.float64), np.float64(reward), done, self.status
except KeyboardInterrupt:
self.status["OOD"] += 1
# return self.get_observation(True).astype(np.float64), np.float64(0), False, self.status
raise KeyboardInterrupt
except Exception as e:
Logger.error(BacktestEnvironment.step, self.parse_error(e))
if hasattr(self, 'email'):
self.send_email("TradingEnvironment Error: %s at %s" % (e,
datetime.strftime(self.timestamp, "%Y-%m-%d %H:%M:%S")),
self.parse_error(e))
print("step action:", action)
raise e
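# --- Editor sketch (not part of the original module) ---
# reset()/step() above follow the usual gym-style rollout contract: reset()
# returns the first observation and step(action) returns
# (observation, reward, done, status). The helper below is a minimal,
# hypothetical driver; `env` and `agent` are placeholders assumed to expose
# this interface and are not defined in this module.
def _example_rollout(env, agent, max_steps=1000):
    """Run one episode against a gym-style trading environment (illustrative only)."""
    obs = env.reset()
    for _ in range(max_steps):
        action = agent.act(obs)                      # agent maps observation -> portfolio vector
        obs, reward, done, status = env.step(action)
        if done:                                     # data feed exhausted or end condition hit
            break
    return status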
class TrainingEnvironment(BacktestEnvironment):
def __init__(self, period, obs_steps, tapi, fiat, name):
super(TrainingEnvironment, self).__init__(period, obs_steps, tapi, fiat, name)
@property
def timestamp(self):
return datetime.fromtimestamp(self.data.index[self.index]).astimezone(timezone.utc)
def get_history(self, start=None, end=None, portfolio_vector=False):
while True:
try:
obs_list = []
keys = []
# Make desired index
end = self.timestamp
start = end - timedelta(minutes=self.period * self.obs_steps)
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)[-self.obs_steps:]
# Get portfolio observation
port_vec = self.get_sampled_portfolio(index)
if port_vec.shape[0] == 0:
port_vec = self.get_sampled_portfolio().iloc[-1:]
port_vec.index = [index[0]]
# Get pairs history
for pair in self.pairs:
keys.append(pair)
history = self.get_ohlc(pair, index)
history = pd.concat([history, port_vec[pair.split('_')[1]]], axis=1)
obs_list.append(history)
# Get fiat history
keys.append(self._fiat)
obs_list.append(port_vec[self._fiat])
# Concatenate dataframes
obs = | pd.concat(obs_list, keys=keys, axis=1) | pandas.concat |
## parse TCGA data
import pandas as pd
from collections import defaultdict
import numpy as np
import scipy.stats as stat
import os, time
def TCGA_ssGSEA(cancer_type, parse_reactome=True, simplify_barcode=True):
'''
Input
    cancer_type: 'BLCA', 'SKCM' (melanoma), 'STAD' (gastric cancer)
    parse_reactome: if True, keep only REACTOME pathway scores
    simplify_barcode: if True, barcodes are truncated to the 12-character patient ID and duplicated samples are removed
'''
fi_dir = '../../data/TCGA/ssgsea'
df = pd.DataFrame()
if 'TCGA-%s'%cancer_type in os.listdir(fi_dir):
df = pd.read_csv('%s/TCGA-%s/ssgsea.txt'%(fi_dir, cancer_type), sep='\t')
df = df.rename(columns={'testType':'pathway'})
df_col = ['pathway']
for sample in df.columns[1:]:
if 'TCGA' in sample:
df_col.append(sample)
df = pd.DataFrame(data=df, columns=df_col)
if parse_reactome == True:
df = df.loc[df['pathway'].str.contains('REACTOME'),:]
if simplify_barcode == True:
rename_dic = {}
cols = ['pathway']
samples = []
for col in df.columns[1:]:
samples.append(col[:12])
for sample, col in zip(samples, df.columns[1:]):
if samples.count(sample) == 1:
rename_dic[col] = sample
cols.append(sample)
df = df.rename(columns=rename_dic)
df = pd.DataFrame(data=df, columns=cols)
return df
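# --- Editor sketch (illustrative usage, not part of the original module) ---
# Minimal example of calling the loader above; it assumes the ssGSEA files are
# laid out under ../../data/TCGA/ssgsea exactly as this module expects.
def _example_load_ssgsea(cancer_type='BLCA'):
    """Hypothetical helper: load REACTOME ssGSEA scores with de-duplicated barcodes."""
    ssgsea_df = TCGA_ssGSEA(cancer_type, parse_reactome=True, simplify_barcode=True)
    # rows are pathways; columns are 'pathway' plus one column per unique patient barcode
    return ssgsea_df.set_index('pathway')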
def TCGA_gene_expression(cancer_type, official_gene_symbol=True, simplify_barcode=True):
'''
Input
cancer_type: 'BLCA', 'SKCM' (melanoma), 'STAD' (gastric cancer)
simplify_barcode: if True, duplicate samples are removed
'''
fi_dir = '../../data/TCGA'
df = pd.DataFrame()
if 'TCGA-%s'%cancer_type in os.listdir(fi_dir):
df = | pd.read_csv('%s/TCGA-%s/TMM_rna_seq.txt'%(fi_dir, cancer_type), sep='\t') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/31 13:19
Desc: Constituent stock data for stock indices. Sina exposes two interfaces; the old one is used here:
New interface: http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000001
Old interface: http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
"""
import math
from io import BytesIO
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from akshare.utils import demjson
def index_stock_cons_sina(symbol: str = "000300") -> pd.DataFrame:
"""
    Sina's new index-constituents page; currently this interface covers only a limited number of indices
    http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000040
    :param symbol: index code
    :type symbol: str
    :return: constituent stocks of the index
:rtype: pandas.DataFrame
"""
if symbol == "000300":
symbol = "hs300"
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCountSimple"
params = {"node": f"{symbol}"}
r = requests.get(url, params=params)
page_num = math.ceil(int(r.json()) / 80) + 1
temp_df = pd.DataFrame()
for page in range(1, page_num):
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "hs300",
"symbol": "",
"_s_r_a": "init",
}
r = requests.get(url, params=params)
temp_df = temp_df.append(
pd.DataFrame(demjson.decode(r.text)), ignore_index=True
)
return temp_df
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeDataSimple"
params = {
"page": 1,
"num": "3000",
"sort": "symbol",
"asc": "1",
"node": f"zhishu_{symbol}",
"_s_r_a": "setlen",
}
r = requests.get(url, params=params)
return pd.DataFrame(demjson.decode(r.text))
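# --- Editor sketch (illustrative usage, not part of the original module) ---
# Calling the Sina constituents endpoint above: "000300" (CSI 300) is
# special-cased to the paginated hs300 node, while other codes go through a
# single zhishu_* request. Network access to sina.com.cn is assumed.
def _example_index_stock_cons_sina():
    hs300_cons = index_stock_cons_sina(symbol="000300")   # paginated branch
    other_cons = index_stock_cons_sina(symbol="000040")   # single-request branch
    return hs300_cons, other_cons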
def index_stock_info() -> pd.DataFrame:
"""
    JoinQuant - index data - index list
    https://www.joinquant.com/data/dict/indexData
    :return: dataframe of index information
:rtype: pandas.DataFrame
"""
index_df = pd.read_html("https://www.joinquant.com/data/dict/indexData")[0]
index_df["指数代码"] = index_df["指数代码"].str.split(".", expand=True)[0]
index_df.columns = ["index_code", "display_name", "publish_date", "-", "-"]
return index_df[["index_code", "display_name", "publish_date"]]
def index_stock_cons(symbol: str = "399639") -> pd.DataFrame:
"""
    Latest constituent list of a stock index
    http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
    :param symbol: index code; can be obtained via the ak.index_stock_info() function
    :type symbol: str
    :return: latest constituent list of the stock index
:rtype: pandas.DataFrame
"""
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{symbol}.phtml"
r = requests.get(url)
r.encoding = "gb2312"
soup = BeautifulSoup(r.text, "lxml")
page_num = (
soup.find(attrs={"class": "table2"})
.find("td")
.find_all("a")[-1]["href"]
.split("page=")[-1]
.split("&")[0]
)
if page_num == "#":
temp_df = pd.read_html(r.text, header=1)[3].iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
temp_df = pd.DataFrame()
for page in range(1, int(page_num) + 1):
url = f"http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page={page}&indexid={symbol}"
r = requests.get(url)
r.encoding = "gb2312"
temp_df = temp_df.append(pd.read_html(r.text, header=1)[3], ignore_index=True)
temp_df = temp_df.iloc[:, :3]
temp_df["品种代码"] = temp_df["品种代码"].astype(str).str.zfill(6)
return temp_df
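# --- Editor sketch (illustrative usage, not part of the original module) ---
# Typical workflow: look up an index code via index_stock_info(), then fetch
# its constituents with index_stock_cons(). The display name used below is an
# assumption and must match an entry in the JoinQuant table; network access to
# joinquant.com and sina.com.cn is assumed.
def _example_lookup_and_fetch(display_name="沪深300"):
    info_df = index_stock_info()
    row = info_df[info_df["display_name"] == display_name]
    if row.empty:
        return pd.DataFrame()
    index_code = row["index_code"].iloc[0]
    return index_stock_cons(symbol=index_code)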
def index_stock_cons_csindex(symbol: str = "000300") -> pd.DataFrame:
"""
    CSIndex website - index constituent list
    http://www.csindex.com.cn/zh-CN/indices/index-detail/000300
    :param symbol: index code; can be obtained via the ak.index_stock_info() function
    :type symbol: str
    :return: latest constituents of the index
:rtype: pandas.DataFrame
"""
url = f"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/cons/{symbol}cons.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df.columns = [
"日期",
"指数代码",
"指数名称",
"指数英文名称",
"成分券代码",
"成分券名称",
"成分券英文名称",
"交易所",
"交易所英文名称",
]
temp_df['日期'] = pd.t | o_datetime(temp_df['日期'], format="%Y%m%d") | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
import os
import copy
import pandas
from os.path import join
from pandas.core.frame import DataFrame
from MyPythonDocx import *
def cal_va(df):
# df = DataFrame(page[1:], columns=page[0])
severity = ['嚴重', '高', '中', '低', '無']
vas = []
for idx in range(5):
mask = df['嚴重程度'] == severity[idx]
tmp = df[mask][['弱點名稱', '弱點描述']].values.tolist()
vas.append([])
for name in tmp:
if name and name not in vas[idx]:
vas[idx].append(name)
# print(vas)
return vas
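# --- Editor sketch (illustrative usage, not part of the original module) ---
# cal_va() expects a DataFrame with the report columns '弱點名稱', '弱點描述'
# and '嚴重程度'; the toy rows below are hypothetical findings used only to
# show the grouping by severity.
def _example_cal_va():
    page = [
        ['弱點名稱', '弱點描述', '嚴重程度'],
        ['SQL Injection', 'user input reaches SQL query', '嚴重'],
        ['Outdated TLS', 'TLS 1.0 still enabled', '中'],
    ]
    df = DataFrame(page[1:], columns=page[0])
    # returns five lists: critical, high, medium, low and informational findings
    return cal_va(df)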
def cal_risk_cnt(page):
try:
df = | DataFrame(page[1:], columns=page[0]) | pandas.core.frame.DataFrame |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
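# Editor sketch (not an original test): the classes below lean on
# tm.box_expected to wrap the same fixture data in each parametrized container
# (Index, Series, DataFrame) so one expectation can be asserted across boxes.
# This mirrors test_td64arr_add_timestamp for the Series box only.
def _example_box_pattern(box=Series):
    tdi = TimedeltaIndex(['1 day', '2 days'])
    expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
    boxed = tm.box_expected(tdi, box)
    boxed_expected = tm.box_expected(expected, box)
    tm.assert_equal(boxed + Timestamp('2011-01-01'), boxed_expected)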
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
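# Editor sketch (not an original test): summary of the DateOffset-array
# behaviour exercised above -- adding an object-dtype array of non-anchored
# offsets works element-wise but emits a PerformanceWarning, while anchored
# offsets such as MonthEnd raise TypeError instead.
def _example_offset_array_addition():
    tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
    other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
    with tm.assert_produces_warning(PerformanceWarning):
        return tdi + other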
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = | tm.box_expected(tdser, box) | pandas.util.testing.box_expected |
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.flag = flag
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
        # split the dataset length into train and test portions (here 15% train, 80% test)
        num_train = int(len(df_raw)*0.15)
        num_test = int(len(df_raw)*0.80)
        # the remainder is used for validation
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
            # Take the column headers and drop the date column, e.g. Index(['open', 'close', 'TT'], dtype='object')
            cols_data = df_raw.columns[1:]
            # select all columns except date
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
            # data used to fit the scaler (training split only)
            train_data = df_data[border1s[0]:border2s[0]]
            # compute mean and std in preparation for scaling
            self.scaler.fit(train_data.values)
            # scale the data
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
print('Bảng ' + self.flag + ': \n', df_data[border1:border2])
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def length(self):
print (len(self.data_x) - self.seq_len- self.pred_len + 1, ' / ', len(self.df_raw) )
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
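# --- Editor sketch (illustrative usage, not part of the original module) ---
# Minimal example of wrapping the dataset above in a torch DataLoader. The
# root_path/data_path layout is an assumption; size is [seq_len, label_len, pred_len].
def _example_ett_hour_loader(root_path='./data/ETT', batch_size=32):
    dataset = Dataset_ETT_hour(root_path=root_path, flag='train',
                               size=[96, 48, 24], features='M',
                               data_path='ETTh1.csv', target='OT')
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    # each batch yields (seq_x, seq_y, seq_x_mark, seq_y_mark)
    return loader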
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
        # split the dataset length into train and test portions (here 97% train, ~2% test)
        num_train = int(len(df_raw)*0.97)
        num_test = int(len(df_raw)*0.0215)
        # the remainder is used for validation
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.flag = flag
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
# cols = list(df_raw.columns);
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
        # Reorder the columns: the date column goes first and the target column last.
        # The dataset only needs a date column and a target column; the code
        # rearranges the columns into its standard order automatically.
df_raw = df_raw[['date']+cols+[self.target]]
num_test = 4000
num_vali = 4000
num_train = int(len(df_raw)) - num_test - num_vali
        # Alternative: split the dataset into train and test by ratio (70%/15% in the commented-out code)
        #num_train = int(len(df_raw)*0.70)
        #num_test = int(len(df_raw)*0.15)
        # the remainder is used for validation
#num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
            # Drop the date column; keep only the data columns
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
            # Slice out the training data
            train_data = df_data[border1s[0]:border2s[0]]
            # Compute mean and std in preparation for scaling.
            # Each column gets its own mean and std.
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
        # df_stamp contains only the date column
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
        # Convert the timestamps into time-frequency features; only the date column is used
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
        # data_x is sliced from a portion of data
self.data_x = data[border1:border2]
        # note: when inverse is False, data_y is identical to data_x
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
print('Bảng ' + self.flag + ': \n', df_data[border1:border2])
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def length(self):
print (len(self.data_x) - self.seq_len- self.pred_len + 1, ' / ', len(self.df_raw) )
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
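# --- Editor sketch (not part of the original module) ---
# Worked example of the split borders computed in __read_data__ above, using
# hypothetical numbers (20000 rows, seq_len=96, 4000 test and 4000 validation rows).
def _example_borders(n_rows=20000, seq_len=96, num_test=4000, num_vali=4000):
    num_train = n_rows - num_test - num_vali                          # 12000
    border1s = [0, num_train - seq_len, n_rows - num_test - seq_len]  # [0, 11904, 15904]
    border2s = [num_train, num_train + num_vali, n_rows]              # [12000, 16000, 20000]
    # every non-train split starts seq_len rows early so each window keeps a
    # full look-back history without targets crossing split boundaries
    return border1s, border2s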
# This class is used at prediction time; the program loads data through it.
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None, pred_dates=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
df_raw = df_raw[['date']+cols+[self.target]]
border1 = len(df_raw)-self.seq_len
border2 = len(df_raw)
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
self.scaler.fit(df_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
        # border1 goes back exactly seq_len steps from the end of the data
        # border2 is the current (last) timestamp
        # tmp_stamp covers the last seq_len timestamps counting back from the current one
tmp_stamp = df_raw[['date']][border1:border2]
tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
        # pred_dates are the future dates to be forecast
        # pred_dates = last date + pred_len steps (e.g. forecasting from 23-8 through 22-9)
pred_dates = | pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len+1, freq=self.freq) | pandas.date_range |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NAME:
debug_inp.py
DESCRIPTION:
    debugs and fixes .inp format files of CIT (sam file) type data, with user input.
SYNTAX:
~$ python debug_inp.py $INP_FILE
FLAGS:
-h, --help:
prints this help message
-dx, --dropbox:
Prioritize user's Dropbox folder when searching/debugging sam file paths;
the module will attempt to locate the Dropbox folder automatically.
Options to explicitly set value of inp fields:
--sam_path:
Path to .sam file
--magic_codes:
Magic method codes
--loc:
Site description given in .sam file; commonly location
--nc:
Naming convention; see docstring for debug_inp function.
Set to -1 if you are sure you want to change the current value
but want this module to try to figure out the correct value for you.
** WARNING **
This is not a robust functionality yet; you are safer explicitly
specifying the value.
--term:
Number of terminal characters in sample names (used to define specimens).
Default is 1
--no_ave:
Import all measurements (do not average repeat measurements)
--peak_AF:
Peak AF field used in ARM experiments
"""
import sys
import os
import argparse
import textwrap
import pandas as pd
import pmagpy.controlled_vocabularies3 as cv
from functools import reduce
from time import time, asctime
from funcs import shortpath
import pdb
# global top_dir, pkg_dir, data_dir, data_src, inp_dir, usr_configs_read
try: # get path names if set
from dmgui_au import pkg_dir, data_dir, data_src, inp_dir
usr_configs_read = True
except:
# if setup.py is running, don't issue warning
if sys.argv[0] != 'setup.py':
print("-W- Local path names have not been set. Please run setup.py")
usr_configs_read = False
nc_info_str ="""
Sample naming convention could not be determined. Choose from the list below:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
      [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
      [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
Enter number here: """
class Logger(object):
"""
log stdout to debug_inp.log
"""
def __init__(self):
self.terminal = sys.stdout
self.log = open("debug_inp.log", "a+")
self.log.write('\n{:-^80}\n\n'.format(' Starting session at {} '.format(asctime())))
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def start_logger():
sys.stdout = Logger()
def stop_logger():
sys.stdout.log.write('{:-^80}\n'.format(' Closing session '))
sys.stdout.log.close()
sys.stdout = sys.__stdout__
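# --- Editor sketch (illustrative usage, not part of the original module) ---
# Intended logging pattern: mirror stdout to debug_inp.log for the duration of
# a debugging session, then restore the real stdout. `inp_file` is a placeholder path.
def _example_logged_session(inp_file):
    start_logger()
    try:
        debug_inp(inp_file, noinput=True)
    finally:
        stop_logger()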
def debug_inp(inp_file, dropbox = False, noinput=False, usr_configs_read=None,
data_src=None, inp_dir=None, **kwargs):
"""Fixes .inp files
Parameters
----------
inp_file : filename
Name of .inp file; can be relative or absolute path.
data_src : path
Top-level directory to search for data (for debugging sam paths).
Defaults to the value provided in dmgui_au.conf, if applicable.
dropbox : bool, default is False
When searching for the correct paths to data files,
prioritize user Dropbox folder. If you have already
specified your data directory in the global configuration
(with setup.py) this does nothing. Defaults to False.
noinput : bool, default is False
bypass all user input; may result in unresolved issues
**kwargs : optional
Manually overwrite certain fields of the .inp file.
Possible fields are abbreviations of the actual header name,
as shown in the table below.
For calculated fields like `nc` and `term`, setting the
keyword argument to -1 will force these to be recalculated
by the module. This functionality is still in development,
so you may prefer to explicitly pass the correct values instead.
-------------------------------------------------------
kwargs --------------> inp fields
-------------------------------------------------------
sam_path sam_path
magic_codes field_magic_codes
loc location
nc naming_convention
term num_terminal_char
no_ave dont_average_replicate_measurements
peak_AF peak_AF
time time_stamp
Returns
-------
New .inp file
"""
inp_directory,inp_file_name = os.path.split(inp_file)
if inp_directory=='': inp_directory = '.'
inp_file = os.path.abspath(inp_file)
print("-I- Running on %s and changing CWD to '%s'" %
(inp_file_name, shortpath(inp_directory)))
os.chdir(inp_directory)
# first deal with any user-specified overrides
kwarg_map = {
'sam_path':'sam_path',
'magic_codes':'field_magic_codes',
'loc':'location',
'nc':'naming_convention',
'term':'num_terminal_char',
'no_ave':'dont_average_replicate_measurements',
'peak_AF':'peak_AF',
'time':'time_stamp'
}
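    # Example override (hypothetical values): debug_inp("TG001.inp", nc=2, term=1) maps,
    # via kwarg_map above, to rewriting naming_convention and num_terminal_char in the
    # .inp file; passing nc=-1 instead clears the field so it is recalculated below.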
force_rewrite_dict = dict.fromkeys(kwarg_map.values())
for key,value in kwargs.items():
if key in kwarg_map.keys():
force_rewrite_dict[kwarg_map[key]] = value
if any(force_rewrite_dict.values()):
df = pd.read_csv(inp_file, sep='\t', header=1, dtype=str)
old_values = {}
for key, value in force_rewrite_dict.items():
if value is not None:
if int(str(value)) == -1:
print("\n-I- Resetting {} to NULL...".format(key))
old_values[key] = df.ix[0][key]
df.ix[0][key]=None
else:
print("\n-I- Setting {} to {}...".format(key, value))
# df.ix[0][key]=str(value)
# print(df.ix[0][key])
df.ix[0][key] = value
inp_out = open(inp_file, 'w+')
inp_out.write("CIT\r\n")
df.to_csv(inp_out, sep="\t", header=True, index=False)
inp_out.close()
inpf = open(inp_file,'r')
inpl = inpf.read().splitlines()
header,sam_path,name_con,num_term_char = inpl[1].split('\t'),'','',''
for line in inpl[2:]:
if len(line.split('\t')) != len(header):
print("""\
-E- Some lines in file -- %s -- have different length entries than the header.
You will have to check this manually as this function is not supported yet. Aborting...
"""%inp_file)
return
if inpl[0]=='CIT':
if 'sam_path' not in header:
if noinput:
print("-W- No .sam file name or path in .inp file %s"%inp_file)
else:
sam_path = input("No .sam file name or path in .inp file %s, please provide a path: "%inp_file)
if 'naming_convention' not in header:
if noinput:
print('-W- No naming convention in .inp file %s'%inp_file)
else:
name_con = input(nc_info_str)
if 'num_terminal_char' not in header:
if noinput:
print("-W- Missing number of terminal characters in .inp file %s"%inp_file)
else:
num_term_char = input("""\
Missing number of terminal characters that define a specimen.
Please enter that number here or press enter to continue with default (=1): """)
df = pd.read_csv(inp_file, sep='\t', header=1, dtype=str)
for i in range(len(df.index)):
if sam_path=='': sam_path = df.ix[i]['sam_path']
while not os.path.isfile(str(sam_path)):
directory = os.path.split(str(sam_path))[0]
sam_file = os.path.split(str(sam_path))[1]
if dropbox or usr_configs_read:
if usr_configs_read:
search_path = data_src
elif dropbox:
if os.path.isfile(os.path.expanduser("~/.dropbox/info.json")):
drpbx_info_file = os.path.expanduser("~/.dropbox/info.json")
drpbx_info = open(drpbx_info_file, 'r')
drpbx_dict = drpbx_info.read().splitlines()[0]
drpbx_info.close()
drpbx_dict=dict(eval(drpbx_dict.replace('false','False').replace('true','True')))
drpbx_path=drpbx_dict['personal']['path']
else:
drpbx_path = input("Option '-dropbox' given but there was a problem finding your Dropbox folder.\n"
"Please provide the path to your Dropbox folder here (press Enter to skip): ")
if os.path.isdir(os.path.join(drpbx_path,"Hargraves_Data")):
drpbx_path = os.path.join(drpbx_path,"Hargraves_Data")
search_path = drpbx_path
for root, dirs, files in os.walk(search_path):
if sam_file in files:
new_directory=root
df.ix[i]['sam_path'] = os.path.join(new_directory,sam_file)
sam_path = df['sam_path'].tolist()[0]
break
if os.path.isfile(str(sam_path)):
break
if noinput:
print("-W- Could not resolve the file path in .inp file %s. Aborting..."%inp_file_name)
return
d_or_f = input("The .sam file path in inp_file %s does not exist.\n\n"
"Was given directory:\n\n %s\n\nand file:\n\n %s\n\n"
"Is the [f]ile name or [d]irectory bad? "
"If both, correct the file name first. ( [d] / f , or s to skip): "%(inp_file_name,directory,sam_file))
if d_or_f=='s':
return
if d_or_f=='f':
new_file_name = input("Please input the correct file name for the .sam file: ")
df['sam_path'] = os.path.join(directory,new_file_name)
else:
new_directory = input("If the new directory is known input here. Else just leave blank and the current directory and subdirectories will be searched for file %s and path will be corrected: "%(sam_file))
if new_directory=='':
for root, dirs, files in os.walk(os.getcwd()):
if sam_file in files: break
new_directory=root
df.ix[i]['sam_path'] = os.path.join(new_directory,sam_file)
sam_path = df['sam_path'].tolist()[0]
vocab = cv.Vocabulary()
# pdb.set_trace()
meth_codes, _ = vocab.get_meth_codes()
        if isinstance(meth_codes, pd.DataFrame):
            meth_codes = meth_codes.index.tolist()
        if meth_codes is None:
            meth_codes = 'FS-FD:FS-H:FS-LOC-GPS:FS-LOC-MAP:SO-POM:SO-ASC:SO-MAG:SO-SUN:SO-SM:SO-SIGHT'.split(':')
meth_codes_to_keep = []
for meth_code in df.ix[i]['field_magic_codes'].split(':'):
if meth_code in meth_codes: meth_codes_to_keep.append(meth_code)
        df.ix[i]['field_magic_codes'] = ':'.join(meth_codes_to_keep)
sam_contents = open(sam_path, 'r')
sl = sam_contents.read().splitlines()
sam_contents.close()
if 'CIT' not in sl:
sl = sl[2:]
else:
sl = sl[3:]
if '' in sl:
sl.remove('')
nc = df.ix[i]['naming_convention']
if pd.isna(nc): # force rewrite
site_name = os.path.basename(os.path.dirname(sam_path))
if site_name not in sl[0]:
if noinput:
print("-W- Trouble with site name {} -- does not match samples (e.g. {}).".format(site_name,sl[0]))
print("-W- Naming convention reset to old value of {}".format(old_values['naming_convention']))
else:
site_name = input("Trouble with site name {} -- does not match samples (e.g. {}).\n"
"Input correct site name: ".format(site_name,sl[0]))
            # catch delimiter if it is appended to the site name (sometimes the case)
if not site_name[-1].isalnum():
if site_name[-1]=='-':
nc = int(2)
elif site_name[-1]=='.':
nc = int(3)
else:
samp_names = []
for samp in sl:
samp_names.append(samp.partition(site_name)[-1])
if all([not x[0].isalnum() for x in samp_names]):
if all([x[0]=='-' for x in samp_names]):
nc = int(2)
if all([x[0]=='.' for x in samp_names]):
nc = int(3)
if not pd.isna(nc):
new_nc = nc
df.ix[i]['naming_convention']=str(new_nc)
else:
if noinput:
print("-W- Could not determine correct naming convention...resetting to old value of {}".format(old_values['naming_convention']))
nc = old_values['naming_convention']
new_nc = nc
else:
nc = input(nc_info_str)
new_nc = nc
df.ix[i]['naming_convention']=str(new_nc)
if int(nc) > 7 or int(nc) < 1:
new_nc = input(nc_info_str)
df.ix[i]['naming_convention']=new_nc
nt = df.ix[i]['num_terminal_char']
if pd.isna(nt): # force rewrite
nt_ctr = 0
lastchar = pd.Series([t[-1] for t in sl])
the_rest = pd.Series([t[0:-1] for t in sl])
# nt_rename_timeout = 0
while True:
if len(the_rest)==len(the_rest.unique()) and len(lastchar.unique())<3:
try:
nt_ctr += 1
lastchar = pd.Series([t[-1] for t in the_rest])
                        the_rest = pd.Series([t[0:-1] for t in the_rest])
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
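# Note on the two limiting cases above: with a very large c1 the ridge penalty dominates and
# the fitted coefficients are shrunk to (nearly) zero, while with a very large c2 they are
# pulled onto the supplied offset vector -- which is exactly what the assertions check.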
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
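# The six cases above show that qth_survival_times returns the index label at which the
# survival function first falls to q or below: positional labels (0, 1, 2, ...) when the
# input has no explicit index, and the supplied index values (10, 20, 30, ...) otherwise.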
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
        pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
    )
'''
1. Remove stray jamo (lone consonants/vowels) and special characters (including periods and commas)
2. Fix spacing
3. Fix (replace) words
4. Extract nouns and adjectives with a morphological analyzer
5. FastText embedding
   - with a custom dictionary added
'''
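# Usage sketch (hypothetical file names, not part of the original module):
#
#     gp = GlowpickPreprocessing(embed_size=100)
#     tokens = gp.fit(reviews, wordfix_path='word_dict.json', posfix_path='pos_dict.json')
#     # 'tokens' is a list of token lists (nouns/adjectives), ready for FastText training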
from chatspace import ChatSpace
from gensim.models import FastText
from konlpy.tag import Kkma
import json
import re
import pandas as pd
import numpy as np
class GlowpickPreprocessing(object):
def __init__(self, embed_size=100, random_state=223):
        self.seed = random_state
self.embed_size = embed_size
self.kkma = Kkma()
def fit(self, x: list, wordfix_path=None, posfix_path=None):
'''
args:
- x: texts
- wordfix_path: replace word directory path
- posfix_path: filtering pos directory path
return:
- texts: preprocessed texts
'''
# stopword
texts = list(map(self.stopword, x))
print('[{0:15s}]: complete stopword'.format('PREPROCESSING'))
# spacefix
texts = self.spacefix(texts)
print('[{0:15s}]: complete spacefix'.format('PREPROCESSING'))
        # wordfix
if wordfix_path:
texts = self.wordfix(texts, wordfix_path)
print('[{0:15s}]: complete wordfix'.format('PREPROCESSING'))
# posfix
if posfix_path:
texts = self.posfix(texts, posfix_path)
print('[{0:15s}]: complete posfix'.format('PREPROCESSING'))
return texts
def stopword(self, x):
pattern1 = '([ㄱ-ㅎㅏ-ㅣ]+)'
pattern2 = '[^\w\s,.]'
pattern3 = '[\d]'
repl = ''
x = re.sub(pattern=pattern1, repl=repl, string=x)
x = re.sub(pattern=pattern2, repl=repl, string=x)
x = re.sub(pattern=pattern3, repl=repl, string=x)
return x
def spacefix(self, x):
spacer = ChatSpace()
x = spacer.space(x, batch_size=64)
x = pd.Series(x).str.split(' ').tolist()
return x
def wordfix(self, x, path):
# replace old to new
def word_replace(x, word_dict):
for k, v in word_dict.items():
if k in x:
x = x.replace(k, v)
return x
# load word dictionary
word_dict = json.load(open(path,'r'))
# replace word
x = pd.Series(x).apply(lambda x: ' '.join(x))
x = x.apply(lambda sent: word_replace(sent, word_dict))
x = x.str.split(' ').tolist()
return x
def posfix(self, x, path):
# pos filtering
def pos_filtering(x, pos, in_pos, stopwords, words):
remove_value = []
for k, v in x:
if len(k) == 1:
remove_value.append((k,v))
elif (v in pos) & (k in stopwords):
remove_value.append((k,v))
elif (v in in_pos) &(k not in words):
remove_value.append((k,v))
elif (v not in pos) & (v not in in_pos):
remove_value.append((k,v))
for v in remove_value:
x.remove(v)
try:
return list(np.array(x)[:,0])
except: # NT : No Token
return ['NT']
# load pos dictionary
pos_dict = json.load(open(path,'r'))
        x = pd.Series(x)
import glob
import math
import brewer2mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
SPINE_COLOR = 'gray'
#####################################################
# Process average from files #
#####################################################
def process_average(folder, scenarios, labels, header):
columns = ['property']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=header, names=columns)['property']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result = pd.concat(dfs1, axis=1)
return result
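# Example (hypothetical arguments): one column of the measured property per scenario,
# labelled for plotting:
#
#     avg = process_average('results/latency', ['s1', 's2'], ['Sourcey', 'Fabric'], header=4)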
class Plotter():
#####################################################
# Latexify #
#####################################################
@staticmethod
def latexify(fig_width=None, fig_height=None, columns=1, fullwidth=False):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
        fig_width : float, optional, inches
        fig_height : float, optional, inches
        columns : {1, 2}
        fullwidth : bool, optional
            If True, target the full two-column width at half the golden-ratio height.
        """
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
if fullwidth:
fig_width = 3.39*2 if columns==1 else 6.9 # width in inches
else:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio
if fullwidth:
fig_height = fig_width*golden_mean/2.0 # height in inches
else:
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
            print("WARNING: fig_height too large: {} so will reduce to {} inches.".format(
                fig_height, MAX_HEIGHT_INCHES))
fig_height = MAX_HEIGHT_INCHES
params = {
'backend': 'ps',
'text.latex.preamble': ['\\usepackage{amssymb}'],
'axes.labelsize': 5, # fontsize for x and y labels (was 10)
'axes.titlesize': 5,
'lines.markersize' : 3,
'lines.markeredgewidth': 0.3,
'legend.fontsize': 4, # was 10
'text.usetex': True,
'legend.edgecolor': 'w',
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif',
'grid.linestyle': 'dashed',
'grid.color': 'grey',
'lines.dashed_pattern' : [150, 150],
'xtick.color': 'k',
'ytick.color': 'k',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.minor.width': 0.05,
'ytick.minor.width': 0.05,
'xtick.major.width': 0.1,
'ytick.major.width': 0.1,
'xtick.labelsize': 4,
'ytick.labelsize': 4,
'lines.linewidth' : 0.2,
'grid.linewidth': 0.01,
'axes.linewidth': 0.2,
'errorbar.capsize' : 1,
'xtick.minor.visible': False, # visibility of minor ticks on x-axis
# 'ytick.minor.visible': False, # visibility of minor ticks on x-axis
'boxplot.notch': False,
'boxplot.vertical': True,
'boxplot.whiskers': 1.5,
'boxplot.bootstrap': None,
'boxplot.patchartist': False,
'boxplot.showmeans': False,
'boxplot.showcaps': True,
'boxplot.showbox': True,
'boxplot.showfliers': True,
'boxplot.meanline': False,
'boxplot.flierprops.color': 'lightgrey',
'boxplot.flierprops.marker': 'o',
'boxplot.flierprops.markerfacecolor': 'none',
'boxplot.flierprops.markeredgecolor': 'lightgrey',
'boxplot.flierprops.markersize': 1,
'boxplot.flierprops.linestyle': 'none',
'boxplot.flierprops.linewidth': 0.1,
'boxplot.boxprops.color': 'C2',
'boxplot.boxprops.linewidth': 0.2,
'boxplot.boxprops.linestyle': '-',
'boxplot.whiskerprops.color': 'C2',
'boxplot.whiskerprops.linewidth': 0.2,
'boxplot.whiskerprops.linestyle': '-',
'boxplot.capprops.color': 'C2',
'boxplot.capprops.linewidth': 0.2,
'boxplot.capprops.linestyle': '-',
'boxplot.medianprops.color': 'C2',
'boxplot.medianprops.linewidth': 0.20,
'boxplot.medianprops.linestyle': '-',
'boxplot.meanprops.color': 'C2',
'boxplot.meanprops.marker': '^',
'boxplot.meanprops.markerfacecolor': 'C2',
'boxplot.meanprops.markeredgecolor': 'C2',
'boxplot.meanprops.markersize': 6,
'boxplot.meanprops.linestyle': 'none',
'boxplot.meanprops.linewidth': 0.20,
}
matplotlib.rcParams.update(params)
# for spine in ['top', 'right']:
# ax.spines[spine].set_visible(False)
# for spine in ['left', 'bottom']:
# ax.spines[spine].set_color(SPINE_COLOR)
# ax.spines[spine].set_linewidth(0.1)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# # Or if you want different settings for the grids:
# ax.grid(which='minor', alpha=0.2)
# ax.grid(which='major', alpha=0.5)
# for axis in [ax.xaxis, ax.yaxis]:
# axis.set_tick_params(direction='out', color=SPINE_COLOR)
# return ax
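    # Typical call order (sketch): latexify() only updates matplotlib rcParams, so it must
    # run before the figures are created, e.g.
    #
    #     Plotter.latexify(columns=1, fullwidth=True)
    #     Plotter.latency_avg_4methods(f1, f2, f3, f4, scenarios, labels, 'out', 'title', 10)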
#####################################################
# Latency - Mean - 4 methods #
#####################################################
@staticmethod
def latency_avg_4methods(folder1, folder2, folder3, folder4, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
#print('result1\n', result1.describe())
#print('result1\n', result1.to_string())
std1 = result1.std()
ax1 = result1.mean().plot(label="Sourcey", legend = True, yerr=std1, color="red")
ax1.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs2 += [df]
result2 = pd.concat(dfs2, axis=1)
#print('result2\n', result2.describe())
#print('result2\n', result2.to_string())
std2 = result2.std()
ax2 = result2.mean().plot(label="Sourcey Fabric", legend = True, yerr=std2, color="orange")
ax2.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs3 = []
for scenario in scenarios:
file = glob.glob(folder3 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs3 += [df]
        result3 = pd.concat(dfs3, axis=1)
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
tm.assert_almost_equal(result0, result1)
def test_nans_count():
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).count()
tm.assert_almost_equal(
result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
)
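# Note: rolling(...).count() counts non-NaN values per window, so the final width-50 window
# spans the whole series and the result equals the 30 finite values kept in obj[10:-10];
# tiny example: Series([1.0, np.nan, 2.0]).rolling(3, min_periods=1).count().iloc[-1] == 2.0.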
@pytest.mark.parametrize(
"roll_func, kwargs",
[
["mean", {}],
["sum", {}],
["median", {}],
["min", {}],
["max", {}],
["std", {}],
["std", {"ddof": 0}],
["var", {}],
["var", {"ddof": 0}],
],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
**kwargs
)
expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
**kwargs
)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
def test_min_periods_count(series):
result = series.rolling(len(series) + 1, min_periods=0).count()
expected = series.rolling(len(series), min_periods=0).count()
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@pytest.mark.parametrize(
"roll_func, kwargs, minp",
[
["mean", {}, 15],
["sum", {}, 15],
["count", {}, 0],
["median", {}, 15],
["min", {}, 15],
["max", {}, 15],
["std", {}, 15],
["std", {"ddof": 0}, 15],
["var", {}, 15],
["var", {"ddof": 0}, 15],
],
)
def test_center(roll_func, kwargs, minp):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(
**kwargs
)
expected = getattr(
        concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func
    )(**kwargs)[9:].reset_index(drop=True)
    tm.assert_series_equal(result, expected)
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
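        # The constructor strings exercised above are case-insensitive and interchangeable:
        # Period('2005Q1'), Period('05q1'), Period('1Q2005') and
        # Period(year=2005, quarter=1, freq='Q') all compare equal.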
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
def test_constructor(self):
ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 9)
ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 4 * 9)
ii = | PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') | pandas.tseries.period.PeriodIndex |
import json
import pandas as pd
import os
test_score = "tianchi_datasets/test.json"
train_data = "tianchi_datasets/track3_round1_train.tsv"
test_data = "tianchi_datasets/track3_round1_testA.tsv"
def create_new_traindata(test_score, train_data, test_data):
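# Pseudo-labelling: keep test predictions with confident scores (> 0.95 -> label 1,
# < 0.05 -> label 0), write them out as an extra labelled TSV, then append that file
# to the original training data.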
tmp = []
dir_path = os.getcwd()
with open(os.path.join(dir_path, test_score), "r", encoding="utf-8") as f:
for line in f:
content = json.loads(line)
score = content["label"]
idx = content["id"]
if score > 0.95:
tmp.append([idx, 1])
elif score < 0.05:
tmp.append([idx, 0])
else:
tmp.append([idx, "unk"])
with open(os.path.join(dir_path, test_data), "r", encoding="utf-8") as fr:
with open("tianchi_datasets/track3_round1_testA_label.tsv", "w", encoding="utf-8") as fw:
data = fr.readlines()
for i in range(len(data)):
content = data[i].strip()
label = tmp[i][1]
if label in [0, 1]:
lis = content + "\t" + str(label) + "\n"
else:
lis = ""
fw.write(lis)
train = pd.read_csv(os.path.join(dir_path, train_data), sep="\t", header=None,
names=["sentence1", "sentence2", "labels"])
test = pd.read_csv("./tianchi_datasets/track3_round1_testA_label.tsv", sep="\t", header=None,
names=["sentence1", "sentence2", "labels"])
new_data = | pd.concat([train, test], axis=0) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns  # data visualization
import warnings
import gc  # garbage collector interface
warnings.simplefilter('ignore')
matplotlib.rcParams['figure.dpi'] = 100
sns.set()
building = pd.read_csv(r'C:\Users\Lab408\Desktop\try_model_ashrae_energy_prediction_kaggle/building_metadata_forsmalldata.csv')#load data
weather_train = pd.read_csv(r'C:\Users\Lab408\Desktop\try_model_ashrae_energy_prediction_kaggle/weather_train_smalldata.csv')
weather_test = pd.read_csv(r'C:\Users\Lab408\Desktop\try_model_ashrae_energy_prediction_kaggle/weather_test_smalldata.csv')
train = pd.read_csv(r'C:\Users\Lab408\Desktop\try_model_ashrae_energy_prediction_kaggle/small_data_train_energy.csv')
train.head()
#test_1 = pd.read_csv('C:/Users/fishi_000/Desktop/try_model_ashrae_energy_prediction_kaggle/test_smallest_data.csv')
test = | pd.read_csv(r'C:\Users\Lab408\Desktop\try_model_ashrae_energy_prediction_kaggle/test_smallest_data.csv') | pandas.read_csv |
import pickle
from datetime import datetime
import re
import time
import getpass
import os
import sys
#requirements
import json
import pandas as pd
import helium as h
from selenium.common.exceptions import NoSuchElementException
import pathlib
pd.set_option("max_rows",100)
#pd.set_option("display.max_columns",100)
pd.set_option("max_colwidth",1000)
def get_info(df,cod_gr):
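# Collect the group's name, leader and CCRG code from the scraped GrupLAC table
# into a single-row DataFrame.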
# leader name ('Nombre Líder') may be missing from the table
try:
nombre_lider = df['Nombre Líder'].dropna().iloc[0]
except IndexError:
nombre_lider = 'Sin dato Registrado'
info= {
'Nombre_Grupo' : df['Nombre Grupo'].dropna().iloc[0],
'Nombre_Lider' : nombre_lider,
'CCRG Grupo' : cod_gr
}
dfi = pd.DataFrame(info, index=[0])
return dfi
# extra headers by products
DBEH = {
'INFO_GROUP': 'TABLE',
'MEMBERS':['Identificación', 'Nacionalidad', 'Tiene afiliación con UdeA', 'Si no tiene afiliación UdeA diligencie el nombre de la Institución','Nro. Horas de dedicación semanales que avala el Coordinador de grupo'], # 2
'NC_P': {'ART_IMP_P': {'ART_P_TABLE':['URL','DOI','Si no tiene URL o DOI agregue una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ART_ELE_P': {'ART_E_P_TABLE':['URL','DOI','Si no tiene URL o DOI agregue una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'LIB_P': {'LIB_P_TABLE':['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro', 'Financiador(es) de la publicación','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CAP_LIB_P': {'CAP_LIB_P_TABLE':['Proyecto de investigación del cual se derivó el libro que contiene el capítulo (Código-Título)','Financiador del proyecto del cual se derivó el libro que contiene el capítulo','Financiador de la publicación','Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'NOT_CIE_P': {'NOT_CIE_P_TABLE':['URL','DOI','Si no tiene URL o DOI genere una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PAT_P': {'PAT_P_TABLE':['Autores', 'Examen de fondo favorable','Examen preliminar internacional favorable','Adjunta opiniones escritas de la bUsqueda internacional','Contrato de explotación','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 2 3 -1
'PRD_INV_ART_P': {'PAAD_P_TABLE':['Autores','Tiene certificado institucional de la obra','Tiene certificado de la entidad que convoca al evento en el que participa','Tiene certificado de la entidad que convoca al premio en el que obtiene','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 2 3 -1
'VAR_VEG_P': {'VV_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'VAR_ANI_P': {'VA_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'RAZ_PEC_P': {'RAZ_PEC_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'TRA_FIL_P': {'TRA_FIL_P_TABLE':['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro','Financiador(es) de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}
},
'DTI_P': {'DIS_IND_P': {'DI_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CIR_INT_P': {'ECI_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SOFT_P': {'SF_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','TRL','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'NUTRA_P': {'NUTRA_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # add
'COL_CIENT_P': {'COL_CIENT_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo', '¿El producto cumple con los requisitos para ser avalado?']},
'REG_CIENT_P': {'REG_CIENT_P_TABLE':['Autores','Contrato licenciamiento (si aplica)','Agregue las evidencias verificadas al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PLT_PIL_P': {'PP_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PRT_IND_P': {'PI_P_TABLE':['Autores','Nombre comercial (si aplica)','TRL','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SEC_IND_P': {'SE_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PROT_VIG_EPID_P': {'PROT_VIG_EPID_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'EMP_BSE_TEC_P': {'EBT_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'EMP_CRE_CUL_P': {'ICC_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'INN_GES_EMP_P': {'IG_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'INN_PROC_P': {'IPP_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'REG_NORM_REGL_LEG_P': {'RNR_P_TABLE':['Autores','Contrato (si aplica)','Convenio (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CONP_TEC_P': {'CONP_TEC_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'REG_AAD_P': {'AAAD_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SIG_DIS_P': {'SD_P_TABLE':['Autores','Contrato licenciamiento (si aplica)','Agregue las evidencias verificadas al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']}
},
'ASC_P': {'GEN_CONT_IMP_P': {'GC_I_P_TABLE_5':['Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PASC_P': {'PASC_FOR_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_TRA_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_GEN_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_CAD_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'DC_P': {'DC_CD_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_CON_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_TRA_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_DES_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}},
'FRH_P': {'TES_DOC_P': {'TD_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'TES_MAST_P': {'TM_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'TES_PREG_P': {'TP_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'ASE_PRG_ACA_P': {'APGA_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ASE_CRE_CUR_P': {'ACC_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ASE_PRG_ONDAS_P': {'APO_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}},
'NC' : {'LIB' : {'LIB_T_AVAL_TABLE': ['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro', 'Financiador(es) de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CAP_LIB':{'CAP_LIB_T_AVAL_TABLE':['Proyecto de investigación del cual se derivó el libro que contiene el capítulo (Código-Título)','Financiador del proyecto del cual se derivó el libro que contiene el capítulo','Financiador de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}}
}
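# Map of 1-based column offsets to Excel column letters (data starts at column C);
# used below to build A1-style cell ranges.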
d = {
'1': 'C',
'2': 'D',
'3': 'E',
'4': 'F',
'5': 'G',
'6': 'H',
'7': 'I',
'8': 'J',
'9': 'K',
'10': 'L',
'11': 'M',
'12': 'N',
'13': 'O',
'14': 'P',
'15': 'Q',
'16': 'R',
'17': 'S',
'18': 'T',
'19': 'U',
'20': 'V'
}
def clean_df(df):
'Remove unnecessary columns (Unnamed, Revisar, Avalar integrante).'
c=[x for x in df.columns if x.find('Unnamed:') == -1 and x.find('Revisar') == -1 and x.find('Avalar integrante') == -1]
dfc=df[c]
return dfc
def clean_tables(df):
#droplevel
try:
df = df.droplevel(0,axis=1)
except ValueError:
pass
#ignore the first (NaN) and last ('resultados' summary) rows
df=df[1:-1]
#remove unnamed columns and revisar
cols = [x for x in df.columns if x.find('Unnamed:') == -1 and x.find('Revisar') == -1 and x.find('Avalar integrante') == -1]
return df[cols]
def rename_col(df,colr,colf):
df.rename(columns = {colr: colf,}, inplace = True)
return df
# WORKSHEET 4 - 12.
def format_df(df, sheet_name, start_row, writer,eh, veh = None):
'format headers'
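# Writes one product DataFrame per sheet, merges the 'Vicerrectoría' / 'Centro' header
# bands, appends the extra validation columns (eh) and adds Sí/No dropdown validations.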
df.to_excel(writer,sheet_name, startrow = start_row+1, startcol=2,index = False)
# Get the xlsxwriter workbook and worksheet objects.
worksheet = writer.sheets[sheet_name]
merge_format = workbook.add_format({
'bold': 1,
'border':1,
'text_wrap': True,
'align': 'center',
'valign': 'vcenter',
'font_color': 'blue'})
# merge cells for the 'Información suministrada por la Vicerrectoría' header
if not df.empty:
start,end = 1,df.shape[1]
else:
start,end = 1,1
m_range = d.get(str(start)) + str(start_row + 1) + ':' + d.get(str(end)) + str(start_row +1)
worksheet.merge_range(m_range, 'Información suministrada por la Vicerrectoría de Investigación', merge_format)
# merge cells for the extra validation headers
_m_range = d.get(str(end+1)) + str(start_row +1) + ':' + d.get(str(end+len(eh))) + str(start_row +1)
worksheet.merge_range(_m_range, 'Validación del Centro, Instituto o Corporación', merge_format)
worksheet.set_row_pixels(start_row+1, 120)
#worksheet.set_column('C:C',30,general)
# SET COLUMN FORMATS BY SHEET
if sheet_name=='3.Integrantes grupo':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('D:K',15,general)
if sheet_name=='4.ART y N':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('M:O',20, general)
if sheet_name=='5.LIB y LIB_FOR':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('I:P',20,general)
if sheet_name=='6.CAP':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:H',10,general)
worksheet.set_column('I:K',18,general)
worksheet.set_column('J:P',20,general)
if sheet_name=='7.Patente_Variedades':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:I',10,general)
worksheet.set_column('J:K',20,general)
worksheet.set_column('L:S',20,general)
if sheet_name=='8.AAD':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('F:K',10,general)
worksheet.set_column('L:P',25,general)
if sheet_name=='9.Tecnológico':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:I',10,general)
worksheet.set_column('J:S',18,general)
if sheet_name=='10.Empresarial':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:H',10,general)
worksheet.set_column('I:N',20,general)
if sheet_name=='11.ASC y Divulgación':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',28,general)
worksheet.set_column('I:I',15,general)
worksheet.set_column('J:N',20,general)
if sheet_name=='12.Formación y programas':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',25,general)
worksheet.set_column('D:G',10,general)
worksheet.set_column('L:O',15,general)
worksheet.set_column('N:N',20,general)
worksheet.write(start_row+1, 0, 'VoBo de VRI', merge_format)
# Add a header format.
fmt_header = workbook.add_format({
'bold': True,
'align': 'center',
'text_wrap': True,
'valign': 'vcenter',
'fg_color': '#33A584',
'font_color': '#FFFFFF',
'border': 1})
# Write the column headers with the defined format.
for col_num, value in enumerate(df.columns.values):
worksheet.write(start_row+1, col_num + 2, value, fmt_header)
# write extra headers
for col_num, value in enumerate(eh):
worksheet.write(start_row+1, col_num + df.shape[1] + 2, value, fmt_header)
v_range = 'A' + str(start_row +3) + ':' + 'A' + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if sheet_name !='3.Integrantes grupo':
v_range = d.get(str(end+len(eh))) + str(start_row +3) + ':' + d.get(str(end+len(eh))) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
# Members sheet (Integrantes)
if veh == 0:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-2)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
# Patents sheet (patentes)
if veh == 1 :
v_range = d.get(str(end+len(eh)-3)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-3)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-4)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-4)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-5)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-5)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if veh ==2:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-2)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if veh == 3:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-3)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-3)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-4)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-4)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-5)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
##### WORKSHEET 2
def format_info(df, writer, sheet_name):
'''format worksheet'''
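# Builds the identification sheet: inserts the logo, the UdeA titles and the blocks
# for the group, the Centro/Instituto/Corporación and the person filling in the form.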
workbook=writer.book
normal=workbook.add_format({'font_size':12,'text_wrap':True})
merge_format = workbook.add_format({
'bold': 1,
'border':1,
'text_wrap': True,
'align': 'center',
'valign': 'vcenter',
'font_color': 'black'})
fmt_header = workbook.add_format({
'align': 'center',
'text_wrap': True,
'valign': 'top',
'fg_color': '#33A584',
'font_color': '#FFFFFF',
'border': 1})
# write df
start_row = 6
start_col = 3
df.to_excel(writer, sheet_name, startrow =start_row, startcol=start_col,index = False)
# get worksheet object
worksheet = writer.sheets[sheet_name]
for col_num, value in enumerate(df.columns.values):
worksheet.write(start_row, col_num + 3, value, fmt_header)
#Prepare image insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('A:A', 15)
worksheet.set_column('B:B', 15)
logo_path = str(pathlib.Path(__file__).parent.absolute()) + '/templates/img/logo.jpeg'
worksheet.insert_image('A1', logo_path)
# title 1 UNIVERSIDAD DE ANTIOQUIA
title = workbook.add_format({'font_size':16,'center_across':True})
# title 2 Vicerrectoria de Investigación
title2 = workbook.add_format({'font_size':16,'center_across':True})
# sub title 2 datos identificacion contacto
title3 = workbook.add_format({'font_size':12,'center_across':True})
# merge d1:f1
worksheet.merge_range('D1:F1', 'UNIVERSIDAD DE ANTIOQUIA', title)
# merge d2:f2
worksheet.merge_range('D2:F2', ' Vicerrectoria de Investigación', title2)
# merge d3:f3
worksheet.merge_range('D3:F3', ' Datos de identificación y contacto', title3)
# D5: F5
worksheet.merge_range('D5:E5','Número inscripcion a la convocatoria:',merge_format)
worksheet.write('F5','#',merge_format)
# d6:f6
worksheet.merge_range('D6:F6','Identificación del Grupo',merge_format)
# d9:f9
worksheet.merge_range('D10:F10','Identificación del Centro de Investigación',merge_format)
# write
a='Nombre del Centro, Instituto o Corporación'
worksheet.write('D11',a, fmt_header)
worksheet.set_column('D11:D11',30, fmt_header)
b='Nombre completo del Jefe de Centro, Instituto o Corporación'
worksheet.write('E11',b, fmt_header)
worksheet.set_column('E11:E11',30, fmt_header)
c='Email'
worksheet.write('F11',c, fmt_header)
worksheet.set_column('F11:F11',30, fmt_header)
# d13:f13
worksheet.merge_range('D13:F13','Identificación de quien diligencia el formato',merge_format)
a='Nombre completo del encargado de diligenciar el formato'
worksheet.write('D14',a, fmt_header)
worksheet.set_column('D14:D14',30, normal)
b='Email'
worksheet.write('E14',b, fmt_header)
worksheet.set_column('E14:E14',30, normal)
c='Teléfono de contacto'
worksheet.write('F14',c, fmt_header)
worksheet.set_column('F14:F14',30, normal)
# WORKSHEET 1
def format_ptt(workbook):
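# Builds the presentation sheet ('1.Presentación'): logo, titles, the instructions text
# and a template DataFrame pulled from the InstituLAC GitHub repository.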
#Global variables
abstract_text='VERIFICACIÓN DE INFORMACIÓN PARA OTORGAR AVAL A LOS GRUPOS DE INVESTIGACIÓN E INVESTIGADORES PARA SU PARTICIPACIÓN EN LA CONVOCATORIA 894 DE 2021 DE MINCIENCIAS'
instructions='''Los grupos de investigación e investigadores de la Universidad de Antioquia que deseen participar en la Convocatoria Nacional para el reconocimiento y medición de grupos de investigación, desarrollo tecnológico o de innovación y para el reconocimiento de investigadores del Sistema Nacional de Ciencia, Tecnología e Innovación - SNCTI, 894 de 2021, deben presentar la información actualizada en las plataformas CvLAC y GrupLAC validada por el Centro de Investigación en el presente formato, y respaldada en el repositorio digital de evidencias dispuesto para este fin, para la obtención del aval institucional por parte de la Vicerrectoría de Investigación.
La información a validar corresponde a los años 2019-2020 y aquella que entra en la ventana de observación y debe ser modificada según el Modelo de medición de grupos. La validación comprende:
1. Verificación de la vinculación de los integrantes a la Universidad de Antioquia y al grupo de investigación. Diligenciar los campos solicitados.
2. Verificación de la producción de GNC, DTeI, ASC y FRH, en los campos habilitados en cada hoja de este formato. Las evidencias requeridas para los productos deben ser anexadas al repositorio digital asignado al grupo y se deben enlazar a cada producto.
Este documento debe ser diligenciado en línea.
De antemano, la Vicerrectoría de Investigación agradece su participación en este ejercicio, que resulta de vital importancia para llevar a buen término la Convocatoria de Reconocimiento y Medición de Grupos de Investigación
'''
#Final part of the first sheet
datos=clean_df(pd.read_excel('https://github.com/restrepo/InstituLAC/raw/main/data/template_data.xlsx'))
#Capture xlsxwriter object
# IMPORTANT → workbook is the same object used in the official document at https://xlsxwriter.readthedocs.io
#workbook=writer.book
#***************
#Styles as explained in https://xlsxwriter.readthedocs.io
title=workbook.add_format({'font_size':28,'center_across':True})
subtitle=workbook.add_format({'font_size':24,'center_across':True})
abstract=workbook.add_format({'font_size':20,'center_across':True,'text_wrap':True})
normal=workbook.add_format({'font_size':12,'text_wrap':True})
#***************
#Creates the first work-sheet
#IMPORTANT → worksheet is the same object used in the official document at https://xlsxwriter.readthedocs.io
worksheet=workbook.add_worksheet("1.Presentación")
#Prepare image insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('A:A', 15)
worksheet.set_column('B:B', 15)
logo_path = str(pathlib.Path(__file__).parent.absolute()) + '/templates/img/logo.jpeg'
worksheet.insert_image('A1', logo_path)
#Prepare text insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('C:C', 140,general)
worksheet.set_row_pixels(0, 60)
#Texts
worksheet.write('C1', 'UNIVERSIDAD DE ANTIOQUIA',title)
worksheet.set_row_pixels(2, 60)
worksheet.write('C3', 'VICERRECTORÍA DE INVESTIGACIÓN',subtitle)
worksheet.set_row_pixels(5, 100)
worksheet.write('C6', abstract_text,abstract)
worksheet.set_row_pixels(8, 40)
worksheet.write('C9','PRESENTACIÓN DEL EJERCICIO',
workbook.add_format({'font_size':18,'center_across':True}) )
worksheet.set_row_pixels(10, 320)
worksheet.write('C11',instructions,normal)
#*** ADD PANDAS DATAFRAME IN SPECIFIC POSITION ****
#Add a data Frame in some specific position. See → https://stackoverflow.com/a/43510881/2268280
# See also → https://xlsxwriter.readthedocs.io/working_with_pandas.html
writer.sheets["1.Presentación"]=worksheet
datos.to_excel(writer,sheet_name="1.Presentación",startrow=12,startcol=2,index=False)
#**************************************************
#Fix columns heights for long text
worksheet.set_row_pixels(17, 40)
worksheet.set_row_pixels(18, 40)
worksheet.set_row_pixels(19, 40)
worksheet.set_row_pixels(20, 40)
worksheet.set_row_pixels(22, 40)
def login(user,password,institution='UNIVERSIDAD DE ANTIOQUIA',sleep=0.8,headless=True):
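# Logs into the InstituLAC platform with helium/Selenium, extends the session cookie's
# expiry, checks for failed-login errors and navigates to the 'Grupos Avalados' listing.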
#def login(user,password): → browser, otro, otro
# MAIN CODE
# login =
# name_ins =
# usser =
# passw=
# login
browser = h.start_firefox('https://scienti.minciencias.gov.co/institulac2-war/',headless=headless)
#browser = h.start_firefox('https://scienti.minciencias.gov.co/institulac2-war/')
time.sleep(sleep)
h.click('Consulte Aquí')
time.sleep(sleep)
h.write(institution,into='Digite el nombre de la Institución') # name ins
time.sleep(sleep)
h.click('Buscar')
time.sleep(sleep)
h.click(browser.find_element_by_id('list_instituciones'))
time.sleep(sleep)
time.sleep(sleep)
h.select('seleccione una',institution) # name_ins
time.sleep(sleep)
h.write(user,into='Usuario') # user
time.sleep(sleep)
h.write(password, into='Contraseña') # passw
time.sleep(sleep)
h.click(h.Button('Ingresar'))
# cookie injection
time.sleep(sleep)
# implementation cookie injection
# get current cookie and store
new_cookie=browser.get_cookies()[0]
# create new_cookie with time_expire
time_expire = (datetime(2022,1,1) - datetime(1970,1,1)).total_seconds()
new_cookie['expiry'] = int(time_expire)
# delete cookie sites
browser.delete_all_cookies()
# add new cookie
browser.add_cookie(new_cookie)
try:
error=browser.find_element_by_class_name("error")
if error.text.lower().find('fallidos')>-1:
print("ERROR! Bad login or password")
return False
else:
pass
except NoSuchElementException:
pass
# navigation 1
time.sleep(sleep)
h.click('Aval')
time.sleep(sleep)
h.click('Avalar grupos')
time.sleep(sleep)
h.click('Grupos Avalados')
# -- end login --
# list of total groups
#select max results per page
h.wait_until(h.Text('Ver Reporte').exists)
h.click(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//select[@name="maxRows"]'))
time.sleep(sleep)
h.select(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//select[@name="maxRows"]'),'100')
return browser
def get_groups(browser,DIR='InstituLAC',sleep=0.8):
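# Walks the paginated 'grupos_avalados' table, collecting each group's row plus the URL
# of its detail page, and pickles the resulting DataFrame under DIR.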
# catch 1: groups info [name, leader, code, link to products]
# schema
# empty df
# select max items per page
# while until end
# try:
# catch table
# preprocess table
# catch urls
# add url column
# add to df
# click next page -> raises an error at the end
# except NoSuchElementException:
# break
# catch 1: list of groups
dfg=pd.DataFrame()
cont=True
while cont:
try:
# catch source
time.sleep(sleep)
source_g=browser.page_source
# catch table
time.sleep(sleep)
df=pd.read_html(source_g, attrs={"id":"grupos_avalados"}, header=2)[0]
# and preprocess it
c=[x for x in df.columns if x.find('Unnamed:') == -1]
dfgp=df[c][1:-1]
print(dfgp.columns,dfgp.shape)
# catch urls
url=[a.get_attribute('href') for a in browser.find_elements_by_xpath('//table[@id="grupos_avalados"]//td[5]/a')]
dfgp['Revisar'] = url
dfg=dfg.append(dfgp)
# click next page; this instruction raises an error once the last page is reached.
h.click(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//tr/td[3]/a'))
except NoSuchElementException as e:
print(e)
print('out of cycle')
break
time.sleep(sleep)
time.sleep(sleep)
dfg=dfg.reset_index(drop=True)
with open(f'{DIR}/dfg.pickle', 'wb') as f:
pickle.dump(dfg, f)
return browser,dfg
def get_DB(browser,target_data,DB=[],dfg=pd.DataFrame(),sleep=0.8,DIR='InstituLAC',
start=None,end=None,COL_Group='',start_time=0):
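# For each selected group: open its detail page, store the info and members tables,
# then enumerate the product categories (no aval / aval / pertenencia) and scrape
# every non-empty product table into the group's DBG dictionary.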
os.makedirs(DIR,exist_ok=True)
if dfg.empty:
browser,dfg=get_groups(browser,DIR=DIR,sleep=sleep)
dfg = dfg.reset_index(drop=True)
#find start and end if COL_Group
if COL_Group:
dfcg=dfg[dfg['COL Grupo']==COL_Group]
if not dfcg.empty:
start=dfcg.index[0]
end=start+1
#assert dfg.shape[0] == 324
time.sleep(sleep*2)
for idx in dfg.index[start:end]: # TEST
# create dict to store things related to the group
DBG = {} # HERE V1. DBG.keys = [cat1,cat2,...,catN]
# DBG['cat1'].keys = [prod1bycat,...prodnbycat]
# part info group
print(dfg.loc[idx,'Nombre del grupo'])
# specific group url
time.sleep(sleep)
url_group = dfg.loc[idx,'Revisar']
# go to url group
time.sleep(sleep)
browser.get(url_group)
# catch two tables: info grupo and members
source=browser.page_source
# Info group
l_info=pd.read_html(source, match='Nombre Grupo')
info_g=l_info[3].pivot(columns=0,values=1)
# Store info group
DBG['Info_group'] = info_g
# List members
l_int = pd.read_html(source,attrs={'id':'tblIntegrantes'},header=2)
mem_g=l_int[0]
# Store list of members
DBG['Members'] = mem_g
# Products
h.wait_until(lambda: browser.find_element_by_xpath('//td[@id="bodyPrincipal"]//a[text()="Ver productos"]') is not None)
h.click(browser.find_element_by_xpath('//td[@id="bodyPrincipal"]//a[text()="Ver productos"]'))
# Target products = ALL products: no aval, aval, aval pert (Categories)
_target_data = [('//*[@id="ProdsNoAval"]', '//div[@id="accordionCatgNoAval"]/h3', 'categoriaNoAval=%s&subcategoriaNoAval=%s&aval=F'),
('//*[@id="ProdsAval"]','//div[@id="accordionCatg"]/h3','categoria=%s&subcategoria=%s&aval=T'),
('//*[@id="ProdsPertenecia"]','//div[@id="accordionCatgP"]/h3','categoriaP=%s&subcategoriaP=%s&aval=P')
]
if target_data == 'NoAval':
target_data = _target_data[0:1]
print('map NoAvalProds')
elif target_data == 'Aval':
target_data = _target_data[1:2]
print('map institulac AvalProds')
elif target_data == 'Pert':
target_data = _target_data[2:]
print('map Pert institulac prods')
elif target_data == 'All':
target_data = _target_data
print('map all institulac prods')
lcp = [] # list of [category, product] pairs with a non-zero count, e.g. [[NC_NO_AVAL,ART_IMP_NO_AVAL],[NC,ART_IMP]...]
for i in target_data:
print('#####')####
time.sleep(sleep)
h.wait_until(lambda: browser.find_element_by_xpath(i[0]) is not None)
h.click(browser.find_element_by_xpath(i[0]))
time.sleep(sleep)
url_base=browser.current_url
# MAP
# map products by macro-category (no aval / aval / pertenencia) with a non-zero count
sleep = 0.8
for cat in browser.find_elements_by_xpath(i[1]):
# exist products
id_cat = cat.get_attribute('id')
#print(cat.text,'----',id_cat)
num_prods_cat = int(re.findall(r'\d+',cat.text)[0])
if num_prods_cat > 0:
time.sleep(sleep)
h.click(cat)
print(cat.text,'----',id_cat)
else:
continue
for prod in browser.find_elements_by_xpath('//div[@aria-labelledby="%s"]/h3' % cat.get_attribute('id')):
# items in products
#h.click(cat)
id_prod = prod.get_attribute('id')
#print(' ',prod.text,id_prod)
#print(prod)
num_items_prod = int(re.findall(r'\d+',prod.text)[0])
if num_items_prod > 0:
lcp.append([id_cat,id_prod])
print(' ',prod.text,id_prod)
else:
continue
time.sleep(sleep)
h.click(cat)
# DBG
# build database
for cat in lcp:
if cat[0] not in DBG.keys():
DBG[cat[0]] = {}
for prod in lcp:
# build query by case no aval, aval rev, pert
if 'NO_AV' in prod[0]:
query='categoriaNoAval=%s&subcategoriaNoAval=%s&aval=F' % (prod[0],prod[1])
elif '_P' in prod[0]:
query='categoriaP=%s&subcategoriaP=%s&aval=P' % (prod[0],prod[1])
else:
query='categoria=%s&subcategoria=%s&aval=T' % (prod[0],prod[1])
# HERE
url_query = url_base.split('?')[0] + '?' + query + '&' + url_base.split('?')[1]
# do query
browser.get(url_query)
# wait until complete load
h.wait_until(h.Button('Guardar').exists,timeout_secs=20)
# load
page_source = browser.page_source
# detect tables
try:
tables = | pd.read_html(browser.page_source,attrs={'class':'table'}) | pandas.read_html |
import pandas as pd
import numpy as np
from scipy import sparse
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import patches
import matplotlib.colors as colors
import textwrap
import re
class DrawGroup:
def __init__(self):
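# Layout defaults for the circular cartel plot: arc angles, node radius,
# label margins, edge-width scaling and font/node sizes.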
self.theta_1 = np.pi * 0.7
self.angle_margin = 3 * np.pi / 25
self.node2color = {}
self.angles = None
self.radius = 2
self.label_node_margin = 0.35
self.group_arc_node_margin = 0.1
self.edge_norm = lambda x: np.power(x, 1 / 2)
self.max_edge_width = 15
self.font_size = 15
self.node_size = 0.25
self.label_width = 15
self.max_label_width = 40
self.group_order = {"source": 0, "target": 1, "reciprocal": 2, "other": 3}
def draw(
self, group, node_labels=None, cmap=None, ax=None,
):
"""
Draw citation networks within a citation cartel
Parameters
----------
group : object
    Cartel group, with attributes A (the adjacency matrix of the network),
    node_ids, donor_score, recipient_score and threshold.
    We assume donor_score[i] and recipient_score[i] refer to node_ids[i]
node_labels : list or dict, optional
    Node names indexed by node id; if None, the node ids are used as labels
cmap : matplotlib color map or list
    Color map or List of strings indicating hex code
ax : axis
Return
------
ax : axis
"""
#
# Input formatting
#
# A,
# node_ids,
# donor_score,
# recipient_score,
# theta,
node_ids = np.array(group.node_ids)
donor_score = np.array(group.donor_score)
recipient_score = np.array(group.recipient_score)
theta = group.threshold
if node_labels is None:
node_names = ["%d" % i for i in node_ids]
else:
node_names = [node_labels[i] for i in node_ids]
# Classify nodes into donor, recipient and reciprocal
node_types = self.classify_nodes(donor_score, recipient_score, theta)
# Adjust the arc angle for the reciprocal journals based on how many there are
num_reciprocal = np.sum(
np.array([node_types[i] == "reciprocal" for i in range(len(node_types))])
)
if num_reciprocal > 2:
self.theta_1 = np.pi * 0.3
else:
self.theta_1 = np.pi * 0.7
#
# Construct the adjacency matrix with 'Other' node
#
As = group.A.copy()
node_types += ["other"]
node_names += ["Other"]
#
# Calculate the positions and sizes
#
# Make node table
num_nodes = len(node_types)
node_table = pd.DataFrame(
{"id": np.arange(num_nodes), "group": node_types, "name": node_names}
)
# Calculate the angle of each node
node_table, self.angles = self.calc_node_angles(node_table)
# Calculate the position of journals based on the angles
node_table = self.calc_node_position(node_table)
# Text folding
node_table = self.fold_node_name(node_table)
# Compute the edge positions based on the nodes
edge_table = self.make_edge_table(node_table, As)
# make color map
self.make_color_map(node_table, cmap)
#
# Plot
#
self.plot_edges(node_table, edge_table, ax)
self.plot_node_label(node_table, ax)
self.plot_nodes(node_table, As, node_ids, ax)
self.plot_group_arc(node_table, ax)
self.trim(ax)
return ax
def classify_nodes(self, donor_score, recipient_score, threshold):
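# Label each node as 'target' (recipient only), 'source' (donor only) or
# 'reciprocal' (both) according to the score threshold; unclassified nodes get ''.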
is_recipient = recipient_score >= threshold
is_donor = donor_score >= threshold
is_reciprocal = is_recipient & is_donor
is_recipient = is_recipient & (~is_reciprocal)
is_donor = is_donor & (~is_reciprocal)
node_type = is_recipient + 2 * is_donor + 3 * is_reciprocal
node_type = np.array(["", "target", "source", "reciprocal"])[node_type]
return node_type.tolist()
def calc_node_angles(self, node_table):
# Compute the coordinate of nodes
self.theta_2 = np.pi - self.theta_1 - 2 * self.angle_margin
node_table["within_group_id"] = -1
node_table["angle"] = -1
angles = {"margin_angle": self.angle_margin}
for group_name in ["source", "target", "reciprocal", "other"]:
dg = node_table[node_table.group == group_name]
if group_name == "source":
start_angle = -self.angle_margin - self.theta_1 - self.theta_2 / 2
end_angle = start_angle + self.theta_1
elif group_name == "target":
start_angle = self.angle_margin + self.theta_2 / 2
end_angle = start_angle + self.theta_1
elif group_name == "reciprocal":
start_angle = -self.theta_2 / 2
end_angle = start_angle + self.theta_2
elif group_name == "other":
start_angle = self.theta_2 / 2 + self.angle_margin * 2 + self.theta_1
end_angle = start_angle + self.theta_2
ids = np.arange(dg.shape[0])
node_table.loc[dg.index, "within_group_id"] = ids
n = dg.shape[0]
if (group_name == "reciprocal") and (n >= 2):
a = (
(ids) * ((end_angle - start_angle) - self.angle_margin) / (n - 1)
+ start_angle
+ 0.5 * self.angle_margin
)
else:
if n >= 2:
a = (
ids
* ((end_angle - start_angle) - 1.5 * self.angle_margin)
/ (n - 1)
+ start_angle
+ 0.75 * self.angle_margin
)
else:
a = (
(ids + 1)
* ((end_angle - start_angle) - 1.5 * self.angle_margin)
/ (n + 1)
+ start_angle
+ 0.75 * self.angle_margin
)
# node_table.loc[dg.index, "angle"] = (ids +1) * angle_group / (n-1) + start_angle
node_table.loc[dg.index, "angle"] = a
angles[group_name] = {"start": start_angle, "end": end_angle}
return node_table, angles
def calc_node_position(self, node_table):
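# Convert each node's angle into (x, y) coordinates on a circle of the configured radius.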
nodes = node_table.copy()
nodes["x"] = self.radius * np.sin(nodes.angle)
nodes["y"] = self.radius * np.cos(nodes.angle)
return nodes
def make_edge_table(self, node_table, As):
# Compute the edge table
src, trg = np.where(As)
selfloop = src != trg
src, trg = src[selfloop], trg[selfloop]
w = As[(src, trg)]
edge_table = | pd.DataFrame({"src": src, "trg": trg, "w": w}) | pandas.DataFrame |