# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 20:51:01 2018
@author: SilverDoe
"""
'''
To apply our own function or another library's function, pandas provides three
important methods, namely:
1. Table-wise function application: pipe()
2. Row- or column-wise function application: apply()
3. Element-wise function application: applymap()
>> pipe() applies the custom operation to the entire DataFrame.
>> apply() applies the custom operation row-wise or column-wise.
>> applymap() applies the specified operation to every element of the DataFrame.
'''
''' ============== pipe() function ============================================'''
# pipe() Function to add value 2 to the entire dataframe
import pandas as pd
def adder(adder1,adder2):
return adder1+adder2
#Create a Dictionary of series
d = {'Score_Math': pd.Series([66,57,75,44,31,67,85,33,42,62,51,47])}
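# A minimal illustrative sketch of the three application methods described
# above. The toy DataFrame below is an assumption for demonstration only and
# is not taken from the original example data.
demo_df = pd.DataFrame({'col_a': [1, 2, 3], 'col_b': [4, 5, 6]})
demo_df.pipe(adder, 2)                  # table-wise: adds 2 to every value in the DataFrame
demo_df.apply(lambda col: col.mean())   # column-wise: mean of each column (use axis=1 for rows)
demo_df.applymap(lambda x: x * 100)     # element-wise: applies the function to each element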
# Extract data
import urllib.request
from PyPDF2 import PdfFileReader
import io #input/output
import pandas as pd
import tests
# Set up the URL
url = "https://www.normanok.gov/sites/default/files/documents/2021-03/2021-03-01_daily_incident_summary.pdf"
# Set up the headers
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36"
# Read the data from the url
p0_data = urllib.request.urlopen(urllib.request.Request(url, headers=headers)).read()
def test_extract():
# Wrap the raw PDF bytes in an in-memory binary stream
f = io.BytesIO(p0_data)
# Send to PdfFileReader
reader = PdfFileReader(f)
# Get the number of pages in the document
numPage = reader.getNumPages()
# Empty DataFrame
output = pd.DataFrame()
# For loop for page loading
for i in range(0, numPage, 1):
# Split the page text on "\n" to get the list of data points
lst = reader.getPage(i).extractText().split("\n")
if i == 10:
lst[17:19]=[''.join(lst[17:19])]
print(lst)
# Print the page number
print("Page Number: " + str(i))
# Get the number of data points within the page
numberofdata = (len(lst)//5)*5
# Create one list per column (0-4) to collect the repeating five-field pattern.
lst0 = []
lst1 = []
lst2 = []
lst3 = []
lst4 = []
# Recognize the pattern and run the loop.
for j in range(0, numberofdata, 1):
if j % 5 == 0:
lst0.append(lst[j])
elif j % 5 == 1:
lst1.append(lst[j])
elif j % 5 == 2:
lst2.append(lst[j])
elif j % 5 == 3:
lst3.append(lst[j])
elif j % 5 == 4:
lst4.append(lst[j])
# Create the list in list
lst = [lst0, lst1, lst2, lst3, lst4]
df = pd.DataFrame(lst).T
# If it is the first page, then get the header from the first row of the dataframe
if i == 0:
df.columns = df.loc[0, :]
df.columns.name = ""
df = df.loc[1:].reset_index(drop = True)
# If not, then get it from output dataframe.
else:
df.columns = output.columns
df = df.reset_index(drop = True)
# Stack the outcomes
output = pd.concat([output, df])
# -*- coding:utf-8 -*-
import math
import phate
import anndata
import shutil
import warnings
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import cdist
from scipy.stats import wilcoxon, pearsonr
from scipy.spatial import distance_matrix
from sklearn.decomposition import PCA
# from python_codes.train.train import train
from python_codes.train.clustering import clustering
from python_codes.train.pseudotime import pseudotime
from python_codes.util.util import load_breast_cancer_data, preprocessing_data, save_features
from python_codes.util.exchangeable_loom import write_exchangeable_loom
warnings.filterwarnings("ignore")
from python_codes.util.util import *
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial','Roboto']
rcParams['savefig.dpi'] = 300
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, inset_locator
title_sz = 16
####################################
#----------Get Annotations---------#
####################################
def get_adata_from_embeddings(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
feature_fp = os.path.join(output_dir, "features.tsv")
adata = sc.read_csv(feature_fp, delimiter="\t", first_column_names=None)
return adata
def get_clusters(args, sample_name, method="leiden", dataset="breast_cancer"):
original_spatial = args.spatial
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
args.spatial = original_spatial
cluster_color_dict = get_cluster_colors(args, sample_name)
unique_cluster_dict = {cluster:cluster_color_dict[cluster]["abbr"] for cluster in cluster_color_dict.keys()}
uniq_pred = np.unique(pred_clusters)
for cid, cluster in enumerate(uniq_pred):
pred_clusters[pred_clusters == cluster] = unique_cluster_dict[int(cluster)]
return pred_clusters
def get_cluster_colors_and_labels_original():
ann_dict = {
0: "Cancer 1",
1: "Immune:B/plasma",
2: "Adipose",
3: "Immune:APC/B/T cells",
4: "Cancer:Immune rich",
5: "Cancer 2",
6: "Cancer Connective"
}
color_dict = {
0: "#771122",
1: "#AA4488",
2: "#05C1BA",
3: "#F7E54A",
4: "#D55802",
5: "#137777",
6: "#124477"
}
return ann_dict, color_dict
def get_cluster_colors(args, sample_name):
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
clusters = df["Cluster ID"].values.astype(int)
annotations = df["Annotations"].values.astype(str)
colors = df["Color"].values.astype(str)
abbrs = df["Abbr"].values.astype(str)
cur_dict = {}
for cid, cluster in enumerate(clusters):
cur_dict[cluster] = {
"annotation" : annotations[cid],
"color" : colors[cid],
"abbr" : abbrs[cid]
}
return cur_dict
def get_top_n_cluster_specific_genes(args, sample_name, method, dataset="breast_cancer", top_n=3):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t")
import numpy as np
import pandas as pd
import datetime as dt
import os
import zipfile
from datetime import datetime, timedelta
from urllib.parse import urlparse
study_prefix = "U01"
def get_user_id_from_filename(f):
# Get user id from file name
return(f.split(".")[3])
def get_file_names_from_zip(z, file_type=None, prefix=study_prefix):
# Extract file list
file_list = list(z.filelist)
if file_type is None:
filtered = [f.filename for f in file_list if (prefix in f.filename) and (".csv" in f.filename)]
else:
filtered = [f.filename for f in file_list if (file_type in f.filename and prefix in f.filename)]
return(filtered)
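# Minimal usage sketch for get_file_names_from_zip. The archive name below is a
# hypothetical placeholder, not a file referenced by the original script.
def example_list_study_csvs(zip_path="study_export.zip"):
    # With file_type=None, every ".csv" entry carrying the study prefix is returned.
    with zipfile.ZipFile(zip_path) as z:
        return get_file_names_from_zip(z, file_type=None)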
def get_data_catalog(catalog_file, data_file, data_dir, dict_dir):
dc = pd.read_csv(catalog_file)
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re  # regular expressions library
import string  # string handling library
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends to a dictionary different animal names in Spanish and
English. The English names are chosen so that they appear in a
WordNet synset.
Parameters
----------
df_translate : pandas.dataframe, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where the pickle file with the dictionary is saved; nothing
is saved when the path is empty.
The default is "".
Returns
-------
df_translate : pandas.dataframe.
Pandas.dataframe with the new rows appended.
'''
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jote"], 'english': ["buzzard"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["zorzal"], 'english': ["fieldfare"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["pejerrey"], 'english': ["silverside"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mandril"], 'english': ["mandrill"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["peludo"], 'english': ["armadillo"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chingue"], 'english': ["skunk"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guaren"], 'english': ["brown_rat"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cata"], 'english': ["budgerigar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["bonito"], 'english': ["atlantic_bonito"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cachalote"], 'english': ["sperm_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["morena"], 'english': ["moray_eels"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jaiba"], 'english': ["callinectes_sapidus"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cervatillo"], 'english': ["fawn"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mulita"], 'english': ["nine-banded_armadillo"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpintero"], 'english': ["woodpecker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["centolla"], 'english': ["maja_squinado"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["palometa"], 'english': ["pomfret"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["suricata"], 'english': ["meerkat"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vampiro"], 'english': ["vampire_bats"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["laucha"], 'english': ["mouse"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vicuña"], 'english': ["vicuna"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carancho"], 'english': ["caracara"]}), ignore_index = True)
if df_translate is None:
df_translate = df_auxiliar.copy(deep=True)
else:
for i,row in df_auxiliar.iterrows():
if row['spanish'] not in df_translate['spanish'].values:
df_translate = df_translate.append(row)
# df_translate = pd.concat([df_translate, df_auxiliar.ix[df_auxiliar._merge=='left_only', ['spanish']]])
if (path != ""):
df_translate.to_pickle(path)
return df_translate
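# Note: DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0,
# so on recent pandas the rows above would instead be accumulated with pd.concat,
# for example (sketch only):
#   new_rows = pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]})
#   df_auxiliar = pd.concat([df_auxiliar, new_rows], ignore_index=True)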
def tokenize_list(self, text_dataframe, tokenizer_type = "basic_english"):
'''
It receives a list of strings and returns a list of string list where
each string is a token obtained from apply the tokenizer_type.
Parameters
----------
text_dataframe : string list
A string list where each element has words separated by spaces.
tokenizer_type :
The kind of tokenizer to be applied. Basic_english applied by
default. Other tokenizers could be: spacy, moses, toktok, revtok,
subword.
Returns
-------
tokens : list of string list
A list where each element is a list that contains tokens.
'''
tokenizer = get_tokenizer(tokenizer_type)
tokens = [tokenizer(x) if str(x)!="nan" else x for x in text_dataframe]
return tokens
def count_words(self, tokens, percentaje = 0):
'''
It returns a word unique list extracted from tokens parameter and the
number of times that each word appear in descendingly ordered.
Parameters
----------
tokens : list of token list
A list where each element is a token list.
percentaje: float
Number between 0 and 1.
If it is 0, it doesn't print anything.
If it is greater than 0, it prints by console how many of
the most common words (in percentage) exceed the percentage passed
by parameter.
Returns
-------
The word unique list and the number of times that each one appear.
'''
words = Counter()
for s in tokens:
if str(s) != "nan":
for w in s:
words[w] += 1
sorted_words = OrderedDict(words.most_common())
if (percentaje>0):
count_occurences = sum(words.values())
accumulated = 0
counter = 0
while accumulated < count_occurences * percentaje:
accumulated += list(sorted_words.values())[counter]
counter += 1
print(f"The {counter * 100 / len(words)}% most common words "
f"account for the {accumulated * 100 / count_occurences}% of the occurrences")
return list(sorted_words.keys()),list(sorted_words.values())
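# Illustrative usage of count_words (hypothetical toy input, shown as comments
# so the module's behaviour is unchanged):
#   nlp = NLPClass()
#   tokens = nlp.tokenize_list(["the cat sat", "the dog ran"])
#   words, counts = nlp.count_words(tokens)
#   # words  -> ['the', 'cat', 'sat', 'dog', 'ran']
#   # counts -> [2, 1, 1, 1, 1]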
def join_horizontally_strings(self, df, column_list, separator = " "):
'''
It takes each row of the df dataframe and joins the content of the
columns listed in column_list, separated by the separator parameter.
Parameters
----------
df : pandas.dataframe
Dataframe that contains the columns to join.
column_list : list of strings
The column names to be joined.
Returns
-------
lista : list of strings
A list where each element is the concatenation of the column_list
columns of one row, separated by separator.
'''
lista=[]
contador = 0
for i, row in df.iterrows():
lista.append("")
for column in column_list:
lista[contador] = lista[contador] + row[column] + separator
lista[contador] = lista[contador].rstrip()
contador+=1
return lista
def add_to_pickle_translation_file(self,path,words,lan_src = "spanish",lan_dest = "english"):
'''
It checks whether each word is already in path+"//translations.pkl"; if it is not, it adds its translation to the file.
Parameters
----------
path : string
The path where the translation file is.
words : list of strings
List of words to obtain the translation.
lan_src : string, optional
The language in which each word of words is written.
The default is "spanish".
lan_dest : string, optional
language in which each word will be translated.
The default is "english".
Returns
-------
None.
'''
df_translation = self.read_pickle_translation_file(path)
for i,word in enumerate(words):
df_check = df_translation[(df_translation.word == word) & (df_translation.lan_src == lan_src) & (df_translation.lan_dest == lan_dest)]
if len(df_check.index) == 0:
print("Traduciendo " + word +": " + str(i) + "/" + str(len(words)))
new_row = [word,self.translate([word],lan_src,lan_dest)[0].extra_data["parsed"],lan_src,lan_dest]
df_length = len(df_translation)
df_translation.loc[df_length] = new_row
df_translation.to_pickle(path+"//translations.pkl")
def read_pickle_translation_file(self,path):
'''
Read pickle file with the all translations DataFrame.
Parameters
----------
path : string
Path where the picke file is.
Returns
-------
df_translation : pandas.DataFrame
df with all the translations with the following structure:
word|translation|lan_src|lan_dest|
'''
try:
df_translation = pd.read_pickle(path+"//translations.pkl")
except (OSError, IOError):
print("translation.pkl no encontrado")
df_translation = pd.DataFrame(columns=['word','translation','lan_src','lan_dest'])
return df_translation
def translate(self, text, lan_src = 'spanish', lan_dest = 'english'):
'''
It translates text from one language to another using googletrans.
Parameters
----------
text : string list
Strings to be translated.
lan_src : string, optional.
The source language.
The default is 'spanish'.
lan_dest : string, optional
The destination language.
The default is 'english'.
Returns
-------
text_translate : translated_object list
A list where each element is a translation from each text list
element.
'''
translator = Translator()
translated_objects = []
for element in text:
translated_objects.append(translator.translate(element, src=lan_src, dest=lan_dest))
return translated_objects
def translate_checking_wordnet_and_hypernym(self, texts, df_translate = None, hypernym_check = '', len_src = 'spanish', len_dest = 'english'):
'''
It receives a word list in len_src language and returns a dataframe
with the original word list and its len_dest translation. If the
original word doesn't have a translation that exists on WordNet synset
or the hypernym_check on the hypernym tree, it returns
"no_translation".
Parameters
----------
texts : string list
list with words to translate.
df_translate : pandas.dataframe, optional
A dataframe with two columns: len_src with words in len_src language
and len_dest with words in len_dest language. If it's not None,
the rows are appended. Otherwise it's initialized and then the
rows are appended.
The default is None
hypernym_check : string, optional
The synset to be checked if exists on translated hypernym tree.
The default is "".
len_src : string, optional
Language source.
The default is "spanish".
len_dest : string, optional
Language destiny.
The default is "english".
Returns
-------
df_translate : pandas.dataframe
Dataframe with two columns: len_src with words in len_src language
and len_dest with words in len_dest language.
'''
# If df_translate not exist, initialize it
if df_translate is None:
df_translate = pd.DataFrame(columns=[len_src,len_dest])
for text in texts:
if text not in df_translate[len_src].to_list():
try:
has_hyper = False
iter_translates = -1
translation_object = self.translate([text]) # Get the translation_object with all posible translations
while (not has_hyper):
translated_synsets = []
while (len(translated_synsets)==0):
iter_translates+=1
translated_word = translation_object[0].extra_data["parsed"][1][0][0][5][0][4][iter_translates][0].lower() # Extract a posible translation
translated_synsets = wn.synsets(translated_word.replace(" ","_"),pos=wn.NOUN)
translated_synsets = [x for x in translated_synsets if ".n." in x.name().lower()] # keep nouns only
if hypernym_check != '':
synset_with_hypernym, _ = self.get_synset_that_has_hypernym(translated_synsets, hypernym_check = hypernym_check) # check if hypernym_check is part of translated_synsets hypernym tree
if len(synset_with_hypernym)>0:
has_hyper = True
else:
has_hyper = True
except:
df2 = pd.DataFrame({len_src: [text], len_dest: "no_translation"})
"""
Implementation of Econometric measures of
connectness and systemic risk in finance and
insurance sectors by M.Billio, M.Getmansky,
<NAME>, L.Pelizzon
"""
import pandas as pd
import numpy as np
from arch import arch_model
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from typing import Dict
from itertools import combinations, product
from scipy.stats import ttest_1samp
from scipy.sparse.linalg import eigs
from marketlearn.causality_network.sector_data import SectorPrice
from marketlearn.causality_network.vector_ar.bivar import BiVariateVar
from marketlearn.causality_network.graph import GraphAdMap
# pylint: disable=invalid-name, undefined-loop-variable
class CNet:
"""
Class Implements the granger causal flows in a complicated network
"""
class PreProcess:
"""
Nested Class to pre-process data before program start and create
"sectors" attribute
"""
def __init__(self, start: str = "1999-12-31"):
"""
Constructor used to instantiate class
:param start: (str) start date in format 'YYYY-MM-DD'
"""
self.start = start
self.preprocess_sectors()
self.transformed_data = None
@staticmethod
def _get_sectors() -> pd.DataFrame:
"""
Downloads the sector prices from SectorPrice
:return: sector prices as returned by SectorPrice().read()
"""
return SectorPrice().read()
def preprocess_sectors(self):
"""
Preprocesses data by removing any NAs
and create sectors attribute
:return: (None)
"""
# pylint: disable=consider-iterating-dictionary
sec = self._get_sectors()
for k in sec.keys():
sec[k] = sec[k][sec[k].index >= self.start].dropna(axis=1)
# Create the sectors attribute
self.sectors = sec.copy()
# Garbage collection
sec = None
def get(self) -> pd.DataFrame:
"""
Returns the sectors after preprocessing
"""
return self.sectors
# --- main class definition
def __init__(self, start: str):
self.sectors = self.PreProcess(start=start).get()
self.pca = PCA()
self.sc = StandardScaler()
self.lr = None
self.ret1 = None
self.ret2 = None
self.errors = None
self.transformed_data = None
def risk_fraction(self, data: pd.DataFrame, n: int = 3):
"""
Computes the cumulative risk fraction of system
see ref: formula (6) of main paper
:param data: (pd.DataFrame) end of month prices
shape = (n_samples, p_shares)
:param n: (int) Number of principal components (3 by default)
assumes user has chosen the best n
:return: (float)
"""
# Store col names
col_names = list(data)
# Compute log returns
data = np.log(1 + data.pct_change())
data = self.sc.fit_transform(data.dropna())
data = self.pca.fit_transform(data)
self.transformed_data = pd.DataFrame(data, columns=col_names)
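# In Billio et al., the cumulative risk fraction h_n (their formula (6)) is the
# share of total return variance explained by the first n principal components.
# With scikit-learn's PCA that is typically computed as shown below (illustrative
# sketch under that assumption, not necessarily the original implementation):
#   return self.pca.explained_variance_ratio_[:n].sum()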
import streamlit as st
st.title('inspo-Book')
st.header('Upload an item of clothing to find matching looks')
st.subheader('<NAME>')
st.subheader('Insight Data Science, Los Angeles')
from PIL import *
import cv2
###########################################################
### uploading the image ###
###########################################################
upload = st.file_uploader('Upload an picture of the item you are trying to match')
if upload is not None:
image1 = Image.open(upload)
st.image(image1, caption='Uploaded Image.', width = 200)
##### User input of number of similar outfits to find
number = st.number_input('How many similar outfits would you like to see?', value = 0, step = 1)
########################################################
### importing libraries
########################################################
import pandas as pd
import csv
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from keras.models import load_model
import os
import pandas as pd
from keras.preprocessing import image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['axes.titlesize'] = 20
import matplotlib.gridspec as gridspec
from sklearn import metrics
from sklearn.metrics import pairwise_distances
from sklearn import datasets
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn import preprocessing
###########################################################
### TF model
###########################################################
### User image identification and extraction
# import keras
import keras
import keras_maskrcnn
import keras_retinanet
# import keras_retinanet
from keras_maskrcnn import models
from keras_maskrcnn.utils.visualization import draw_mask
from keras_retinanet.utils.visualization import draw_box, draw_caption, draw_annotations
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.colors import label_color
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import shutil
import numpy as np
import time
import json
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
######################################################
### User image identification and extraction
# adjust this to point to your downloaded/trained model
model_path = os.path.join('/home/ec2-user/inspo/', 'resnet50_modanet.h5')
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
#print(model.summary())
# load label to names mapping for visualization purposes
labels_to_names = {1: 'bag', 2: 'belt', 3: 'boots', 4: 'footwear', 5: 'outer', 6: 'dress', 7: 'sunglasses', 8: 'pants', 9: 'top', 10: 'shorts', 11: 'skirt', 12: 'headwear', 13: 'scarf/tie'}
#######################################################
def cloth(input_imagefile):
# load image
image = read_image_bgr(input_imagefile)
# copy to draw on
draw = image.copy()
draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
# preprocess image for network
image = preprocess_image(image)
image, scale = resize_image(image)
# process image
start = time.time()
outputs = model.predict_on_batch(np.expand_dims(image, axis=0))
print("processing time: ", time.time() - start)
boxes = outputs[-4][0]
scores = outputs[-3][0]
labels = outputs[-2][0]
masks = outputs[-1][0]
# correct for image scale
boxes /= scale
masks_dic={}
boxes_dic={}
labels_dic={}
counter=0
# visualize detections
for box, score, label, mask in zip(boxes, scores, labels, masks):
if score < 0.5:
break
color = label_color(label)
b = box.astype(int)
draw_box(draw, b, color=color)
mask = mask[:, :, label]
draw_mask(draw, b, mask, color=label_color(label))
masks_dic[str(counter)]=mask
boxes_dic[str(counter)]=box
labels_dic[str(counter)]=label
counter+=1
image = read_image_bgr(input_imagefile)
# copy to draw on
draw = image.copy()
draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
# visualize detections
items_dic={}
counter=0
for box, mask2, label2 in zip(boxes_dic.values(), masks_dic.values(), labels_dic.values()):
b = box.astype(int)
# resize to fit the box
mask2 = mask2.astype(np.float32)
mask2 = cv2.resize(mask2, (b[2] - b[0], b[3] - b[1]))
# binarize the mask1
mask2 = (mask2 > 0.5).astype(np.uint8)
# draw the mask2 in the image
mask2_image = np.zeros((draw.shape[0], draw.shape[1]), np.uint8)
mask2_image[b[1]:b[3], b[0]:b[2]] = mask2
mask2 = mask2_image
mask2 = (np.stack([mask2] * 3, axis = 2))*draw
items_dic[str(counter)] = mask2
counter+=1
# newfileneame=input_imagefile.split("/")[4].split('.')[0]
# plt.ioff()
# plt.figure(figsize=(15, 15))
# plt.axis('off')
# plt.imshow(mask2)
#plt.savefig('/home/ec2-user/SageMaker/'+str(newfileneame)+'-masked-'+str(label2)+'.jpg',bbox_inches='tight', pad_inches=0)
#plt.show()
plt.close('all')
return mask2, label2
###################################################################
mask1, label = cloth(upload)
#########################################################################
from PIL import Image
testimg = Image.fromarray(mask1, 'RGB')
testimg = testimg.resize((224,224))
#####################################################################
# loading model file for AlexNEt
an_model = load_model("/home/ec2-user/inspo/an-model.h5")
######################################################################3
#Loading the weights for AlexNet
import tensorflow as tf
an_model.layers[0].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d/bias')])
an_model.layers[3].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_1/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_1/bias')])
an_model.layers[6].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_2/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_2/bias')])
an_model.layers[8].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_3/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_3/bias')])
an_model.layers[10].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_4/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'conv2d_4/bias')])
an_model.layers[13].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'dense/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'dense/bias')])
an_model.layers[16].set_weights([tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'dense_1/kernel'),
tf.train.load_variable('/home/ec2-user/inspo/model.ckpt-5000',
'dense_1/bias')])
######################################################################
# Extracting features from user test using AlexNet
INV3_feature_dic = {}
INV3_feature_list=[]
img_data = image.img_to_array(testimg)
img_data = np.expand_dims(img_data, axis=0)
#img_data = preprocess_input(img_data)
INV3_feature = an_model.predict(img_data)
feature_np = np.array(INV3_feature)
testfeature = feature_np.flatten()
###################################################################3
features = pd.read_csv('/home/ec2-user/inspo/an_features.csv', index_col='names')
import re
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.express as px
# from plotly.subplots import make_subplots
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from .dash_config import DASH_CACHING
from .helpers import clean_dataset, get_data_metadata, logger, bin_numeric
TIMEOUT = 60 * 60 if DASH_CACHING else 1
def register_data_callbacks(app, cache):
@app.callback(
[
Output("scatterdiv", "children"),
Output("dataloaded", "value"),
Output("datatable", "data"),
Output("datatable", "columns"),
],
[
Input("url", "pathname"),
Input("tableloaded", "children"),
],
[State("datatable", "columns")],
)
@cache.memoize(timeout=TIMEOUT)
def entropy_scatter(url, tableloaded, existing_columns):
existing_columns.append({"id": "Entropy", "name": "Entropy"})
logger.debug("Downloading data and calculate entropy")
data_id = int(re.search(r"data/(\d+)", url).group(1))
df, meta_features, numerical_data, nominal_data = get_data_metadata(data_id)
scatter_div = (
[
html.Div(
[
html.H3("Scatter plot"),
html.Div(
dcc.Dropdown(
id="dropdown1",
options=[
{"label": i, "value": i}
for i in numerical_data[:1000]
],
multi=False,
clearable=False,
value=numerical_data[0],
),
style={"width": "30%"},
),
html.Div(
dcc.Dropdown(
id="dropdown2",
options=[
{"label": i, "value": i}
for i in numerical_data[:1000]
],
multi=False,
clearable=False,
value=numerical_data[1],
),
style={"width": "30%"},
),
html.Div(
dcc.Dropdown(
id="dropdown3",
options=[
{"label": i, "value": i}
for i in nominal_data[:1000]
],
multi=False,
clearable=False,
value=nominal_data[0],
),
style={"width": "30%"},
),
html.Div(id="scatter_plot"),
]
)
]
if len(numerical_data) > 1 and nominal_data
else html.Div(
id="Scatter Plot",
children=[html.Div(html.P("No numerical-nominal combination found"))],
)
)
logger.debug("Downloaded data and calculated entropy")
return scatter_div, "loaded", meta_features.to_dict("records"), existing_columns
# @app.callback(
# Output('distribution', 'children'),
# [Input('datatable', 'selected_rows'),
# Input('radio1', 'value'),
# Input('stack', 'value'),
# Input('url', 'pathname'),
# Input('dataloaded', 'value')],
# [State('datatable', 'data')])
# def distribution_sub_plot(selected_row_indices, radio_value,
# stack, url, data_loaded, rows):
# if data_loaded is None:
# return []
# data_id = int(re.search(r'data/(\d+)', url).group(1))
# try:
# df = pd.read_pickle('cache/df' + str(data_id) + '.pkl')
# except OSError:
# return []
# meta_data = pd.DataFrame(rows)
# print("distribution")
# if len(selected_row_indices) != 0:
# meta_data = meta_data.loc[selected_row_indices]
# attributes = meta_data["Attribute"].values
# types = meta_data["DataType"].values
#
# if len(attributes) == 0:
# fig = make_subplots(rows=1, cols=1)
# trace1 = go.Scatter(x=[0, 0, 0], y=[0, 0, 0])
# fig.append_trace(trace1, 1, 1)
# else:
# fig = make_subplots(rows=len(attributes), cols=1,
# subplot_titles=attributes,
# )
# i = 0
# for attribute in attributes:
# show_legend = True if i == 0 else False
# data = dist_plot(meta_data, attribute, types[i],
# radio_value, data_id, show_legend, df)
# i = i + 1
# for trace in data:
# fig.append_trace(trace, i, 1)
#
# fig['layout'].update(hovermode='closest',
# height=300 + (len(attributes) * 100),
# barmode=stack,
# font=dict(size=11))
# for i in fig['layout']['annotations']:
# i['font']['size'] = 11
# #print(fig['layout'])
# return html.Div(dcc.Graph(figure=fig), id="graph1")
@app.callback(
Output("table-graph", "children"),
[
Input("datatable", "data"),
Input("datatable", "selected_rows"),
Input("url", "pathname"),
Input("radio1", "value"),
Input("stack", "value"),
Input("dataloaded", "value"),
],
)
@cache.memoize(timeout=TIMEOUT)
def plot_table(rows, selected_row_indices, url, radio, stack, dataloaded):
# If dataset is not downloaded yet
if dataloaded is None:
return []
logger.debug("loading pickle to create dist plot")
# If pickle file is present
data_id = int(re.search(r"data/(\d+)", url).group(1))
try:
df = pd.read_pickle("cache/df" + str(data_id) + ".pkl")
except OSError:
return []
# Get selected rows from table
meta_data = pd.DataFrame(rows)
if len(selected_row_indices) != 0:
meta_data = meta_data.loc[selected_row_indices]
else:
return "no selected rows"
# Create distribution plots and align them as a table graph
children = []
for index, row in meta_data.iterrows():
attribute = row["Attribute"]
col1 = html.P(row["Attribute"])
show_legend = True if index == 0 else False
data = dist_plot(
meta_data, attribute, row["DataType"], radio, data_id, show_legend, df
)
fig = go.Figure(data=data)
fig["layout"].update(
hovermode="closest", height=300, barmode=stack, font=dict(size=9)
)
col2 = dcc.Graph(figure=fig)
children.append(generate_metric_row(col1, col2))
out = html.Div(
className="metric-rows",
style={"overflowY": "scroll", "height": "500px", "marginBottom": "50px"},
children=children,
)
logger.debug("distribution plot created")
return out
@app.callback(
[Output("fi", "children"), Output("hidden", "value")],
[Input("url", "pathname"), Input("dataloaded", "value")],
[State("datatable", "data")],
)
@cache.memoize(timeout=TIMEOUT)
def feature_importance(url, dataloaded, rows):
# If dataset is not loaded
if dataloaded is None:
return [], "No file"
# Get dataset if pickle exists
data_id = int(re.search(r"data/(\d+)", url).group(1))
try:
df = pd.read_pickle("cache/df" + str(data_id) + ".pkl")
except OSError:
return [], "No file"
# Get table of metadata
meta_data = | pd.DataFrame(rows) | pandas.DataFrame |
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
      np.array([4, 10, 18], np.int64)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
      np.array([4*7, 80, 18*9], np.int64)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = | pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}) | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/07/26
@author: Wangzili
@group : **
@contact: <EMAIL>
In every indicator below, the df parameter is the stock k-line data returned by get_k_data
"""
import pandas as pd
import numpy as np
import itertools
def ma(df, n=10):
"""
移动平均线 Moving Average
MA(N)=(第1日收盘价+第2日收盘价—+……+第N日收盘价)/N
"""
pv = pd.DataFrame()
pv['date'] = df['date']
pv['v'] = df.close.rolling(n).mean()
return pv
def _ma(series, n):
"""
移动平均
"""
return series.rolling(n).mean()
def md(df, n=10):
"""
移动标准差
STD=S(CLOSE,N)=[∑(CLOSE-MA(CLOSE,N))^2/N]^0.5
"""
_md = pd.DataFrame()
_md['date'] = df.date
_md["md"] = df.close.rolling(n).std(ddof=0)
return _md
def _md(series, n):
"""
标准差MD
"""
return series.rolling(n).std(ddof=0) # 有时候会用ddof=1
def ema(df, n=12):
"""
指数平均数指标 Exponential Moving Average
今日EMA(N)=2/(N+1)×今日收盘价+(N-1)/(N+1)×昨日EMA(N)
EMA(X,N)=[2×X+(N-1)×EMA(ref(X),N]/(N+1)
"""
_ema = pd.DataFrame()
_ema['date'] = df['date']
_ema['ema'] = df.close.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
return _ema
def _ema(series, n):
"""
指数平均数
"""
return series.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
def macd(df, n=12, m=26, k=9):
"""
平滑异同移动平均线(Moving Average Convergence Divergence)
今日EMA(N)=2/(N+1)×今日收盘价+(N-1)/(N+1)×昨日EMA(N)
DIFF= EMA(N1)- EMA(N2)
DEA(DIF,M)= 2/(M+1)×DIF +[1-2/(M+1)]×DEA(REF(DIF,1),M)
MACD(BAR)=2×(DIF-DEA)
return:
osc: MACD bar / OSC 差值柱形图 DIFF - DEM
diff: 差离值
dea: 讯号线
"""
_macd = pd.DataFrame()
_macd['date'] = df['date']
_macd['diff'] = _ema(df.close, n) - _ema(df.close, m)
_macd['dea'] = _ema(_macd['diff'], k)
_macd['macd'] = _macd['diff'] - _macd['dea']
return _macd
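# Hedged usage sketch: in practice df comes from tushare's get_k_data (columns
# date/open/close/high/low/volume); the synthetic frame below is only for illustration.
def _demo_macd():
    close = pd.Series(np.random.rand(120)).cumsum() + 10
    df = pd.DataFrame({
        'date': pd.date_range('2018-01-01', periods=120).strftime('%Y-%m-%d'),
        'open': close * 0.995,
        'close': close,
        'high': close * 1.01,
        'low': close * 0.99,
        'volume': np.random.randint(1000, 5000, 120),
    })
    print(macd(df).tail())  # columns: date, diff, dea, macd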
def kdj(df, n=9):
"""
随机指标KDJ
N日RSV=(第N日收盘价-N日内最低价)/(N日内最高价-N日内最低价)×100%
当日K值=2/3前1日K值+1/3×当日RSV=SMA(RSV,M1)
当日D值=2/3前1日D值+1/3×当日K= SMA(K,M2)
当日J值=3 ×当日K值-2×当日D值
"""
_kdj = pd.DataFrame()
_kdj['date'] = df['date']
rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
_kdj['k'] = sma(rsv, 3)
_kdj['d'] = sma(_kdj.k, 3)
_kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
return _kdj
def rsi(df, n=6):
"""
相对强弱指标(Relative Strength Index,简称RSI
LC= REF(CLOSE,1)
RSI=SMA(MAX(CLOSE-LC,0),N,1)/SMA(ABS(CLOSE-LC),N1,1)×100
SMA(C,N,M)=M/N×今日收盘价+(N-M)/N×昨日SMA(N)
"""
# pd.set_option('display.max_rows', 1000)
_rsi = pd.DataFrame()
_rsi['date'] = df['date']
px = df.close - df.close.shift(1)
px[px < 0] = 0
_rsi['rsi'] = sma(px, n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
# def tmax(x):
# if x < 0:
# x = 0
# return x
# _rsi['rsi'] = sma((df['close'] - df['close'].shift(1)).apply(tmax), n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
return _rsi
def vrsi(df, n=6):
"""
量相对强弱指标
VRSI=SMA(最大值(成交量-REF(成交量,1),0),N,1)/SMA(ABS((成交量-REF(成交量,1),N,1)×100%
"""
_vrsi = pd.DataFrame()
_vrsi['date'] = df['date']
px = df['volume'] - df['volume'].shift(1)
px[px < 0] = 0
_vrsi['vrsi'] = sma(px, n) / sma((df['volume'] - df['volume'].shift(1)).abs(), n) * 100
return _vrsi
def boll(df, n=26, k=2):
"""
布林线指标BOLL boll(26,2) MID=MA(N)
标准差MD=根号[∑(CLOSE-MA(CLOSE,N))^2/N]
UPPER=MID+k×MD
LOWER=MID-k×MD
"""
_boll = pd.DataFrame()
_boll['date'] = df.date
_boll['mid'] = _ma(df.close, n)
_mdd = _md(df.close, n)
_boll['up'] = _boll.mid + k * _mdd
_boll['low'] = _boll.mid - k * _mdd
return _boll
def bbiboll(df, n=10, k=3):
"""
BBI多空布林线 bbiboll(10,3)
BBI={MA(3)+ MA(6)+ MA(12)+ MA(24)}/4
标准差MD=根号[∑(BBI-MA(BBI,N))^2/N]
UPR= BBI+k×MD
DWN= BBI-k×MD
"""
# pd.set_option('display.max_rows', 1000)
_bbiboll = pd.DataFrame()
_bbiboll['date'] = df.date
_bbiboll['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
_bbiboll['md'] = _md(_bbiboll.bbi, n)
_bbiboll['upr'] = _bbiboll.bbi + k * _bbiboll.md
_bbiboll['dwn'] = _bbiboll.bbi - k * _bbiboll.md
return _bbiboll
def wr(df, n=14):
"""
威廉指标 w&r
WR=[最高值(最高价,N)-收盘价]/[最高值(最高价,N)-最低值(最低价,N)]×100%
"""
_wr = pd.DataFrame()
_wr['date'] = df['date']
    highest = df.high.rolling(n).max()
    _wr['wr'] = (highest - df.close) / (highest - df.low.rolling(n).min()) * 100
return _wr
def bias(df, n=12):
"""
乖离率 bias
bias=[(当日收盘价-12日平均价)/12日平均价]×100%
"""
_bias = pd.DataFrame()
_bias['date'] = df.date
_mav = df.close.rolling(n).mean()
_bias['bias'] = (np.true_divide((df.close - _mav), _mav)) * 100
# _bias["bias"] = np.vectorize(lambda x: round(Decimal(x), 4))(BIAS)
return _bias
def asi(df, n=5):
"""
振动升降指标(累计震动升降因子) ASI # 同花顺给出的公式不完整就不贴出来了
"""
_asi = pd.DataFrame()
_asi['date'] = df.date
_m = pd.DataFrame()
_m['a'] = (df.high - df.close.shift()).abs()
_m['b'] = (df.low - df.close.shift()).abs()
_m['c'] = (df.high - df.low.shift()).abs()
_m['d'] = (df.close.shift() - df.open.shift()).abs()
_m['r'] = _m.apply(lambda x: x.a + 0.5 * x.b + 0.25 * x.d if max(x.a, x.b, x.c) == x.a else (
x.b + 0.5 * x.a + 0.25 * x.d if max(x.a, x.b, x.c) == x.b else x.c + 0.25 * x.d
), axis=1)
_m['x'] = df.close - df.close.shift() + 0.5 * (df.close - df.open) + df.close.shift() - df.open.shift()
_m['k'] = np.maximum(_m.a, _m.b)
_asi['si'] = 16 * (_m.x / _m.r) * _m.k
_asi["asi"] = _ma(_asi.si, n)
return _asi
def vr_rate(df, n=26):
"""
成交量变异率 vr or vr_rate
VR=(AVS+1/2CVS)/(BVS+1/2CVS)×100
其中:
AVS:表示N日内股价上涨成交量之和
BVS:表示N日内股价下跌成交量之和
CVS:表示N日内股价不涨不跌成交量之和
"""
_vr = pd.DataFrame()
_vr['date'] = df['date']
_m = pd.DataFrame()
_m['volume'] = df.volume
_m['cs'] = df.close - df.close.shift(1)
_m['avs'] = _m.apply(lambda x: x.volume if x.cs > 0 else 0, axis=1)
_m['bvs'] = _m.apply(lambda x: x.volume if x.cs < 0 else 0, axis=1)
_m['cvs'] = _m.apply(lambda x: x.volume if x.cs == 0 else 0, axis=1)
_vr["vr"] = (_m.avs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()
) / (_m.bvs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()) * 100
return _vr
def vr(df, n=5):
"""
开市后平均每分钟的成交量与过去5个交易日平均每分钟成交量之比
量比:=V/REF(MA(V,5),1);
涨幅:=(C-REF(C,1))/REF(C,1)*100;
1)量比大于1.8,涨幅小于2%,现价涨幅在0—2%之间,在盘中选股的
选股:量比>1.8 AND 涨幅>0 AND 涨幅<2;
"""
_vr = pd.DataFrame()
_vr['date'] = df.date
_vr['vr'] = df.volume / _ma(df.volume, n).shift(1)
_vr['rr'] = (df.close - df.close.shift(1)) / df.close.shift(1) * 100
return _vr
def arbr(df, n=26):
"""
人气意愿指标 arbr(26)
N日AR=N日内(H-O)之和除以N日内(O-L)之和
其中,H为当日最高价,L为当日最低价,O为当日开盘价,N为设定的时间参数,一般原始参数日设定为26日
N日BR=N日内(H-CY)之和除以N日内(CY-L)之和
其中,H为当日最高价,L为当日最低价,CY为前一交易日的收盘价,N为设定的时间参数,一般原始参数日设定为26日。
"""
_arbr = pd.DataFrame()
_arbr['date'] = df.date
_arbr['ar'] = (df.high - df.open).rolling(n).sum() / (df.open - df.low).rolling(n).sum() * 100
_arbr['br'] = (df.high - df.close.shift(1)).rolling(n).sum() / (df.close.shift() - df.low).rolling(n).sum() * 100
return _arbr
def dpo(df, n=20, m=6):
"""
区间震荡线指标 dpo(20,6)
DPO=CLOSE-MA(CLOSE, N/2+1)
MADPO=MA(DPO,M)
"""
_dpo = pd.DataFrame()
_dpo['date'] = df['date']
_dpo['dpo'] = df.close - _ma(df.close, int(n / 2 + 1))
_dpo['dopma'] = _ma(_dpo.dpo, m)
return _dpo
def trix(df, n=12, m=20):
"""
三重指数平滑平均 TRIX(12)
TR= EMA(EMA(EMA(CLOSE,N),N),N),即进行三次平滑处理
TRIX=(TR-昨日TR)/ 昨日TR×100
TRMA=MA(TRIX,M)
"""
_trix = pd.DataFrame()
_trix['date'] = df.date
tr = _ema(_ema(_ema(df.close, n), n), n)
_trix['trix'] = (tr - tr.shift()) / tr.shift() * 100
_trix['trma'] = _ma(_trix.trix, m)
return _trix
def bbi(df):
"""
多空指数 BBI(3,6,12,24)
BBI=(3日均价+6日均价+12日均价+24日均价)/4
"""
_bbi = pd.DataFrame()
_bbi['date'] = df['date']
_bbi['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
return _bbi
def mtm(df, n=6, m=5):
"""
动力指标 MTM(6,5)
MTM(N日)=C-REF(C,N)式中,C=当日的收盘价,REF(C,N)=N日前的收盘价;N日是只计算交易日期,剔除掉节假日。
MTMMA(MTM,N1)= MA(MTM,N1)
N表示间隔天数,N1表示天数
"""
_mtm = pd.DataFrame()
_mtm['date'] = df.date
_mtm['mtm'] = df.close - df.close.shift(n)
_mtm['mtmma'] = _ma(_mtm.mtm, m)
return _mtm
def obv(df):
"""
能量潮 On Balance Volume
多空比率净额= [(收盘价-最低价)-(最高价-收盘价)] ÷( 最高价-最低价)×V # 同花顺貌似用的下面公式
主公式:当日OBV=前一日OBV+今日成交量
1.基期OBV值为0,即该股上市的第一天,OBV值为0
2.若当日收盘价>上日收盘价,则当日OBV=前一日OBV+今日成交量
3.若当日收盘价<上日收盘价,则当日OBV=前一日OBV-今日成交量
4.若当日收盘价=上日收盘价,则当日OBV=前一日OBV
"""
_obv = pd.DataFrame()
_obv["date"] = df['date']
# tmp = np.true_divide(((df.close - df.low) - (df.high - df.close)), (df.high - df.low))
# _obv['obvv'] = tmp * df.volume
# _obv["obv"] = _obv.obvv.expanding(1).sum() / 100
_m = pd.DataFrame()
_m['date'] = df.date
_m['cs'] = df.close - df.close.shift()
_m['v'] = df.volume
_m['vv'] = _m.apply(lambda x: x.v if x.cs > 0 else (-x.v if x.cs < 0 else 0), axis=1)
_obv['obv'] = _m.vv.expanding(1).sum()
return _obv
def cci(df, n=14):
"""
顺势指标
TYP:=(HIGH+LOW+CLOSE)/3
CCI:=(TYP-MA(TYP,N))/(0.015×AVEDEV(TYP,N))
"""
_cci = pd.DataFrame()
_cci["date"] = df['date']
typ = (df.high + df.low + df.close) / 3
_cci['cci'] = ((typ - typ.rolling(n).mean()) /
(0.015 * typ.rolling(min_periods=1, center=False, window=n).apply(
lambda x: np.fabs(x - x.mean()).mean())))
return _cci
def priceosc(df, n=12, m=26):
"""
价格振动指数
PRICEOSC=(MA(C,12)-MA(C,26))/MA(C,12) * 100
"""
_c = pd.DataFrame()
_c['date'] = df['date']
man = _ma(df.close, n)
_c['osc'] = (man - _ma(df.close, m)) / man * 100
return _c
def sma(a, n, m=1):
"""
平滑移动指标 Smooth Moving Average
"""
''' # 方法一,此方法有缺陷
_sma = []
for index, value in enumerate(a):
if index == 0 or pd.isna(value) or np.isnan(value):
tsma = 0
else:
# Y=(M*X+(N-M)*Y')/N
tsma = (m * value + (n - m) * tsma) / n
_sma.append(tsma)
return pd.Series(_sma)
'''
''' # 方法二
results = np.nan_to_num(a).copy()
# FIXME this is very slow
for i in range(1, len(a)):
results[i] = (m * results[i] + (n - m) * results[i - 1]) / n
# results[i] = ((n - 1) * results[i - 1] + results[i]) / n
# return results
'''
# b = np.nan_to_num(a).copy()
# return ((n - m) * a.shift(1) + m * a) / n
a = a.fillna(0)
b = a.ewm(min_periods=0, ignore_na=False, adjust=False, alpha=m/n).mean()
return b
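# Worked check of the recurrence Y = (M*X + (N-M)*Y') / N with M=1, N=3 (alpha = 1/3):
# sma(pd.Series([1, 2, 3, 4]), 3) -> 1.0, 1.3333, 1.8889, 2.5926
# e.g. the 2nd value is (1*2 + 2*1)/3 = 4/3 and the 3rd is (1*3 + 2*(4/3))/3 = 17/9.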
def dbcd(df, n=5, m=16, t=76):
"""
异同离差乖离率 dbcd(5,16,76)
BIAS=(C-MA(C,N))/MA(C,N)
DIF=(BIAS-REF(BIAS,M))
DBCD=SMA(DIF,T,1) =(1-1/T)×SMA(REF(DIF,1),T,1)+ 1/T×DIF
MM=MA(DBCD,5)
"""
_dbcd = pd.DataFrame()
_dbcd['date'] = df.date
man = _ma(df.close, n)
_bias = (df.close - man) / man
_dif = _bias - _bias.shift(m)
_dbcd['dbcd'] = sma(_dif, t)
_dbcd['mm'] = _ma(_dbcd.dbcd, n)
return _dbcd
def roc(df, n=12, m=6):
"""
变动速率 roc(12,6)
ROC=(今日收盘价-N日前的收盘价)/ N日前的收盘价×100%
ROCMA=MA(ROC,M)
ROC:(CLOSE-REF(CLOSE,N))/REF(CLOSE,N)×100
ROCMA:MA(ROC,M)
"""
_roc = pd.DataFrame()
_roc['date'] = df['date']
_roc['roc'] = (df.close - df.close.shift(n))/df.close.shift(n) * 100
_roc['rocma'] = _ma(_roc.roc, m)
return _roc
def vroc(df, n=12):
"""
量变动速率
VROC=(当日成交量-N日前的成交量)/ N日前的成交量×100%
"""
_vroc = pd.DataFrame()
_vroc['date'] = df['date']
_vroc['vroc'] = (df.volume - df.volume.shift(n)) / df.volume.shift(n) * 100
return _vroc
def cr(df, n=26):
""" 能量指标
CR=∑(H-PM)/∑(PM-L)×100
PM:上一交易日中价((最高、最低、收盘价的均值)
H:当天最高价
L:当天最低价
"""
_cr = pd.DataFrame()
_cr['date'] = df.date
# pm = ((df['high'] + df['low'] + df['close']) / 3).shift(1)
pm = (df[['high', 'low', 'close']]).mean(axis=1).shift(1)
_cr['cr'] = (df.high - pm).rolling(n).sum()/(pm - df.low).rolling(n).sum() * 100
return _cr
def psy(df, n=12):
"""
心理指标 PSY(12)
PSY=N日内上涨天数/N×100
PSY:COUNT(CLOSE>REF(CLOSE,1),N)/N×100
MAPSY=PSY的M日简单移动平均
"""
_psy = pd.DataFrame()
_psy['date'] = df.date
p = df.close - df.close.shift()
p[p <= 0] = np.nan
_psy['psy'] = p.rolling(n).count() / n * 100
return _psy
def wad(df, n=30):
"""
威廉聚散指标 WAD(30)
TRL=昨日收盘价与今日最低价中价格最低者;TRH=昨日收盘价与今日最高价中价格最高者
如果今日的收盘价>昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRL
如果今日的收盘价<昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRH
如果今日的收盘价=昨日的收盘价,则今日的A/D=0
WAD=今日的A/D+昨日的WAD;MAWAD=WAD的M日简单移动平均
"""
def dmd(x):
if x.c > 0:
y = x.close - x.trl
elif x.c < 0:
y = x.close - x.trh
else:
y = 0
return y
_wad = pd.DataFrame()
_wad['date'] = df['date']
_ad = pd.DataFrame()
_ad['trl'] = np.minimum(df.low, df.close.shift(1))
_ad['trh'] = np.maximum(df.high, df.close.shift(1))
_ad['c'] = df.close - df.close.shift()
_ad['close'] = df.close
_ad['ad'] = _ad.apply(dmd, axis=1)
_wad['wad'] = _ad.ad.expanding(1).sum()
_wad['mawad'] = _ma(_wad.wad, n)
return _wad
def mfi(df, n=14):
"""
资金流向指标 mfi(14)
MF=TYP×成交量;TYP:当日中价((最高、最低、收盘价的均值)
如果当日TYP>昨日TYP,则将当日的MF值视为当日PMF值。而当日NMF值=0
如果当日TYP<=昨日TYP,则将当日的MF值视为当日NMF值。而当日PMF值=0
MR=∑PMF/∑NMF
MFI=100-(100÷(1+MR))
"""
_mfi = pd.DataFrame()
_mfi['date'] = df.date
_m = pd.DataFrame()
_m['typ'] = df[['high', 'low', 'close']].mean(axis=1)
_m['mf'] = _m.typ * df.volume
_m['typ_shift'] = _m.typ - _m.typ.shift(1)
_m['pmf'] = _m.apply(lambda x: x.mf if x.typ_shift > 0 else 0, axis=1)
_m['nmf'] = _m.apply(lambda x: x.mf if x.typ_shift <= 0 else 0, axis=1)
# _mfi['mfi'] = 100 - (100 / (1 + _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()))
_m['mr'] = _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()
    _mfi['mfi'] = 100 * _m.mr / (1 + _m.mr)  # Note: Tonghuashun's published MFI formula differs from the one it actually uses
return _mfi
def pvt(df):
"""
pvt 量价趋势指标 pvt
如果设x=(今日收盘价—昨日收盘价)/昨日收盘价×当日成交量,
那么当日PVT指标值则为从第一个交易日起每日X值的累加。
"""
_pvt = pd.DataFrame()
_pvt['date'] = df.date
x = (df.close - df.close.shift(1)) / df.close.shift(1) * df.volume
_pvt['pvt'] = x.expanding(1).sum()
return _pvt
def wvad(df, n=24, m=6):
""" # 算法是对的,同花顺计算wvad用的n=6
威廉变异离散量 wvad(24,6)
WVAD=N1日的∑ {(当日收盘价-当日开盘价)/(当日最高价-当日最低价)×成交量}
MAWVAD=MA(WVAD,N2)
"""
_wvad = pd.DataFrame()
_wvad['date'] = df.date
# _wvad['wvad'] = (np.true_divide((df.close - df.open), (df.high - df.low)) * df.volume).rolling(n).sum()
_wvad['wvad'] = (np.true_divide((df.close - df.open), (df.high - df.low)) * df.volume).rolling(n).sum()
_wvad['mawvad'] = _ma(_wvad.wvad, m)
return _wvad
def cdp(df):
"""
逆势操作 cdp
CDP=(最高价+最低价+收盘价)/3 # 同花顺实际用的(H+L+2*c)/4
AH=CDP+(前日最高价-前日最低价)
NH=CDP×2-最低价
NL=CDP×2-最高价
AL=CDP-(前日最高价-前日最低价)
"""
_cdp = pd.DataFrame()
_cdp['date'] = df.date
# _cdp['cdp'] = (df.high + df.low + df.close * 2).shift(1) / 4
_cdp['cdp'] = df[['high', 'low', 'close', 'close']].shift().mean(axis=1)
_cdp['ah'] = _cdp.cdp + (df.high.shift(1) - df.low.shift())
_cdp['al'] = _cdp.cdp - (df.high.shift(1) - df.low.shift())
_cdp['nh'] = _cdp.cdp * 2 - df.low.shift(1)
_cdp['nl'] = _cdp.cdp * 2 - df.high.shift(1)
return _cdp
def env(df, n=14):
"""
ENV指标 ENV(14)
Upper=MA(CLOSE,N)×1.06
LOWER= MA(CLOSE,N)×0.94
"""
_env = pd.DataFrame()
_env['date'] = df.date
_env['up'] = df.close.rolling(n).mean() * 1.06
_env['low'] = df.close.rolling(n).mean() * 0.94
return _env
def mike(df, n=12):
"""
麦克指标 mike(12)
初始价(TYP)=(当日最高价+当日最低价+当日收盘价)/3
HV=N日内区间最高价
LV=N日内区间最低价
初级压力线(WR)=TYP×2-LV
中级压力线(MR)=TYP+HV-LV
强力压力线(SR)=2×HV-LV
初级支撑线(WS)=TYP×2-HV
中级支撑线(MS)=TYP-HV+LV
强力支撑线(SS)=2×LV-HV
"""
_mike = pd.DataFrame()
_mike['date'] = df.date
typ = df[['high', 'low', 'close']].mean(axis=1)
hv = df.high.rolling(n).max()
lv = df.low.rolling(n).min()
_mike['wr'] = typ * 2 - lv
_mike['mr'] = typ + hv - lv
_mike['sr'] = 2 * hv - lv
_mike['ws'] = typ * 2 - hv
_mike['ms'] = typ - hv + lv
_mike['ss'] = 2 * lv - hv
return _mike
def vma(df, n=5):
"""
量简单移动平均 VMA(5) VMA=MA(volume,N)
VOLUME表示成交量;N表示天数
"""
_vma = pd.DataFrame()
_vma['date'] = df.date
_vma['vma'] = _ma(df.volume, n)
return _vma
def vmacd(df, qn=12, sn=26, m=9):
"""
量指数平滑异同平均 vmacd(12,26,9)
今日EMA(N)=2/(N+1)×今日成交量+(N-1)/(N+1)×昨日EMA(N)
DIFF= EMA(N1)- EMA(N2)
DEA(DIF,M)= 2/(M+1)×DIF +[1-2/(M+1)]×DEA(REF(DIF,1),M)
MACD(BAR)=2×(DIF-DEA)
"""
_vmacd = pd.DataFrame()
_vmacd['date'] = df.date
_vmacd['diff'] = _ema(df.volume, qn) - _ema(df.volume, sn)
    _vmacd['dea'] = _ema(_vmacd['diff'], m)  # use _vmacd['diff'], not _vmacd.diff, because .diff is a DataFrame method
_vmacd['macd'] = (_vmacd['diff'] - _vmacd['dea'])
return _vmacd
def vosc(df, n=12, m=26):
"""
成交量震荡 vosc(12,26)
VOSC=(MA(VOLUME,SHORT)- MA(VOLUME,LONG))/MA(VOLUME,SHORT)×100
"""
_c = pd.DataFrame()
_c['date'] = df['date']
_c['osc'] = (_ma(df.volume, n) - _ma(df.volume, m)) / _ma(df.volume, n) * 100
return _c
def tapi(df, n=6):
""" # TODO: 由于get_k_data返回数据中没有amount,可以用get_h_data中amount,算法是正确的
加权指数成交值 tapi(6)
TAPI=每日成交总值/当日加权指数=a/PI;A表示每日的成交金额,PI表示当天的股价指数即指收盘价
"""
_tapi = pd.DataFrame()
# _tapi['date'] = df.date
_tapi['tapi'] = df.amount / df.close
_tapi['matapi'] = _ma(_tapi.tapi, n)
return _tapi
def vstd(df, n=10):
"""
成交量标准差 vstd(10)
VSTD=STD(Volume,N)=[∑(Volume-MA(Volume,N))^2/N]^0.5
"""
_vstd = pd.DataFrame()
_vstd['date'] = df.date
_vstd['vstd'] = df.volume.rolling(n).std(ddof=1)
return _vstd
def adtm(df, n=23, m=8):
"""
动态买卖气指标 adtm(23,8)
如果开盘价≤昨日开盘价,DTM=0
如果开盘价>昨日开盘价,DTM=(最高价-开盘价)和(开盘价-昨日开盘价)的较大值
如果开盘价≥昨日开盘价,DBM=0
如果开盘价<昨日开盘价,DBM=(开盘价-最低价)
STM=DTM在N日内的和
SBM=DBM在N日内的和
如果STM > SBM,ADTM=(STM-SBM)/STM
如果STM < SBM , ADTM = (STM-SBM)/SBM
如果STM = SBM,ADTM=0
ADTMMA=MA(ADTM,M)
"""
_adtm = pd.DataFrame()
_adtm['date'] = df.date
_m = pd.DataFrame()
_m['cc'] = df.open - df.open.shift(1)
_m['ho'] = df.high - df.open
_m['ol'] = df.open - df.low
_m['dtm'] = _m.apply(lambda x: max(x.ho, x.cc) if x.cc > 0 else 0, axis=1)
_m['dbm'] = _m.apply(lambda x: x.ol if x.cc < 0 else 0, axis=1)
_m['stm'] = _m.dtm.rolling(n).sum()
_m['sbm'] = _m.dbm.rolling(n).sum()
_m['ss'] = _m.stm - _m.sbm
_adtm['adtm'] = _m.apply(lambda x: x.ss / x.stm if x.ss > 0 else (x.ss / x.sbm if x.ss < 0 else 0), axis=1)
_adtm['adtmma'] = _ma(_adtm.adtm, m)
return _adtm
def mi(df, n=12):
"""
动量指标 mi(12)
A=CLOSE-REF(CLOSE,N)
MI=SMA(A,N,1)
"""
_mi = pd.DataFrame()
_mi['date'] = df.date
_mi['mi'] = sma(df.close - df.close.shift(n), n)
return _mi
def micd(df, n=3, m=10, k=20):
"""
异同离差动力指数 micd(3,10,20)
MI=CLOSE-ref(CLOSE,1)AMI=SMA(MI,N1,1)
DIF=MA(ref(AMI,1),N2)-MA(ref(AMI,1),N3)
MICD=SMA(DIF,10,1)
"""
_micd = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
'''
Perform "eco-exceedance" analysis for functional flow data on the Merced River and produce figures
'''
def eco_endpoints(ffc_data, rh_data):
# define the eco endpoints. 5-95th of control. table of endpoints for each ffm
for model_index, model in enumerate(ffc_data):
model['ffc_metrics'] = model['ffc_metrics'].apply(pd.to_numeric, errors='coerce')
if model['gage_id'] == 'SACSMA_CTR_T0P0S0E0I0':
control = ffc_data[model_index]['ffc_metrics']
metrics = ffc_data[0]['ffc_metrics'].index
eco_5 = []
eco_95 = []
eco_min = []
eco_max = []
for metric in metrics:
eco_5.append(np.nanquantile(control.loc[metric], 0.05))
eco_95.append(np.nanquantile(control.loc[metric], 0.95))
eco_min.append(np.nanmin(control.loc[metric]))
eco_max.append(np.nanmax(control.loc[metric]))
endpoints = pd.DataFrame(data=[eco_5, eco_95, eco_min, eco_max, metrics], index = ['eco_5', 'eco_95', 'eco_min', 'eco_max', 'metrics'])
endpoints = endpoints.transpose()
endpoints = endpoints.set_index(keys='metrics')
# define hydrograph trace to overlay in plot
for model_index, model in enumerate(rh_data):
if model['name'] == 'SACSMA_CTR_T0P0S0E0I0':
model['data'] = model['data'].apply(pd.to_numeric, errors='coerce')
hydrograph_ctrl = model['data']['1979']
def eco_endpoints_plot(ffc_data, endpoints, hydrograph_ctrl):
fig, ax = plt.subplots()
tim_metric = 'FA_Tim' # FA_Tim, SP_Tim, Wet_Tim, DS_Tim
mag_metric = 'FA_Mag' # FA_Mag, SP_Mag, Wet_BFL_Mag_50, DS_Mag_50
param = 'Seasonal intensity'
season = 'Fall Pulse eco-exceedance' # Fall Pulse, Spring Recession, Wet Season, Dry Season eco-exceedance
for model in ffc_data:
plt_color = 'grey'
colors_dict_temp = {'1':'mistyrose', '2':'lightcoral', '3':'crimson', '4':'firebrick', '5':'darkred'}
colors_dict_precip = {'-30':'darkred', '-20':'crimson', '-10':'lightcoral', '10':'dodgerblue', '20':'blue', '30':'darkblue'}
colors_dict_int = {'1':'#D9FFBF', '2':'#85CC6F', '3':'#6AB155', '4':'green', '5':'darkgreen'}
# for key in enumerate(colors_dict_int):
# import pdb; pdb.set_trace()
if model['gage_id'].find('OAT') >= 0: # check if it is an OAT model
if model['gage_id'][10] == 'T':
plt_marker = 'o'
plt_color_key = model['gage_id'].split('_')[2][1]
plt_color = colors_dict_temp[plt_color_key]
plt_label = 'temperature'
elif model['gage_id'][10] == 'P':
plt_marker = '^'
plt_color_key = re.findall(r'P([0-9.-]*[0-9]+)', model['gage_id'])[0]
plt_color = colors_dict_precip[plt_color_key]
plt_label = 'precipitation volume'
elif model['gage_id'][10] == 'S':
plt_marker = 'p'
plt_color_key = re.findall(r'S([0-9.-]*[0-9]+)', model['gage_id'])[0]
plt_color = colors_dict_int[plt_color_key]
plt_label = 'seasonal variability'
elif model['gage_id'][10] == 'E':
plt_marker = 'X'
plt_color_key = re.findall(r'E([0-9.-]*[0-9]+)', model['gage_id'])[0]
plt_color = colors_dict_int[plt_color_key]
plt_label = 'event intensity'
elif model['gage_id'][10] == 'I':
plt_marker = 'd'
plt_color_key = re.findall(r'I([0-9.-]*[0-9]+)', model['gage_id'])[0]
plt_color = colors_dict_int[plt_color_key]
plt_label = 'interannual variability'
elif model['gage_id'].find('EXT') >= 0:
plt_marker = 'o'
plt_color = 'black'
plt_label = 'extreme end scenarios'
elif model['gage_id'].find('MID') >= 0:
plt_marker = 'o'
plt_color = 'grey'
plt_label = 'mid-range scenarios'
elif model['gage_id'].find('CTR') >= 0:
continue
# import pdb; pdb.set_trace()
x = model['ffc_metrics'].loc[tim_metric]
y = model['ffc_metrics'].loc[mag_metric]
# ax.scatter(x, y, color=plt_color, marker = plt_marker, alpha=0.3, label = plt_label)
if model['gage_id'] in ('SACSMA_OATT_T5P0S0E0I0', 'SACSMA_OATP_T0P30S0E0I0', 'SACSMA_OATS_T0P0S5E0I0', 'SACSMA_OATE_T0P0S0E5I0',\
'SACSMA_OATI_T0P0S0E0I5', 'SACSMA_EXT_T0P30S5E5I5', 'SACSMA_MID_T3.4P3.4I1.7'):
ax.scatter(x, y, color=plt_color, marker = plt_marker, alpha=0.5, label = plt_label)
else:
ax.scatter(x, y, color=plt_color, marker = plt_marker, alpha=0.5)
# add min/max endpoints
plt.vlines(endpoints['eco_5'][tim_metric], ymin=endpoints['eco_5'][mag_metric], ymax=endpoints['eco_95'][mag_metric], color='black')
plt.vlines(endpoints['eco_95'][tim_metric], ymin=endpoints['eco_5'][mag_metric], ymax=endpoints['eco_95'][mag_metric], color='black')
plt.hlines(endpoints['eco_5'][mag_metric], xmin=endpoints['eco_5'][tim_metric], xmax=endpoints['eco_95'][tim_metric], label='10-90% control', color='black')
plt.hlines(endpoints['eco_95'][mag_metric], xmin=endpoints['eco_5'][tim_metric], xmax=endpoints['eco_95'][tim_metric], color='black')
plt.vlines(endpoints['eco_min'][tim_metric], ymin=endpoints['eco_min'][mag_metric], ymax=endpoints['eco_max'][mag_metric], alpha=0.5, linestyles='dashed', color='black')
plt.vlines(endpoints['eco_max'][tim_metric], ymin=endpoints['eco_min'][mag_metric], ymax=endpoints['eco_max'][mag_metric], alpha=0.5, linestyles='dashed', color='black')
plt.hlines(endpoints['eco_min'][mag_metric], xmin=endpoints['eco_min'][tim_metric], xmax=endpoints['eco_max'][tim_metric], label='Full range control', alpha=0.5, linestyles='dashed', color='black')
plt.hlines(endpoints['eco_max'][mag_metric], xmin=endpoints['eco_min'][tim_metric], xmax=endpoints['eco_max'][tim_metric], alpha=0.5, linestyles='dashed', color='black')
# add hydrograph trace over top
plt.plot(hydrograph_ctrl, color='goldenrod', linewidth=2)
month_ticks = [0,32,60,91,121,152,182,213,244,274,305,335]
month_labels = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep']
plt.xticks(month_ticks, month_labels)
plt.yticks()
ax.set_ylabel('Flow (cfs)')
plt.title(season)
ax.legend(loc='upper right') # legend on for Fall Pulse
plt.yscale('symlog', linthreshy=10000) # use this for spring and fall plots
plt.ylim([-200, 120000]) # -200,120000 for fall pulse, -200,170000 for spring, -200, 15000 for wet, -200, 9000 for dry
plt.xlim([-10,380]) # -10,380 for fall/spring/wet/dry
plt.savefig('data_outputs/plots/eco_exceedance/fall_pulse.pdf', dpi=1200)
plt.show()
plots = eco_endpoints_plot(ffc_data, endpoints, hydrograph_ctrl)
# For each model, determine %exceedance over eco endpoints. (for each metric)
model_name = []
total_exceedance = []
annual_metrics = []
fall_pulse = []
wet_season = []
peak_flows = []
spring_recession = []
dry_season = []
metrics = metrics.drop(['Peak_5', 'Peak_10', 'Peak_Dur_2', 'Peak_Dur_5', 'Peak_Dur_10', 'Peak_Fre_2', 'Peak_Fre_5', 'Peak_Fre_10', 'Std', 'DS_No_Flow'])
for model_index, model in enumerate(ffc_data):
# enter model name into table
model_name.append(model['gage_id'])
# create a dict/table and fill with control-based eco limits for each metric - done! endpoints.
# create a dict/table and fill with calc eco exceedance for each metric of model
dict = {}
for metric in metrics:
count = 0
for val in model['ffc_metrics'].loc[metric]:
if val < endpoints['eco_min'][metric] or val > endpoints['eco_max'][metric]:
count += 1
dict[metric] = count/len(model['ffc_metrics'].loc[metric])
total_exceedance.append(sum(dict.values()) / len(dict) * 100)
annual_metrics.append(sum([dict['Avg'], dict['CV']]) / 2 * 100)
fall_pulse.append(sum([dict['FA_Mag'], dict['FA_Dur'], dict['FA_Tim']]) / 3 * 100)
wet_season.append(sum([dict['Wet_BFL_Mag_10'], dict['Wet_BFL_Mag_50'], dict['Wet_Tim'], dict['Wet_BFL_Dur']]) / 4 * 100)
peak_flows.append(dict['Peak_2'] * 100)
spring_recession.append(sum([dict['SP_Mag'], dict['SP_Tim'], dict['SP_Dur'], dict['SP_ROC']]) / 4 * 100)
dry_season.append(sum([dict['DS_Mag_50'], dict['DS_Mag_90'], dict['DS_Tim'], dict['DS_Dur_WS']]) / 4 * 100)
data = {'model_name':model_name, 'total_exceedance':total_exceedance, 'annual_metrics':annual_metrics, 'fall_pulse':fall_pulse, \
'wet_season':wet_season, 'peak_flows':peak_flows, 'spring_recession':spring_recession, 'dry_season':dry_season}
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Prep sam compare data for Bayesian Machine
Logic:
* group df by sample,
* only keep samples with more than 2 reps
* summarize columns
* filtering
* calculate:
* total_reads_counted
* both total
* g1_total
* g2 total
* ase total
* for each rep, if APN > input then
* flag_APN = 1 (flag_APN = 0 if APN < input, flag_APN = -1 if APN < 0)
    * if flag_APN = 1 for at least 1 of the reps then
* flag_analyze = 1
* merge reps together
"""
import argparse
import os
import pandas as pd
import numpy as np
from functools import reduce
DEBUG = False
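# Hedged illustration of the per-replicate APN flagging rule described in the module
# docstring; the "APN_both" column name and this helper are assumptions for illustration,
# and the actual per-sample filtering is performed in main().
def flag_apn_for_rep(rep_df, apn_cutoff):
    """Return 1 if APN > cutoff, 0 if 0 <= APN <= cutoff, -1 if APN < 0."""
    flags = np.where(rep_df["APN_both"] > apn_cutoff, 1, 0)
    flags = np.where(rep_df["APN_both"] < 0, -1, flags)
    return pd.Series(flags, index=rep_df.index)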
def getOptions():
parser = argparse.ArgumentParser(description="Return best row in blast scores file")
parser.add_argument(
"-o",
"--output",
dest="output",
action="store",
required=True,
help="Output directory for filtered ase counts",
)
parser.add_argument(
"-collection_identifiers",
"--collection_identifiers",
dest="collection_identifiers",
action="store",
required=True,
help="Input original names [Required]",
)
parser.add_argument(
"-collection_filenames",
"--collection_filenames",
dest="collection_filenames",
action="store",
required=True,
help="Input galaxy names [Required]",
)
parser.add_argument(
"-d",
"--design",
dest="design",
action="store",
required=True,
help="Design file",
)
parser.add_argument(
"-p1",
"--parent1",
dest="parent1",
action="store",
required=True,
help="Column containing parent 1 genome, G1",
)
parser.add_argument(
"-p2",
"--parent2",
dest="parent2",
action="store",
required=True,
help="Column containing parent 2 genome, G2",
)
parser.add_argument(
"-s",
"--sampleCol",
dest="sampleCol",
action="store",
required=True,
help="Column containing sample names, no rep info",
)
parser.add_argument(
"-id",
"--sampleIDCol",
dest="sampleIDCol",
action="store",
required=True,
help="Column containing sampleID names, has rep info",
)
parser.add_argument(
"-a",
"--apn",
dest="apn",
action="store",
required=True,
type=int,
help="APN (average per nucleotide) value for flagging a feature as found and analyzable",
)
parser.add_argument(
"--debug", action="store_true", default=False, help="Print debugging output"
)
args = parser.parse_args()
return args
def main():
"""Main Function"""
args = getOptions()
global DEBUG
if args.debug:
DEBUG = True
identifiers = [i.strip() for i in args.collection_identifiers.split(",")]
filenames = [i.strip() for i in args.collection_filenames.split(",")]
input_dict = dict(zip(identifiers, filenames))
# Read in design file as dataframe (as a TSV file)
df_design = | pd.read_table(args.design, header=0) | pandas.read_table |
"""
This module contains utility functions for data manipulation and plotting of
results and data
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import torch
#######################################################
# Data Utilities
#######################################################
def load_trained_model(previous_model, model, optimizer):
checkpoint = torch.load(previous_model)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
model.eval()
return model, optimizer
def save_trained_model(save_path, epoch, model, optimizer, train_loss, test_loss):
save_dict = {
'epoch': epoch,
'state_dict': model.state_dict(),
# 'train_losses': train_loss
# 'test_losses': [pce_test_loss, voc_test_loss,
# jsc_test_loss, ff_test_loss]
'optimizer': optimizer.state_dict()
}
torch.save(save_dict, save_path)
return
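# Round-trip sketch (hypothetical filename): persist a checkpoint after an epoch,
# then restore it before resuming training or evaluation.
#   save_trained_model('checkpoint.pt', epoch, model, optimizer, train_loss, test_loss)
#   model, optimizer = load_trained_model('checkpoint.pt', model, optimizer)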
def df_MinMax_normalize(dataframe):
df = dataframe
normed_df = | pd.DataFrame() | pandas.DataFrame |
import collections
import json
import os
from datetime import time
import random
from tqdm import tqdm
from main import cvtCsvDataframe
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import time
from main import FPGrowth
from shopping import Shopping, Cell
import main
# QoL for display
pd.set_option('display.max_columns', 30)
def encodeData():
df = pd.read_csv('products.txt', delimiter="\t")
dataHere = df['Nome'].str.strip()
indexes = [x for x in range(0,len(dataHere))]
df['ID'] = indexes
#print(data.to_numpy())
return df
products = encodeData()
'''
It is suppose to simulate N amount of shopping trips given test wishlists and staminas.
1 - Create a shopping with the given configuration
2 - Generate N random wishlists and their stamina
3 - Simulate each one and save the results
4 - Analyse the supermarket profit
'''
class SoS:
def __init__(self, configuration, staminaDistr,explanations):
self.shoppingClass = Shopping([23,21],configuration)
#self.shoppingClass.changeShoppingConfig(configuration)
self.shopping = self.shoppingClass.shopping
self.staminaDistr = staminaDistr
self.explanations = explanations
self.auxNeighbors = self.getAuxNeighbors()
self.auxNeighborsPrimary = self.getAuxNeighborsPrimary()
data, explanations = cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
mergedReceiptExplanations = pd.merge(data, explanations, on='receiptID', how='outer')
self.boughtAndWishlist = mergedReceiptExplanations[['PRODUCTS', 'WISHLIST']].to_numpy()
def generateCustomers(self, samples):
'''
:return: Returns a sample of random customers with stamina and wishlist
'''
customers = []
wishlists = list(self.explanations['WISHLIST'].to_numpy())
randomWishlists = random.sample(wishlists,samples)
staminas = self.staminaDistr.sample(samples)
for i, j in zip(randomWishlists,staminas):
customers.append((i,int(j)))
return customers
def findNeighbors(self, currentCell, typeSearch):
'''
:param currentCell: Current cell to search
:param typeSearch: Type of search 1 - Halls 2- Shelves
:return: Return the neighbors
'''
neighbors = []
try:
#If there are neighbors in the top
if currentCell[0] > 0:
#Get the top neighbor
neighbors.append(self.shopping[currentCell[0] - 1][currentCell[1]].id)
#If there are neighbors on the left
if currentCell[1] > 0:
neighbors.append(self.shopping[currentCell[0]][currentCell[1] - 1].id)
#If there are neighbors on the right
if currentCell[1] < self.shopping.shape[1]:
neighbors.append(self.shopping[currentCell[0]][currentCell[1] + 1].id)
#If there are neighbors on the bottom
if currentCell[0] < self.shopping.shape[0]:
neighbors.append(self.shopping[currentCell[0] + 1][currentCell[1]].id)
except:
pass
aux = []
if typeSearch == 1:
notToAdd = [1,461,483,23]
for i in neighbors:
if i not in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
else:
notToAdd = [1, 461, 483, 23]
for i in neighbors:
if i in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
return aux
def findClosestProduct(self, item):
'''
:param item: Receives an item to search for
:return: Returns the closest product path there is
'''
size = self.shopping.shape
allPathsToItem = []
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].product == item:
pathsToThisCell = self.auxNeighborsPrimary[f"[{i},{j}]"]
for s in pathsToThisCell: allPathsToItem.append(s)
pathsLenght = []
paths = []
for possiblePath in allPathsToItem:
paths.append(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath))
pathsLenght.append(len(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath)))
#Return the minimium path
return paths[np.argmin(pathsLenght)]
def getAuxNeighborsPrimary(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 1)
return aux
def getAuxNeighbors(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 2)
return aux
def getCellProducts(self, cell):
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].id == cell:
cells = self.auxNeighbors[f"[{i},{j}]"]
products = []
for c in cells:
products.append(self.shoppingClass.productsAux[c])
return products
def getProbabilityOfPicking(self, product):
#Check if the file already exists
if os.path.exists("probabilityBuy.p"): probToBuy = pickle.load(open("probabilityBuy.p","rb"))
#Otherwise write it
else:
# organize_data()
# Read the csv file and convert it to a well formatted dataframe
aux = {}
#For each receipt
for p in tqdm(self.boughtAndWishlist):
#go through the products bought
for i in p[0]:
if i not in list(aux.keys()):
aux[i] = {'NotIn': 0, 'Counter':0}
#Increase counter
aux[i]['Counter'] += 1
#If the product bought is not in the wishlist
if i not in p[1]:
#Increase counter of times that the product was bought and was not in the wishlist
aux[i]['NotIn'] += 1
probToBuy = {}
for k in aux:
probToBuy[k] = aux[k]['NotIn'] / aux[k]['Counter']
pickle.dump(probToBuy,open("probabilityBuy.p","wb"))
        # Return the respective probability
return probToBuy[product]
def simulateCustomers(self,customers):
'''
:param customers: Receives a list of customers
:return: Returns the simulation results
'''
sales = []
#For each customer
for customer in tqdm(customers):
currentWishlist = customer[0]
currentWishlist.reverse()
currentStamina = customer[1]
productsBought = []
#print(f"Customer wishlist: {currentWishlist}")
#While the customer still has products the wants and still has stamina keep the simulation
while len(currentWishlist) > 0 and currentStamina > 0:
item = currentWishlist[0]
#print(f"Looking for {products.loc[products['ID'] == item, 'Nome'].iloc[0]}")
closest = self.findClosestProduct(item)
#print(f"Found {products.loc[products['ID'] == item, 'Nome'].iloc[0]} on cell {closest[-1]}")
for cell in range(len(closest)):
#print(f"I am on cell {closest[cell]}")
prodcutsCloseToCell = self.getCellProducts(closest[cell])
for prod in prodcutsCloseToCell:
#If the product is in the wishlist then buy it
if prod in currentWishlist:
#print(f"Found {products.loc[products['ID'] == prod, 'Nome'].iloc[0]} which was in my wishlist, so I bought it.")
#Remove it from the wishlist
currentWishlist.remove(prod)
productsBought.append(prod)
#Otherwise calculate the probability of buying it
else:
#Probability of this product being picked without being in the wishlist
prob = self.getProbabilityOfPicking(prod)
#Random probability
randomProb = random.uniform(0,1)
#If it is bought
if randomProb <= prob:
productsBought.append(prod)
#print(f"Felt like buying {products.loc[products['ID'] == prod, 'Nome'].iloc[0]}, so I bought it.")
currentStamina -= 1
#print(f"Current stamina : {currentStamina}")
#Scenarios that the person leaves the shopping
if currentStamina <= 0:
#print("I got tired!")
break
elif len(currentWishlist) <= 0:
#print("Bought everything!")
break
sales.append(productsBought)
return sales
def evaluateShoppingCost(self, sales):
'''
:param sales: Receives a list of sales from customers
        :return: Returns the calculated profit for those sales
'''
totalProfit = 0
for sale in tqdm(sales):
for product in sale:
totalProfit += (products.loc[products['ID'] == product, 'Preço'].iloc[0] / products.loc[products['ID'] == product, '<NAME>'].iloc[0])
return totalProfit
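# End-to-end usage sketch (all inputs below are placeholders for illustration):
#   sim = SoS(configuration, staminaDistr, explanations)
#   customers = sim.generateCustomers(500)
#   sales = sim.simulateCustomers(customers)
#   profit = sim.evaluateShoppingCost(sales)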
def generateSimulator(config):
#QoL for display
pd.set_option('display.max_columns', 30)
data, explanations = main.cvtCsvDataframe(pd.read_csv("data.csv"), | pd.read_csv("explanations.csv") | pandas.read_csv |
from numpy.linalg.linalg import eig
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
engdata = pd.read_csv("./engdata.txt")
pdata = engdata.loc[:, ["Age", "Salary"]]
pdata = pdata.drop_duplicates()
scaler = StandardScaler()
scaler = scaler.fit(pdata)
transformed = pd.DataFrame(scaler.transform(pdata), columns=["Age", "Salary"])
plt.scatter(pdata.Age, pdata.Salary)
plt.xlabel("Age")
plt.ylabel("Salary")
plt.show()
plt.scatter(transformed.Age, transformed.Salary)
plt.xlabel("Age")
plt.ylabel("Salary")
plt.show()
data_sample = pdata.sample(n=150, random_state=1, replace=True)
plt.scatter(pdata.Age, pdata.Salary)
plt.xlabel("Age")
plt.ylabel("Salary")
plt.show()
plt.scatter(data_sample.Age, data_sample.Salary)
plt.xlabel("Age")
plt.ylabel("Salary")
plt.show()
discAge = | pd.cut(pdata.Age, [0, 10, 20, 30, 40, 50, 60, 70, 80]) | pandas.cut |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
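        # The explicit column order determines the layout of the underlying values.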
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
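        # Mixed-type input stays mixed: NaN remains a float and ints stay ints.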
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
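        # Scalars broadcast across the index and keep their natural dtypes.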
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
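        # Keys missing from individual dicts should show up as NaN in the union of columns.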
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
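        # Named Series contribute their names as row labels; unnamed ones get fallback labels.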
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
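        # Dict and Series values must resolve to a single, unambiguous index.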
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
        # this is a bit non-intuitive here; the Series collapses down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
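        # A BlockManager input is resized to the narrower index/columns passed in.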
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
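        # NaN cannot be represented in int64, so the requested dtype falls back to float.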
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
        result = result.sort_index()
        expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
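        # copy=True must detach the new frame from the source frame's data.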
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
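        # None values in the input tuples should become NaN.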
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
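        # Decimal values stay object dtype unless coerce_float=True is passed.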
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
        results = df2_obj.get_dtype_counts().sort_index()
        expected = Series({'datetime64[ns]': 1, 'object': 1}).sort_index()
        tm.assert_series_equal(results, expected)
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
        # it is actually tricky to create the record-like arrays and
        # keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
        # list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = {x: y for x, y in compat.iteritems(df)}
asdict2 = {x: y.values for x, y in compat.iteritems(df)}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
pytest.raises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
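        # Records only need to be indexable/iterable; they do not have to be tuples.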
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
expected = Index(['bar'])
assert len(result) == 0
assert result.index.name == 'foo'
tm.assert_index_equal(result.columns, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
tm.assert_series_equal(result, expected)
result = DataFrame(Series(name=0)).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = DataFrame({'A': [0, 1, 2, 3, 4]}, dtype=dtype or 'int64')
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
class TestDataFrameConstructorWithDatetimeTZ(TestData):
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
        assert df['A'].dtype == 'datetime64[ns, US/Eastern]'
assert df['A'].name == 'A'
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
        tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
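        # value_vars accepts a single label or a list and restricts the melted columns.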
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
        tm.assert_frame_equal(result4, expected4)
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = | DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}) | pandas.DataFrame |
from sqlalchemy import create_engine
import pandas as pd
import os
csv_data = pd.read_csv('./assets/ks-projects-201801.csv')
df = | pd.DataFrame(csv_data) | pandas.DataFrame |
import pytest, pandas
from os import remove
from datetime import date
from patentpy.utility import get_date_tues
from patentpy.convert_txt import convert_txt_to_df
from patentpy.acquire import get_bulk_patent_data
### TEST_GET_BULK_PATENT_DATA ###
# test generic; should return true and create/append to csv file.
def test_get_bulk_patent_data():
# run func x2 and see if csv formatted version == df version
    df = get_bulk_patent_data([i for i in range(2001, 2006, 2)], [1 for i in range(0, 3)])
    get_bulk_patent_data([i for i in range(2001, 2006, 2)], [1 for i in range(0, 3)], "test.csv")
df_from_csv = | pandas.read_csv("test.csv") | pandas.read_csv |
import baostock as bs
import pandas as pd
import datetime
import time
from sqlalchemy import create_engine
def download_data(date):
    # Fetch index and stock data for the given date
stock_rs = bs.query_all_stock(date)
stock_df = stock_rs.get_data()
data_df = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
| tm.assert_frame_equal(ri, ei) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but return a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
| assert_panel_equal(unshifted, ps) | pandas.util.testing.assert_panel_equal |
# -*- coding: utf-8 -*-
"""
Create the economic tables required to run the MRIA model.
"""
import numpy as np
import pandas as pd
class io_basic(object):
"""
This is the class object **io_basic** which is used to set up the table.
"""
def __init__(self, name, filepath, list_regions):
"""
Creation of a the object instance, specify the file path and sectors to include.
Parameters
- *self* - **io_basic** class object
- name - string name for the **io_basic** class
- filepath - string path name to location of IO table
- list_regions - list of regions to include
Output
- *self*.name - string name of the model in the **io_basic** class
        - *self*.file - filepath of the IO table in the **io_basic** class
        - *self*.regions - list of regions in the **io_basic** class
        - *self*.total_regions - integer count of regions in the **io_basic** class
"""
self.name = name
self.file = filepath
self.regions = list_regions
self.total_regions = len(list_regions)
def load_labels(self):
"""
Load all labels for the **io_basic** class.
Parameters
- *self* - **io_basic** class object
Output
- *self*.FD_labels - labels for Final Demand columns in the **io_basic** class
- *self*.FD_cat - labels for Final Demand categories in the **io_basic** class
- *self*.Exp_labels - labels for Export columns in the **io_basic** class
- *self*.T_labels - region and sector labels for Z-matrix in the **io_basic** class
- *self*.VA_labels - labels for Value Added in the **io_basic** class
- *self*.sectors - labels for the sectors in the **io_basic** class
"""
if 'xls' in self.file:
FD_labels = pd.read_excel(self.file, sheet_name="labels_FD",
names=['reg', 'tfd'], header=None)
Exp_labels = pd.read_excel(self.file, sheet_name="labels_ExpROW", names=[
'export'], header=None)
T_labels = pd.read_excel(self.file, sheet_name="labels_T",
header=None, names=['reg', 'ind'])
VA_labels = pd.read_excel(self.file, sheet_name="labels_VA", names=[
'Import', 'ValueA'], header=None)
if len(self.regions) == 0:
self.regions = list(T_labels['reg'].unique())
self.total_regions = len(self.regions)
self.FD_labels = FD_labels
self.FD_cat = list(self.FD_labels['tfd'].unique())
self.Exp_labels = Exp_labels
self.T_labels = T_labels
self.VA_labels = VA_labels
self.sectors = list(T_labels['ind'].unique())
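    # A minimal usage sketch (the file name and region names below are
    # illustrative placeholders, not part of the original model setup):
    #
    #   basic = io_basic('test_model', 'io_table.xlsx', ['region1', 'region2'])
    #   basic.load_labels()
    #   print(basic.sectors, basic.FD_cat)
    #
    # load_labels() only reads the label sheets; load_all_data() below pulls in
    # the full Final Demand, Z-matrix, Value Added and Rest-of-World tables.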
def load_all_data(self):
"""
Load all data for the **io_basic** class.
Parameters
- *self* - **io_basic** class object
Output
- *self*.FD_data - pandas Dataframe of Final Demand in the **io_basic** class
- *self*.T_data - pandas Dataframe of Z matrix in the **io_basic** class
- *self*.VA_data - pandas Dataframe of Value Added in the **io_basic** class
- *self*.ImpROW_data - pandas Dataframe of import from the Rest of the World in the **io_basic** class
- *self*.ExpROW_data - pandas Dataframe of exports to the Rest of The World in the **io_basic** class
"""
        if not hasattr(self, 'FD_labels'):
            self.load_labels()
#LOAD DATA
FD_data = | pd.read_excel(self.file, sheet_name="FD", header=None) | pandas.read_excel |
import pandas as pd
import numpy as np
import pytest
from .conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature, tools
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = | pd.Series([0., 23.06066166, 5.], index=times) | pandas.Series |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
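# Illustrative use of the helper above (the frame is made up for the example):
#   _check_cast(DataFrame({"a": [1.0], "b": [2.0]}), "float64") passes silently,
#   while any column with a different dtype makes the assert raise AssertionError.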
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
| tm.assert_frame_equal(ri, ei) | pandas.util.testing.assert_frame_equal |
import os
import sys
import glob
import numpy as np
import pandas as pd
from cooler import Cooler
import matplotlib
import matplotlib.pyplot as plt
import h5py
import seaborn as sns
import shelve
from collections import defaultdict
from collections.abc import Iterable
# cooler_path = '/net/levsha/share/lab/dekkerU54/new_files/'
# cooler_paths = ['/net/levsha/share/lab/U54/2019_mapping_hg38/U54_matrix/cooler_library/',
# '/net/levsha/share/lab/U54/2019_mapping_hg38/U54_deep/cooler_library_group/']
# dot_paths = ['/net/levsha/share/lab/U54/2019_mapping_hg38/U54_matrix/snakedots/',
# '/net/levsha/share/lab/U54/2019_mapping_hg38/U54_deep/snakedots/']
# analysis_path = '/net/levsha/share/sameer/U54/hic_matrix/'
# db_path = '/net/levsha/share/sameer/U54/hic_matrix/metadata/U54_matrix_info'
hela_chroms = ['chr4', 'chr14', 'chr17', 'chr18', 'chr20', 'chr21']
class Database:
def __init__(self, db_path):
self.db_path = db_path
if os.path.exists(f'{db_path}.dat'):
with shelve.open(db_path, flag='r') as db:
self.metadata = db['metadata']
self.cooler_paths = db['cooler_paths']
self.analysis_path = db['analysis_path']
self.dot_paths = db['dot_paths']
keys = list(db.keys())
keys.remove('metadata')
keys.remove('cooler_paths')
keys.remove('analysis_path')
keys.remove('dot_paths')
self.keys = keys
else:
self.metadata = None
self.keys = []
self.cooler_paths = ''
self.analysis_path = ''
self.dot_paths = ''
def create_dataset(self, table, cool_paths, analysis_path, dot_paths):
if os.path.exists(f'{self.db_path}.dat'):
print(f'Database already exists at {self.db_path}')
raise FileExistsError
else:
assert np.all([s in table.columns for s in ['lib_name', 'celltype', 'xlink', 'enzyme', 'cycle', 'seq']]), 'table does not contain the required columns'
with shelve.open(self.db_path, flag='n') as db:
db['cooler_paths'] = cool_paths
self.cooler_paths = cool_paths
db['analysis_path'] = analysis_path
self.analysis_path = analysis_path
db['dot_paths'] = dot_paths
self.dot_paths = dot_paths
db['metadata'] = table
self.metadata = table
def get_tables(self, keys=None):
if self.metadata is None:
            raise RuntimeError('Database not initialized')
else:
result = self.metadata
if keys is None:
return result
elif isinstance(keys, str):
keys = [keys]
with shelve.open(self.db_path, flag='r') as db:
for key in keys:
assert key in self.keys, "Key not found in database"
df = db[key]
result = result.merge(df, on='lib_name', how='outer')
return result
def add_table(self, key, table):
assert 'lib_name' in table.columns, "Please pass table with lib_names columns in it"
table_lib_names = table['lib_name'].values
with shelve.open(self.db_path, flag='w') as db:
assert key not in self.keys, "Key already exists. If you wish to modify this, please use modify_table() method"
meta_lib_names = db['metadata']['lib_name'].values
assert np.all(meta_lib_names == table_lib_names), 'List of libraries does not match those in metadata'
db[key] = table
self.keys.append(key)
def remove_table(self, key):
bad_keys = ['metadata', 'cooler_paths', 'analysis_path', 'dot_paths']
assert key not in bad_keys, f"The following keys should not be deleted: {bad_keys}"
with shelve.open(self.db_path, flag='w') as db:
assert key in self.keys, "Key not found in database"
del db[key]
self.keys.remove(key)
def modify_table(self, key, new_table):
assert 'lib_name' in new_table.columns, "Please pass table with lib_names columns in it"
table_lib_names = new_table['lib_name'].values
meta_lib_names = self.metadata['lib_name'].values
assert np.all(meta_lib_names == table_lib_names), 'List of libraries does not match those in metadata'
with shelve.open(self.db_path, flag='w') as db:
assert key in self.keys, "Key not found in database. If you want to add a table, please use add_table() method"
del db[key]
db[key] = new_table
def get_coolers(self, table, res=1000000):
names = table['lib_name'].values
cool_dict = defaultdict(list)
for name in names:
if name not in self.metadata['lib_name'].values:
print(f'Name: {name} not found in metadata. Skipping')
continue
cool_dict['lib_name'].append(name)
flag = True
for cpath in self.cooler_paths:
if f'{name}.hg38.mapq_30.1000.mcool' in os.listdir(cpath):
flag = False
cool = Cooler(cpath+f'{name}.hg38.mapq_30.1000.mcool::/resolutions/{res}')
cool_dict[f'cooler_{res}'].append(cool)
if flag:
print(f'Cooler not found matching {name}. Appending np.nan to appropriate row')
cool_dict[f'cooler_{res}'].append(np.nan)
df = pd.DataFrame(cool_dict)
df = table.copy(deep=True).merge(df, on='lib_name', how='outer')
return df
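    # A hedged usage sketch (the shelve path below is a placeholder; a database
    # previously written by create_dataset() is assumed):
    #
    #   db = Database('/path/to/matrix_info')
    #   meta = db.get_tables()                 # metadata table only
    #   with_cool = db.get_coolers(meta, res=1000000)
    #
    # get_coolers() adds a 'cooler_1000000' column of cooler.Cooler handles and
    # fills in np.nan for any library whose .mcool file cannot be found.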
def get_eigendecomps(self, table, res=1000000, subdir='eigdecomp/'):
comp_path = self.analysis_path+f'{subdir}/{res}/'
if not os.path.isdir(comp_path):
            raise NotADirectoryError(f'{comp_path} is not a valid directory')
names = table['lib_name'].values
keys = ['lams', 'vectors']
comp_dict = defaultdict(list)
for name in names:
if name not in self.metadata['lib_name'].values:
print(f'Name: {name} not found in metadata. Skipping')
continue
comp_dict['lib_name'].append(name)
for k in keys:
if f'{name}.hdf5' in os.listdir(comp_path):
comp_dict[f'{k}_{res}'].append(
pd.read_hdf(comp_path+f'{name}.hdf5', key=k))
else:
comp_dict[f'{k}_{res}'].append(np.nan)
df = pd.DataFrame(comp_dict)
df = table.copy(deep=True).merge(df, on='lib_name', how='outer')
return df
def get_scalings(self, table, subdir='scalings/global/', trans=False):
scale_path = self.analysis_path+subdir
if not os.path.isdir(scale_path):
            raise NotADirectoryError(f'{scale_path} is not a valid directory')
        names = table['lib_name'].values
if trans:
keys = ['scaling','trans_lvl']
else:
keys = ['scaling']
scale_dict = defaultdict(list)
for name in names:
if name not in self.metadata['lib_name'].values:
print(f'Name: {name} not found in metadata. Skipping')
continue
scale_dict['lib_name'].append(name)
if f'{name}.hdf5' in os.listdir(scale_path):
scale_dict['scaling'].append(Scaling(scale_path+f'{name}.hdf5', keys))
else:
scale_dict['scaling'].append(np.nan)
df = pd.DataFrame(scale_dict)
        df = table.copy(deep=True).merge(df, on='lib_name', how='outer')
return df
def get_pileups(self, table, subdir='pileup/dots/5000/', col_name='dots'):
pileup_path = self.analysis_path+subdir
if not os.path.isdir(pileup_path):
            raise NotADirectoryError(f'{pileup_path} is not a valid directory')
names = table['lib_name'].values
pileup_dict = defaultdict(list)
for name in names:
if name not in self.metadata['lib_name'].values:
print(f'Name: {name} not found in metadata. Skipping')
continue
pileup_dict['lib_name'].append(name)
pileup_dict[col_name].append(Pileup(pileup_path, f'{name}.npy'))
df = | pd.DataFrame(pileup_dict) | pandas.DataFrame |
import pandas as pd
# Read the sample_csv.csv file
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Display the data types
print("Data types of df:\n", df.dtypes)
# Convert the quantity column to a numeric float dtype
df["quantity"] = | pd.to_numeric(df["quantity"], downcast="float") | pandas.to_numeric |
import pandas as pd
import math
def combination_generator():
dfA = pd.read_csv('A_feature.csv', header=0)
dfB = pd.read_csv('B_feature.csv', header=0)
dfX = | pd.read_csv('X_feature.csv', header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Small analysis of Estonian kennel shows, using Bernese mountain dog data from kennelliit.ee and the CatBoost algorithm
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from catboost import CatBoostRegressor, CatBoostClassifier, Pool, cv
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
# load and join all data to a single frame
df_2019 = pd.read_csv('dogshows_bernese_est_2019.csv')
df_2018 = pd.read_csv('dogshows_bernese_est_2018.csv')
df_2017 = pd.read_csv('dogshows_bernese_est_2017.csv')
df_2016 = pd.read_csv('dogshows_bernese_est_2016.csv')
df_2015 = pd.read_csv('dogshows_bernese_est_2015.csv')
df_2014 = pd.read_csv('dogshows_bernese_est_2014.csv')
df_2013 = pd.read_csv('dogshows_bernese_est_2013.csv')
frames = [df_2019, df_2018, df_2017, df_2016, df_2015, df_2014, df_2013]
df = | pd.concat(frames, join='inner') | pandas.concat |
import numpy as np
import pandas as pd
import os, errno
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
def save_df_to_npz(obj, filename):
np.savez_compressed(filename, data=obj.values, index=obj.index.values, columns=obj.columns.values)
def save_df_to_text(obj, filename):
obj.to_csv(filename, sep='\t')
def load_df_from_npz(filename):
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
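# The two helpers above are meant as a lossless DataFrame round trip, e.g.
#   save_df_to_npz(df, 'scores.df.npz'); df2 = load_df_from_npz('scores.df.npz')
# ('scores.df.npz' is only an illustrative file name).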
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)
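# worker_filter deals an iterable out round-robin across workers; for example
# list(worker_filter(range(10), 1, 3)) == [1, 4, 7] (the items for worker 1 of 3).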
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1,1))
D += squared_norms.reshape((1,-1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
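# fast_euclidean uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b on the
# rows of `mat` and returns the condensed distance vector. As a small worked
# example, mat = np.array([[0., 0.], [3., 4.]]) gives array([5.]), the single
# pairwise distance between the two rows.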
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return(beta)
def fast_ols_all_cols_df(X,Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return(beta)
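# Illustrative call (X and Y stand for any aligned pandas DataFrames): running
#   beta = fast_ols_all_cols_df(X, Y)
# returns the least-squares coefficients of Y ~ X, indexed by the columns of X,
# with one column of beta per column of Y.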
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean**2)
return(var)
def get_highvar_genes_sparse(expression, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy(); E2.data **= 2; gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean**2))
del(E2)
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var)/gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = ((gene_fano > w_fano_low) &
(gene_fano < w_fano_high) &
(gene_mean > w_mean_low) &
(gene_mean < w_mean_high))
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_mean + (B**2)
fano_ratio = (gene_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
            T = (1. + gene_fano[winsor_box].std())
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_mean,
'var': gene_var,
'fano': gene_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
def get_highvar_genes(input_counts, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var/gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var)/gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = ((gene_counts_fano > w_fano_low) &
(gene_counts_fano < w_fano_high) &
(gene_counts_mean > w_mean_low) &
(gene_counts_mean < w_mean_high))
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_counts_mean + (B**2)
fano_ratio = (gene_counts_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
T = (1. + gene_counts_fano[winsor_box].std())
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_counts_mean,
'var': gene_counts_var,
'fano': gene_counts_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
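# A hedged example of selecting overdispersed genes (counts_df is an assumed
# cells x genes DataFrame, not defined in this module):
#
#   stats, fano_params = get_highvar_genes(counts_df, numgenes=2000)
#   hvgs = stats.index[stats['high_var']]
#
# 'stats' also carries the per-gene mean, variance, Fano factor and the expected
# Fano line used for the cutoff.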
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
sc.pp.normalize_per_cell(tpm, counts_per_cell_after=1e6)
return(tpm)
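# e.g. tpm = compute_tpm(adata) (adata standing in for any raw-count AnnData)
# returns a copy with every cell rescaled to 1e6 total counts, leaving the
# input object unmodified.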
class cNMF():
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = '%s_%s' % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, 'cnmf_tmp'))
self.paths = {
'normalized_counts' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.norm_counts.h5ad'),
'nmf_replicate_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_params.df.npz'),
'nmf_run_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_idvrun_params.yaml'),
'nmf_genes_list' : os.path.join(self.output_dir, self.name, self.name+'.overdispersed_genes.txt'),
'tpm' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm.h5ad'),
'tpm_stats' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm_stats.df.npz'),
'iter_spectra' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.iter_%d.df.npz'),
'iter_usages' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.usages.k_%d.iter_%d.df.npz'),
'merged_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.merged.df.npz'),
'local_density_cache': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.local_density_cache.k_%d.merged.df.npz'),
'consensus_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.dt_%s.consensus.df.npz'),
'consensus_spectra__txt': os.path.join(self.output_dir, self.name, self.name+'.spectra.k_%d.dt_%s.consensus.txt'),
'consensus_usages': os.path.join(self.output_dir, self.name, 'cnmf_tmp',self.name+'.usages.k_%d.dt_%s.consensus.df.npz'),
'consensus_usages__txt': os.path.join(self.output_dir, self.name, self.name+'.usages.k_%d.dt_%s.consensus.txt'),
'consensus_stats': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.stats.k_%d.dt_%s.df.npz'),
'clustering_plot': os.path.join(self.output_dir, self.name, self.name+'.clustering.k_%d.dt_%s.png'),
'gene_spectra_score': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_score.k_%d.dt_%s.df.npz'),
'gene_spectra_score__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_score.k_%d.dt_%s.txt'),
'gene_spectra_tpm': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_tpm.k_%d.dt_%s.df.npz'),
'gene_spectra_tpm__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_tpm.k_%d.dt_%s.txt'),
'k_selection_plot' : os.path.join(self.output_dir, self.name, self.name+'.k_selection.png'),
'k_selection_stats' : os.path.join(self.output_dir, self.name, self.name+'.k_selection_stats.df.npz'),
}
def get_norm_counts(self, counts, tpm,
high_variance_genes_filter = None,
num_highvar_genes = None
):
"""
Parameters
----------
counts : anndata.AnnData
            Scanpy AnnData object (cells x genes) containing raw counts, filtered so that
            no genes or cells have 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data matching
counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using the
parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many most overdispersed genes
for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
            A counts matrix containing only the high variance genes and with columns (genes) normalized to unit
variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(tpm.X, numgenes=num_highvar_genes)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(np.array(tpm.X), numgenes=num_highvar_genes)
high_variance_genes_filter = list(tpm.var.index[gene_counts_stats.high_var.values])
## Subset out high-variance genes
norm_counts = counts[:, high_variance_genes_filter]
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print('Warning NaNs in normalized counts matrix')
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print('Warning NaNs in normalized counts matrix')
## Save a \n-delimited list of the high-variance genes used for factorization
open(self.paths['nmf_genes_list'], 'w').write('\n'.join(high_variance_genes_filter))
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1)==0
if zerocells.sum()>0:
examples = norm_counts.obs.index[zerocells]
print('Warning: %d cells have zero counts of overdispersed genes. E.g. %s' % (zerocells.sum(), examples[0]))
print('Consensus step may not run when this is the case')
return(norm_counts)
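    # Minimal usage sketch (hypothetical objects): `counts_adata` is a raw-count
    # AnnData and `cnmf_obj` is an already-constructed cNMF instance.
    #
    #   tpm = compute_tpm(counts_adata)
    #   norm_counts = cnmf_obj.get_norm_counts(counts_adata, tpm, num_highvar_genes=2000)
    #   cnmf_obj.save_norm_counts(norm_counts)
    #
    # save_norm_counts (below) writes the result to self.paths['normalized_counts'],
    # which run_nmf later reads back.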
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
sc.write(self.paths['normalized_counts'], norm_counts)
def get_nmf_iter_params(self, ks, n_iter = 100,
random_state_seed = None,
beta_loss = 'kullback-leibler'):
"""
Create a DataFrame with parameters for NMF iterations.
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run independently.
        n_iter : integer, optional (default=100)
Number of iterations for factorization. If several ``k`` are specified, this many
iterations will be run for each value of ``k``.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
        n_runs = len(k_list) * n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2**32)-1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(replicate_params, columns = ['n_components', 'iter', 'nmf_seed'])
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver='mu',
tol=1e-4,
max_iter=400,
regularization=None,
init='random'
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == 'frobenius':
_nmf_kwargs['solver'] = 'cd'
return(replicate_params, _nmf_kwargs)
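    # Minimal usage sketch (hypothetical values): prepare 100 replicates for each
    # k in {5, 6, 7} and persist them for the factorize step.
    #
    #   replicate_params, run_params = cnmf_obj.get_nmf_iter_params(ks=[5, 6, 7],
    #                                                               n_iter=100,
    #                                                               random_state_seed=14)
    #   cnmf_obj.save_nmf_iter_params(replicate_params, run_params)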
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths['nmf_replicate_parameters'])
with open(self.paths['nmf_run_parameters'], 'w') as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to ``non_negative_factorization``
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return(spectra, usages)
def run_nmf(self,
worker_i=1, total_workers=1,
):
"""
Iteratively run NMF with prespecified parameters.
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from self.paths['nmf_run_parameters'], defaults below::
``non_negative_factorization`` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
            max_iter=400
regularization=None
init='random'
random_state, n_components are both set by the prespecified self.paths['nmf_replicate_parameters'].
        Parameters
        ----------
        worker_i : int, optional (default=1)
            Index of the current worker (used to select this worker's subset of replicates).
        total_workers : int, optional (default=1)
            Total number of workers across which the replicate jobs are distributed.
        The normalized counts and per-replicate parameters are loaded from
        self.paths['normalized_counts'] and self.paths['nmf_replicate_parameters'],
        which are produced by ``save_norm_counts`` and ``save_nmf_iter_params``.
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
norm_counts = sc.read(self.paths['normalized_counts'])
_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
jobs_for_this_worker = worker_filter(range(len(run_params)), worker_i, total_workers)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print('[Worker %d]. Starting task %d.' % (worker_i, idx))
_nmf_kwargs['random_state'] = p['nmf_seed']
_nmf_kwargs['n_components'] = p['n_components']
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(spectra,
index=np.arange(1, _nmf_kwargs['n_components']+1),
columns=norm_counts.var.index)
save_df_to_npz(spectra, self.paths['iter_spectra'] % (p['n_components'], p['iter']))
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
print('Combining factorizations for k=%d.'%k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components==k)
run_params_subset = run_params[run_params.n_components==k].sort_values('iter')
spectra_labels = []
for i,p in run_params_subset.iterrows():
spectra = load_df_from_npz(self.paths['iter_spectra'] % (p['n_components'], p['iter']))
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p['iter'], :, :] = spectra.values
for t in range(k):
spectra_labels.append('iter%d_topic%d'%(p['iter'], t+1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(combined_spectra, columns=spectra.columns, index=spectra_labels)
save_df_to_npz(combined_spectra, self.paths['merged_spectra']%k)
return combined_spectra
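    # Typical per-k flow (sketch): once every worker has finished run_nmf,
    # merge the replicate spectra and then build the consensus solution, e.g.
    #
    #   cnmf_obj.combine_nmf(k=7)
    #   cnmf_obj.consensus(k=7, density_threshold_str='0.5', show_clustering=True)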
def consensus(self, k, density_threshold_str='0.5', local_neighborhood_size = 0.30,show_clustering = False,
skip_density_and_return_after_stats = False, close_clustergram_fig=True):
merged_spectra = load_df_from_npz(self.paths['merged_spectra']%k)
norm_counts = sc.read(self.paths['normalized_counts'])
if skip_density_and_return_after_stats:
density_threshold_str = '2'
density_threshold_repl = density_threshold_str.replace('.', '_')
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0]/k)
        # Rescale each spectrum (topic) to unit L2 length.
l2_spectra = (merged_spectra.T/np.sqrt((merged_spectra**2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths['local_density_cache'] % k):
local_density = load_df_from_npz(self.paths['local_density_cache'] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors+1)[:, :n_neighbors+1]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[np.arange(topics_dist.shape[0])[:, None], partitioning_order]
local_density = pd.DataFrame(distance_to_nearest_neighbors.sum(1)/(n_neighbors),
columns=['local_density'],
index=l2_spectra.index)
save_df_to_npz(local_density, self.paths['local_density_cache'] % k)
del(partitioning_order)
del(distance_to_nearest_neighbors)
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(kmeans_model.labels_+1, index=l2_spectra.index)
# Find median usage for each gene across cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T/median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(l2_spectra.values, kmeans_cluster_labels, metric='euclidean')
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
refit_nmf_kwargs.update(dict(
n_components = k,
H = median_spectra.values,
update_H = False
))
_, rf_usages = self._nmf(norm_counts.X,
nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(rf_usages, index=norm_counts.obs.index, columns=median_spectra.index)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
# Compute prediction error as a frobenius norm
if sp.issparse(norm_counts.X):
prediction_error = ((norm_counts.X.todense() - rf_pred_norm_counts)**2).sum().sum()
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts)**2).sum().sum()
consensus_stats = pd.DataFrame([k, density_threshold, stability, prediction_error],
index = ['k', 'local_density_threshold', 'stability', 'prediction_error'],
columns = ['stats'])
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(median_spectra, self.paths['consensus_spectra']%(k, density_threshold_repl))
save_df_to_npz(rf_usages, self.paths['consensus_usages']%(k, density_threshold_repl))
save_df_to_npz(consensus_stats, self.paths['consensus_stats']%(k, density_threshold_repl))
save_df_to_text(median_spectra, self.paths['consensus_spectra__txt']%(k, density_threshold_repl))
save_df_to_text(rf_usages, self.paths['consensus_usages__txt']%(k, density_threshold_repl))
# Compute gene-scores for each GEP by regressing usage on Z-scores of TPM
tpm = sc.read(self.paths['tpm'])
tpm_stats = load_df_from_npz(self.paths['tpm_stats'])
if sp.issparse(tpm.X):
norm_tpm = (np.array(tpm.X.todense()) - tpm_stats['__mean'].values) / tpm_stats['__std'].values
else:
norm_tpm = (tpm.X - tpm_stats['__mean'].values) / tpm_stats['__std'].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(usage_coef, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(usage_coef, self.paths['gene_spectra_score']%(k, density_threshold_repl))
save_df_to_text(usage_coef, self.paths['gene_spectra_score__txt']%(k, density_threshold_repl))
# Convert spectra to TPM units, and obtain results for all genes by running last step of NMF
# with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(
H = norm_usages.T.values,
))
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(spectra_tpm, self.paths['gene_spectra_tpm']%(k, density_threshold_repl))
save_df_to_text(spectra_tpm, self.paths['gene_spectra_tpm__txt']%(k, density_threshold_repl))
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][:, density_filter.values]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels==cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[cl_dist < 0] = 0 #Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, 'average')
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(len(height_ratios), len(width_ratios), fig,
0.01, 0.01, 0.98, 0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0, hspace=0)
dist_ax = fig.add_subplot(gs[1,1], xscale='linear', yscale='linear',
xticks=[], yticks=[],xlabel='', ylabel='',
frameon=True)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(D, interpolation='none', cmap='viridis', aspect='auto',
rasterized=True)
left_ax = fig.add_subplot(gs[1,0], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
left_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
top_ax = fig.add_subplot(gs[0,1], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
top_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
hist_gs = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1, 3],
wspace=0, hspace=0)
hist_ax = fig.add_subplot(hist_gs[0,0], xscale='linear', yscale='linear',
xlabel='', ylabel='', frameon=True, title='Local density histogram')
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle='--', color='k')
hist_ax.text(density_threshold + 0.02, ylim[1] * 0.95, 'filtering\nthreshold\n\n', va='top')
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel('Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering'%(sum(~density_filter), len(density_filter), 100*(~density_filter).mean()))
fig.savefig(self.paths['clustering_plot']%(k, density_threshold_repl), dpi=250)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
'''
Borrowed from <NAME>. 2013 Deciphering Mutational Signatures
publication in Cell Reports
'''
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(self.consensus(k, skip_density_and_return_after_stats=True).stats)
stats = pd.DataFrame(stats)
stats.reset_index(drop = True, inplace = True)
save_df_to_npz(stats, self.paths['k_selection_stats'])
fig = plt.figure(figsize=(6, 4))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(stats.k, stats.stability, 'o-', color='b')
ax1.set_ylabel('Stability', color='b', fontsize=15)
for tl in ax1.get_yticklabels():
tl.set_color('b')
#ax1.set_xlabel('K', fontsize=15)
ax2.plot(stats.k, stats.prediction_error, 'o-', color='r')
ax2.set_ylabel('Error', color='r', fontsize=15)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.set_xlabel('Number of Components', fontsize=15)
ax1.grid('on')
plt.tight_layout()
fig.savefig(self.paths['k_selection_plot'], dpi=250)
if close_fig:
plt.close(fig)
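    # Interpretation note: the plot overlays solution stability (silhouette of the
    # clustered replicate spectra) against reconstruction error for every k that was
    # run; a common heuristic is to pick a k with high stability just before the
    # stability curve drops, since the error alone tends to keep decreasing with k.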
if __name__=="__main__":
"""
Example commands for now:
output_dir="/Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/"
python cnmf.py prepare --output-dir $output_dir \
--name test --counts /Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/test_data.df.npz \
-k 6 7 8 9 --n-iter 5
python cnmf.py factorize --name test --output-dir $output_dir
    This can be parallelized as such:
python cnmf.py factorize --name test --output-dir $output_dir --total-workers 2 --worker-index WORKER_INDEX (where worker_index starts with 0)
python cnmf.py combine --name test --output-dir $output_dir
python cnmf.py consensus --name test --output-dir $output_dir
"""
import sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('command', type=str, choices=['prepare', 'factorize', 'combine', 'consensus', 'k_selection_plot'])
parser.add_argument('--name', type=str, help='[all] Name for analysis. All output will be placed in [output-dir]/[name]/...', nargs='?', default='cNMF')
parser.add_argument('--output-dir', type=str, help='[all] Output directory. All output will be placed in [output-dir]/[name]/...', nargs='?', default='.')
parser.add_argument('-c', '--counts', type=str, help='[prepare] Input (cell x gene) counts matrix as df.npz or tab delimited text file')
    parser.add_argument('-k', '--components', type=int, help='[prepare] Number of components (k) for matrix factorization. Several can be specified with "-k 8 9 10"', nargs='+')
    parser.add_argument('-n', '--n-iter', type=int, help='[prepare] Number of factorization replicates', default=100)
parser.add_argument('--total-workers', type=int, help='[all] Total number of workers to distribute jobs to', default=1)
parser.add_argument('--seed', type=int, help='[prepare] Seed for pseudorandom number generation', default=None)
parser.add_argument('--genes-file', type=str, help='[prepare] File containing a list of genes to include, one gene per line. Must match column labels of counts matrix.', default=None)
parser.add_argument('--numgenes', type=int, help='[prepare] Number of high variance genes to use for matrix factorization.', default=2000)
parser.add_argument('--tpm', type=str, help='[prepare] Pre-computed (cell x gene) TPM values as df.npz or tab separated txt file. If not provided TPM will be calculated automatically', default=None)
parser.add_argument('--beta-loss', type=str, choices=['frobenius', 'kullback-leibler', 'itakura-saito'], help='[prepare] Loss function for NMF.', default='frobenius')
parser.add_argument('--densify', dest='densify', help='[prepare] Treat the input data as non-sparse', action='store_true', default=False)
parser.add_argument('--worker-index', type=int, help='[factorize] Index of current worker (the first worker should have index 0)', default=0)
parser.add_argument('--local-density-threshold', type=str, help='[consensus] Threshold for the local density filtering. This string must convert to a float >0 and <=2', default='0.5')
parser.add_argument('--local-neighborhood-size', type=float, help='[consensus] Fraction of the number of replicates to use as nearest neighbors for local density filtering', default=0.30)
parser.add_argument('--show-clustering', dest='show_clustering', help='[consensus] Produce a clustergram figure summarizing the spectra clustering', action='store_true')
args = parser.parse_args()
cnmf_obj = cNMF(output_dir=args.output_dir, name=args.name)
cnmf_obj._initialize_dirs()
if args.command == 'prepare':
if args.counts.endswith('.h5ad'):
input_counts = sc.read(args.counts)
else:
## Load txt or compressed dataframe and convert to scanpy object
if args.counts.endswith('.npz'):
input_counts = load_df_from_npz(args.counts)
else:
input_counts = pd.read_csv(args.counts, sep='\t', index_col=0)
if args.densify:
input_counts = sc.AnnData(X=input_counts.values,
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns))
else:
input_counts = sc.AnnData(X=sp.csr_matrix(input_counts.values),
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns))
if sp.issparse(input_counts.X) & args.densify:
input_counts.X = np.array(input_counts.X.todense())
if args.tpm is None:
tpm = compute_tpm(input_counts)
sc.write(cnmf_obj.paths['tpm'], tpm)
elif args.tpm.endswith('.h5ad'):
subprocess.call('cp %s %s' % (args.tpm, cnmf_obj.paths['tpm']), shell=True)
tpm = sc.read(cnmf_obj.paths['tpm'])
else:
if args.tpm.endswith('.npz'):
tpm = load_df_from_npz(args.tpm)
else:
tpm = pd.read_csv(args.tpm, sep='\t', index_col=0)
if args.densify:
tpm = sc.AnnData(X=tpm.values,
obs=pd.DataFrame(index=tpm.index),
var= | pd.DataFrame(index=tpm.columns) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 10:31:23 2017
@author: robertmarsland
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import subprocess
import os
import pickle
import datetime
from sklearn.decomposition import PCA
StateData = ['ACI', 'ACII', 'CIATP', 'CIIATP', 'pU', 'pT', 'pD', 'pS']
def FormatPath(folder):
if folder==None:
folder=''
else:
if folder != '':
if folder[-1] != '/':
folder = folder+'/'
return folder
def LoadData(name, folder = None, suffix = '.dat'):
folder = FormatPath(folder)
col_ind = list(range(22))
del col_ind[5]
return | pd.read_table(folder+name+suffix,index_col=0,usecols=col_ind) | pandas.read_table |
from bs4 import BeautifulSoup as BS
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, \
TimeoutException, StaleElementReferenceException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from FIR_logging import logger
import os
import time
import pandas as pd
# base function
# Some constants:
URL = r'http://www.mhpolice.maharashtra.gov.in/Citizen/MH/PublishedFIRs.aspx'
Download_Directory = r'/home/sangharshmanuski/Documents/mha_FIRs/raw_footage/raw_footage_after_beed'
COLUMNS = ['Sr.No.', 'State', 'District', 'Police Station', 'Year', 'FIR No.', 'Registration Date', 'FIR No',
'Sections']
ALL_Districts = ['NAGPUR RURAL']
# other functions
def district_selection(name):
dist_list = Select(driver.find_element_by_css_selector(
"#ContentPlaceHolder1_ddlDistrict"))
names = [o.get_attribute("text")
for o in dist_list.options if o.get_attribute("text") not in (
'Select')]
if name not in names:
logger.info(f"{name} is not in list")
return False
dist_list.select_by_visible_text(name)
time.sleep(6)
def police_station(ps):
# select particular police station
police_station_list = Select(driver.find_element_by_css_selector(
"#ContentPlaceHolder1_ddlPoliceStation"))
name = ps
police_station_list.select_by_visible_text(name)
time.sleep(3)
def enter_date(date, date_plus_one):
# enters start as well as end dates with "action chains."
WebDriverWait(driver, 160).until(
EC.presence_of_element_located((By.CSS_SELECTOR,
'#ContentPlaceHolder1_txtDateOfRegistrationFrom')))
from_date_field = driver.find_element_by_css_selector(
'#ContentPlaceHolder1_txtDateOfRegistrationFrom')
to_date_field = driver.find_element_by_css_selector(
'#ContentPlaceHolder1_txtDateOfRegistrationTo')
ActionChains(driver).click(from_date_field).send_keys(
date).move_to_element(to_date_field).click().send_keys(
date_plus_one).perform()
logger.info(f'date entered: {date}')
def search():
driver.find_element_by_css_selector('#ContentPlaceHolder1_btnSearch').click()
def number_of_records():
"""captures the text indicating number of records.
converts it to integer. if 0 returns and appends name of district to the list
if page is not loaded. it tries one more time for 15 secs."""
if driver.find_elements_by_css_selector("#ContentPlaceHolder1_gdvDeadBody_lblNoRowsFound").is_displayed:
return False
time_counter = 1
while time_counter < 8:
try:
records_number = driver.find_element_by_css_selector(
'#ContentPlaceHolder1_lbltotalrecord').text
if records_number == '':
time.sleep(1)
continue
else:
records_number = int(records_number)
if records_number != 0:
logger.info(f"{district}: {records_number}")
return records_number
else:
logger.info(f"no records @ {district}")
return False
except (NoSuchElementException, TimeoutException, StaleElementReferenceException):
logger.info("page is not loaded")
time_counter += 1
continue
def extract_table_current(name, single):
# entire table of record to be taken to the list.
soup = BS(driver.page_source, 'html.parser')
main_table = soup.find("table", {"id": "ContentPlaceHolder1_gdvDeadBody"})
time_counter = 1
while main_table is None:
        if time_counter < 6:
            logger.info(f"the table did not load @ {name}")
            time.sleep(1)
            # re-parse the page source so the retry can actually pick up the table
            soup = BS(driver.page_source, 'html.parser')
            main_table = soup.find("table", {"id": "ContentPlaceHolder1_gdvDeadBody"})
            time_counter += 1
else:
logger.info(f"the table did not load @ {name}."
f"stopped trying")
return
links_for_pages = driver.find_elements_by_css_selector('.gridPager a')
rows = main_table.find_all("tr")
    if not links_for_pages:
for row in rows:
time.sleep(8)
if '...' not in row.text:
cells = row.find_all('td')
cells = cells[0:9] # drop the last column
# store data in list
single.append([cell.text for cell in cells])
else:
for row in rows[0:(len(rows)) - 2]:
time.sleep(6)
cells = row.find_all('td')
cells = cells[0:9] # drop the last column
# store data in list
single.append([cell.text for cell in cells])
def check_the_act(page):
# check for PoA in table.
    soup = BS(page, 'html.parser')
    rows = soup.select('#ContentPlaceHolder1_gdvDeadBody tr')
    for row in rows:
        cell = row.find_all("td")
        if not cell:
            continue
        text = cell[0].text
        if "मुंबई दारूबंदी अधिनियम" in text:
            submit = driver.find_element_by_tag_name("input")
            submit.click()
def next_page(name, data):
# check if any link to next page is available
# iterate every page.
try:
driver.find_element_by_css_selector('.gridPager a')
except NoSuchElementException:
return False
links_for_pages = driver.find_elements_by_css_selector('.gridPager a')
for page in range(len(links_for_pages)):
# new list, to by pass stale element exception
links_for_pages_new = driver.find_elements_by_css_selector('.gridPager a')
# do not click on link for new page slot
if links_for_pages_new[page].text != '...':
links_for_pages_new[page].click()
# if this can be replaced with some other wait method to save the time
time.sleep(5)
extract_table_current(name, data)
def second_page_slot():
# find specific link for going to page 11 and click.
try:
link_for_page_slot = driver.find_element_by_link_text('...')
link_for_page_slot.click()
except NoSuchElementException:
return False
# main code
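# Overall flow of the loop below: for each district and each day of July 2020,
# a fresh Firefox session is opened, the 50-rows-per-page view is selected, the
# one-day date range, district and police station are filled in, the search is
# run, and every page of results is appended to `page_data`.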
page_data = []
for district in ALL_Districts:
b = "07"
c = "2020"
district_directory = os.path.join(Download_Directory, f'{district}{b}{c}')
if not os.path.exists(district_directory):
os.mkdir(district_directory)
for i in range(1, 30):
options = FirefoxOptions()
#options.add_argument("--headless")
options.add_argument("--private-window")
driver = webdriver.Firefox(options=options)
driver.get(URL)
driver.refresh()
view = Select(driver.find_element_by_css_selector(
'#ContentPlaceHolder1_ucRecordView_ddlPageSize'))
view.select_by_value('50')
# entering date and assuring that 01 to 09 is entered correctly
if i < 10:
i_i = i + 1
i = f'{str("0")}{str(i)}'
if i_i < 10:
i_i = f'{str("0")}{str(i_i)}'
else:
i_i = i + 1
date_from = str(i) + b + c
date_to = str(i_i) + b + c
enter_date(date_from, date_to)
# select district
district_selection(district)
time.sleep(3)
police_station("NARKHED")
# start the search
search()
time.sleep(5)
if not number_of_records():
continue
extract_table_current(district, page_data)
if not next_page(district, page_data):
district_data = | pd.DataFrame(page_data, columns=COLUMNS) | pandas.DataFrame |
from predict import *
from control import *
from operator import add
import pandas as pd
from statistics import stdev, mean
def plotPredictionMC(runs, episodes, everyVisit, save):
val = np.zeros((4,62,10))
for i in range(runs):
v = MonteCarlo(basicPolicy, episodes, everyVisit)
val += v
plot(np.divide(val, runs), 'mc'+('e-' if everyVisit else 'f-')+str(runs) if save else None)
def plotPredictionTD(runs, episodes, k, save):
val = np.zeros((4,62,10))
for i in range(runs):
v = TD(basicPolicy, episodes, 0.7, 0.1, k)
val += v
plot(np.divide(val, runs), 'td-'+str(k)+'-'+str(episodes) if save else None)
def plotRewards(algos, runs, episodes):
df = | pd.DataFrame() | pandas.DataFrame |
import lightgbm as lgb
import numpy as np
import pandas as pd
import sklearn.ensemble as ensemble
import sklearn.linear_model as linear_model
import sklearn.model_selection as model_selection
import sklearn.svm as svm
import sklearn.tree as tree
import xgboost as xgboost
from utils.misc import get_display_time
# Keep randomness same
np.random.seed(2210)
class EstimatorSelectHelper:
# Code derived and changed accordingly from below
# https://github.com/davidsbatista/machine-learning-notebooks/blob/master/hyperparameter-across-models.ipynb
def __init__(self, models):
self.models = models
self.keys = models.keys()
self.search_grid = {}
self.df_val_score = None
def fit(self, X, y, **grid_kwargs):
for model_key in self.keys:
# Check the model and param_grid
model = self.models[model_key][0]
param_grid = self.models[model_key][1]
# Call GridSearchCV on the model and param_grid
print(f"Running GridSearchCV for {model_key}")
grid = model_selection.GridSearchCV(model, param_grid, **grid_kwargs)
grid.fit(X, y)
self.search_grid[model_key] = grid
return self
def val_score(self, sort_by='mean_val_score'):
frames = []
for name, grid in self.search_grid.items():
frame = pd.DataFrame(grid.cv_results_)
frame = frame.filter(regex='^(?!.*param_).*$')
frame['estimator'] = len(frame) * [name]
frames.append(frame)
df_val_score = pd.concat(frames)
df_val_score = df_val_score.reset_index()
        df_val_score = df_val_score.drop(['rank_test_score', 'index'], axis=1)
# columns = ['estimator'] + df.columns.tolist().remove('estimator')
# Keep required columns
df_val_score.rename(columns={'mean_test_score': 'mean_val_score', 'std_test_score': 'std_val_score'},
inplace=True)
keep_columns = [
"estimator",
"mean_val_score",
"std_val_score",
"mean_fit_time",
"mean_score_time",
"params",
]
df_val_score = df_val_score[keep_columns].sort_values([sort_by], ascending=False)
self.df_val_score = df_val_score
return self.df_val_score
class RegressionSelectHelper(EstimatorSelectHelper):
def __init__(self, models):
super().__init__(models)
self.df_test_score = None
def fit(self, X, y, **grid_kwargs):
super().fit(X, y, **grid_kwargs)
def val_score(self, sort_by='mean_val_score'):
return super().val_score(sort_by)
def test_score(self, X_test, y_test, sort_by=['mean_squared_error']):
test_scores = []
for key, model in self.search_grid.items():
y_pred = model.predict(X_test)
import sklearn.metrics as sm
mse = sm.mean_squared_error(y_test, y_pred)
mae = sm.mean_absolute_error(y_test, y_pred)
r2 = sm.r2_score(y_test, y_pred)
test_scores.append([key, model.best_params_, mse, mae, r2])
        test_score_columns = ['estimator', 'params', 'mean_squared_error', 'mean_absolute_error', 'r2_score']
        self.df_test_score = pd.DataFrame(test_scores, columns=test_score_columns).reset_index(drop=True)
return self.df_test_score
class ClassifierSelectHelper(EstimatorSelectHelper):
def __init__(self, models):
super().__init__(models)
self.df_test_score = None
def fit(self, x, y, **grid_kwargs):
super().fit(x, y, **grid_kwargs)
def val_score(self, sort_by='mean_val_score'):
return super().val_score(sort_by)
def test_score(self, x_test, y_test, sort_by=None):
if sort_by is None:
sort_by = ['precision']
test_scores = []
for key, model in self.search_grid.items():
y_pred = model.predict(x_test)
import sklearn.metrics as sm
accuracy = sm.accuracy_score(y_test, y_pred)
precision = sm.precision_score(y_test, y_pred)
recall = sm.recall_score(y_test, y_pred)
f1_score = sm.f1_score(y_test, y_pred)
roc_auc = sm.roc_auc_score(y_test, y_pred)
log_loss = sm.log_loss(y_test, y_pred)
test_scores.append([key, model.best_params_, accuracy, precision, recall, f1_score, roc_auc, log_loss])
test_score_columns = ['estimator', 'params', 'accuracy', 'precision', 'recall',
'f1-score', 'roc_auc', 'log_loss']
self.df_test_score = pd.DataFrame(test_scores, columns=test_score_columns)
self.df_test_score = self.df_test_score.sort_values(by=sort_by, ascending=False).reset_index(drop=True)
return self.df_test_score
def evaluate_classifiers(X_train, y_train, X_test, y_test, is_binary=False, cv=5, sort_by=['f1-score']):
"""
Perform raw evaluation of the Classifer Models on the given data and return the Validation and Test Score results
"""
models = {
'DecisionTreeClassifier': (tree.DecisionTreeClassifier(), {}),
'SVM': (svm.SVC(), {}),
'RandomForestClassifier': (ensemble.RandomForestClassifier(), {}),
'LightGBMClassifier': (lgb.LGBMClassifier(), {}),
'AdaBoostClassifier': (ensemble.AdaBoostClassifier(), {}),
'GradinetBoostingClassifier': (ensemble.GradientBoostingClassifier(), {}),
'XGBClassifier': (xgboost.XGBClassifier(verbose=0, silent=True), {}),
}
# LogisticRegression
if is_binary:
models.update({'LogisticRegression': (linear_model.LogisticRegression(), {})})
if len(X_train) > 10000:
models.update({'SGDClassifier': (linear_model.SGDClassifier(), {})})
select = ClassifierSelectHelper(models)
select.fit(X_train, y_train, cv=cv, verbose=0)
df_val_score = select.val_score(sort_by='mean_val_score')
df_test_score = select.test_score(X_test, y_test, sort_by=sort_by)
search_grid = select.search_grid
return df_val_score, df_test_score, search_grid
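# Minimal usage sketch (hypothetical splits): X_train/X_test, y_train/y_test are
# assumed to come from an earlier train_test_split.
#
#   val_scores, test_scores, grids = evaluate_classifiers(
#       X_train, y_train, X_test, y_test, is_binary=True, cv=5, sort_by=['f1-score'])
#   best_name = test_scores.iloc[0]['estimator']
#   best_model = grids[best_name].best_estimator_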
# TODO : will be depricated with fine_tune in model_builder
def fine_tune_classifier(model_name, x_train, y_train, cv=5, verbose=0, randomized=False):
model, param_grid = None, None
if model_name == 'xgb':
model = xgboost.XGBClassifier(verbose=verbose)
param_grid = {
"learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
"max_depth": [3, 4, 5, 6, 8, 10, 12, 15],
"min_child_weight": [1, 3, 5, 7],
"gamma": [0.0, 0.1, 0.2, 0.3, 0.4],
"colsample_bytree": [0.3, 0.4, 0.5, 0.7]
}
elif model_name == 'rf':
model = ensemble.RandomForestClassifier()
param_grid = {'n_estimators': [10, 25], 'max_features': [5, 10],
'max_depth': [10, 50, None], 'bootstrap': [True, False]}
elif model_name == 'lr':
model = linear_model.LogisticRegression()
param_grid = {
"solver": ["newton-cg", "lbfgs", "liblinear"],
"penalty": ['l1', 'l2'],
"C": [100, 10, 1, 0.1, 0.01],
}
elif model_name == 'ada':
model = ensemble.AdaBoostClassifier()
param_grid = {
"learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
"n_estimators": [0, 50, 100, 500]
}
elif model_name == 'gb':
        model = ensemble.GradientBoostingClassifier()
param_grid = {}
elif model_name == 'lgb':
model = lgb.LGBMClassifier()
param_grid = {}
elif model_name == 'svm':
model = svm.SVC()
param_grid = {
"C": [0.1, 1, 10, 100, 1000],
"gamma": [1, 0.1, 0.01, 0.001, 0.0001],
"kernel": ["rbf", 'linear', 'sigmoid'],
}
elif model_name == 'dt':
model = tree.DecisionTreeClassifier()
param_grid = {}
elif model_name == 'sgd':
model = linear_model.SGDClassifier()
param_grid = {}
return fine_tune_model(model, param_grid, x_train, y_train, cv, verbose, randomized)
# from time import perf_counter
#
# start_time = perf_counter()
#
# grid_search = None
# if randomized:
# print(f"Performing Randomized search for {type(model).__name__}...")
# grid_search = model_selection.RandomizedSearchCV(model, param_grid, cv=cv, verbose=verbose, n_jobs=-1)
# else:
# print(f"Performing Grid search for {type(model).__name__}...")
# grid_search = model_selection.GridSearchCV(model, param_grid, cv=cv, verbose=verbose, n_jobs=-1)
#
# # Start fine tuning of the model
# grid_search.fit(x_train, y_train)
# time_taken = round(perf_counter() - start_time, 2)
# print(f"Time elapsed(s) : {get_display_time(time_taken)} | score : {grid_search.best_score_:.2}")
# print(f"Best parameters : {grid_search.best_params_} ")
# return grid_search.best_estimator_
# TODO : will be depricated with fine_tune in model_builder
def fine_tune_model(model, param_grid, x_train, y_train, cv=5, verbose=0, randomized=False):
"""
Fine Tune a given Model by using GridSearchCV/RandomizedSearchCV with the Passed parameter grid
:param model: Estimator Model
:param param_grid: Parameters grid
:param x_train: Train dataset
:param y_train: Train target
:param cv: No. of cross validations, default 5
:param verbose: verbose, default 0
:param randomized: default False, if True, randomized search to be used
:return:
"""
from time import perf_counter
start_time = perf_counter()
grid_search = None
if randomized:
print(f"Performing Randomized search for {type(model).__name__}...")
grid_search = model_selection.RandomizedSearchCV(model, param_grid, cv=cv, verbose=verbose, n_jobs=-1)
else:
print(f"Performing Grid search for {type(model).__name__}...")
grid_search = model_selection.GridSearchCV(model, param_grid, cv=cv, verbose=verbose, n_jobs=-1)
# Start fine tuning of the model
grid_search.fit(x_train, y_train)
time_taken = round(perf_counter() - start_time, 2)
print(f"Time elapsed : {get_display_time(time_taken)} | score : {grid_search.best_score_:.2}")
print(f"Best parameters : {grid_search.best_params_} ")
return grid_search.best_estimator_
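# Minimal usage sketch (hypothetical data): grid-search a random forest with a
# small custom grid on an existing training split.
#
#   import sklearn.ensemble as ensemble
#   rf_grid = {'n_estimators': [100, 300], 'max_depth': [None, 10]}
#   best_rf = fine_tune_model(ensemble.RandomForestClassifier(), rf_grid,
#                             x_train, y_train, cv=5, randomized=False)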
# def train_models(X, y, cv, models, problem_type='classification', scoring=['accuracy', 'precision']):
# for name, model in models.items():
# model.fit(X, y)
# np.random.seed(2210)
# scores = model_selection.cross_validate(model, X, y, scoring=scoring, cv=cv, n_jobs=-1, verbose=0)
# print("%0.2f accuracy with a standard deviation of %0.2f" % (scores.mean(), scores.std()))
#
# # Export the model
# joblib.dump(model, f"models/{name}.pkl")
# joblib.dump(X.columns, f"models/{name}_columns.pkl")
#
# if problem_type == 'classification':
# select = ClassifierSelectHelper(models)
# select.fit(X_train, y_train, cv=cv, verbose=0)
# df_score, search_grid = select.score_summary(X_test, y_test, sort_by=scoring)
# return df_score, search_grid
# else:
# select = RegressionSelectHelper()
# select.fit(X_train, y_train, cv=cv, verbose=0)
# df_score, search_grid = select.test_score(X_test, y_test, sort_by=scoring)
# return df_score, search_grid
FOLD_MAPPPING = {
0: [1, 2, 3, 4],
1: [0, 2, 3, 4],
2: [0, 1, 3, 4],
3: [0, 1, 2, 4],
4: [0, 1, 2, 3]
}
# def train_models_with_folds(fold, df, target_col, drop_columns, models,
# problem_type='classification', score='accuracy'):
# """
# Train the model on the given fold. Dataframe has a column having fold number
# :param fold: Fold number raning from 0 to 5
# :param df: DataFrame
# :param target_col: Target column
# :param drop_columns: Columns to drop
# :param models: Model to train on
# :param problem_type: Problem type
# :param score: score used for evaluation
# """
# import dispatcher
#
# train_df = df[df.kfold.isin(FOLD_MAPPPING.get(fold))].reset_index(drop=True)
# valid_df = df[df.kfold == fold].reset_index(drop=True)
#
# train_df = train_df.drop(drop_columns + target_col, axis=1)
# valid_df = valid_df.drop(drop_columns + target_col, axis=1)
#
# y_train = train_df[target_col].values
# y_valid = valid_df[target_col].values
#
# for name, model in models.items():
# model.fit(train_df)
#
# if problem_type == 'classification':
# from metrics import ClassificationMetrics
# dispatcher.MODELS[model]
# preds = model.predict_proba(valid_df)[:, 1]
# metric = ClassificationMetrics()
# print(metric(score, y_valid, preds))
# else:
# from metrics import RegressionMetrics
# preds = model.predict(valid_df)
# metric = RegressionMetrics()
# print(metric(score, y_valid, preds))
#
# # Export the model
# joblib.dump(model, f"models/{model}_{fold}.pkl")
# joblib.dump(train_df.columns, f"models/{model}_{fold}_columns.pkl")
if __name__ == '__main__':
df = | pd.read_csv('data.csv') | pandas.read_csv |
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objs as go
import dash_daq as daq
import dash_table
import datetime
from datetime import datetime as dt
from datetime import timedelta
import dateutil.relativedelta
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# from UsedFunctions import *
#====================================================================================== Connecting to DB
import pyodbc
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
"Server=;"
"Database=;"
"Uid=;"
"Pwd=;"
"MARS_Connection=Yes;")
cnxn1 = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
"Server=;"
"Database=;"
"Uid=;"
"Pwd=;"
"MARS_Connection=Yes;")
#====================================================================================== Collecting the global data
#------------ Map
TrueCodes = pd.read_csv(r'data\CountryCodes.csv')
drop_box = []
drop_box.append('All')
for country in TrueCodes.Entity.values:
drop_box.append(country)
countries = pd.read_csv('data/CC.csv', keep_default_na=False)
prices = pd.read_csv('data/PriceChangeLog.csv', keep_default_na=False)
df_sub = pd.read_csv('data/country_data.csv')
#-------------------------------------------------------------------- Data: Retention
cohort_android = pd.read_sql_query('EXEC DS_GetRetentionAndroidData', cnxn1)
cohort_android_transpose = cohort_android.set_index('Registration Period').T
cohort_ios = pd.read_sql_query('EXEC DS_GetRetentionIOSData', cnxn1)
cohort_ios_transpose = cohort_ios.set_index('Registration Period').T
#====================================================================================== Activity colors
colors = dict(red = '#d62728', #brick red
orange = '#ff7f0e',#safety orange
pink = '#e377c2',#raspberry yogurt pink
green = '#2ca02c',#cooked asparagus green
purple = '#9467bd',#muted purple
blue = '#1f77b4',#muted blue
blue_teal = '#17becf', #blue-teal
brown = '#8c564b',#chestnut brown
gray = '#7f7f7f',#middle gray
yellow = '#bcbd22', #curry yellow-green
)
map_colorscale = [
[0, "#08519c"],
[0.5, "#6baed6"],
[1, "#9ecae1"]
]
activity_color = {'Lesson': 'red',
'User Lesson': 'orange',
'Q&A': 'purple',
'User Post': 'green',
'Code': 'blue',
'Quiz': 'brown',
'Contest': 'brown',
'Profile': 'pink',
'Own Profile': 'yellow',
'Private Codes': 'blue_teal'}
design_colors = {
'page_bg': '#0f2331',
'chart_bg': '#0e2e43',
'chart_box_bg': '#0e2e43',
'box_borders': '#143756',
'Android': '#5ab4ac',
'iOS': '#d8b365',
'Web': '#f5f5f5',
'text': '#eaf5fc',
'title': '#eaf5fc',
'chart_axis_legends': '#a1aba0',
'chart_inside_lines': '#334d61'
}
design_padding = {
'level_1': '5px',
'level_2': '0 20'
}
date_format = 'MMM Do, YY'
title_size = 20
dcc_graph_height = 350
design_padding = {
'level_1': '5px'
}
box_shadow = '0px 0px 0px 2px rgb(20, 55, 86)'
#====================================================================================== The Dash app
app = dash.Dash(__name__)
external_css = ["https://cdnjs.cloudflare.com/ajax/libs/normalize/7.0.0/normalize.min.css",
"https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css",
"//fonts.googleapis.com/css?family=Raleway:400,300,600",
'https://codepen.io/plotly/pen/YEYMBZ.css',
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css",
'https://codepen.io/chriddyp/pen/bWLwgP.css']
for css in external_css:
app.css.append_css({"external_url": css})
# ------------------------------------------------------------------------------------- Used Fnctions
def get_country_code(country_name):
country_code = TrueCodes.loc[TrueCodes.Entity == country_name, ['rand']].values[0][0]
return str(country_code)
def get_funnel(start_date, end_date, platform, country=None):
if country:
subscriptions = pd.read_sql_query(
'exec DS_Funnel @StartDate = \'' + start_date + '\', ' +
'@EndDate = \'' + end_date + '\', ' +
'@Platform = \'' + platform + '\',' +
'@Country = \'' + country + '\'', cnxn)
else:
subscriptions = pd.read_sql_query(
'exec DS_Funnel @StartDate = \'' + start_date + '\', ' +
'@EndDate = \'' + end_date + '\', ' +
'@Platform = ' + platform + ' ', cnxn)
subs = []
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['TotalSignups']].sum()))
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['TotalSubs']].sum()))
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['MonthlyOld']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['MonthlyNew']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['AnnualOld']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['AnnualNew']].sum()))
text = []
for i in range(len(subs)):
if i == 0:
text.append('#: ' + str(subs[i]))
else:
subs[0] = subs[0] if subs[0] != 0 else 1
text.append('#: ' + str(subs[i]) + ' <br> ' + '%: ' + str(np.round(subs[i] / subs[0] * 100, 3)))
# if platform == '1122':
# subs[0] = subs[0] / 10
# subs[1] = subs[1] * 2
# subs[2] = subs[2] * 6
# else:
# subs[0] = subs[0] / 20
# subs[1] = subs[1] * 2
# subs[2] = subs[2] * 4
return subs, text
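# get_funnel returns two parallel lists: `subs` holds counts for the three funnel
# stages (sign-ups, started subscriptions, completed monthly + annual
# subscriptions) and `text` holds the hover labels with counts and percentages
# relative to sign-ups.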
def price_finder(row):
    country_code, platform, created_date, sub_type = row[['CountryCode', 'Platform', 'CreatedDate', 'SubscriptionType']].values
    # one combined mask avoids chained boolean filters that newer pandas cannot align
    mask = ((prices.CC == country_code) & (prices.Platform == platform) & (prices.Subscription_type == sub_type) &
            (prices.StartDate < created_date) & (prices.EndDate >= created_date))
    return prices.loc[mask, 'Price'].values[0]
def subs_table_constructor(subs, prices, countries, signups):
subs['CountryCode'] = subs['CountryCode'].apply(lambda x: x.upper())
signups['CountryCode'] = signups['CountryCode'].apply(lambda x: x.upper())
subs['CountryCode'] = subs['CountryCode'].replace(np.nan, 'NA', regex=True)
signups['CountryCode'] = signups['CountryCode'].replace(np.nan, 'NA', regex=True)
subs["SubscriptionType"] = subs["SubscriptionType"].map({'sololearn_pro_test': "monthly", 'sololearn_pro_annual': "annual", 'sololearn_pro_monthly': "monthly"})
prices["StartDate"] = pd.to_datetime(prices["StartDate"], dayfirst=True)
prices["EndDate"] = pd.to_datetime(prices["EndDate"], dayfirst=True)
subs["SubscriptionStartDate"] = pd.to_datetime(subs["SubscriptionStartDate"], dayfirst=True)
subs["SubscriptionEndDate"] = pd.to_datetime(subs["SubscriptionEndDate"], dayfirst=True)
subs['Paid'] = np.where((subs.SubscriptionEndDate - subs.SubscriptionStartDate) > datetime.timedelta(days=5), 1, 0)
subs['Annual'] = np.where((subs.SubscriptionType == "annual") & (subs.Paid == 1), 1, 0)
subs['Monthly'] = np.where((subs.SubscriptionType == "monthly") & (subs.Paid == 1), 1, 0)
subs["Price"] = subs.apply(price_finder, axis=1)
subs["Revenue"] = subs.Price * subs.Paid
subs_df = subs.groupby("CountryCode").agg({'Platform': 'count', "Paid": 'sum', "Monthly": 'sum', "Annual": 'sum', "Revenue": 'sum'})
subs_df.rename(columns={'Platform': 'TotalSubs'}, inplace = True)
final_df = pd.merge(pd.merge(countries, signups), subs_df, on="CountryCode")
final_df["Revenue_per_user"] = final_df.Revenue / final_df.NumberOfSignups
final_df["Cancel_rate"] = 1 - final_df.Paid / final_df.TotalSubs
final_df = final_df.round(3)
return final_df
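# subs_table_constructor builds the per-country table behind `table_new`: a
# subscription counts as Paid when its end date is more than 5 days after its
# start (presumably surviving the trial window), Revenue is Price * Paid summed
# per country, Revenue_per_user divides by the number of sign-ups, and
# Cancel_rate is 1 - Paid / TotalSubs.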
table_new = dash_table.DataTable(
id='table_new',
columns= [
# {'name': 'CountryCode', 'id': 'CountryCode'},
{'name': 'Country', 'id': 'Country'},
{'name': 'NumberOfSignups', 'id': 'NumberOfSignups'},
{'name': 'TotalSubs', 'id': 'TotalSubs'},
{'name': 'Paid', 'id': 'Paid'},
{'name': 'Monthly', 'id': 'Monthly'},
{'name': 'Annual', 'id': 'Annual'},
{'name': 'Revenue', 'id': 'Revenue'},
{'name': 'Revenue_per_user', 'id': 'Revenue_per_user'},
{'name': 'Cancel_rate', 'id': 'Cancel_rate'}],
filtering=True,
sorting=True,
style_as_list_view=True,
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(238, 238, 238)'
},
{ 'if': {'column_id': 'Country'}, 'width': '20%'},
{ 'if': {'column_id': 'NumberOfSignups'}, 'width': '10%'},
{ 'if': {'column_id': 'TotalSubs'}, 'width': '10%'},
{ 'if': {'column_id': 'Paid'}, 'width': '10%'},
{ 'if': {'column_id': 'Monthly'}, 'width': '10%'},
{ 'if': {'column_id': 'Annual'}, 'width': '10%'},
{ 'if': {'column_id': 'Revenue'}, 'width': '10%'},
{ 'if': {'column_id': 'Revenue_per_user'}, 'width': '10%'},
{ 'if': {'column_id': 'Cancel_rate'}, 'width': '10%'},
],
n_fixed_rows=1,
# style_cell={'width': '150px'},
style_table={
'maxHeight': '500',
'overflowY': 'scroll'
},
# style_data_conditional=[
# {
# 'if': {
# 'column_id': 'Number of Solar Plants',
# # 'filter': '{Number of Solar Plants} > 3.9'
# },
# 'backgroundColor': '#3D9970',
# 'color': 'white',
# }
# ]
)
# table_old = dash_table.DataTable(
# id='table_old',
# columns= [
# # {'name': 'CountryCode', 'id': 'CountryCode'},
# {'name': 'Country', 'id': 'Country'},
# {'name': 'NumberOfSignups', 'id': 'NumberOfSignups'},
# {'name': 'TotalSubs', 'id': 'TotalSubs'},
# {'name': 'Paid', 'id': 'Paid'},
# {'name': 'Monthly', 'id': 'Monthly'},
# {'name': 'Annual', 'id': 'Annual'},
# {'name': 'Revenue', 'id': 'Revenue'},
# {'name': 'Revenue_per_user', 'id': 'Revenue_per_user'},
# {'name': 'Cancel_rate', 'id': 'Cancel_rate'}],
# filtering=True,
# sorting=True,
# style_as_list_view=True,
# style_header={
# 'backgroundColor': 'white',
# 'fontWeight': 'bold'
# },
# style_cell_conditional=[
# {
# 'if': {'row_index': 'odd'},
# 'backgroundColor': 'rgb(238, 238, 238)'
# }
# ],
# n_fixed_rows=1,
# # style_cell={'width': '150px'},
# style_table={
# 'maxHeight': '250',
# 'overflowY': 'scroll'
# },
# # style_data_conditional=[
# # {
# # 'if': {
# # 'column_id': 'Number of Solar Plants',
# # # 'filter': '{Number of Solar Plants} > 3.9'
# # },
# # 'backgroundColor': '#3D9970',
# # 'color': 'white',
# # }
# # ]
# )
#------------------------------------------------------------------------------------Toggle switch
div0_1 = html.Div([
daq.ToggleSwitch(
id='toggle-switch-1',
value=False,
size=50,
label={
'label': 'Activate Filterign by Date',
'style': {
'backgroundColor': design_colors['page_bg'],
'color' : design_colors['text'],
'size' : 50
}
},
labelPosition='bottom',
color = '#5ab4ac'
)
])
div0_2 = html.Div([
daq.ToggleSwitch(
id='toggle-switch-2',
value=False,
size=50,
label={
'label': 'Activate Filtering by Platform and Country',
'style': {
'backgroundColor': design_colors['page_bg'],
'color' : design_colors['text'],
'size' : 50
}
},
labelPosition='bottom',
color = '#5ab4ac'
)
])
#====================================================================================== HTML Divs
#-------------------------------------------------------------------- Sign-ups
div1_1 = html.Div([
dcc.DatePickerRange(
id='sign-ups-date-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format=date_format,
style={'display': 'none'}
)]
)
div1_2 = html.Div([
dcc.Graph(id='sign-ups-barplot-container')
],
style={'width': '28%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div1_3 = html.Div([
dcc.Graph(id='sign-ups-map-container')
],
style={'width': '55%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div1_4 = html.Div([
dcc.Graph(id='top-countries-container')
],
style={'width': '17%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#-------------------------------------------------------------------- Retention
div2_1 = html.Div([
dcc.RadioItems(
id='platform_retention',
options=[
{'label': 'IOS', 'value': 'ios'},
{'label': 'Android', 'value': 'android'}
],
value='android',
# textfont = dict(color = 'red'),
labelStyle={'display': 'inline-block', 'color' : design_colors['text']},
style={'display': 'none'}
)
]
)
div2_2 = html.Div([
dcc.Graph(id='retention-heatmap-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div2_3 = html.Div([
dcc.Graph(id='retention-curve-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div2 = html.Div([
# html.Div([html.H1("Retention summary")], className="row gs-header gs-text-header", style={'float': 'center'}),
div2_1,
div2_2,
div2_3
],
style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '100%'}
)
#-------------------------------------------------------------------- Active users & by platform
div4_1_1 = html.Div([
dcc.DatePickerRange(
id='activity-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)
]
)
div4_2 = html.Div([
dcc.Graph(id='activity-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div4_3 = html.Div([
dcc.Graph(id='activity-pie-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div4 = html.Div([
div4_1_1,
div4_2,
div4_3
],
style={
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '67%'}
)
#-------------------------------------------------------------------- Consumption Venn diagram
div7_1_1 = html.Div([
dcc.DatePickerRange(
id='venn-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)]
)
div7_2 = html.Div([
dcc.Graph(id='consumption-Venn-container')
],
style={'width': '100%', 'display': 'inline-block'}
)
div7 = html.Div([
div7_1_1,
div7_2,
],
style={
'backgroundColor': design_colors['page_bg'],
'padding': '0px 5px 0px 0px',
'display': 'inline-block',
'width': '33%'}
)
#-------------------------------------------------------------------- Creation
div5_1_1 = html.Div([
dcc.DatePickerRange(
id='creation-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)
])
div5_1_2 = html.Div([
dcc.RadioItems(
id='platform_creation',
options=[
{'label': 'iOS', 'value': 'ios'},
{'label': 'Android', 'value': 'android'},
{'label': 'Total', 'value': 'total'},
],
value='total',
labelStyle={'display': 'inline-block', 'color': design_colors['text']},
style={'display': 'none'}
)
])
div5_3 = html.Div([
dcc.Graph(id='creation_objects-container')
],
style={'width': '33.6%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#-------------------------------------------------------------------- Consumption
div6_2 = html.Div([
dcc.Graph(id='consumption_objects-container')
],
style={'width': '33%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div6_3 = html.Div([
dcc.Graph(id='consumption_average_amount-container')
],
style={'width': '33%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#------------------------------------------------------------------- Funnel
div8 = html.Div([
dcc.DatePickerRange(
id='old_date_picker_funnel_barplot',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=6),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.DatePickerRange(
id='new_date_picker_funnel_barplot',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.RadioItems(
id='platform_funnel_barplot',
options=[
{'label': 'Android', 'value': '1114'},
{'label': 'iOS', 'value': '1122'}
],
value='1122',
labelStyle={'display': 'inline-block', 'color': design_colors['text']},
style={'display': 'none'}
),
dcc.Dropdown(
id='country_funnel_barplot',
options=[{'label':opt, 'value':opt} for opt in drop_box],
value = drop_box[0],
style={'display': 'none'}
),
html.Div([
dcc.Graph(id='funnel-container_barplot', style={'height': 500})
],
style={'width': '100%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
],
style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '34%'}
)
# div3_1_1 = html.Div([
# dcc.DatePickerRange(
# id='funnel-picker-range',
# min_date_allowed=dt(2014, 1, 1),
# max_date_allowed=dt.now(),
# end_date=dt.now(),
# start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
# display_format='MMM Do, YY',
# style={'display': 'none'}
# )
# ]
# )
# div3_1_2 = html.Div([
# dcc.RadioItems(
# id='platform_funnel',
# options=[
# {'label': 'IOS', 'value': 'ios'},
# {'label': 'Android', 'value': 'android'}
# ],
# value='android',
# labelStyle={'display': 'inline-block', 'color': design_colors['text']},
# style={'display': 'none'}
# )
# ])
# div3_2 = html.Div([
# dcc.Graph(id='funnel-container')
# ],
# style={'width': '100%', 'display': 'inline-block', 'padding': design_padding['level_1']}
# )
# div3 = html.Div([
# div3_1_1,
# div3_1_2,
# div3_2,
# ],
# style={
# 'borderBottom': 'thin lightgrey solid',
# 'backgroundColor': design_colors['page_bg'],
# 'padding': design_padding['level_1'],
# 'display': 'inline-block',
# 'width': '50%'}
# )
#------------------------- Layout of the tables
div9_1 = html.Div([dcc.DatePickerRange(
id='table_new-date-picker',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt(2019, 4, 1),
start_date=dt(2019, 4, 1) - dateutil.relativedelta.relativedelta(weeks=1),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.RadioItems(
id='table_new_platform',
options=[
{'label': 'Android', 'value': '1114'},
{'label': 'iOS', 'value': '1122'}
],
value='1122',
labelStyle={'display': 'inline-block', 'color': 'white'},
style={'display': 'none'}
),
table_new
],
style = {'padding': design_padding['level_1'],
'width': '66%'
}
)
# div9_2 = html.Div([
# dcc.DatePickerRange(
# id='table_old-date-picker',
# min_date_allowed=dt(2014, 1, 1),
# max_date_allowed=dt.now(),
# end_date=dt(2019, 4, 1),
# start_date=dt(2019, 4, 1) - dateutil.relativedelta.relativedelta(weeks=1),
# display_format='MMM Do, YY',
# style={'display': 'none'}
# ),
# dcc.RadioItems(
# id='table_old_platform',
# options=[
# {'label': 'Android', 'value': '1114'},
# {'label': 'iOS', 'value': '1122'}
# ],
# value='1114',
# labelStyle={'display': 'inline-block', 'color': 'white'},
# style={'display': 'none'}
# ),
# table_old
# ],
# style={'padding': design_padding['level_1'],
# 'width': '50%'
# }
# )
div9 = html.Div([
div8,
div9_1,
# div9_2
],
style = {'backgroundColor': '#0e2e43',
'display': 'flex',
'flex-direction': 'row',
'padding': '0px 5px 0px 5px',
}
)
div_img = html.Div([
html.Div([
html.Div([
html.H5('Messenger')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_messenger.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
}),
html.Div([
html.Div([
html.H5('Comments')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_comment.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
}),
html.Div([
html.Div([
html.H5('Discussion')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_discussion.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
})
])
#====================================================================================== Combining HTML Divs into the layout form
app.layout = html.Div([
div0_2,
div0_1,
div1_1,
div1_2,
div1_3,
div1_4,
div4,
div7,
div5_1_1,
div5_1_2,
div5_3,
div6_2,
div6_3,
div2_1,
div2_2,
div2_3,
# div3,
# div8,
# div_img_1,
# div_img_2,
# div_img_3,
div_img,
div9
],
style={'backgroundColor': '#0f2331'}
)
#====================================================================================== Callbacks
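# The show/hide callbacks below all follow the same pattern: each hidden
# control (date pickers, radio items, dropdowns) is displayed only while the
# matching toggle switch is on, by flipping its CSS 'display' property.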
@app.callback(
dash.dependencies.Output(component_id='sign-ups-date-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='funnel-picker-range', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='activity-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='venn-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='creation-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_retention', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='platform_funnel', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_creation', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='new_date_picker_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='old_date_picker_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='country_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='table_new-date-picker', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='table_old-date-picker', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='table_new_platform', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='table_old_platform', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
#-------------------------------------------------------------------- Sign-ups
#------------------- 1_2 Bar Plot
@app.callback(
dash.dependencies.Output('sign-ups-barplot-container', 'figure'),
[dash.dependencies.Input('sign-ups-date-picker-range', 'start_date'),
dash.dependencies.Input('sign-ups-date-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_barplot(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live == False:
signups = pd.read_csv(r'data\signups.csv')
signups.drop(columns=['Unnamed: 0'], inplace=True)
signups['Date'] = pd.to_datetime(signups['Date'])
else:
signups = pd.read_sql_query('EXEC DS_GetStatistics\''+start_date+'\',\''+end_date+'\'', cnxn)
signups['Date'] = pd.to_datetime(signups['Date'])
return {
'data': [
go.Bar(x =signups['Date'], y=signups['signups_android'], name ="Android",
marker = dict(color=design_colors['Android'])
),
go.Bar(x =signups['Date'], y=signups['signups_ios'], name ="iOS",
marker = dict(color=design_colors['iOS'])
),
go.Bar(x =signups['Date'], y=signups['signups_web'], name ="Web",
marker = dict(color=design_colors['Web'])
)
],
'layout' : {
'barmode': 'stack',
'paper_bgcolor': design_colors['chart_box_bg'],
'plot_bgcolor': design_colors['chart_bg'],
'xaxis': {
'showgrid': False,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines'],
'tickformat': '%b %d',
},
'yaxis': {
'showgrid': True,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines']
},
'margin': go.layout.Margin(
l=50,
r=50,
b=50,
t=50,
# pad=20
),
"title": '<b>Signups<b>',
'titlefont' : dict(
size=title_size,
color=design_colors['title']
),
'legend': dict(font=dict(color=design_colors['text']))
}
}
#------------------- 1_3 Map
@app.callback(
dash.dependencies.Output('sign-ups-map-container', 'figure'),
[dash.dependencies.Input('sign-ups-date-picker-range', 'start_date'),
dash.dependencies.Input('sign-ups-date-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_map(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
SignupsPerCountry = pd.read_sql_query('EXEC DS_GetCountryCodesForMap @RegisterStartDate = \''+start_date+'\', @RegisterEndDate = \''+end_date+'\'', cnxn)
else:
SignupsPerCountry = pd.read_csv(r'data\SignupsPerCountry.csv')
SignupsPerCountry.drop(columns=['Unnamed: 0'], inplace=True)
        merged = pd.merge(SignupsPerCountry, TrueCodes, left_on='CountryCode', right_on='rand', how='right')
import pandas as pd
import ast
# ==================================== #
# Movie Network Generator #
# ==================================== #
# Movie Features
# No., Title, Director, Production company, Importer, Distributor, Release date, Film type, Film format
# Nationality, Nationwide screens, Nationwide revenue, Nationwide admissions, Seoul revenue, Seoul admissions, Genre, Rating, Film classification
# + lead actors, supporting actors
def load_data():
movies_df = pd.read_excel("./data/dataset.xlsx")
    movies = movies_df.to_numpy()  # .as_matrix() was removed from pandas
return movies
def load_dataset():
movie_actor_df = pd.read_csv('./data/movie_actor_utf16.csv', encoding='utf-16')
# movie_actor_df = pd.read_csv('./data/movie_actor_crawled.csv', encoding='utf-16')
dataset = []
main_actor_series = movie_actor_df['main_actor']
sub_actor_series = movie_actor_df['sub_actor']
# main_actor_series = movie_actor_df['lead_role']
# sub_actor_series = movie_actor_df['supp_role']
for i in range(movie_actor_df.shape[0]):
row = []
        row.append(movie_actor_df.iloc[i].values[0])  # .ix was removed from pandas; use positional .iloc
        row.append(movie_actor_df.iloc[i].values[1])
        row.append(movie_actor_df.iloc[i].values[2])
row.append(ast.literal_eval(main_actor_series[i]))
row.append(ast.literal_eval(sub_actor_series[i]))
dataset.append(row)
return dataset
def generate_actor_network(dataset, output_path):
movie_actor_df = pd.DataFrame(columns=['vertex1', 'vertex2'], index=None)
index = 0;
# for i in range(len(dataset)):
for i in range(100):
for j in range(len(dataset[i][3])):
for k in range(j+1, len(dataset[i][3])):
movie_actor_df.loc[index] = [dataset[i][3][j], dataset[i][3][k]]
index = index + 1
# movie_actor_df.to_csv("./out/network.csv", encoding='utf-16', index=False)
movie_actor_df.to_csv(output_path, index=False)
def generate_actor_genre_network(data, dataset, output_path):
movie_actor_genre_df = pd.DataFrame(columns=['vertex1', 'vertex2', 'vertex3'], index=None)
index = 0;
# for i in range(len(dataset)):
for i in range(100):
# handling 1 person 2 role
actors = set();
for j in range(len(dataset[i][3])):
actors.add(dataset[i][3][j])
actors_list = list(actors)
for j in range(len(actors_list)):
for k in range(j+1, len(actors_list)):
movie_actor_genre_df.loc[index] = [actors_list[j], actors_list[k], data[i][15]]
index = index + 1
# for j in range(len(dataset[i][3])):
# for k in range(j+1, len(dataset[i][3])):
# movie_actor_genre_df.loc[index] = [dataset[i][3][j], dataset[i][3][k], data[i][15]]
# index = index + 1
# movie_actor_df.to_csv("./out/network.csv", encoding='utf-16', index=False)
movie_actor_genre_df.to_csv(output_path, index=False)
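# Illustrative driver (not part of the original script); it assumes the ./data
# inputs read by load_data()/load_dataset() exist and that ./out is writable:
#   movies = load_data()
#   dataset = load_dataset()
#   generate_actor_network(dataset, "./out/network.csv")
#   generate_actor_genre_network(movies, dataset, "./out/genre_network.csv")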
################################################################
def generate_actor_genre_information(data, dataset, output_path):
    movie_actor_genre_df = pd.DataFrame(columns=['actor', 'genre'], index=None)
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
url = {
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
"chicagofed": "https://www.chicagofed.org/~/media/publications/",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
def date_transform(df, format_origin, format_after):
return_list = []
for i in range(0, len(df)):
return_list.append(datetime.strptime(df[i], format_origin).strftime(format_after))
return return_list
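# Illustrative example of the helper above (hypothetical values):
#   date_transform(["2020-01-01", "2020-02-01"], "%Y-%m-%d", "%Y%m")
#   -> ['202001', '202002']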
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: Gross Domestic Product
Description: Billions of Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDP",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "GDP"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["GDP"] = df["GDP"].astype(float)
return df
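# Usage sketch: most fetchers below follow this shape and return a DataFrame
# keyed by a parsed "Date" column, e.g.
#   gdp = gdp_quarterly("2000-01-01", "2020-12-31")
#   gdp.dtypes  # Date: datetime64[ns], GDP: float64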
def gdpc1_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDPC1",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def oecd_gdp_monthly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: OECD Reference Series: Gross Domestic Product (Normalised) for the United States
    Description: Monthly, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USALORSGPNOSTSAM",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def payems_monthly(startdate="1939-01-01", enddate="2021-01-01"):
"""
Full Name: All Employees, Total Nonfarm
Description: Thousands of Persons,Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PAYEMS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "Payems"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["Payems"] = df["Payems"].astype(float)
return df
def ppi():
tmp_url = url["fred_econ"] + "bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO,PCUOMFGOMFG&scale=left,left&cosd=1913-01-01,1984-12-01&coed=2021-04-01,2021-04-01&line_color=%234572a7,%23aa4643&link_values=false,false&line_style=solid,solid&mark_type=none,none&mw=3,3&lw=2,2&ost=-99999,-99999&oet=99999,99999&mma=0,0&fml=a,a&fq=Monthly,Monthly&fam=avg,avg&fgst=lin,lin&fgsnd=2020-02-01,2020-02-01&line_index=1,2&transformation=lin,lin&vintage_date=2021-06-10,2021-06-10&revision_date=2021-06-10,2021-06-10&nd=1913-01-01,1984-12-01"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df["DATE"] = pd.to_datetime(df["DATE"], format="%Y-%m-%d")
#df = df[list(df.columns[1:])].replace(".", np.nan).astype(float)
name_list = {
"PPIACO": "Producer Price Index by Commodity: All Commodities",
"PCUOMFGOMFG": "Producer Price Index by Industry: Total Manufacturing Industries"
}
df.replace(".", np.nan, inplace = True)
df.columns = ["Date", "PPI_C", "PPI_I"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["PPI_C", "PPI_I"]] = df[["PPI_C", "PPI_I"]].astype(float)
return df
def pmi():
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
PMI_I = pd.DataFrame()
PMI_I["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_I["ISM_PMI_I"] = np.array(temp_df).astype(float)
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
PMI_NI = pd.DataFrame()
PMI_NI["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_NI["ISM_PMI_NI"] = np.array(temp_df).astype(float)
PMI = pd.merge_asof(PMI_I, PMI_NI, on = "Date")
return PMI
def unrate(startdate="1948-01-01", enddate="2021-01-01"):
"""
Full Name: Unemployment Rate: Aged 15-64: All Persons for the United States
Description: Percent, Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "UR_Monthly", "UR_Quarterly", "UR_Annually"]
return df
def erate(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Employment Rate: Aged 25-54: All Persons for the United States
Description: Percent,Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ER_Monthly", "ER_Quarterly", "ER_Annually"]
def pce_monthly(startdate="1959-01-01", enddate="2021-01-01"):
"""
    Full Name: Personal Consumption Expenditures
    Description: Billions of Dollars, Monthly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PCE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def cpi(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Price Index: Total All Items for the United States
Description: Percent, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USM661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USQ661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USA661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]] = df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]].astype(float)
return df
def m1(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M1 Money Stock for the United States
    Description: Weekly level plus Growth Rate Previous Period (Monthly, Quarterly and Annually), Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM1NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_quarterly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = [
"Date",
"M1_Weekly",
"M1_Monthly",
"M1_Quarterly",
"M1_Annually"]
return df
def m2(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M2 Money Stock
    Description: Weekly and Monthly levels
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM2NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "M2SL",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df.columns = ["Date", "M2_Weekly", "M2_Monthly"]
return df
def m3(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M3 for the United States
Description: Growth Rate Previous Period, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "M3_Monthly", "M3_Quarterly", "M3_Annually"]
return df
def ltgby_10(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Long-Term Government Bond Yields: 10-year: Main (Including Benchmark) for the United States
Description: Percent,Not Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USQ156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USA156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ltgby_Monthly", "ltgby_Quarterly", "ltgby_Annually"]
return df
def gdp_ipd(startdate="1955-01-01", enddate="2021-01-01"):
"""
    Full Name: GDP Implicit Price Deflator in United States
    Description: Index, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGDPDEFQISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGDPDEFAISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "gdp_ipd_Quarterly", "gdp_ipd_Annually"]
return df
def cci(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Opinion Surveys: Confidence Indicators: Composite Indicators: OECD Indicator for the United States
Description: Normalised (Normal=100), Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CSCICP03USM665S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "CCI_Monthly"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
return df
def bci(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Business confidence index OECD Indicator for the United States
Description: Normalised (Normal=100), Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "BSCICP03USM665S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "BCI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
return df
def ibr_3(startdate="1965-01-01", enddate="2021-01-01"):
"""
Full Name: 3-Month or 90-day Rates and Yields: Interbank Rates for the United States
Description: Percent, Not Seasonally Adjusted, Monthly and Quarterly
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IR3TIB01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IR3TIB01USQ156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
    df = pd.merge_asof(
        df_monthly,
        df_quarterly,
        on="DATE",
        direction="backward")
    df.columns = ["Date", "ibr3_Monthly", "ibr3_Quarterly"]
    return df
def gfcf_3(startdate="1965-01-01", enddate="2021-01-01"):
"""
Full Name: Gross Fixed Capital Formation in United States
Description: United States Dollars,Not Seasonally Adjusted, Quarterly and Annually
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGFCFQDSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGFCFADSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
    df = pd.merge_asof(
        df_quarterly,
        df_annually,
        on="DATE",
        direction="backward")
    df.columns = ["Date", "GFCF_Quarterly", "GFCF_Annually"]
return df
def pfce(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Private Final Consumption Expenditure in United States
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAPFCEQDSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAPFCEADSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "PFCE_Quarterly", "PFCE_Annually"]
return df
def tlp(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Early Estimate of Quarterly ULC Indicators: Total Labor Productivity for the United States
Description: Growth Rate Previous Period,Seasonally Adjusted, Quarterly and YoY
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "ULQELP01USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "ULQELP01USQ659S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "PFCE_Quarterly", "PFCE_Quarterly_YoY"]
return df
def rt(startdate="1955-01-01", enddate="2021-01-01"):
"""
    Full Name: Total Retail Trade in United States
    Description: Monthly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USASARTMISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USASARTAISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "RT_Quarterly", "RT_Annually"]
return df
def bir(startdate="2003-01-01", enddate="2021-01-01"):
"""
    Full Name: 5-Year and 10-Year Breakeven Inflation Rate
    Description: Percent, Daily
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "T5YIE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_5y = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_5y["DATE"] = pd.to_datetime(df_5y["DATE"], format="%Y-%m-%d")
request_header = {"User-Agent": ua.random}
request_params = {
"id": "T10YIE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_10y = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_10y["DATE"] = pd.to_datetime(df_10y["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_5y, df_10y, on="DATE", direction="backward")
df.columns = ["Date", "BIR_5y", "BIR_10y"]
return df
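# Illustrative sketch (not in the original module): since each fetcher returns
# a DataFrame with a datetime "Date" column, series of different frequencies
# can be aligned into a single panel with pd.merge_asof, e.g.
#   panel = pd.merge_asof(cpi().sort_values("Date"),
#                         unrate().sort_values("Date"),
#                         on="Date", direction="backward")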
def adsbci():
"""
An index designed to track real business conditions at high observation frequency
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["philfed"] + "ads"
r = requests.get(tmp_url, headers=request_header)
file = open("ads_temp.xls", "wb")
file.write(r.content)
file.close()
df = pd.read_excel("ads_temp.xls")
df.columns = ["Date", "ADS_Index"]
df['Date'] = pd.to_datetime(df["Date"], format="%Y:%m:%d")
os.remove("ads_temp.xls")
return df
def pci():
"""
Tracks the degree of political disagreement among U.S. politicians at the federal level, Monthly
"""
df = pd.read_excel(
"https://www.philadelphiafed.org/-/media/frbp/assets/data-visualizations/partisan-conflict.xlsx")
df["Date"] = df["Year"].astype(str) + df["Month"]
df["Date"] = pd.to_datetime(df["Date"], format="%Y%B")
df = df.drop(["Year", "Month"], axis=1)
df = df[["Date", "Partisan Conflict"]]
return df
def inflation_nowcasting():
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = "https://www.clevelandfed.org/~/media/files/charting/%20nowcast_quarter.json"
r = requests.get(tmp_url, headers=request_header)
tmp_df = pd.DataFrame(demjson.decode(r.text))
df = pd.DataFrame()
for i in range(0, len(tmp_df)):
date = tmp_df['chart'][i]['subcaption'][:4] + "/" + \
pd.DataFrame(tmp_df["dataset"][i][0]['data'])['tooltext'].str.extract(r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])\b")[0] + "/" + \
pd.DataFrame(tmp_df["dataset"][i][0]['data'])['tooltext'].str.extract(r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])\b")[1]
CPI_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[0])["value"]
C_CPI_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[1])["value"]
PCE_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[2])["value"]
C_PCE_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[3])["value"]
A_CPI_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[4])["value"]
A_C_CPI_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[5])["value"]
A_PCE_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[6])["value"]
A_C_PCE_I = pd.DataFrame(
(pd.DataFrame(tmp_df["dataset"][i])['data'])[7])["value"]
tmp_df2 = pd.DataFrame({"Date": date,
"CPI_I": CPI_I,
"C_CPI_I": C_CPI_I,
"PCE_I": PCE_I,
"C_PCE_I": C_PCE_I,
"A_CPI_I": A_CPI_I,
"A_C_CPI_I": A_C_CPI_I,
"A_PCE_I": A_PCE_I,
"A_C_PCE_I": A_C_PCE_I})
df = pd.concat([df, tmp_df2], axis=0)
df.reset_index(drop=True, inplace=True)
df.replace('', np.nan, inplace=True)
return df
def bbki():
tmp_url = url["chicagofed"] + "bbki/bbki-monthly-data-series-csv.csv"
    df = pd.read_csv(tmp_url)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 05:13:39 2018
@author: IvanA
"""
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
#from click.exceptions import ClickException
import pandas as pd
import csv
import urllib
from urllib import parse
from urllib import request
from urllib import robotparser
from datetime import datetime
import time
import re
import lxml.html
from lxml.cssselect import CSSSelector
import sys
from pathlib import Path
def main():
    # Update the webdriver path below to match the local ChromeDriver installation
try:
driver = webdriver.Chrome('C:\\Users\\ivana\\Documents\\Tools\\chromedriver.exe')
except:
print('Hay que indicar el path correcto del chromedriver.exe')
exit
else:
seed_url = 'http://www.aena.es'
user_agent='wswp'
url = 'http://www.aena.es/csee/Satellite/infovuelos/es/'
        historicalcsv = 'informaciondevuelosacumulado.csv'
        # Initialise so the finally block below never hits an unbound name
        dfnewdata = pd.DataFrame(columns=['fecha', 'vuelo', 'horaplaneada', 'horareal'])
        try:
dfhistorical_data = gethistorycsv(historicalcsv)
rp = get_robots(seed_url)
if rp.can_fetch(user_agent, url):
dfvuelos = obtain_links(url,driver)
dfnewdata = loop_through_links(rp, user_agent, dfvuelos, dfhistorical_data)
except:
print('Error en función main')
finally:
writerecordtocsv(dfnewdata,historicalcsv)
driver.close()
def writerecordtocsv(dfnewdata,historicalcsv):
"""Escribe los registros nuevos al archivo histórico"""
#comprueba si el df está vacío
if not dfnewdata.empty:
myfile = Path(historicalcsv)
if myfile.exists():
with open(historicalcsv,'a', newline='') as originalfile:
filewriter = csv.writer(originalfile)
                # Write every record of the dataframe to the historical file
for ix, row in dfnewdata.iterrows():
filewriter.writerow(row)
else:
with open(historicalcsv,'w', newline='') as originalfile:
filewriter = csv.writer(originalfile)
                # Write every record of the dataframe to the historical file
for ix, row in dfnewdata.iterrows():
filewriter.writerow(row)
def gethistorycsv(historicalcsv):
"""Lee el archivo histórico de los vuelos en un dataframe que será utilizado
más tarde para comprobar si la información de un vuelo ya se ha obtenido previamente"""
myfile = Path(historicalcsv)
if myfile.exists():
colnames = ['fecha','vuelo', 'horaplaneada','horareal']
dfhistoricalcsv = pd.read_csv(historicalcsv,header=None, names=colnames)
else:
dfhistoricalcsv = pd.DataFrame(columns=['fecha','vuelo','horaplaneada','horareal'])
dfhistoricalcsv.set_index(['fecha','vuelo'])
return dfhistoricalcsv
def obtain_links(url,driver):
"""Obtiene los links destino para hacer el data scrape"""
print('Abre página origen: ' , url)
driver.get(url)
elem = driver.find_element_by_xpath(
'.//input[@id="pagename"]'
'/preceding-sibling::input[@type="hidden"]')
driver.execute_script('''
var elem = arguments[0];
var value = arguments[1];
elem.value = value;
''', elem, 'L')
print('Rellena los campos para hacer el crawling al aeropuerto de Madrid')
    # Fill in the parameters to reach the page of flights to Madrid
driver.find_element_by_id('origin_ac').send_keys('MADRID-BARAJAS A<NAME> (MAD )')
driver.find_element_by_id('destiny_ac').clear()
driver.find_element_by_id('destiny_ac').send_keys('Escribe origen')
    # There are two magnifier buttons, so find_elements is used instead of find_element
    # and the second magnifier button is selected with [1]
driver.find_elements_by_class_name('btnLupa')[1].click()
    dfvuelos = pd.DataFrame(columns=['vuelo','paginaweb'])
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): The json format restrictions communicating to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
        # peta.fetch_clinical_data() does not infer dtypes correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId exists, drop duplicates and go on')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
def filter_description(self):
"""retrun filter description when data load from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File as NCBI MAF format contains SNV and InDel. Defaults to None.
cli_f (str, optional): File name contains clinical info. Defaults to None.
cnv_f (str, optional): File name contains CNV info. Defaults to None.
sv_f (str, optional): File name contains SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv = pd.read_csv(cnv_f, sep='\t')
if not sv_f is None:
self.sv = pd.read_csv(sv_f, sep='\t')
if not cli_f is None:
self.cli = pd.read_csv(cli_f, sep='\t')
else:
self._set_cli()
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
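    # Hedged usage sketch for from_file (file names are illustrative; they mirror
    # the names written by to_tsvs below, but any tab-separated files with the
    # expected columns work):
    #
    #   cdx = CDx_Data()
    #   cdx.from_file(mut_f='mut_info.txt', cli_f='sample_info.txt',
    #                 cnv_f='cnv_info.txt', sv_f='fusion_info.txt')
    #   print(len(cdx), cdx.crosstab.shape)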
def to_tsvs(self, path: str = './'):
"""Write CDx_Data properties to 4 seprated files
Args:
path (str, optional): Path to write files. Defaults to './'.
"""
if not self.cli is None:
self.cli.to_csv(os.path.join(path, 'sample_info.txt'),
index=None,
sep='\t')
if not self.mut is None:
self.mut.to_csv(os.path.join(path, 'mut_info.txt'),
index=None,
sep='\t')
if not self.cnv is None:
self.cnv.to_csv(os.path.join(path, 'cnv_info.txt'),
index=None,
sep='\t')
if not self.sv is None:
self.sv.to_csv(os.path.join(path, 'fusion_info.txt'),
index=None,
sep='\t')
def to_excel(self, filename: str = './output.xlsx'):
"""Write CDx_Data properties to excel file
Args:
filename (str, optional): target filename. Defaults to './output.xlsx'.
"""
if not filename.endswith('xlsx'):
filename = filename + '.xlsx'
with pd.ExcelWriter(filename) as ew:
if not self.cli is None:
self.cli.to_excel(ew, sheet_name='clinical', index=None)
if not self.mut is None:
self.mut.to_excel(ew, sheet_name='mutations', index=None)
if not self.cnv is None:
self.cnv.to_excel(ew, sheet_name='cnv', index=None)
if not self.sv is None:
self.sv.to_excel(ew, sheet_name='sv', index=None)
def _set_cli(self):
"""Set the cli attribute, generate a void DataFrame when it is not specified.
"""
sample_id_series = []
if not self.mut is None:
sample_id_series.append(
self.mut['Tumor_Sample_Barcode'].drop_duplicates())
if not self.cnv is None:
sample_id_series.append(
self.cnv['Tumor_Sample_Barcode'].drop_duplicates())
if not self.sv is None:
sample_id_series.append(
self.sv['Tumor_Sample_Barcode'].drop_duplicates())
if len(sample_id_series) > 0:
self.cli = pd.DataFrame({
'sampleId': pd.concat(sample_id_series)
}).drop_duplicates()
else:
self.cli = None
def _infer_datetime_columns(self) -> pd.DataFrame:
"""To infer the datetime_columns and astype to datetime64 format
Returns:
pd.DataFrame: CDx.cli dataframe
"""
cli = self.cli
for column in cli.columns:
if column.endswith('DATE'):
try:
cli[column] = pd.to_datetime(cli[column])
except Exception as e:
raise DatetimeFormatError(
f'{column} column end with "DATE" can not be transformed to datetime format'
)
return cli
def get_crosstab(self) -> pd.DataFrame:
"""Generate a Gene vs. Sample_id cross table.
Raises:
            SampleIdError: A sample id from the mut, cnv or sv table does not exist in the cli table.
Returns:
pd.DataFrame: CDx_Data.
"""
        # Duplicate sample IDs are not allowed in the cli table here: they would create duplicated columns in the crosstab, which boolean Series indexing cannot handle.
if (self.cli is None) or (len(self.cli) == 0):
return pd.DataFrame([])
sub_dfs = []
# cli
cli_crosstab = self.cli.copy().set_index('sampleId').T
cli_crosstab['track_type'] = 'CLINICAL'
sub_dfs.append(cli_crosstab)
        # mut: represented by cHGVS, joined by '|' for multiple hits
if (not self.mut is None) and (len(self.mut) != 0):
mut_undup = self.mut[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'HGVSp_Short'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['HGVSp_Short'].apply(lambda x: '|'.join(x)).reset_index()
mut_crosstab = mut_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode',
'HGVSp_Short')
mut_crosstab['track_type'] = 'MUTATIONS'
sub_dfs.append(mut_crosstab)
        # cnv: represented by gain or loss; at first use the virtual column "copy_Num"
if (not self.cnv is None) and (len(self.cnv) != 0):
cnv_undup = self.cnv[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'status'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['status'].apply(lambda x: '|'.join(x)).reset_index()
cnv_crosstab = cnv_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode', 'status')
cnv_crosstab['track_type'] = 'CNV'
sub_dfs.append(cnv_crosstab)
        # sv: represented by the gene1/gene2 combination; each record is exploded into 2 rows.
if (not self.sv is None) and (len(self.sv) != 0):
sv_undup = pd.concat([
self.sv,
self.sv.rename(columns={
'gene1': 'gene2',
'gene2': 'gene1'
})
])[['gene1', 'Tumor_Sample_Barcode', 'gene2']].groupby([
'gene1', 'Tumor_Sample_Barcode'
])['gene2'].apply(lambda x: '|'.join(x)).reset_index()
sv_crosstab = sv_undup.pivot('gene1', 'Tumor_Sample_Barcode',
'gene2')
sv_crosstab['track_type'] = 'FUSION'
sub_dfs.append(sv_crosstab)
# pandas does not support reindex with duplicated index, so turn into multiIndex
crosstab = pd.concat(sub_dfs)
crosstab = crosstab.set_index('track_type', append=True)
crosstab = crosstab.swaplevel()
return crosstab
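        # Shape note (hedged): the crosstab returned above is indexed by
        # (track_type, name) pairs -- e.g. ('CLINICAL', <cli column>) or
        # ('MUTATIONS', <gene>) -- with one column per sampleId; the selection
        # helpers below reindex on those two index levels.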
    # How to build a universal selection interface that can select by variant, gene, cancer type, etc., and support "or"/"and" logic?
    # This interface is critical: the variant inclusion criteria chosen here affect the crosstab,
    # and the selection returns a new CDx_Data object.
def select(self, conditions: dict = {}, update=True):
"""A universe interface to select data via different conditions.
Args:
            conditions (dict, optional): Each key represents one column name of the CDx_Data attributes. Defaults to {}.
update (bool, optional): [description]. Defaults to True.
"""
return self
    # Helper functions for data selection
def _numeric_selector(self, ser: pd.Series, range: str) -> pd.Series:
"""Compute a comparition expression on a numeric Series
Args:
ser (pd.Series): Numeric Series.
            range (str): Comparison expression like 'x>5'. 'x' is mandatory and represents the input.
Raises:
NotNumericSeriesError: Input Series`s dtype is not a numeric type.
Returns:
pd.Series: Series with boolean values.
"""
if ser.dtype == 'object':
raise NotNumericSeriesError(f'{ser.name} is not numeric')
#return ser.map(lambda x: eval(re.sub(r'x', str(x), range)))
return eval(re.sub(r'x', 'ser', range))
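    # Worked example for _numeric_selector (hypothetical values): with
    # ser = pd.Series([3, 6, 9]) and range = 'x>5', re.sub rewrites the
    # expression to "ser>5" and eval() returns the boolean Series
    # [False, True, True].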
def _catagory_selector(self, ser: pd.Series, range: list) -> pd.Series:
"""Return True if the Series` value in the input range list.
Args:
            ser (pd.Series): Category Series.
range (list): List of target options.
Returns:
pd.Series: Series with boolean values
"""
return ser.isin(range)
def _selector(self, df: pd.DataFrame, selections: dict) -> pd.DataFrame:
"""Filter the input DataFrame via the dict of conditions.
Args:
df (pd.DataFrame): Input.
selections (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
                The list values represent a category target and str values represent a numeric target.
Raises:
            NotInColumnError: A key in the dict is not in the df's columns.
            UnknowSelectionTypeError: The type of a value in the dict is neither str nor list.
Returns:
            pd.DataFrame: Filtered DataFrame
"""
columns = df.columns
for key, value in selections.items():
if key not in columns:
raise NotInColumnError(f'{key} is not in the columns')
if isinstance(value, str):
df = df[self._numeric_selector(df[key], value)]
elif isinstance(value, list):
df = df[self._catagory_selector(df[key], value)]
else:
raise UnknowSelectionTypeError(
f'{selections} have values not str nor list')
return df
def _fuzzy_id(self, regex: re.Pattern, text: str) -> str:
"""transform a sample id into fuzzy mode according the regex pattern
Args:
regex (re.Pattern): The info retains are in the capture patterns
text (str): input sample id
Returns:
str: fuzzy mode sample id
"""
matches = regex.findall(text)
if matches:
text = '_'.join(matches[0])
return text
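    # Worked example for _fuzzy_id (hypothetical id): with
    # regex = re.compile(r'(\d+)[A-Z](\d+)') and text = '123B456X',
    # findall() yields [('123', '456')], so the fuzzy id is '123_456';
    # if nothing matches, the original text is returned unchanged.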
def select_by_sample_ids(self,
sample_ids: list,
fuzzy: bool = False,
regex_str: str = r'(\d+)[A-Z](\d+)',
study_ids: list = []):
"""Select samples via a list of sample IDs.
Args:
sample_ids (list): sample ids list.
fuzzy (bool): fuzzy mode.
            regex_str (str): The matching rule for fuzzy match. The info captured by the regex groups must match for a record to qualify. Defaults to r'(\d+)[A-Z](\d+)'.
study_ids: (list): The corresponding study id of each sample ids. Length of sample_ids and study_ids must be the same.
Raises:
ListsUnEqualLengthError: Length of sample_ids and study_ids are not equal.
Returns:
CDx: CDx object of selected samples.
"""
if fuzzy:
regex = re.compile(regex_str)
# fuzzy the input ids
target_ids = []
fuzzy_to_origin = defaultdict(list)
transform = lambda x: self._fuzzy_id(regex, x)
for sample_id in sample_ids:
fuzzy_sample_id = self._fuzzy_id(regex, sample_id)
fuzzy_to_origin[fuzzy_sample_id].append(sample_id)
target_ids.append(fuzzy_sample_id)
else:
target_ids = sample_ids
transform = lambda x: x
# match
sample_id_bool = self.cli['sampleId'].map(transform).isin(target_ids)
# no match, return immediately
if not sample_id_bool.any():
return CDx_Data()
# with study ids
if len(study_ids):
if len(study_ids) != len(sample_ids):
                raise ListsUnEqualLengthError('Length of sample_ids and study_ids must be equal')
sub_cli_df = self.cli[sample_id_bool]
study_id_bool = sub_cli_df.apply(
lambda x: x['studyId'] == study_ids[target_ids.index(
transform(x['sampleId']))],
axis=1)
sample_id_bool = sample_id_bool & study_id_bool
# construct new CDx_Data object
# CDx_Data always have a cli
cli_df = self.cli[sample_id_bool].copy()
# add a column of query ids for fuzzy match
# multi hit represent as a string
if fuzzy:
cli_df['queryId'] = cli_df['sampleId'].map(
lambda x: ','.join(fuzzy_to_origin[transform(x)])).copy()
if not self.mut is None and len(self.mut) != 0:
mut_df = self.mut[self.mut['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
mut_df = None
if not self.cnv is None and len(self.cnv) != 0:
cnv_df = self.cnv[self.cnv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
cnv_df = None
if not self.sv is None and len(self.sv) != 0:
sv_df = self.sv[self.sv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
sv_df = None
return CDx_Data(cli_df=cli_df,
mut_df=mut_df,
cnv_df=cnv_df,
sv_df=sv_df)
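    # Hedged usage sketch for select_by_sample_ids (sample ids are made up):
    #
    #   sub = cdx.select_by_sample_ids(['19B12345', '20B67890'], fuzzy=True)
    #   sub.cli[['sampleId', 'queryId']]   # queryId is added in fuzzy mode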
#
def set_mut_eligibility(self, **kwargs):
"""Set threshold for SNV/InDels to regrard as a positive sample
Raises:
VariantUndefinedError: mut info not provided by user.
Returns:
CDx_Data: CDx_Data object
"""
if self.mut is None or len(self.mut) == 0:
mut = None
else:
mut = self._selector(self.mut, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=mut,
cnv_df=self.cnv,
sv_df=self.sv)
def set_cnv_eligibility(self, **kwargs):
"""Set threshold for CNV to regrard as a positive sample.
Raises:
VariantUndefinedError: cnv info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.cnv is None or len(self.cnv) == 0:
cnv = None
else:
cnv = self._selector(self.cnv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=cnv,
sv_df=self.sv)
def set_sv_eligibility(self, **kwargs):
"""Set threshold for SV to regrard as a positive sample.
Raises:
VariantUndefinedError: SV info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.sv is None or len(self.sv) == 0:
sv = None
else:
sv = self._selector(self.sv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=self.cnv,
sv_df=sv)
    # Specify a column name, then a range: use a list for categorical values and an expression string for numeric values,
    # e.g. attr_dict={'Cancer_type':['lung','CRC'],'Age':'x>5'}
def select_samples_by_clinical_attributes2(self, attr_dict: dict):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
attr_dict (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
                The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, attr_dict)
return self.select_by_sample_ids(cli_df['sampleId'])
def select_samples_by_clinical_attributes(self, **kwargs):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
Keywords arguments with each key represent a column in the input DataFrame.
like "Cancer_type=['lung','CRC'], Age='x>5'"
            The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, kwargs)
return self.select_by_sample_ids(cli_df['sampleId'])
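    # Hedged usage sketch (column names/values must exist in your cli table;
    # these mirror the docstring example):
    #
    #   sub = cdx.select_samples_by_clinical_attributes(
    #       Cancer_type=['lung', 'CRC'], Age='x>5')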
def select_samples_by_date_attributes(
self,
column_name: str = 'SAMPLE_RECEIVED_DATE',
start='',
end: str = '',
days: int = 0,
period: str = '',
):
"""Select samples using a datetime attribute in the cli dataframe
Args:
column_name (str, optional): Column used in the cli dataframe. Defaults to 'SAMPLE_RECEIVED_DATE'.
            start (str, optional): Time start point. Defaults to ''.
            end (str, optional): Time end point. Defaults to ''.
            days (int, optional): Number of days the range lasts. Defaults to 0.
            period (str, optional): Exact range, e.g. '202005' for May 2020 or '2021' for the whole year. Defaults to ''.
"""
date_ser = self.cli.set_index(column_name)['sampleId']
if period:
cdx = self.select_by_sample_ids(date_ser[period])
elif start and end:
cdx = self.select_by_sample_ids(date_ser[start:end])
elif start and days:
cdx = self.select_by_sample_ids(date_ser[start:(
pd.to_datetime(start) +
pd.to_timedelta(days, 'D')).strftime("%Y-%m-%d")])
elif end and days:
cdx = self.select_by_sample_ids(date_ser[(
pd.to_datetime(end) -
pd.to_timedelta(days, 'D')).strftime("%Y-%m-%d"):end])
return cdx
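    # Hedged usage sketch (dates are illustrative; `period` relies on pandas
    # partial string indexing of the datetime column):
    #
    #   cdx.select_samples_by_date_attributes(period='2021')
    #   cdx.select_samples_by_date_attributes(start='2021-05-01', days=30)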
    # Select positive samples: gene combinations with and/or logic, cHGVS and gHGVS, gene panels such as MMR, HR, etc.
    # Gene combinations can be passed in as a list argument.
def select_samples_by_mutate_genes(
self,
genes: list = [],
variant_type: list = ['MUTATIONS', 'CNV', 'FUSION'],
how='or'):
"""Select sample via positve variant genes.
Args:
genes (list): Gene Hugo names. Defaults to [] for all mutated genes
variant_type (list, optional): Combination of MUTATIONS, CNV and SV. Defaults to ['MUTATIONS', 'CNV', 'SV'].
how (str, optional): 'and' for variant in all genes, 'or' for variant in either genes. Defaults to 'or'.
Raises:
GenesRelationError: Value of how is not 'and' nor 'or'.
Returns:
CDx: CDx object of selected samples.
"""
variant_crosstab = self.crosstab.reindex(index=variant_type, level=0)
if len(genes) != 0:
variant_crosstab = variant_crosstab.reindex(index=genes, level=1)
        # Certain variant_types or genes yield an empty table; return early to avoid the all() issue
if len(variant_crosstab) == 0:
return CDx_Data()
gene_num = len(
pd.DataFrame(list(
variant_crosstab.index)).iloc[:, 1].drop_duplicates())
if how == 'or':
is_posi_sample = variant_crosstab.apply(
lambda x: any(pd.notnull(x)))
elif how == 'and':
# reindex multiindex bug
if len(genes) != 0 and len(genes) != gene_num:
return CDx_Data()
is_posi_sample = variant_crosstab.apply(
lambda x: all(pd.notnull(x)))
else:
raise GenesRelationError(
f'value of "how" must be "or" or "and", here comes "{how}"')
# the last column is "track_type"
sample_ids = is_posi_sample[is_posi_sample].index
return self.select_by_sample_ids(sample_ids)
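    # Hedged usage sketch (gene names are examples only):
    #
    #   posi = cdx.select_samples_by_mutate_genes(
    #       genes=['EGFR', 'ALK'],
    #       variant_type=['MUTATIONS', 'FUSION'],
    #       how='or')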
# Analysis
def tableone(self, **kwargs) -> TableOne:
"""Generate summary table1 using tableone library. Please refer to https://github.com/tompollard/tableone
Args:
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
min_max: list, optional
List of variables that should report minimum and maximum, instead of
standard deviation (for normal) or Q1-Q3 (for non-normal).
pval : bool, optional
Display computed P-Values (default: False).
pval_adjust : str, optional
Method used to adjust P-Values for multiple testing.
The P-values from the unadjusted table (default when pval=True)
are adjusted to account for the number of total tests that were performed.
These adjustments would be useful when many variables are being screened
to assess if their distribution varies by the variable in the groupby argument.
For a complete list of methods, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
htest_name : bool, optional
Display a column with the names of hypothesis tests (default: False).
htest : dict, optional
Dictionary of custom hypothesis tests. Keys are variable names and
values are functions. Functions must take a list of Numpy Arrays as
the input argument and must return a test result.
e.g. htest = {'age': myfunc}
missing : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
rename : dict, optional
Dictionary of alternative names for variables.
e.g. `rename = {'sex':'gender', 'trt':'treatment'}`
sort : bool or str, optional
If `True`, sort the variables alphabetically. If a string
(e.g. `'P-Value'`), sort by the specified column in ascending order.
Default (`False`) retains the sequence specified in the `columns`
argument. Currently the only columns supported are: `'Missing'`,
`'P-Value'`, `'P-Value (adjusted)'`, and `'Test'`.
limit : int or dict, optional
Limit to the top N most frequent categories. If int, apply to all
categorical variables. If dict, apply to the key (e.g. {'sex': 1}).
order : dict, optional
Specify an order for categorical variables. Key is the variable, value
is a list of values in order. {e.g. 'sex': ['f', 'm', 'other']}
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: True).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`)
applies the rule per variable, defaulting to 1 place for unspecified
variables. For continuous variables, applies to all summary statistics
(e.g. mean and standard deviation). For categorical variables, applies
to percentage only.
overall : bool, optional
If True, add an "overall" column to the table. Smd and p-value
calculations are performed only using stratified columns.
display_all : bool, optional
If True, set pd. display_options to display all columns and rows.
(default: False)
dip_test : bool, optional
Run Hartigan's Dip Test for multimodality. If variables are found to
have multimodal distributions, a remark will be added below the Table 1.
(default: False)
normal_test : bool, optional
Test the null hypothesis that a sample come from a normal distribution.
Uses scipy.stats.normaltest. If variables are found to have non-normal
distributions, a remark will be added below the Table 1.
(default: False)
tukey_test : bool, optional
Run Tukey's test for far outliers. If variables are found to
have far outliers, a remark will be added below the Table 1.
(default: False)
Returns:
pd.DataFrame: Summary of the Data
"""
table1 = TableOne(self.cli, **kwargs)
return table1
def pathway(self):
pass
def pinpoint(self):
pass
def oncoprint(self):
pass
def survival(self):
pass
def plot_gene_variant_rate(self, genes=genes_688):
freq_mut_s = self.test_positive_rate(
genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['MUTATIONS']) * 100
freq_cnv_s = self.test_positive_rate(genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['CNV'
]) * 100
freq_sv_s = self.test_positive_rate(genes_to_observe=genes,
groupby_genes=True,
variant_type_to_observe=['FUSION'
]) * 100
        # Check whether any of the three variant types is empty and handle it
avalible_s = []
variantlist = [freq_mut_s, freq_cnv_s, freq_sv_s]
for i, x in enumerate(variantlist):
if len(x) != 0:
avalible_s.append(x)
if len(avalible_s) == 0:
return 'no data'
if len(freq_mut_s) == 0:
freq_mut_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
if len(freq_cnv_s) == 0:
freq_cnv_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
if len(freq_sv_s) == 0:
freq_sv_s = pd.Series([0] * len(avalible_s[0]),
index=avalible_s[0].index)
freq_s = pd.DataFrame({
'Mutations': freq_mut_s,
'CNV': freq_cnv_s,
'SV': freq_sv_s
}).fillna(0)
freq_s['total'] = freq_s.sum(axis=1)
freq_s = freq_s.sort_values(by='total', ascending=False)
fig = px.bar(
freq_s,
x=freq_s.index,
y=['Mutations', 'CNV', 'SV'],
)
#fig.update_traces(texttemplate='%{text:.2%}', textposition='outside',)
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.layout.xaxis.title.text = None
        fig.layout.yaxis.title.text = 'Detection rate (%)'
fig.layout.legend.title.text = None
return fig
    # Should the plotting routine be built into the class?
def test_positive_rate(
self,
groupby='',
groupby_genes=False,
groupby_variant_type=False,
genes_to_observe=[],
variant_type_to_observe=['MUTATIONS', 'CNV', 'FUSION']):
"""Calculate the positvie rate for CDx object in user defined way
Args:
groupby (str, optional): Column name in the CDx_Data.cli DataFrame. Defaults to ''.
groupby_genes (bool, optional): Groupby mutate genes. Defaults to False.
groupby_variant_type (bool, optional): Groupby variant type, including MUTATIONS, CNV and SV. Defaults to False.
genes_to_observe (list, optional): Genes list that should be considered. Defaults to [].
            variant_type_to_observe (list, optional): Variant types that should be considered. Defaults to ['MUTATIONS','CNV','SV'].
Returns:
Union[float,pd.Series]: A pd.Series when groupby options passed, a float value when not.
"""
# empty CDx
if len(self) == 0:
return | pd.Series([], dtype='float64') | pandas.Series |
# Type: module
# String form: <module 'WindPy' from '/opt/conda/lib/python3.6/WindPy.py'>
# File: /opt/conda/lib/python3.6/WindPy.py
# Source:
from ctypes import *
import threading
import traceback
from datetime import datetime, date, time, timedelta
import time as t
import re
from WindData import *
from WindBktData import *
from XMLParser import XMLReader
import pandas as pd
import logging
import getpass
r = XMLReader("/wind/serverapi/wsq_decode.xml")
# import speedtcpclient as client
expolib = None
speedlib = None
TDB_lib = None
c_lib = None
# For test use! Should be replaced with a real userID
# userID = "1214779"
api_retry = 1
interval = 2
userName = getpass.getuser()
authDataPath = "/home/" + userName + "/.wind/authData"
authString = readFile(authDataPath)
# userID = str(getJsonTag(authString, 'accountID'))
# if userID == '':
# userID = "1214779"
wind_log_path = "/usr/local/log/"
def DemoWSQCallback(out):
print("DemoWSQCallback")
print(out)
wsq_items = []
def g_wsq_callback(reqID, indata):
out = WindData()
out.set(indata, 3)
out.RequestID = reqID
id2rtField = {}
for item in wsq_items:
id2rtField[item['id']] = item['funname'].upper()
tmp = [id2rtField[str(val)] for val in out.Fields]
out.Fields = tmp
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
try:
g_wsq_callback.callback_funcs[reqID](out)
except:
print(out)
SPDCBTYPE = CFUNCTYPE(None, c_int, POINTER(c_apiout))
spdcb = SPDCBTYPE(g_wsq_callback)
g_wsq_callback.callback_funcs = {}
REQUEST_ID_CANCELALL = 0
REQUEST_ID_SYNC = 1
REQUEST_ID_MAX_RESQUEST = 9999
REQUEST_ID_MIN_RESQUEST = 3
g_requestID = REQUEST_ID_MIN_RESQUEST # The minimum id of NONE BLOCKING MODE
def retry(func):
def wrapper(*args, **kargs):
out = func(*args, **kargs)
if not out:
return out
error_code = type_check(out)
if error_code == -10:
for i in range(api_retry):
out = func(*args, **kargs)
error_code = type_check(out)
if error_code != -10:
break
return out
    # Check the type of out: it is a tuple when the usedf parameter was used
def type_check(out):
if isinstance(out, tuple):
error_code = out[0]
else:
error_code = out.ErrorCode
return error_code
return wrapper
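# Note on the retry decorator above (hedged summary of the code): when the
# wrapped Wind API call returns ErrorCode -10, it is re-issued up to api_retry
# times and the last result is returned; any other ErrorCode passes through
# untouched. A decorated call site looks like the __call__ methods below, e.g.
#
#   @retry
#   def fetch(...):        # hypothetical wrapper, for illustration only
#       return some_wind_call()   # returns a WindData object or an (ErrorCode, DataFrame) tuple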
class WindQnt:
b_start = False
def __static_var(var_name, inital_value):
def _set_var(obj):
setattr(obj, var_name, inital_value)
return obj
return _set_var
def __stringify(arg):
if arg is None:
tmp = [""]
elif arg == "":
tmp = [""]
elif isinstance(arg, str):
a_l = arg.strip().split(',')
arg = ','.join([a.strip() for a in a_l])
tmp = [arg]
elif isinstance(arg, list):
tmp = [str(x) for x in arg]
elif isinstance(arg, tuple):
tmp = [str(x) for x in arg]
elif isinstance(arg, float) or isinstance(arg, int):
tmp = [str(arg)]
elif str(type(arg)) == "<type 'unicode'>":
tmp = [arg]
else:
tmp = None
if tmp is None:
return None
else:
return ";".join(tmp)
def __parseoptions(self, arga=None, argb=None):
options = WindQnt._WindQnt__stringify(self)
if options is None:
return None
if isinstance(arga, tuple):
for i in range(len(arga)):
v = WindQnt._WindQnt__stringify(arga[i])
if v is None:
continue
else:
if options == "":
options = v
else:
options = options + ";" + v
if isinstance(argb, dict):
keys = argb.keys()
for key in keys:
v = WindQnt._WindQnt__stringify(argb[key])
if v is None:
continue
else:
if options == "":
options = str(key) + "=" + v
else:
options = options + ";" + str(key) + "=" + v
return options
@staticmethod
def format_option(options):
if options is None:
return None
option_f = options.replace(';', '&&')
return option_f
    # The with_time param means hours:minutes:seconds may be included, but it is not required
def __parsedate(self, with_time=False):
d = self
if d is None:
d = datetime.today().strftime("%Y-%m-%d")
return d
elif isinstance(d, date):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, datetime):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, str):
try:
d = pure_num = ''.join(list(filter(str.isdigit, d)))
if len(d) != 8 and len(d) != 14:
return None
if len(pure_num) == 14:
d = pure_num[:8] + ' ' + pure_num[8:]
if int(d[9:11]) > 24 or int(d[9:11]) < 0 or \
int(d[11:13]) > 60 or int(d[11:13]) < 0 or \
int(d[13:15]) > 60 or int(d[13:15]) < 0:
return None
if int(d[:4]) < 1000 or int(d[:4]) > 9999 or \
int(d[4:6]) < 1 or int(d[4:6]) > 12 or \
int(d[6:8]) < 1 or int(d[6:8]) > 31:
return None
date_time = d.split(' ')
YMD = date_time[0][:4] + '-' + date_time[0][4:6] + '-' + date_time[0][6:8]
HMS = ''
if with_time and len(date_time) == 2:
HMS = ' ' + date_time[1][:2] + ':' + date_time[1][2:4] + ':' + date_time[1][4:6]
d = YMD + HMS
return d
except:
return None
return d
# def __parsedate(d):
# if d is None:
# d = datetime.today().strftime("%Y-%m-%d")
# return d
# elif isinstance(d, date):
# d = d.strftime("%Y-%m-%d")
# return d
# elif isinstance(d, str):
# try:
# #Try to get datetime object from the user input string.
# #We will go to the except block, given an invalid format.
# if re.match(r'^(?:(?!0000)[0-9]{4}-(?:(?:0[1-9]|1[0-2])-(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])-(?:29|30)|(?:0[13578]|1[02])-31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)-02-29)$',d, re.I|re.M):
# d = datetime.strptime(d, "%Y-%m-%d")
# return d.strftime("%Y-%m-%d")
# elif re.match(r'^(?:(?!0000)[0-9]{4}(?:(?:0[1-9]|1[0-2])(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])(?:29|30)|(?:0[13578]|1[02])31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)0229)$', d, re.I|re.M):
# d = datetime.strptime(d, "%Y%m%d")
# return d.strftime("%Y-%m-%d")
# else:
# return None
# except:
# return None
# else:
# return None
#
# return d
def use_debug_file(self, debug_expo='/wind/serverapi/libExpoWrapperDebug.so',
debug_speed='/wind/serverapi/libSpeedWrapperDebug.so'):
WindQnt.debug_expo = debug_expo
WindQnt.debug_speed = debug_speed
@staticmethod
def format_wind_data(error_codes, msg):
out = WindData()
out.ErrorCode = error_codes
out.Codes = ['ErrorReport']
out.Fields = ['OUT MESSAGE']
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
out.Data = [[msg]]
return out
@staticmethod
def to_dataframe(out):
if out.ErrorCode != 0:
return pd.DataFrame([out.ErrorCode], columns=['ErrorCode'])
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
return df.T.infer_objects()
def isconnected(self):
return 0
class __start:
def __init__(self):
self.restype = c_int32
self.argtypes = [c_wchar_p, c_wchar_p, c_int32]
self.lastCall = 0
def __call__(self, show_welcome=True, retry=1):
global expolib
global speedlib
global TDB_lib
global c_lib
global api_retry
if t.time() - self.lastCall > interval:
if WindQnt.b_start:
return
WindQnt.b_start = True
self.lastCall = t.time()
TDB_lib = CDLL("/wind/serverapi/libtdb.so")
c_lib = CDLL("/wind/serverapi/libtradeapi.so")
c_lib.tLogon.restype = POINTER(c_variant)
c_lib.tQuery.restype = POINTER(c_variant)
c_lib.tLogout.restype = POINTER(c_variant)
c_lib.tSendOrder.restype = POINTER(c_variant)
c_lib.tCancelOrder.restype = POINTER(c_variant)
if hasattr(WindQnt, "debug_expo"):
expolib = CDLL(WindQnt.debug_expo)
else:
expolib = CDLL("/wind/serverapi/libExpoWrapper.so")
expolib.SendMsg2Expo.restype = POINTER(c_apiout)
if hasattr(WindQnt, "debug_speed"):
speedlib = CDLL(WindQnt.debug_speed)
else:
speedlib = CDLL("/wind/serverapi/libSpeedWrapper.so")
speedlib.SendMsg2SpeedAsyc.restype = POINTER(c_apiout)
api_retry = int(retry) if int(retry) < 6 else 5
if show_welcome:
print("COPYRIGHT (C) 2017 Wind Information Co., Ltd. ALL RIGHTS RESERVED.\n"
"IN NO CIRCUMSTANCE SHALL WIND BE RESPONSIBLE FOR ANY DAMAGES OR LOSSES\n"
"CAUSED BY USING WIND QUANT API FOR PYTHON.")
return
else:
# print ("wait a while to start!")
return ERR_WAIT
def __str__(self):
return ("Start the Wind Quant API")
start = __start()
class __wses:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p]
self.lastCall = 0
@retry
def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, *arga, **argb):
# write_log('call wsd')
s = int(t.time()*1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
if isinstance(endTime, str):
                # Check whether it is a date macro; if not, call the parsedate method
endTime_compile = re.findall('\d\d\d\d\d\d\d\d', endTime.replace('-', ''))
if endTime_compile:
endTime = WindQnt._WindQnt__parsedate(endTime)
else:
                    # Handle datetime-type dates
endTime = WindQnt._WindQnt__parsedate(endTime)
if endTime == None:
print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
return
if isinstance(beginTime, str):
beginTime_compile = re.findall('\d\d\d\d\d\d\d\d', beginTime.replace('-', ''))
if beginTime_compile:
beginTime = WindQnt._WindQnt__parsedate(beginTime)
else:
beginTime = WindQnt._WindQnt__parsedate(beginTime)
if beginTime == None:
print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01")
return
if(endTime==None): endTime = datetime.today().strftime("%Y-%m-%d")
if(beginTime==None): beginTime = endTime
            # check whether the endTime is earlier than the beginTime
# endD = datetime.strptime(endTime, "%Y-%m-%d")
# beginD = datetime.strptime(beginTime, "%Y-%m-%d")
# if (endD-beginD).days < 0:
# print("The endTime should be later than or equal to the beginTime!")
# return
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if codes == None or fields == None or options == None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wses|"+codes+"|"+fields+"|"+beginTime+"|"+endTime+"|"+options+"|"+userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time()*1000)
write_log(str(e-s) + ' call wses')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate = True)
if 'usedf' in argb.keys():
usedf = argb['usedf']
if usedf:
if not isinstance(usedf, bool):
print('the sixth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
col = out.Times
if len(out.Codes) == len(out.Fields) == 1:
idx = out.Fields
elif len(out.Codes) > 1 and len(out.Fields) == 1:
idx = out.Codes
elif len(out.Codes) == 1 and len(out.Fields) > 1:
idx = out.Fields
else:
idx = None
df = pd.DataFrame(out.Data, columns=col)
if idx:
df.index = idx
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsd')
return out.ErrorCode, df.T.infer_objects()
except Exception:
print(traceback.format_exc())
return
if out.ErrorCode != 0:
if len(out.Data) != 0 and len(out.Data[0]) > 100:
if len(out.Data) > 1:
print(str(out.Data)[:10] + '...]...]')
else:
print(str(out.Data)[:10] + '...]]')
else:
print(out.Data)
e = int(t.time()*1000)
write_log(str(e-s) + ' call wses')
return out
def __str__(self):
return ("WSES")
wses = __wses()
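    # Hedged usage sketch for wses (codes/fields/dates are illustrative and not
    # verified against a live Wind terminal; WindPy normally exposes an instance
    # of this class, commonly named `w`):
    #
    #   w.start()
    #   err, df = w.wses("881001.WI", "sec_close_avg",
    #                    "2020-01-01", "2020-12-31", usedf=True)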
class __wsee:
def __init__(self):
self.restype = POINTER(c_apiout)
self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p] #codes,fields,options
self.lastCall = 0
@retry
def __call__(self, codes, fields, options=None, *arga, **argb):
# write_log('call wsee')
s = int(t.time()*1000)
if expolib is None:
return WindQnt.format_wind_data(-103, '')
if t.time() - self.lastCall < interval:
t.sleep(interval)
codes = WindQnt._WindQnt__stringify(codes)
fields = WindQnt._WindQnt__stringify(fields)
options = WindQnt._WindQnt__parseoptions(options, arga, argb)
if fields == None or options == None:
print("Insufficient arguments!")
return
userID = str(getJsonTag(authString, 'accountID'))
if userID == '':
userID = "1214779"
tmp = "wsee|"+codes+"|"+fields+"|"+options+"|"+userID
tmp = tmp.encode("utf16") + b"\x00\x00"
apiOut = expolib.SendMsg2Expo(tmp, len(tmp))
self.lastCall = t.time()
if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010:
msg = 'Request Timeout'
e = int(t.time()*1000)
write_log(str(e-s) + ' call wsee')
return WindQnt.format_wind_data(-40521010, msg)
else:
out = WindData()
out.set(apiOut, 1, asdate=True)
                # Convert the WindData result to DataFrame format
if 'usedf' in argb.keys():
usedf = argb['usedf']
if usedf:
if not isinstance(usedf, bool):
print('the fourth parameter is usedf which should be the Boolean type!')
return
try:
if out.ErrorCode != 0:
df = pd.DataFrame(out.Data, index=out.Fields)
df.columns = [x for x in range(df.columns.size)]
return out.ErrorCode, df.T.infer_objects()
if out.Codes == 1 or out.Fields == 1:
return out.ErrorCode, WindQnt.to_dataframe(out)
else:
df = | pd.DataFrame(out.Data, columns=out.Codes, index=out.Fields) | pandas.DataFrame |
# Copyright 2022 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from feast import RequestSource
from feast.feature_view import FeatureView
from feast.field import Field
from feast.infra.offline_stores.file_source import FileSource
from feast.on_demand_feature_view import OnDemandFeatureView, on_demand_feature_view
from feast.types import Float32, String, UnixTimestamp
def udf1(features_df: pd.DataFrame) -> pd.DataFrame:
df = | pd.DataFrame() | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import unittest
import functools
import itertools
import types
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats
from skbio import Sequence, DNA, RNA, Protein, TabularMSA
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
from skbio.util._testing import ReallyEqualMixin
from skbio.metadata._testing import (MetadataMixinTests,
PositionalMetadataMixinTests)
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_index_equal
class TabularMSASubclass(TabularMSA):
"""Used for testing purposes."""
pass
class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
MetadataMixinTests):
def setUp(self):
self._metadata_constructor_ = functools.partial(TabularMSA, [])
class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
PositionalMetadataMixinTests):
def setUp(self):
def factory(axis_len, positional_metadata=None):
return TabularMSA([DNA('A' * axis_len)],
positional_metadata=positional_metadata)
self._positional_metadata_constructor_ = factory
class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
def test_from_dict_empty(self):
self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
def test_from_dict_single_sequence(self):
self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
TabularMSA([DNA('ACGT')], index=['foo']))
def test_from_dict_multiple_sequences(self):
msa = TabularMSA.from_dict(
{1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
# Sort because order is arbitrary.
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
def test_from_dict_invalid_input(self):
# Basic test to make sure error-checking in the TabularMSA constructor
# is being invoked.
with self.assertRaisesRegex(
ValueError, 'must match the number of positions'):
TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
def test_constructor_invalid_dtype(self):
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
TabularMSA([Sequence('')])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*int'):
TabularMSA([42, DNA('')])
def test_constructor_not_monomorphic(self):
with self.assertRaisesRegex(TypeError,
'matching type.*RNA.*DNA'):
TabularMSA([DNA(''), RNA('')])
with self.assertRaisesRegex(TypeError,
'matching type.*float.*Protein'):
TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
def test_constructor_unequal_length(self):
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 0'):
TabularMSA([Protein(''), Protein('P')])
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 3'):
TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
def test_constructor_non_iterable(self):
with self.assertRaises(TypeError):
TabularMSA(42)
def test_constructor_minter_and_index_both_provided(self):
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
index=['a', 'b'])
def test_constructor_invalid_minter_callable(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
def test_constructor_missing_minter_metadata_key(self):
with self.assertRaises(KeyError):
TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
minter='foo')
def test_constructor_unhashable_minter_metadata_key(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
def test_constructor_index_length_mismatch_iterable(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
def test_constructor_index_length_mismatch_index_object(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
def test_constructor_invalid_index_scalar(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=42)
def test_constructor_non_unique_labels(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
assert_index_equal(msa.index, pd.Int64Index([1, 1]))
def test_constructor_empty_no_index(self):
# sequence empty
msa = TabularMSA([])
self.assertIsNone(msa.dtype)
self.assertEqual(msa.shape, (0, 0))
assert_index_equal(msa.index, pd.RangeIndex(0))
with self.assertRaises(StopIteration):
next(iter(msa))
# position empty
seqs = [DNA(''), DNA('')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (2, 0))
assert_index_equal(msa.index, pd.RangeIndex(2))
self.assertEqual(list(msa), seqs)
def test_constructor_empty_with_labels(self):
# sequence empty
msa = TabularMSA([], minter=lambda x: x)
assert_index_equal(msa.index, pd.Index([]))
msa = TabularMSA([], index=iter([]))
assert_index_equal(msa.index, pd.Index([]))
# position empty
msa = TabularMSA([DNA('', metadata={'id': 42}),
DNA('', metadata={'id': 43})], minter='id')
assert_index_equal(msa.index, pd.Index([42, 43]))
msa = TabularMSA([DNA(''), DNA('')], index=iter([42, 43]))
assert_index_equal(msa.index, pd.Index([42, 43]))
def test_constructor_non_empty_no_labels_provided(self):
# 1x3
seqs = [DNA('ACG')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (1, 3))
assert_index_equal(msa.index, pd.RangeIndex(1))
self.assertEqual(list(msa), seqs)
# 3x1
seqs = [DNA('A'), DNA('C'), DNA('G')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 1))
assert_index_equal(msa.index, pd.RangeIndex(3))
self.assertEqual(list(msa), seqs)
def test_constructor_non_empty_with_labels_provided(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(seqs, minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
msa = TabularMSA(seqs, index=iter([42, 43, 44]))
assert_index_equal(msa.index, pd.Index([42, 43, 44]))
def test_constructor_works_with_iterator(self):
seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
msa = TabularMSA(iter(seqs), minter=str)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (3, 3))
assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
self.assertEqual(list(msa), seqs)
def test_constructor_with_multiindex_index(self):
msa = TabularMSA([DNA('AA'), DNA('GG')],
index=[('foo', 42), ('bar', 43)])
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_constructor_with_multiindex_minter(self):
def multiindex_minter(seq):
if str(seq) == 'AC':
return ('foo', 42)
else:
return ('bar', 43)
msa = TabularMSA([DNA('AC'), DNA('GG')], minter=multiindex_minter)
self.assertIsInstance(msa.index, pd.MultiIndex)
assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
def test_copy_constructor_respects_default_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----'), DNA('AAAA')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(msa.index, pd.RangeIndex(3))
assert_index_equal(copy.index, pd.RangeIndex(3))
def test_copy_constructor_without_metadata(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
assert_index_equal(copy.index, pd.RangeIndex(2))
def test_copy_constructor_with_metadata(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa)
self.assertEqual(msa, copy)
self.assertIsNot(msa, copy)
self.assertIsNot(msa.metadata, copy.metadata)
self.assertIsNot(msa.positional_metadata, copy.positional_metadata)
# pd.Index is immutable, no copy necessary.
self.assertIs(msa.index, copy.index)
def test_copy_constructor_state_override_with_minter(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str)
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
minter=str))
def test_copy_constructor_state_override_with_index(self):
msa = TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 42},
positional_metadata={'bar': range(4)},
index=['idx1', 'idx2'])
copy = TabularMSA(msa, metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b'])
self.assertNotEqual(msa, copy)
self.assertEqual(
copy,
TabularMSA([DNA('ACGT'),
DNA('----')],
metadata={'foo': 43},
positional_metadata={'bar': range(4, 8)},
index=['a', 'b']))
def test_copy_constructor_with_minter_and_index(self):
msa = TabularMSA([DNA('ACGT'), DNA('----')], index=['idx1', 'idx2'])
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA(msa, index=['a', 'b'], minter=str)
def test_dtype(self):
self.assertIsNone(TabularMSA([]).dtype)
self.assertIs(TabularMSA([Protein('')]).dtype, Protein)
with self.assertRaises(AttributeError):
TabularMSA([]).dtype = DNA
with self.assertRaises(AttributeError):
del TabularMSA([]).dtype
def test_shape(self):
shape = TabularMSA([DNA('ACG'), DNA('GCA')]).shape
self.assertEqual(shape, (2, 3))
self.assertEqual(shape.sequence, shape[0])
self.assertEqual(shape.position, shape[1])
with self.assertRaises(TypeError):
shape[0] = 3
with self.assertRaises(AttributeError):
TabularMSA([]).shape = (3, 3)
with self.assertRaises(AttributeError):
del TabularMSA([]).shape
def test_index_getter_default_index(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
assert_index_equal(msa.index, pd.RangeIndex(3))
# immutable
with self.assertRaises(TypeError):
msa.index[1] = 2
# original state is maintained
assert_index_equal(msa.index, pd.RangeIndex(3))
def test_index_getter(self):
index = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')], minter=str).index
self.assertIsInstance(index, pd.Index)
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
# immutable
with self.assertRaises(TypeError):
index[1] = 'AA'
# original state is maintained
assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
def test_index_mixed_type(self):
msa = TabularMSA([DNA('AC'), DNA('CA'), DNA('AA')],
index=['abc', 'd', 42])
assert_index_equal(msa.index, pd.Index(['abc', 'd', 42]))
def test_index_setter_empty(self):
msa = TabularMSA([])
msa.index = iter([])
assert_index_equal(msa.index, pd.Index([]))
def test_index_setter_non_empty(self):
msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
msa.index = range(3)
assert_index_equal(msa.index, pd.RangeIndex(3))
msa.index = range(3, 6)
assert_index_equal(msa.index, pd.RangeIndex(3, 6))
def test_index_setter_length_mismatch(self):
msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
index = | pd.Index(['ACGT', 'TGCA']) | pandas.Index |
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import download_one_zip, unzip_nested_zip
CAMELS_NO_DATASET_ERROR_LOG = (
"We cannot read this dataset now. Please check if you choose the correct dataset:\n"
' ["AUS", "BR", "CA", "CL", "GB", "US", "YR"]'
)
def time_intersect_dynamic_data(obs: np.array, date: np.array, t_range: list):
"""
    Choose data from obs within t_range
Parameters
----------
obs
a np array
date
all periods for obs
t_range
the time range we need, such as ["1990-01-01","2000-01-01"]
Returns
-------
np.array
the chosen data
"""
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
[c, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
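# Hedged usage sketch (values are made up; exact array lengths follow the
# conventions of hydro_utils.t_range_days):
#
#   t_range = ["1995-01-01", "1995-01-06"]
#   date = hydro_utils.t_range_days(["1995-01-03", "1995-01-06"])
#   obs = np.arange(len(date), dtype=float)
#   aligned = time_intersect_dynamic_data(obs, date, t_range)  # NaN-padded to t_range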
class Camels(DataSourceBase):
def __init__(self, data_path, download=False, region: str = "US"):
"""
Initialization for CAMELS series dataset
Parameters
----------
data_path
where we put the dataset
download
if true, download
region
the default is CAMELS(-US), since it's the first CAMELS dataset.
            Others now include: AUS, BR, CA, CE, CL, GB, YR
"""
super().__init__(data_path)
region_lst = ["AUS", "BR", "CA", "CE", "CL", "GB", "US", "YR"]
assert region in region_lst
self.region = region
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.camels_sites = self.read_site_info()
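    # Hedged usage sketch (the local path is illustrative):
    #
    #   camels_us = Camels("path/to/camels_us", download=False, region="US")
    #   camels_us.get_name()            # "CAMELS_US"
    #   camels_us.camels_sites.head()   # gauge metadata table read at init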
def get_name(self):
return "CAMELS_" + self.region
def set_data_source_describe(self) -> collections.OrderedDict:
"""
Introduce the files in the dataset and list their location in the file system
Returns
-------
collections.OrderedDict
the description for a CAMELS dataset
"""
camels_db = self.data_source_dir
if self.region == "US":
# shp file of basins
camels_shp_file = os.path.join(
camels_db, "basin_set_full_res", "HCDN_nhru_final_671.shp"
)
# config of flow data
flow_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"usgs_streamflow",
)
# forcing
forcing_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"basin_mean_forcing",
)
forcing_types = ["daymet", "maurer", "nldas"]
# attr
attr_dir = os.path.join(
camels_db, "camels_attributes_v2.0", "camels_attributes_v2.0"
)
gauge_id_file = os.path.join(attr_dir, "camels_name.txt")
attr_key_lst = ["topo", "clim", "hydro", "vege", "soil", "geol"]
download_url_lst = [
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_set_full_res.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_FORCING_TYPE=forcing_types,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
CAMELS_DOWNLOAD_URL_LST=download_url_lst,
)
elif self.region == "AUS":
# id and name
gauge_id_file = os.path.join(
camels_db,
"01_id_name_metadata",
"01_id_name_metadata",
"id_name_metadata.csv",
)
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"02_location_boundary_area",
"02_location_boundary_area",
"shp",
"CAMELS_AUS_BasinOutlets_adopted.shp",
)
# config of flow data
flow_dir = os.path.join(camels_db, "03_streamflow", "03_streamflow")
# attr
attr_dir = os.path.join(camels_db, "04_attributes", "04_attributes")
# forcing
forcing_dir = os.path.join(
camels_db, "05_hydrometeorology", "05_hydrometeorology"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "BR":
# attr
attr_dir = os.path.join(
camels_db, "01_CAMELS_BR_attributes", "01_CAMELS_BR_attributes"
)
# we don't need the location attr file
attr_key_lst = [
"climate",
"geology",
"human_intervention",
"hydrology",
"land_cover",
"quality_check",
"soil",
"topography",
]
            # id and name; there are two types of stations in CAMELS_BR, and we only chose the 897-station version
gauge_id_file = os.path.join(attr_dir, "camels_br_topography.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"14_CAMELS_BR_catchment_boundaries",
"14_CAMELS_BR_catchment_boundaries",
"camels_br_catchments.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(
camels_db, "02_CAMELS_BR_streamflow_m3s", "02_CAMELS_BR_streamflow_m3s"
)
flow_dir_mm_selected_catchments = os.path.join(
camels_db,
"03_CAMELS_BR_streamflow_mm_selected_catchments",
"03_CAMELS_BR_streamflow_mm_selected_catchments",
)
flow_dir_simulated = os.path.join(
camels_db,
"04_CAMELS_BR_streamflow_simulated",
"04_CAMELS_BR_streamflow_simulated",
)
# forcing
forcing_dir_precipitation_chirps = os.path.join(
camels_db,
"05_CAMELS_BR_precipitation_chirps",
"05_CAMELS_BR_precipitation_chirps",
)
forcing_dir_precipitation_mswep = os.path.join(
camels_db,
"06_CAMELS_BR_precipitation_mswep",
"06_CAMELS_BR_precipitation_mswep",
)
forcing_dir_precipitation_cpc = os.path.join(
camels_db,
"07_CAMELS_BR_precipitation_cpc",
"07_CAMELS_BR_precipitation_cpc",
)
forcing_dir_evapotransp_gleam = os.path.join(
camels_db,
"08_CAMELS_BR_evapotransp_gleam",
"08_CAMELS_BR_evapotransp_gleam",
)
forcing_dir_evapotransp_mgb = os.path.join(
camels_db,
"09_CAMELS_BR_evapotransp_mgb",
"09_CAMELS_BR_evapotransp_mgb",
)
forcing_dir_potential_evapotransp_gleam = os.path.join(
camels_db,
"10_CAMELS_BR_potential_evapotransp_gleam",
"10_CAMELS_BR_potential_evapotransp_gleam",
)
forcing_dir_temperature_min_cpc = os.path.join(
camels_db,
"11_CAMELS_BR_temperature_min_cpc",
"11_CAMELS_BR_temperature_min_cpc",
)
forcing_dir_temperature_mean_cpc = os.path.join(
camels_db,
"12_CAMELS_BR_temperature_mean_cpc",
"12_CAMELS_BR_temperature_mean_cpc",
)
forcing_dir_temperature_max_cpc = os.path.join(
camels_db,
"13_CAMELS_BR_temperature_max_cpc",
"13_CAMELS_BR_temperature_max_cpc",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[
flow_dir_m3s,
flow_dir_mm_selected_catchments,
flow_dir_simulated,
],
CAMELS_FORCING_DIR=[
forcing_dir_precipitation_chirps,
forcing_dir_precipitation_mswep,
forcing_dir_precipitation_cpc,
forcing_dir_evapotransp_gleam,
forcing_dir_evapotransp_mgb,
forcing_dir_potential_evapotransp_gleam,
forcing_dir_temperature_min_cpc,
forcing_dir_temperature_mean_cpc,
forcing_dir_temperature_max_cpc,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "CL":
# attr
attr_dir = os.path.join(camels_db, "1_CAMELScl_attributes")
attr_file = os.path.join(attr_dir, "1_CAMELScl_attributes.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"CAMELScl_catchment_boundaries",
"catchments_camels_cl_v1.3.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(camels_db, "2_CAMELScl_streamflow_m3s")
flow_dir_mm = os.path.join(camels_db, "3_CAMELScl_streamflow_mm")
# forcing
forcing_dir_precip_cr2met = os.path.join(
camels_db, "4_CAMELScl_precip_cr2met"
)
forcing_dir_precip_chirps = os.path.join(
camels_db, "5_CAMELScl_precip_chirps"
)
forcing_dir_precip_mswep = os.path.join(
camels_db, "6_CAMELScl_precip_mswep"
)
forcing_dir_precip_tmpa = os.path.join(camels_db, "7_CAMELScl_precip_tmpa")
forcing_dir_tmin_cr2met = os.path.join(camels_db, "8_CAMELScl_tmin_cr2met")
forcing_dir_tmax_cr2met = os.path.join(camels_db, "9_CAMELScl_tmax_cr2met")
forcing_dir_tmean_cr2met = os.path.join(
camels_db, "10_CAMELScl_tmean_cr2met"
)
forcing_dir_pet_8d_modis = os.path.join(
camels_db, "11_CAMELScl_pet_8d_modis"
)
forcing_dir_pet_hargreaves = os.path.join(
camels_db,
"12_CAMELScl_pet_hargreaves",
)
forcing_dir_swe = os.path.join(camels_db, "13_CAMELScl_swe")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[flow_dir_m3s, flow_dir_mm],
CAMELS_FORCING_DIR=[
forcing_dir_precip_cr2met,
forcing_dir_precip_chirps,
forcing_dir_precip_mswep,
forcing_dir_precip_tmpa,
forcing_dir_tmin_cr2met,
forcing_dir_tmax_cr2met,
forcing_dir_tmean_cr2met,
forcing_dir_pet_8d_modis,
forcing_dir_pet_hargreaves,
forcing_dir_swe,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=attr_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "GB":
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"CAMELS_GB_catchment_boundaries",
"CAMELS_GB_catchment_boundaries.shp",
)
# flow and forcing data are in a same file
flow_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"timeseries",
)
forcing_dir = flow_dir
# attr
attr_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
)
gauge_id_file = os.path.join(
attr_dir, "CAMELS_GB_hydrometry_attributes.csv"
)
attr_key_lst = [
"climatic",
"humaninfluence",
"hydrogeology",
"hydrologic",
"hydrometry",
"landcover",
"soil",
"topographic",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "YR":
# shp files of basins
camels_shp_files_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "Normal_Camels_YR_basin_boundary"
)
# attr, flow and forcing data are all in the same dir. each basin has one dir.
flow_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "1_Normal_Camels_YR_basin_data"
)
forcing_dir = flow_dir
attr_dir = flow_dir
# no gauge id file for CAMELS_YR; natural_watersheds.txt showed unregulated basins in CAMELS_YR
gauge_id_file = os.path.join(
camels_db, "9_Normal_Camels_YR", "natural_watersheds.txt"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CA":
# shp file of basins
camels_shp_files_dir = os.path.join(camels_db, "CANOPEX_BOUNDARIES")
# config of flow data
flow_dir = os.path.join(
camels_db, "CANOPEX_NRCAN_ASCII", "CANOPEX_NRCAN_ASCII"
)
forcing_dir = flow_dir
# There is no attr data in CANOPEX, hence we use attr from HYSET -- https://osf.io/7fn4c/
attr_dir = camels_db
gauge_id_file = os.path.join(camels_db, "STATION_METADATA.xlsx")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CE":
# We use A_basins_total_upstrm
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"3_shapefiles",
"Basins_A.shp",
)
# config of flow data
flow_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "D_gauges", "2_timeseries", "daily"
)
forcing_dir = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"2_timeseries",
"daily",
)
attr_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "A_basins_total_upstrm", "1_attributes"
)
gauge_id_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"D_gauges",
"1_attributes",
"Gauge_attributes.csv",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def download_data_source(self) -> None:
"""
Download CAMELS dataset.
        Currently, only CAMELS-US can be downloaded automatically.
        For other regions, please download the data manually and put all files of a CAMELS dataset in one directory.
        For example, all files of CAMELS_AUS should be put in a "camels_aus" directory.
Returns
-------
None
"""
camels_config = self.data_source_description
if self.region == "US":
if not os.path.isdir(camels_config["CAMELS_DIR"]):
os.makedirs(camels_config["CAMELS_DIR"])
[
download_one_zip(attr_url, camels_config["CAMELS_DIR"])
for attr_url in camels_config["CAMELS_DOWNLOAD_URL_LST"]
if not os.path.isfile(
os.path.join(camels_config["CAMELS_DIR"], attr_url.split("/")[-1])
)
]
print("The CAMELS_US data have been downloaded!")
print(
"Please download it manually and put all files of a CAMELS dataset in the CAMELS_DIR directory."
)
print("We unzip all files now.")
if self.region == "CE":
            # We only use CE's daily files for now, and they come as a tar.gz archive
file = tarfile.open(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily.tar.gz")
)
# extracting file
file.extractall(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily")
)
file.close()
for f_name in os.listdir(camels_config["CAMELS_DIR"]):
if fnmatch.fnmatch(f_name, "*.zip"):
unzip_dir = os.path.join(camels_config["CAMELS_DIR"], f_name[0:-4])
file_name = os.path.join(camels_config["CAMELS_DIR"], f_name)
unzip_nested_zip(file_name, unzip_dir)
def read_site_info(self) -> pd.DataFrame:
"""
Read the basic information of gages in a CAMELS dataset
Returns
-------
pd.DataFrame
basic info of gages
"""
camels_file = self.data_source_description["CAMELS_GAUGE_FILE"]
if self.region == "US":
data = pd.read_csv(
camels_file, sep=";", dtype={"gauge_id": str, "huc_02": str}
)
elif self.region == "AUS":
data = pd.read_csv(camels_file, sep=",", dtype={"station_id": str})
elif self.region == "BR":
            data = pd.read_csv(camels_file, sep=r"\s+", dtype={"gauge_id": str})
elif self.region == "CL":
data = pd.read_csv(camels_file, sep="\t", index_col=0)
elif self.region == "GB":
data = pd.read_csv(camels_file, sep=",", dtype={"gauge_id": str})
elif self.region == "YR":
dirs_ = os.listdir(self.data_source_description["CAMELS_ATTR_DIR"])
data = | pd.DataFrame({"gauge_id": dirs_}) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
import xgboost as xgb
from sklearn.metrics import f1_score
import param
############################ define the evaluation metric ############################
def micro_avg_f1(preds, dtrain):
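    # custom evaluation metric in the (preds, dtrain) -> (name, value) form used by xgboost's feval argument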
y_true = dtrain.get_label()
return 'micro_avg_f1', f1_score(y_true, preds, average='micro')
############################ load features & labels ############################
df_tfidf_lr = pd.read_csv(param.data_path + '/output/feature/tfidf/lr_prob_12w.csv')
df_tfidf_bnb = pd.read_csv(param.data_path + '/output/feature/tfidf/bnb_prob_12w.csv')
df_tfidf_mnb = pd.read_csv(param.data_path + '/output/feature/tfidf/mnb_prob_12w.csv')
df_tfidf_svc = pd.read_csv(param.data_path + '/output/feature/tfidf/svc_prob_12w.csv')
df_amt = pd.read_csv(param.data_path + '/output/feature/amt/amt_12w.csv')
df_dbow_nn = pd.read_csv(param.data_path + '/output/feature/dbowd2v/nn_prob_12w.csv')
df_w2v = pd.read_csv(param.data_path + '/output/feature/w2v/w2v_12w.csv')
# df_dm = pd.read_csv(param.data_path + 'dmd2v_stack_20W.csv')
df_lb = pd.read_csv(param.data_path + '/output/corpus/all_data.csv', usecols=['id', 'penalty'], nrows=param.train_num)
df_lb['penalty'] = df_lb['penalty'] - 1  # shift labels into [0, 8)
############################ xgboost ############################
tr_num = param.cv_train_num
df_sub = | pd.DataFrame() | pandas.DataFrame |
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 2 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.neighbors import NearestNeighbors
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed -- during nn finetuning
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
import sys
'''
CustomDataset object takes care of supplying an observation (image, labels).
It also performs image preprocessing, such as normalization by color channel.
In case of training, it also performs random transformations, such as horizontal flips, resized crops, rotations, and color jitter.
'''
class CustomDataset(Dataset):
def __init__(self, data, tr = True):
self.data = data
self.paths = self.data['img_path'].values.astype('str')
self.data_len = self.data.shape[0]
self.labels = self.data[q_list].values.astype('int32')
self.control_metrics = self.data[control_list].values.astype('float32')
# transforms
if tr:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.1,contrast=0.1,saturation=0.1,hue=0.1)], p=0.75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
def __getitem__(self, index):
img_path = PATH + '/'+ self.paths[index]
img = Image.open(img_path)
img_tensor = self.transforms(img)
label = self.labels[index]
control_metric = self.control_metrics[index]
return (img_tensor, label, control_metric)
def __len__(self):
return self.data_len
#get pretrained resnet50 model
def get_pretrained():
model = models.resnet50(pretrained=True)
return model
# replace last layer
def prepare_for_finetuning(model):
for param in model.parameters():
param.requires_grad = False
param.requires_grad = True
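    # note: this only re-enables the last parameter yielded by the loop; the original fc layer is replaced below anyway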
#replacing last layer with new fully connected
model.fc = torch.nn.Linear(model.fc.in_features,n_outs)
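    # the newly created nn.Linear head has requires_grad=True by default, so it will be trained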
return
# create an object that uses CustomDataset object from above to load multiple observations in parallel
def create_dataloader(data,rand=True):
    if rand: # shuffle observations
dataset = CustomDataset(data, tr=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=10, drop_last=False)
else: # load in fixed order of data
dataset = CustomDataset(data, tr=False)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SequentialSampler(dataset), num_workers=10, drop_last=False)
return loader
#finetune and save neural net model
def finetune_and_save(loader_train, loader_test):
# loading pretrained model and preparing it for finetuning
model = get_pretrained()
prepare_for_finetuning(model)
if CUDA:
model.cuda()
# optimize only last six layers
layers = list(model.children())
params = list(layers[len(layers)-1].parameters())+list(layers[len(layers)-2].parameters())+list(layers[len(layers)-3].parameters())+list(layers[len(layers)-4].parameters())+list(layers[len(layers)-5].parameters())+list(layers[len(layers)-6].parameters())
optimizer = optim.Adamax(params=params, lr=0.001)
# print("starting finetuning")
hist = {}
hist['d_labs'] = q_list
hist['train_loss'] = []
hist['val_loss'] = []
hist['train_loss_d'] = []
hist['val_loss_d'] = []
hist['train_auc_d'] = []
hist['val_auc_d'] = []
acc_best = 0.0
#train
for epoch in range(N_EPOCHS):
train_loss, train_loss_d, train_auc_d = run_epoch(model, loss_f, optimizer, loader_train, update_model = True) # training
eval_loss, eval_loss_d, eval_auc_d = run_epoch(model, loss_f, optimizer, loader_test, update_model = False) # evaluation
hist['train_loss'].append(train_loss)
hist['val_loss'].append(eval_loss)
hist['train_loss_d'].append(train_loss_d)
hist['val_loss_d'].append(eval_loss_d)
hist['train_auc_d'].append(train_auc_d)
hist['val_auc_d'].append(eval_auc_d)
with open(RESULTS+'/eval_record.json', 'w') as fjson:
json.dump(hist, fjson)
# saving model
torch.save(model, RESULTS+"/finetuned_model")
return
# function that performa training (or evaluation) over an epoch (full pass through a data set)
def run_epoch(model, loss_f, optimizer, loader, update_model = False):
if update_model:
model.train()
else:
model.eval()
loss_hist = []
loss_hist_detailed = []
auc_hist_detailed = []
for batch_i, var in tqdm(enumerate(loader)):
loss, loss_detailed, auc_detailed = loss_f(model, var)
if update_model:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist.append(loss.data.item())
loss_hist_detailed.append(loss_detailed)
auc_hist_detailed.append(auc_detailed)
loss_detailed = pd.DataFrame(loss_hist_detailed)
loss_detailed.columns = q_list
auc_detailed = pd.DataFrame(auc_hist_detailed)
auc_detailed.columns = q_list
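    # return epoch-mean total loss plus per-variable mean losses and AUCs (ordered as in q_list)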
return np.mean(loss_hist).item(), loss_detailed.mean(0).values.tolist(), auc_detailed.mean(0).values.tolist()
# function to compute loss from a batch of data
def loss_f(model, var):
data, target, _ = var
data, target = Variable(data), Variable(target)
if CUDA:
data, target = data.cuda(), target.cuda()
    output = model(data) # forward pass: scores for every level of each predicted variable
loss = 0
loss_detailed = []
auc_detailed = []
for i in range(len(q_d_list)):
w = torch.FloatTensor(class_weights[i])
if CUDA:
w = w.cuda()
# output contains scores for each level of every predicted variable
# q_d_list[i] is number of levels to variable i
# q_d_list_cumsum[i] is a cumulative sum over number of levels for variable i and all variables before it
# all variables ordered as in q_list
# (q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i] then gives exact coordinates of the scores for variable i
# among all scores in the output
temp = F.cross_entropy(output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]], target[:,i].long(), weight=w)
loss_detailed.append(temp.data.item())
loss += temp
# now we calculate AUC
y_true = target[:,i].detach().cpu().numpy()
y_score = output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]].detach().cpu().numpy()[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc_detailed.append(metrics.auc(fpr, tpr))
return loss, loss_detailed, auc_detailed
# building class balancing weights as in
# https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
def calculate_class_weights(X):
class_weights = []
for i in q_list:
class_weights.append(
            class_weight.compute_class_weight('balanced', classes=np.unique(X[i].values), y=X[i].values))
return class_weights
# extract data from a dataloader as a set of image features X and set of labels y, corresponding to those image features
# can also blackout specified areas of the loaded images before extracting the image features -- this is used in our experiments
# when the data loader is deterministic, it loads the same data in the same order on every pass
def extract_data(loader, modelred, blackout=None):
X = []
y = []
z = []
for batch_i, var in tqdm(enumerate(loader)):
data, target, control_metrics = var
if blackout is not None:
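            # zero out the (y0:y1, x0:x1) pixel window across all colour channels before extracting features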
data[:, :, blackout[0]:blackout[1], blackout[2]:blackout[3]] = 0.0
data, target, control_metrics = Variable(data), Variable(target), Variable(control_metrics)
if CUDA:
data, target, control_metrics = data.cuda(), target.cuda(), control_metrics.cuda()
data_out = modelred(data)
X.append(data_out.detach().cpu().numpy())
y.append(target.detach().cpu().numpy())
z.append(control_metrics.detach().cpu().numpy())
X = np.vstack(X).squeeze()
y = np.vstack(y)
z = np.vstack(z)
return X, y, z
# function to evaluate a set of trained classifier using AUC metric
# 'models' contains classifiers in the order of the binary variables to be predicted -- which are contained in Y
# X is a matrix of covariates
def analytics_lin(models, X, Y):
acc = {}
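    # note: only the AUC dict is returned; acc and the argmax predictions below are left unused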
auc = {}
for i in tqdm(range(Y.shape[1])):
y_true = Y[:,i]
mod = models[i]
y_pred = np.argmax(mod.predict_proba(X),axis=1)
# auc
y_prob = mod.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_prob)
auc[q_list[i]] = metrics.auc(fpr, tpr)
return auc
# sequentially yield coordinates for blackout in an image
def sliding_window(image_shape, stepSize, windowSize):
# slide a window across the image
for yc in range(0, image_shape[0], stepSize):
for xc in range(0, image_shape[1], stepSize):
# yield the current window
yield (yc, yc + windowSize[1], xc, xc + windowSize[0])
# calculating decrease in AUC when blocking a particular area of an image -- over 8x8 grid placed over the image
def img_area_importance(modelred, models, svd, dat, auc_true):
patch_importance = {}
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
loader = create_dataloader(dat,rand=False)
# X_modified_raw contains image features extracted from images with a portion of the image blocked
X_modified_raw, Y, _ = extract_data(loader, modelred, (y0, y1, x0, x1))
# image features reduced to 500 via svd
X_modified = svd.transform(X_modified_raw)
auc = analytics_lin(models, X_modified, Y)
        patch_importance_q = {} # decrease in auc after blocking this patch (auc_true - auc_blocked), per variable
for q in q_list:
patch_importance_q[q] = auc_true[q] - auc[q]
patch_importance[(y0, y1, x0, x1)] = patch_importance_q # decrease in auc across all variables -- for the given blocked portion of the image
return patch_importance
# START OF THE RUN
torch.set_num_threads(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
N_EPOCHS = 20
FINETUNE = True
CUDA = torch.cuda.is_available()
batch_size=10
PATH = './data'
# analysis on face vs. bodies
CASHIER = sys.argv[1]#'ALL' #'4' # 3 #
control_list = ['02.05','03.05','04.05','05.05','06.05','07.05','08.05','09.05','10.05', '11.05', '12.05', '13.05',
'time_1', 'time_2', 'time_3', 'time_4']
if CASHIER == 'ALL':
data = | pd.read_csv(PATH+'/data_face.csv') | pandas.read_csv |
import pandas as pd
import pickle
def main():
gene_info = pd.read_csv('./../list/GRCh38_ensembl96_geneset.csv', sep='\t')
gene_info_dict = {}
for n, r in gene_info.iterrows():
gene_info_dict[r['transcript_stable_id']] = [
r['display_label'], r['gene_stable_id']
]
score = pd.read_csv('./scores/gMVP_raw_score_Feb24.tsv', sep='\t')
info = pd.read_csv('./scores/all_possible_missense_info.csv',
sep='\t',
dtype={'chrom': str})
#score = pd.read_csv('./scores/sample_score.csv', sep='\t')
#info = pd.read_csv('./scores/sample_info.csv',
# sep='\t',
# dtype={'chrom': str})
info = info[info['consequence'] == 'missense_variant']
def get_var(x):
return '_'.join([
x['transcript_id'],
str(x['protein_position']), x['ref_aa'], x['alt_aa']
])
info['var'] = info.apply(get_var, axis=1)
df = pd.merge(info, score, on='var', how='inner')
#normalized by gene
df2 = []
_cnt = 0
def _get_rank(x):
nonlocal _cnt
_cnt += 1
return _cnt
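    # within each transcript, rank variants by gMVP score (ascending) and divide the rank by the
    # number of variants, giving a percentile-style normalised score in (0, 1] per gene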
for n, g in df.groupby(by='transcript_id'):
g2 = g.sort_values(by='gMVP', axis=0, ascending=True)
_cnt = 0
g2['gMVP_normalized'] = g2['gMVP'].apply(_get_rank) / g2.shape[0]
genename, gene_id = gene_info_dict.get(n, ['', ''])
g2['gene_symbol'] = genename
g2['gene_id'] = gene_id
df2.append(g2)
df = | pd.concat(df2, axis=0) | pandas.concat |
import pandas as pd
from texthero import preprocessing
from . import PandasTestCase
import unittest
import string
class TestPreprocessing(PandasTestCase):
"""
Remove digits.
"""
def test_remove_digits_only_block(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h1n1")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_block(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits hn")
self.assertEqual(preprocessing.remove_digits(s, only_blocks=False),
s_true)
def test_remove_digits_brackets(self):
s = pd.Series("Digits in bracket (123 $) needs to be cleaned out")
s_true = pd.Series("Digits in bracket ( $) needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_start(self):
s = pd.Series("123 starting digits needs to be cleaned out")
s_true = pd.Series(" starting digits needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_end(self):
s = pd.Series("end digits needs to be cleaned out 123")
s_true = pd.Series("end digits needs to be cleaned out ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_phone(self):
s = pd.Series("+41 1234 5678")
s_true = pd.Series("+ ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_punctuation(self):
s = pd.Series(string.punctuation)
s_true = pd.Series(string.punctuation)
self.assertEqual(preprocessing.remove_digits(s), s_true)
"""
Remove punctuation.
"""
def test_remove_punctation(self):
s = pd.Series("Remove all! punctuation!! ()")
s_true = pd.Series(
"Remove all punctuation ") # TODO maybe just remove space?
self.assertEqual(preprocessing.remove_punctuation(s), s_true)
"""
Remove diacritics.
"""
def test_remove_diactitics(self):
s = pd.Series("hèllo")
s_true = pd.Series("hello")
self.assertEqual(preprocessing.remove_diacritics(s), s_true)
"""
Remove whitespace.
"""
def test_remove_whitespace(self):
s = pd.Series("hello world hello world ")
s_true = pd.Series("hello world hello world")
self.assertEqual(preprocessing.remove_whitespace(s), s_true)
"""
Text pipeline.
"""
def test_pipeline_stopwords(self):
s = | pd.Series("E-I-E-I-O\nAnd on") | pandas.Series |
import glob
import os
import sys
# these imports and path insertions need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
    # Load in vaccination data by state and date, which should have the same dates as the
    # NNDSS/linelist data; we use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
        surveys = pd.concat([surveys, pd.read_csv(file, parse_dates=["date"])])
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
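    # date x state matrix of the proportion of respondents who report always micro-distancing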
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
        mask_wearing = pd.concat([mask_wearing, pd.read_csv(file, parse_dates=["date"])])
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
    # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
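            # for every state except WA, pin the baseline of the last predictor (workplaces, per the ordering noted above) to zero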
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
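                    # p_force decays linearly from ~1 to 0 over the horizon, so the recent trend dominates
                    # early on and reversion to the baseline dominates later in the forecast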
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast mircodistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
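        # gap (in days) between the last micro-distancing survey and the last mobility observation; the forecast loop below also covers this gap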
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
# Get a baseline value of microdistancing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
                # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                        # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
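        # method-of-moments Beta(a, b) parameters so the sampled effects have mean mean_delta and variance var_vax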
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
    elif i == len(predictors) + 1: # this plots the mask wearing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
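# stitch the forecasts onto the observed series: mobility onto the Google data, md and mask forecasts onto the survey proportions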
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is just a single row of ones whose columns are all the dates before the vaccination data begins
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is just a single row of ones whose columns are all the dates before the Omicron start date
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
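# tile the posterior micro-distancing parameter so it broadcasts against the proportion series for every date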
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
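# repeat the factor calculation and plots for mask wearing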
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
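# TP is assembled separately for the Delta and Omicron strains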
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# count the Omicron-era days in each jurisdiction's fitting window (the else 0 handles jurisdictions with no Omicron days)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
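# build each jurisdiction's TP sample-by-sample from mobility, micro-distancing, masks, vaccination and VoC effects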
for (kk, state) in enumerate(states):
# sort df_R by date so that rows are dates. rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# take right size of md to be N by N
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
masks_prop_sim = df_masks[state].values  # mask-wearing proportions for the current state
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which ill be a (T,mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which ill be a (T,mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
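# for each posterior sample, swap in that sample's noisy mobility realisation and accumulate the macro log-odds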
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (loggodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
# concatenate to pre-existing logodds martrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
# create an matrix of mob_samples realisations which is an indicator of the voc (delta right now)
# which will be 1 up until the voc_start_date and then it will be values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
# if before omicron introduced in a jurisdiction, we consider what period we're at:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
# now we increase TP by 15% based on school reopening (this code can probably be reused
# but inferring it would be pretty difficult
# due to lockdowns and various interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
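# 6-month TP plot: back-cast in C0, forecast in C1, with 50% and 90% credible bands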
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_6_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
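# same TP plot again, but over a twelve-month window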
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=12 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last twelve months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
print("============")
print("Saving results")
print("============")
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_12_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
# save values for the functional omicron related proportions for each state
prop_omicron_vars = ("r", "tau", "m0", "m1")
for (kk, state) in enumerate(states):
# sort df_R by date so that rows are dates. rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state].copy()
for v in prop_omicron_vars:
# take right size of the values to be N by N
y = samples[v + "[" + str(kk + 1) + "]"].values
pd.DataFrame(y[:mob_samples]).to_csv(
results_dir
+ v
+ "_"
+ state
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# now we save the sampled TP paths
# convert the appropriate sampled susceptible depletion factors to a csv and save them for simulation
# NOTE: this will not save an updated median, mean etc for the R_I's. We don't use it so it's not
# really important but it should be noted for later if we are comparing things. The step function
# R_I -> R_I_omicron, is noticeable and shouldn't be overlooked.
df_Rhats = df_Rhats[
["state", "date", "type", "median", "bottom", "lower", "upper", "top"]
+ [i for i in range(mob_samples)]
]
# # save the file as a csv (easier to handle in Julia for now)
df_Rhats.to_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
return None
def calculate_Reff_local(
Reff,
R_I,
R_I_omicron,
voc_effect,
prop_import,
omicron_start_day,
):
"""
Apply the same mixture model idea as per the TP model to get
R_eff^L = (R_eff - rho * RI)/(1 - rho)
and use this to weight the TP historically.
"""
# calculate this all in one step. Note that we set the Reff to 0 if
# the prop_import = 1, as in that instance the relationship breaks down due to division by 0.
Reff_local = np.zeros(shape=Reff.shape[0])
for n in range(len(Reff_local)):
# adjust the Reff based on the time period of interest
if n < omicron_start_day:
R_I_tmp = R_I
else:
R_I_tmp = R_I_omicron * voc_effect
if prop_import[n] < 1:
Reff_local[n] = (Reff[n] - prop_import[n] * R_I_tmp) / (1 - prop_import[n])
else:
Reff_local[n] = 0
# Reff_local = [
# (Reff[t] - prop_import[t] * R_I) / (1 - prop_import[t])
# if prop_import[t] < 1 else -1 for t in range(Reff.shape[0])
# ]
return Reff_local
def adjust_TP(data_date):
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
n_days_nowcast_TP_adjustment,
mob_samples,
)
print("============")
print("Adjusting TP forecasts using data from", data_date)
print("============")
data_date = pd.to_datetime(data_date)
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
sim_start_date = pd.to_datetime(sim_start_date)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start=third_start_date, end=third_end_date).values,
"NT": pd.date_range(start="2021-12-01", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-11-25", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-08-01", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder, TimeSeriesDataRecorder
from zvt.recorders.emquantapi.common import mainCallback
from zvt.recorders.joinquant.common import to_entity_id
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import now_pd_timestamp, to_time_str, TIME_FORMAT_DAY
from zvt.domain import BlockStock, Block, Block1dKdata, BlockMoneyFlow
from zvt.settings import ZVT_HOME
try:
from jqdatasdk import finance, query
from EmQuantAPI import *
except:
pass
class EmChinaBlockRecorder(Recorder):
"""
Choice sector/block data
"""
provider = 'emquantapi'
data_schema = Block
hs_category_map = pd.read_excel(f'{ZVT_HOME}/data/沪深股票GICS行业分类.xlsx')
hk_category_map = pd.read_excel(f'{ZVT_HOME}/data/港股GICS行业分类.xlsx')
us_category_map = pd.read_excel(f'{ZVT_HOME}/data/美股GICS行业分类.xlsx')
gics_one = hs_category_map[['一级板块代码','一级板块名称']].append(hk_category_map[['一级板块代码','一级板块名称']]).append(us_category_map[['一级板块代码','一级板块名称']]).dropna(subset=['一级板块名称'])
gics_two = hs_category_map[['二级板块代码','二级板块名称']].append(hk_category_map[['二级板块代码','二级板块名称']]).append(us_category_map[['二级板块代码','二级板块名称']]).dropna(subset=['二级板块名称'])
gics_three = hs_category_map[['三级板块代码','三级板块名称']].append(hk_category_map[['三级板块代码','三级板块名称']]).append(us_category_map[['三级板块代码','三级板块名称']]).dropna(subset=['三级板块名称'])
gics_four = hs_category_map[['四级板块代码','四级板块名称']].append(hk_category_map[['四级板块代码','四级板块名称']]).append(us_category_map[['四级板块代码','四级板块名称']]).dropna(subset=['四级板块名称'])
gics_one['一级板块代码'] = gics_one['一级板块代码'].apply(lambda x:str(int(x)).zfill(6))
gics_two['二级板块代码'] = gics_two['二级板块代码'].apply(lambda x:str(int(x)).zfill(9))
gics_three['三级板块代码'] = gics_three['三级板块代码'].apply(lambda x:str(int(x)).zfill(12))
gics_four['四级板块代码'] = gics_four['四级板块代码'].apply(lambda x:str(int(x)).zfill(15))
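# build category_map: one record per GICS level 1-4 sector, pairing the level's code column with its name column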
category_map=[]
category_map.extend(gics_one.to_dict(orient='records'))
category_map.extend(gics_two.to_dict(orient='records'))
category_map.extend(gics_three.to_dict(orient='records'))
category_map.extend(gics_four.to_dict(orient='records'))
def __init__(self, batch_size=10, force_update=True, sleeping_time=10) -> None:
super().__init__(batch_size, force_update, sleeping_time)
# call the login function (no username/password needed once activated)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def run(self):
# get stock blocks from sina
for category_map_dict in self.category_map:
# df = get_industries(name=category, date=None)
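# each mapping dict holds exactly two items: (level code column, sector code) and (level name column, sector name)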
category, name_ch = category_map_dict.items()
df = pd.DataFrame(index=[0])
if '一级板块代码' in category:
df['code'] = category[1]
if category[1].startswith('003'):
df['exchange'] = 'cn'
elif category[1].startswith('204'):
df['exchange'] = 'us'
elif category[1].startswith('402'):
df['exchange'] = 'hk'
df['block_type'] = 'gicsl1'
elif '二级板块代码' in category:
df['code'] = category[1]
if category[1].startswith('003'):
df['exchange'] = 'cn'
elif category[1].startswith('204'):
df['exchange'] = 'us'
elif category[1].startswith('402'):
df['exchange'] = 'hk'
df['block_type'] = 'gicsl2'
elif '三级板块代码' in category:
df['code'] = category[1]
if category[1].startswith('003'):
df['exchange'] = 'cn'
elif category[1].startswith('204'):
df['exchange'] = 'us'
elif category[1].startswith('402'):
df['exchange'] = 'hk'
df['block_type'] = 'gicsl3'
elif '四级板块代码' in category:
df['code'] = category[1]
if category[1].startswith('003'):
df['exchange'] = 'cn'
elif category[1].startswith('204'):
df['exchange'] = 'us'
elif category[1].startswith('402'):
df['exchange'] = 'hk'
df['block_type'] = 'gicsl4'
df['timestamp'] = now_pd_timestamp()
df['name'] = name_ch[1]
df['entity_type'] = 'block'
df['category'] = "industry"
df['id'] = df['entity_id'] = df.apply(lambda x: "block_" + x.exchange + "_" + x.code, axis=1)
df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,
force_update=True)
self.logger.info(f"完成choice数据行业数据保存:{category[1],name_ch[1]}")
class EmChinaBlockStockRecorder(TimeSeriesDataRecorder):
entity_provider = 'joinquant'
entity_schema = Block
provider = 'emquantapi'
data_schema = BlockStock
def __init__(self, entity_type='block', exchanges=None, entity_ids=None, codes=None, batch_size=10,
force_update=True, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
# call the login function (no username/password needed once activated)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def record(self, entity, start, end, size, timestamps):
if not entity.block_type:
return
if 'gics' not in entity.block_type:
return None
# industry_stocks = get_industry_stocks(entity.code,date=now_pd_timestamp())
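# query today's constituents of the block: .Codes holds the stock codes, .Data holds codes and names together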
industry_stocks = c.sector(entity.code, to_time_str(now_pd_timestamp()))
if len(industry_stocks.Data) == 0:
return None
codes = industry_stocks.Codes
names = [i for i in industry_stocks.Data if i not in codes]
df = pd.DataFrame({"stock": codes,"stock_name":names})
df["stock_id"] = df.stock.apply(lambda x: to_entity_id(x, "stock").lower())
df["stock_code"] = df.stock_id.str.split("_", expand=True)[2]
df["code"] = entity.code
df["exchange"] = entity.exchange
df["name"] = entity.name
df["timestamp"] = now_pd_timestamp()
df["entity_id"] = entity.id
df["block_type"] = entity.block_type
df["entity_type"] = "block"
df["id"] = df.apply(lambda x: x.entity_id + "_" + x.stock_id, axis=1)
if df.empty:
return None
df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,
force_update=True)
self.logger.info('finish recording BlockStock:{},{}'.format(entity.category, entity.name))
class JqChinaBlockKdataRecorder(TimeSeriesDataRecorder):
entity_provider = 'joinquant'
entity_schema = Block
provider = 'emquantapi'
data_schema = Block1dKdata
def __init__(self, entity_type='block', exchanges=None, entity_ids=None, codes=None, batch_size=10,
force_update=True, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
# 调用登录函数(激活后使用,不需要用户名密码)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
# def record(self, entity, start, end, size, timestamps):
# # if entity.exchange == "swl1":
# # return None
# if not end:
# if (now_pd_timestamp() - start).days > 365:
# from datetime import timedelta
# end = to_time_str(start + timedelta(days=365))
# else:
# end = to_time_str(now_pd_timestamp())
# start = to_time_str(start)
# if entity.code in ['801780', '801180', '801150', '801160', '801230', '801890',
# '801720', '801710', '801110', '801880', '801120', '801080',
# '801750', '801170', '801140', '801770', '801760', '801010',
# '801200', '801030', '801050', '801790', '801730', '801210',
# '801740', '801020', '801130', '801040']:
# return None
# entityid = entity.id.replace('cn', 'swl3')
# df = get_data(data_schema=Block1dKdata, entity_id=entityid,provider='joinquant')
#
# # df = c.csd(f"{entity.code}.SWI", "OPEN,CLOSE,HIGH,LOW,VOLUME,AMOUNT", start, end,
# # "period=1,adjustflag=1,curtype=1,order=1,ispandas=1")
# if type(df) != pd.DataFrame:
# return None
# if pd_is_not_null(df):
# df['entity_id'] = entity.id
# df['provider'] = 'emquantapi'
#
# def generate_kdata_id(se):
# return "{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY))
#
# df['id'] = df[['entity_id', 'timestamp']].apply(generate_kdata_id, axis=1)
#
# df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
#
# return None
def record(self, entity, start, end, size, timestamps):
# if entity.exchange == "swl1":
# return None
if not end:
if (now_pd_timestamp() - start).days > 365:
from datetime import timedelta
end = to_time_str(start + timedelta(days=365))
else:
end = to_time_str(now_pd_timestamp())
start = to_time_str(start)
df = c.csd(f"{entity.code}.SWI", "OPEN,CLOSE,HIGH,LOW,VOLUME,AMOUNT", start, end,
"period=1,adjustflag=1,curtype=1,order=1,ispandas=1")
if type(df) != pd.DataFrame:
return None
df.rename(columns={
'DATES': 'timestamp',
'OPEN': 'open',
'CLOSE': 'close',
'HIGH': 'high',
'LOW': 'low',
'VOLUME': 'volume',
'AMOUNT': 'turnover',
}, inplace=True)
if pd_is_not_null(df):
df['name'] = entity.name
df['entity_id'] = entity.id
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['provider'] = 'joinquant'
df['level'] = '1d'
df['code'] = entity.code
def generate_kdata_id(se):
return "{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY))
df['id'] = df[['entity_id', 'timestamp']].apply(generate_kdata_id, axis=1)
df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
return None
class JqChinaBlockMoneyFlowRecorder(TimeSeriesDataRecorder):
# entity_provider = 'joinquant'
# entity_schema = Block
provider = 'emquantapi'
data_schema = BlockMoneyFlow
def __init__(self, entity_type='block', exchanges=None, entity_ids=None, codes=None, batch_size=10,
force_update=True, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
# call the login function (no username/password needed once activated)
loginResult = c.start("ForceLogin=1", '', mainCallback)
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def record(self, entity, start, end, size, timestamps):
if "swl1" not in entity.id:
return None
start = to_time_str(start)
df = finance.run_query(
query(finance.SW1_DAILY_PRICE).filter(
finance.SW1_DAILY_PRICE.code == entity.code).filter(
finance.SW1_DAILY_PRICE.date >= start).limit(size))
if pd_is_not_null(df):
df['name'] = entity.name
df.rename(columns={'money': 'turnover', 'date': 'timestamp'}, inplace=True)
df['entity_id'] = entity.id
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
#!/usr/bin/env python3
'''
A module for reading Next Gen Stats data
'''
import pandas as pd
import csv
class NextGenStatsReader(object):
'''
A class for reading and manipulating Next Gen Stats data in a DataFrame
'''
def load_ngs_data_into_dataframe(self, file_path):
'''
Load a CSV file of Next Gen Stats data into a DataFrame
@param {string} file_path, e.g. '/data/train.csv'
@return {None} - the loaded data is stored on self.ngs_df
'''
self.ngs_df = pd.read_csv(file_path, low_memory=False)
def load_ngs_data_into_dict_reader(self, file_path):
'''
Load a CSV file of Next Gen Stats data and return its rows as dicts
@param {string} file_path, e.g. '/data/train.csv'
@return {list<dict>} rows read via csv.DictReader
'''
with open(file_path, newline='') as csvfile:
return list(csv.DictReader(csvfile))  # materialise the rows before the file is closed
def get_positionals_dataframe_for_play(self, play_id):
'''
Get each player's positions for a play as a dataframe
@param {int} play_id
@return {DataFrame}
'''
return self.ngs_df.loc[self.ngs_df['PlayId'] == int(play_id)]
def get_positionals_dict_for_play(self, play_id):
'''
Get each player's positions for a play as a dictionary
@param {int} play_id
@return {dict<dict>} a dict of each player's positional data as a dict, keyed by NflId
'''
play_positionals_df = self.get_positionals_dataframe_for_play(play_id)
play_positionals_dict = dict()
for index, dict_row in play_positionals_df.iterrows():
if dict_row['NflId'] not in play_positionals_dict:
play_positionals_dict[dict_row['NflId']] = dict_row
return play_positionals_dict
def __init__(self):
'''
Constructor
'''
self.ngs_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import pandas as pd
import seaborn as sns
import pylab as plt
__package__ = "Byron times plot"
__author__ = "<NAME> (<EMAIL>)"
if __name__ == '__main__':
filename = 'byron_times.dat'
data = | pd.read_csv(filename, sep=',', header=0) | pandas.read_csv |
import pandas as pd
import numpy as np
#import psycopg2
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import Constants
import sys
from pathlib import Path
output_folder = Path(sys.argv[1])
output_folder.mkdir(parents = True, exist_ok = True)
#conn = psycopg2.connect('dbname=mimic user=haoran host=mimic password=password')
mimiciii_csv_path = "/nobackup/users/nhulkund/6.864/files/mimiciii/1.4/"
# pats = pd.read_sql_query('''
# select subject_id, gender, dob, dod from mimiciii.patients
# ''', conn)
pats_df = pd.read_csv(mimiciii_csv_path + "PATIENTS.csv")
pats_df = pats_df.rename(columns = {name: name.lower() for name in pats_df.columns})
pats = pats_df[["subject_id", "gender", "dob", "dod"]]
n_splits = 12
pats = pats.sample(frac = 1, random_state = 42).reset_index(drop = True)
kf = KFold(n_splits = n_splits, shuffle = True, random_state = 42)
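# assign every patient to one of the 12 folds; notes are later split at the patient level using these fold labels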
for c,i in enumerate(kf.split(pats, groups = pats.gender)):
pats.loc[i[1], 'fold'] = str(c)
# adm = pd.read_sql_query('''
# select subject_id, hadm_id, insurance, language,
# religion, ethnicity,
# admittime, deathtime, dischtime,
# HOSPITAL_EXPIRE_FLAG, DISCHARGE_LOCATION,
# diagnosis as adm_diag
# from mimiciii.admissions
# ''', conn)
admissions = pd.read_csv(mimiciii_csv_path+"ADMISSIONS.csv")
admissions = admissions.rename(columns = {name: name.lower() for name in admissions.columns})
adm = admissions.rename(columns = {'diagnosis': 'adm_diag'})[['subject_id', 'hadm_id', 'insurance', 'language', 'religion', 'ethnicity', 'admittime', 'deathtime', 'dischtime', 'hospital_expire_flag', 'discharge_location', 'adm_diag']]
df = pd.merge(pats, adm, on='subject_id', how = 'inner')
def merge_death(row):
if not(pd.isnull(row.deathtime)):
return row.deathtime
else:
return row.dod
df['dod_merged'] = df.apply(merge_death, axis = 1)
# notes = pd.read_sql_query('''
# select category, chartdate, charttime, hadm_id, row_id as note_id, text from mimiciii.noteevents
# where iserror is null
# ''', conn)
notes_df = pd.read_csv(mimiciii_csv_path + "NOTEEVENTS.csv")
notes = notes_df.rename(columns = {name: name.lower() for name in notes_df.columns})
notes = notes.rename(columns={'row_id': 'note_id'})[notes.iserror.isna()][['category', 'chartdate', 'charttime', 'hadm_id', 'note_id', 'text']]
# drop all outpatients. They only have a subject_id, so can't link back to insurance or other fields
notes = notes[~(pd.isnull(notes['hadm_id']))]
df = pd.merge(left = notes, right = df, on='hadm_id', how = 'left')
df.ethnicity.fillna(value = 'UNKNOWN/NOT SPECIFIED', inplace = True)
others_set = set()
def cleanField(string):
mappings = {'HISPANIC OR LATINO': 'HISPANIC/LATINO',
'BLACK/AFRICAN AMERICAN': 'BLACK',
'UNABLE TO OBTAIN':'UNKNOWN/NOT SPECIFIED',
'PATIENT DECLINED TO ANSWER': 'UNKNOWN/NOT SPECIFIED'}
bases = ['WHITE', 'UNKNOWN/NOT SPECIFIED', 'BLACK', 'HISPANIC/LATINO',
'OTHER', 'ASIAN']
if string in bases:
return string
elif string in mappings:
return mappings[string]
else:
for i in bases:
if i in string:
return i
others_set.add(string)
return 'OTHER'
df['ethnicity_to_use'] = df['ethnicity'].apply(cleanField)
df = df[df.chartdate >= df.dob]
# ages = []
# for i in range(df.shape[0]):
# ages.append((df.chartdate.iloc[i] - df.dob.iloc[i]).days/365.24)
# df['age'] = ages
chartdate = pd.to_datetime(df.chartdate)#.dt.date
dob = pd.to_datetime(df.dob)#.dt.date
df['age'] = (chartdate.apply(lambda s: s.timestamp()) - dob.apply(lambda s:s.timestamp()))/ 60./60/24/365
df.loc[(df.category == 'Discharge summary') |
(df.category == 'Echo') |
(df.category == 'ECG'), 'fold'] = 'NA'
# icds = (pd.read_sql_query('select * from mimiciii.diagnoses_icd', conn)
# .groupby('hadm_id')
# .agg({'icd9_code': lambda x: list(x.values)})
# .reset_index())
diagnoses_icd = pd.read_csv(mimiciii_csv_path + "DIAGNOSES_ICD.csv")
diagnoses_icd = diagnoses_icd.rename(columns = {name: name.lower() for name in diagnoses_icd.columns})
icds = diagnoses_icd.groupby('hadm_id').agg({'icd9_code': lambda x: list(x.values)}).reset_index()
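# attach each admission's list of ICD-9 diagnosis codes to its note rows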
df = pd.merge(left = df, right = icds, on = 'hadm_id')
def map_lang(x):
if x == 'ENGL':
return 'English'
if pd.isnull(x):
return 'Missing'
return 'Other'
df['language_to_use'] = df['language'].apply(map_lang)
for i in Constants.groups:
assert(i['name'] in df.columns), i['name']
# acuities = pd.read_sql_query('''
# select * from (
# select a.subject_id, a.hadm_id, a.icustay_id, a.oasis, a.oasis_prob, b.sofa from
# (mimiciii.oasis a
# natural join mimiciii.sofa b )) ab
# natural join
# (select subject_id, hadm_id, icustay_id, sapsii, sapsii_prob from
# mimiciii.sapsii) c
# ''', conn)
df_sapsii = pd.read_csv(mimiciii_csv_path + "sapsii")[['subject_id', 'hadm_id', 'icustay_id', 'sapsii', 'sapsii_prob']]
df_sapsii = df_sapsii.rename(columns = {name: name.lower() for name in df_sapsii.columns})
df_oasis = pd.read_csv(mimiciii_csv_path + "oasis")
df_oasis = df_oasis.rename(columns = {name: name.lower() for name in df_oasis.columns})
df_sofa = pd.read_csv(mimiciii_csv_path + "sofa")
df_sofa = df_sofa.rename(columns = {name: name.lower() for name in df_sofa.columns})
oasis_sofa_merge = pd.merge(df_oasis, df_sofa)[['subject_id', 'hadm_id', 'icustay_id', 'oasis', 'oasis_prob', 'sofa']]
acuities = pd.merge(oasis_sofa_merge, df_sapsii)
# icustays = pd.read_sql_query('''
# select subject_id, hadm_id, icustay_id, intime, outtime
# from mimiciii.icustays
# ''', conn).set_index(['subject_id','hadm_id'])
icustays_df = pd.read_csv(mimiciii_csv_path + 'ICUSTAYS.csv')
icustays_df = icustays_df.rename(columns = {name: name.lower() for name in icustays_df.columns})
df_timestamp_cols = ['chartdate', 'charttime', 'dob', 'dod', 'admittime', 'deathtime', 'dischtime', "dod_merged"]
icustays_timestamp_cols = ['intime', 'outtime']
for col in df_timestamp_cols:
df[col] = pd.to_datetime(df[col])
for col in icustays_timestamp_cols:
icustays_df[col] = pd.to_datetime(icustays_df[col])
icustays = icustays_df[["subject_id", "hadm_id", "icustay_id", "intime", "outtime"]].set_index(['subject_id', 'hadm_id'])
icustays = icustays.sort_index()
def fill_icustay(row):
try:
opts = icustays.loc[(row['subject_id'],row['hadm_id'])]
except:
return None
if | pd.isnull(row['charttime']) | pandas.isnull |
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
from app.data_process import bp
from datetime import datetime
import pandas as pd
from pathlib import Path
from bs4 import BeautifulSoup
import glob
import os
positivity_replace = {
'ALG':3526,
'BRN':3527,
'CKH':3540,
'DUR':3530,
'EOH':3558,
'GBH':3533,
'HNH':3534,
'HKP':3535,
'HAL':3536,
'HAM':3537,
'HPE':3538,
'HPH':3539,
'KFL':3541,
'LAM':3542,
'LGL':3543,
'MSL':3544,
'NIA':3546,
'NPS':3547,
'NWR':3549,
'OTT':3551,
'PEL':3553,
'PET':3555,
'PQP':3556,
'WAT':3565,
'REN':3557,
'SMD':3560,
'SWH':3575,
'SUD':3561,
'THB':3562,
'TSK':3563,
'TOR':3595,
'WDG':3566,
'WEK':3568,
'YRK':3570,
'overall':6
}
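# helper building load/save paths of the form data/<classification>/<step>/<source_name>/<table_name>/<table_name>_<date>.<type>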
def get_file_path(data, step='raw', today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + step + '/'
if data['type'] != '':
file_name = data['table_name'] + '_' + today + '.' + data['type']
else:
file_name = data['table_name'] + '_' + today
save_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_path = save_dir + '/' + file_name
return file_path, save_dir
@bp.cli.command('public_ontario_gov_daily_change_in_cases_by_phu')
def process_public_ontario_gov_daily_change_in_cases_by_phu():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'daily_change_in_cases_by_phu', 'type': 'csv'}
date_field = ['Date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
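# reshape to long format (one row per date and PHU), then map PHU names to their HR_UID codes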
df = df.melt(id_vars='Date')
replace = {
'Algoma_Public_Health_Unit':3526,
'Algoma_District':3526,
'Brant_County_Health_Unit':3527,
'Brant_County':3527,
'Chatham-Kent_Health_Unit':3540,
'Chatham_Kent':3540,
'Durham_Region_Health_Department':3530,
'Durham_Region':3530,
'Eastern_Ontario_Health_Unit':3558,
'Eastern_Ontario':3558,
'Grey_Bruce_Health_Unit':3533,
'Grey_Bruce':3533,
'Haldimand-Norfolk_Health_Unit':3534,
'Haldimand_Norfolk':3534,
'Haliburton,_Kawartha,_Pine_Ridge_District_Health_Unit':3535,
'Haliburton_Kawartha_Pine_Ridge':3535,
'Halton_Region_Health_Department':3536,
'Halton_Region':3536,
'Hamilton_Public_Health_Services':3537,
'City_of_Hamilton':3537,
'Hastings_and_Prince_Edward_Counties_Health_Unit':3538,
'Hastings_Prince_Edward':3538,
'Huron_Perth_District_Health_Unit':3539,
'Huron_Perth':3539,
'Kingston,_Frontenac_and_Lennox_&_Addington_Public_Health':3541,
'KFLA':3541,
'Lambton_Public_Health':3542,
'Lambton_County':3542,
'Leeds,_Grenville_and_Lanark_District_Health_Unit':3543,
'Leeds_Grenville_Lanark':3543,
'Middlesex-London_Health_Unit':3544,
'Middlesex_London':3544,
'Niagara_Region_Public_Health_Department':3546,
'Niagara_Region':3546,
'North_Bay_Parry_Sound_District_Health_Unit':3547,
'North_Bay_Parry_Sound_District':3547,
'Northwestern_Health_Unit':3549,
'Northwestern':3549,
'Ottawa_Public_Health':3551,
'City_of_Ottawa':3551,
'Peel_Public_Health':3553,
'Peel_Region':3553,
'Peterborough_Public_Health':3555,
'Peterborough_County_City':3555,
'Porcupine_Health_Unit':3556,
'Porcupine':3556,
'Region_of_Waterloo,_Public_Health':3565,
'Waterloo_Region':3565,
'Renfrew_County_and_District_Health_Unit':3557,
'Renfrew_County_and_District':3557,
'Simcoe_Muskoka_District_Health_Unit':3560,
'Simcoe_Muskoka_District':3560,
'Southwestern_Public_Health':3575,
'Southwestern':3575,
'Sudbury_&_District_Health_Unit':3561,
'Sudbury_and_District':3561,
'Thunder_Bay_District_Health_Unit':3562,
'Thunder_Bay_District':3562,
'Timiskaming_Health_Unit':3563,
'Timiskaming':3563,
'Toronto_Public_Health':3595,
'Toronto':3595,
'Wellington-Dufferin-Guelph_Public_Health':3566,
'Wellington_Dufferin_Guelph':3566,
'Windsor-Essex_County_Health_Unit':3568,
'Windsor_Essex_County':3568,
'York_Region_Public_Health_Services':3570,
'York_Region':3570,
'Total':6
}
df['HR_UID'] = df['variable'].replace(replace)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_conposcovidloc')
def process_public_ontario_gov_conposcovidloc():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'conposcovidloc', 'type': 'csv'}
field_map = {
"Row_ID":"row_id",
"Accurate_Episode_Date": "accurate_episode_date",
"Case_Reported_Date": "case_reported_date",
"Specimen_Date": "specimen_reported_date",
"Test_Reported_Date": "test_reported_date",
"Age_Group":"age_group",
"Client_Gender":"client_gender",
"Case_AcquisitionInfo": "case_acquisition_info",
"Outcome1": "outcome_1",
"Outbreak_Related": "outbreak_related",
"Reporting_PHU": "reporting_phu",
"Reporting_PHU_Address": "reporting_phu_address",
"Reporting_PHU_City": "reporting_phu_city",
"Reporting_PHU_Postal_Code": "reporting_phu_postal_code",
"Reporting_PHU_Website": "reporting_phu_website",
"Reporting_PHU_Latitude":"reporting_phu_latitude",
"Reporting_PHU_Longitude": "reporting_phu_longitude",
}
date_field = ['accurate_episode_date', 'case_reported_date', 'specimen_reported_date', 'test_reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.replace("12:00:00 AM", None)
df = df.rename(columns=field_map)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_vaccination')
def process_public_ontario_gov_vaccination():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'vaccination', 'type': 'csv'}
date_field = ['date']
field_map = {
'report_date': 'date'
}
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df.dropna(how='all', axis=1, inplace=True)
df.dropna(how='any', inplace=True)
for index, row in df.iterrows():
if type(row['previous_day_total_doses_administered'])==str:
df.at[index,'previous_day_doses_administered'] = row['previous_day_doses_administered'].replace(",","")
if type(row['total_doses_administered'])==str:
df.at[index,'total_doses_administered'] = row['total_doses_administered'].replace(",","")
if type(row['total_doses_in_fully_vaccinated_individuals'])==str:
df.at[index,'total_doses_in_fully_vaccinated_individuals'] = row['total_doses_in_fully_vaccinated_individuals'].replace(",","")
if type(row['total_individuals_fully_vaccinated'])==str:
df.at[index,'total_individuals_fully_vaccinated'] = row['total_individuals_fully_vaccinated'].replace(",","")
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_covidtesting')
def process_public_ontario_gov_covidtesting():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'covidtesting', 'type': 'csv'}
date_field = ['reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
to_include = []
for column in df.columns:
name = column.replace(' ','_').lower()
df[name] = df[column]
to_include.append(name)
df = df[to_include]
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_call_reports')
def process_confidential_211_call_reports():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'call_reports', 'type': 'csv'}
field_map = {
"CallReportNum":"call_report_num",
"CallDateAndTimeStart": "call_date_and_time_start",
"Demographics of Inquirer - Age Category": "age_of_inquirer"
}
date_field = ['call_date_and_time_start']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = | pd.read_csv(file) | pandas.read_csv |
import json
from unittest import TestCase
import pandas
from pandas.io.json import json_normalize
from pandas.util.testing import assert_frame_equal
from gamebench_api_client.api.utilities.dataframe_utilities import \
json_to_normalized_dataframe, session_detail_to_dataframe, to_dataframe
from tests import *
class TestDataFrameUtilities(TestCase):
def setUp(self):
self.data_frame = pandas.DataFrame()
with open(os.path.join(
PARENT_DIR + API_SAMPLES + "gpu_img.json")) as json_data:
self.test_json = json.load(json_data)
with open(os.path.join(
PARENT_DIR + API_SAMPLES + "sessionid.json")) as json_data:
self.session_json = json.load(json_data)
def test_json_to_normalized_dataframe(self):
""" Verify a DataFrame is returned after normalizing a JSON."""
expected = json_normalize(
self.test_json['response'],
'samples',
['id', 'sessionId']
)
actual = json_to_normalized_dataframe(self.test_json['response'])
assert_frame_equal(
expected,
actual
)
def test_session_metric_to_dataframe(self):
""" Verify a DataFrame is returned when given a specific metric."""
expected = | pandas.DataFrame(data=[self.session_json['response']['app']]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.cluster import hierarchy
import copy
import sys
sys.path.append('/home/sd375')
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster import hierarchy
from .load_and_save_environment_data import load_target_residuals
from ..environment_processing.base_processing import path_inputs_env
from UsefulFonctions import ComputeDistanceMatrix
import os
dict_ensemble_to_datasets = {
'ENSEMBLE_HealthAndMedicalHistory' : ['Breathing', 'CancerScreening', 'ChestPain', 'Claudification', 'Eyesight', 'GeneralHealth', 'GeneralPain', 'Hearing', 'Medication', 'Mouth'],
'ENSEMBLE_LifestyleAndEnvironment' : ['Alcohol', 'Diet', 'ElectronicDevices', 'PhysicalActivityQuestionnaire', 'SexualFactors', 'Sleep', 'Smoking', 'SunExposure'],
'ENSEMBLE_PsychosocialFactors' : ['MentalHealth', 'SocialSupport'],
'ENSEMBLE_SocioDemographics' : ['Education', 'Employment', 'Household', 'OtherSociodemographics']
}
cols_ethnicity = ['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish',
'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean',
'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian',
'Ethnicity.Pakistani', 'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other',
'Ethnicity.Black', 'Ethnicity.Caribbean', 'Ethnicity.African',
'Ethnicity.Black_Other', 'Ethnicity.Chinese',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know',
'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA', 'Ethnicity.Other']
cols_age_sex_eid_ethnicity = ['Sex', 'eid', 'Age when attended assessment centre'] + cols_ethnicity
## Agglomerative Clustering :
# Metrics :
def NegativeIntersection(x, y):
"""
x, y 1D numpy vectors
"""
return -x.dot(y) #1 - x.dot(y) / np.sum(x | y)
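# Illustrative note (not part of the original pipeline): the datasets are encoded below
# as 0/1 membership vectors over the same ids, so x.dot(y) counts samples present in
# both, and negating it makes "more overlap" sort as "smaller distance", e.g.
#   NegativeIntersection(np.array([1, 1, 0, 1]), np.array([1, 0, 0, 1]))  # -> -2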
def CreateDictSizes(path_dataset_full, target_dataset, env_dataset):
## Load everything
dict_name_to_df = {}
dict_name_to_num_features = {}
print("Loading Full raw data")
if env_dataset is not None :
if 'ENSEMBLE' in env_dataset :
subdatasets = dict_ensemble_to_datasets[env_dataset]
usecols = []
for sub_envdataset in subdatasets :
usecol = pd.read_csv(path_inputs_env + '%s.csv' % sub_envdataset, nrows = 1).set_index('id').columns
usecol = [elem for elem in usecol if elem not in ['eid', 'Sex', 'Age when attended assessment centre', 'Unnamed: 0'] + cols_ethnicity]
usecols += usecol
else :
usecols = pd.read_csv(path_inputs_env + '%s.csv' % env_dataset, nrows = 1).set_index('id').columns
usecols = [elem for elem in usecols if elem not in ['eid', 'Sex', 'Age when attended assessment centre', 'Unnamed: 0'] + cols_ethnicity]
else :
usecols = None
# usecols is None when no environment dataset is given; in that case load every column
cols_to_load = usecols + ['id', 'eid', 'Age when attended assessment centre'] if usecols is not None else None
full_df = pd.read_csv(path_dataset_full, usecols = cols_to_load).set_index('id')
if target_dataset is not None :
target_dataset = target_dataset.replace('\\', '')
Alan_residuals = load_target_residuals(target_dataset)
full_df = full_df.join(Alan_residuals)
full_df = full_df[~full_df['residuals'].isna()]
print("Starting to convert columns to vectors")
cols = [elem for elem in full_df.columns if elem not in cols_age_sex_eid_ethnicity + ['residuals']]
for col in cols :
if not full_df[col].dropna().empty :
col_name = col
dict_name_to_df[col_name] = full_df[[col, 'eid']].dropna()
dict_name_to_num_features[col_name] = 1
else :
continue
print("End dict series")
df_age = full_df['Age when attended assessment centre'].index
return dict_name_to_df, dict_name_to_num_features, df_age
def CreateDataArray(dict_name_to_df, ids):
n = len(dict_name_to_df)
dim = len(ids)
array_fill_0 = np.zeros((n, dim), dtype = 'int')
map_name_to_idx = dict(zip(dict_name_to_df.keys(), range(len(dict_name_to_df))))
for name, elem in dict_name_to_df.items():
idx = map_name_to_idx[name]
array_fill_0[idx] = ids.isin(elem.index)
return array_fill_0, map_name_to_idx
def FindArgmin(full_distance_matrix):
argmin = full_distance_matrix.argmin()
argmin_i, argmin_j = np.unravel_index(argmin, full_distance_matrix.shape)
if argmin_i > argmin_j:
argmin_i, argmin_j = argmin_j, argmin_i
return argmin_i, argmin_j
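# Illustration: for a 3x3 distance matrix whose smallest entry sits at flat index 5,
# np.unravel_index(5, (3, 3)) yields (1, 2); the swap above keeps argmin_i <= argmin_j.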
def ReplaceIbyIJ(array_fill_0, dataset_ij, argmin_i, argmin_j, ids):
## Replace i by ij
array_fill_0[argmin_i] = 0
array_fill_0[argmin_i] = ids.isin(dataset_ij.index)
## Delete j
array_fill_0 = np.delete(array_fill_0, argmin_j, axis = 0)
return array_fill_0
def CreateNewMapping(map_idx_to_name, argmin_i, argmin_j):
new_mapping = dict()
for index in range(argmin_i):
new_mapping[index] = map_idx_to_name[index]
new_mapping[argmin_i] = map_idx_to_name[argmin_i] + '//' + map_idx_to_name[argmin_j]
for index in range(argmin_i + 1, argmin_j):
new_mapping[index] = map_idx_to_name[index]
for index in range(argmin_j, len(map_idx_to_name) - 1):
new_mapping[index] = map_idx_to_name[index + 1]
map_idx_to_name = copy.deepcopy(new_mapping)
map_name_to_idx = {v : k for k, v in map_idx_to_name.items()}
return map_idx_to_name, map_name_to_idx
def RecomputeDistanceMatrix(full_distance_matrix, array_fill_0, argmin_i, argmin_j):
full_distance_matrix = np.delete(np.delete(full_distance_matrix, argmin_j, axis = 0), argmin_j, axis = 1)
new_point = array_fill_0[argmin_i]
old_points = array_fill_0
new_distance_matrix = ComputeDistanceMatrix(new_point[np.newaxis, :], old_points)
full_distance_matrix[:, argmin_i] = new_distance_matrix[0, :]
full_distance_matrix[argmin_i, :] = new_distance_matrix[0, :]
full_distance_matrix[argmin_i, argmin_i] = np.inf
return full_distance_matrix
def AglomerativeClusteringFull(path_input, target_dataset = None, env_dataset = None):
## Load eid and ids, compute max_size and min_size :
dict_name_to_df, dict_name_to_num_features, ids = CreateDictSizes(path_input, target_dataset, env_dataset)
## Create Array with size vectors, and create mapping between idx and dataset names
array_fill_0, map_name_to_idx = CreateDataArray(dict_name_to_df, ids)
map_idx_to_name = {v : k for k, v in map_name_to_idx.items()}
## Initialise distance matrix
full_distance_matrix = ComputeDistanceMatrix(array_fill_0, array_fill_0)
np.fill_diagonal(full_distance_matrix, np.inf)
print("Done computing full distance matrix ", full_distance_matrix)
dict_not_changed_index = copy.deepcopy(map_idx_to_name)
dict_not_changed_index_to_num_features = dict((map_name_to_idx[key], value) for (key, value) in dict_name_to_num_features.items())
initial_dict = copy.deepcopy(dict_not_changed_index)
n = array_fill_0.shape[0]
Zdf = pd.DataFrame(columns = {'index_i', 'index_j', 'index_ij', 'num_features_i', 'num_features_j', 'num_features_ij', 'distance_ij', 'number_ij', 'samplesize_i','samplesize_j', 'samplesize_ij', 'name_i', 'name_j', 'name_ij', 'step'})
for count in range(n - 1):
if count % 10 == 0:
print(count/(n-1))
## Find Argmin i and j
argmin_i, argmin_j = FindArgmin(full_distance_matrix)
## Store Names :
dataset_name_i, dataset_name_j = map_idx_to_name[argmin_i], map_idx_to_name[argmin_j]
name_ij = dataset_name_i + '//' + dataset_name_j
## Store sample sizes
row_i, row_j = array_fill_0[argmin_i], array_fill_0[argmin_j]
samplesize_ij = row_i.dot(row_j)
samplesize_i, samplesize_j = row_i.dot(row_i), row_j.dot(row_j)
## Store distance
distance_ij = full_distance_matrix[argmin_i, argmin_j]
## Merge argmin_i and argmin_j
dataset_i, dataset_j = dict_name_to_df[dataset_name_i], dict_name_to_df[dataset_name_j]
dataset_ij = dataset_i.join(dataset_j, how = 'inner', rsuffix = '_r').drop(columns = 'eid_r')
dict_name_to_df[name_ij] = dataset_ij
dict_name_to_df.pop(dataset_name_i, None)
dict_name_to_df.pop(dataset_name_j, None)
print("Merge %s and %s => %s " % (dataset_name_i, dataset_name_j, name_ij))
## Replace i by ij, delete j
array_fill_0 = ReplaceIbyIJ(array_fill_0, dataset_ij, argmin_i, argmin_j, ids)
## New mapping
map_idx_to_name, map_name_to_idx = CreateNewMapping(map_idx_to_name, argmin_i, argmin_j)
## Recompute distances with new cluster
full_distance_matrix = RecomputeDistanceMatrix(full_distance_matrix, array_fill_0, argmin_i, argmin_j)
## Update saving index and creating final Z row:
dict_not_changed_index[count + n] = name_ij
i_index_not_changed = [key for key, value in dict_not_changed_index.items() if value == dataset_name_i][0]
j_index_not_changed = [key for key, value in dict_not_changed_index.items() if value == dataset_name_j][0]
number_in_ij = len(name_ij.split('//'))
## Store number of features
features_i, features_j = dict_not_changed_index_to_num_features[i_index_not_changed], dict_not_changed_index_to_num_features[j_index_not_changed]
features_ij = features_i + features_j
dict_not_changed_index_to_num_features[count + n] = features_ij
Zdf = Zdf.append({'index_i' : i_index_not_changed, 'index_j' : j_index_not_changed, 'index_ij' : count + n,
'num_features_i' : features_i, 'num_features_j' : features_j, 'num_features_ij' : features_ij,
'samplesize_ij' : samplesize_ij, 'samplesize_i' : samplesize_i, 'samplesize_j' : samplesize_j,
'name_i' : dataset_name_i, 'name_j' : dataset_name_j, 'name_ij' : name_ij,
'distance_ij': distance_ij, 'number_ij' : number_in_ij, 'step' : count + n
}, ignore_index = True)
return dict_not_changed_index, Zdf, initial_dict
## Processing HC
def Create_full_linkage_matrix(Zdf, initial_dict):
Z = Zdf[['index_i', 'index_j', 'distance_ij', 'number_ij']].astype(float).values
tree = hierarchy.to_tree(Z)
cols = list(initial_dict.values())
linkage_matrix_raw = Zdf.set_index('index_ij')
linkage_matrix_raw['Score_i'] = linkage_matrix_raw['samplesize_i'] * linkage_matrix_raw['num_features_i']
linkage_matrix_raw['Score_j'] = linkage_matrix_raw['samplesize_j'] * linkage_matrix_raw['num_features_j']
linkage_matrix_raw['Score_ij'] = linkage_matrix_raw['samplesize_ij'] * linkage_matrix_raw['num_features_ij']
return tree, linkage_matrix_raw
def CreateMappingScoreAndFeatures(linkage_matrix_raw):
mapping_index_to_score = {}
mapping_index_to_features = {}
first_node = linkage_matrix_raw.index[0]
for elem in linkage_matrix_raw.index:
mapping_index_to_score[elem] = linkage_matrix_raw.loc[elem, 'Score_ij']
mapping_index_to_features[elem] = linkage_matrix_raw.loc[elem, 'num_features_ij']
for index in range(first_node):
if index in linkage_matrix_raw['index_i'].values:
score = linkage_matrix_raw[linkage_matrix_raw.index_i == index]['Score_i'].values[0]
feature = linkage_matrix_raw[linkage_matrix_raw.index_i == index]['num_features_i'].values[0]
elif index in linkage_matrix_raw['index_j'].values:
score = linkage_matrix_raw[linkage_matrix_raw.index_j == index]['Score_j'].values[0]
feature = linkage_matrix_raw[linkage_matrix_raw.index_j == index]['num_features_j'].values[0]
mapping_index_to_score[index] = score
mapping_index_to_features[index] = feature
return mapping_index_to_score, mapping_index_to_features
## Find interesting nodes
def GetInterestingNodes(tree_, linkage_matrix_raw, printing = True):
list_interesting = []
def recurse(tree):
score_parent = linkage_matrix_raw.loc[tree.get_id(), 'Score_ij']
if printing:
print(" PARENT : ", tree.get_id(), ", Score : ", score_parent)
## Compare parent and left child
if not tree.get_left().is_leaf():
score_left = linkage_matrix_raw.loc[tree.get_left().get_id(), 'Score_ij']
else:
row = linkage_matrix_raw.loc[tree.get_id()]
if row['index_i'] == tree.get_left().get_id():
score = row['Score_i']
else :
score = row['Score_j']
score_left = score
if printing:
print(" CHILD LEFT : ", tree.get_left().get_id(), ", Score left : ", score_left)
## Compare parent and right child
if not tree.get_right().is_leaf():
score_right = linkage_matrix_raw.loc[tree.get_right().get_id(), 'Score_ij']
else :
row = linkage_matrix_raw.loc[tree.get_id()]
if row['index_i'] == tree.get_right().get_id():
score = row['Score_i']
else :
score = row['Score_j']
score_right = score
if printing:
print(" CHILD RIGHT : ", tree.get_right().get_id(), ", Score right : ", score_right)
## Append interesting nodes
if score_right > score_parent:
list_interesting.append(tree.get_right().get_id())
if score_left > score_parent:
list_interesting.append(tree.get_left().get_id())
## Explore left and right if possible
if not tree.get_left().is_leaf():
recurse(tree.get_left())
if not tree.get_right().is_leaf():
recurse(tree.get_right())
return list_interesting
list_interesting = recurse(tree_)
if list_interesting == [] :
return [linkage_matrix_raw.index.max()]
else :
return list_interesting
def CreateBestClusterFromInterestingNodes(list_interesting, linkage_matrix_raw, path_input, path_clusters, target, env_df):
print("linkage_matrix_raw", linkage_matrix_raw)
try :
df_sorted = linkage_matrix_raw.loc[list_interesting].sort_values('Score_ij', ascending = False)
except KeyError:
df_sorted = linkage_matrix_raw.sort_values('Score_ij', ascending = False)
best_cluster = df_sorted.iloc[0]
list_features = best_cluster['name_ij'].split('//')
df_cluster = pd.read_csv(path_input, usecols = ['id'] + list_features ).set_index('id')
df_sex_age_ethnicity = pd.read_csv('/n/groups/patel/Alan/Aging/Medical_Images/data/data-features_instances.csv').set_index('id').drop(columns = ['Abdominal_images_quality', 'instance', 'outer_fold'])
df_sex_age_ethnicity = df_sex_age_ethnicity.rename(columns = {'Age' : 'Age when attended assessment centre'})
df_cluster = df_cluster.join(df_sex_age_ethnicity)
df_cluster.to_csv(path_clusters + 'Clusters_%s_%s.csv' % (env_df, target))
def CreateClustersFromInterestingNodes(list_interesting, linkage_matrix_raw, path_input, path_clusters, target = None):
## EWAS :
if target is not None:
os.mkdir(path_clusters + target )
path_saving = path_clusters + target + '/'
## Biomarkers
else :
path_saving = path_clusters
## Compute interesting Nodes
for node_id in list_interesting:
print(node_id)
features = linkage_matrix_raw.loc[node_id, 'name_ij']
score = linkage_matrix_raw.loc[node_id, 'Score_ij']
num_features = linkage_matrix_raw.loc[node_id, 'num_features_ij']
print(features)
list_features = features.split('//')
df_cluster = pd.read_csv(path_input, usecols = ['id'] + list_features ).set_index('id') ## Remember to drop nas
df_sex_age_ethnicity = | pd.read_csv('/n/groups/patel/Alan/Aging/Medical_Images/data/data-features_instances.csv') | pandas.read_csv |
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, mpld3
from pandas import DataFrame
#----------------------------------------------------------------------------------------------------------------------#
#Load the data into a DF
with open(r"C:/Users/Pathtoyourdata", 'r', encoding='utf8', errors='ignore') as read_file:
json_reloaded = json.load(read_file)
print(len(json_reloaded))
complete_data = pd.DataFrame(json_reloaded)
print(complete_data.head()) #head
def artists():
# dict-based renaming inside .agg was removed in pandas 1.0; use named aggregation instead
df_top_freq = complete_data.groupby('artistName')['artistName'].agg(
artist_count='count').sort_values('artist_count', ascending=False)
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(df_top_freq)
df_top_freq.to_csv('top_artists.csv')
def songs():
df_top_songs = complete_data.groupby("trackName")["trackName"].agg(
song_count='count').sort_values("song_count", ascending=False)
with | pd.option_context('display.max_rows', None, 'display.max_columns', None) | pandas.option_context |
import logging
import os
import gc
import pandas as pd
from src.data_models.tdidf_model import FrequencyModel
from src.evaluations.statisticalOverview import StatisticalOverview
from src.globalVariable import GlobalVariable
from src.kemures.tecnics.content_based import ContentBased
from src.preprocessing.preprocessing import Preprocessing
def execute_one_time():
Preprocessing.database_evaluate_graph()
scenario = GlobalVariable.ONE_SCENARIO_SIZE
scenario_class_df = pd.DataFrame()
scenario_results_df = pd.DataFrame()
for run in range(GlobalVariable.RUN_TIMES):
os.system('cls||clear')
logger.info("+ Rodada " + str(run + 1))
logger.info("+ Carregando o Cenário com " + str(scenario))
songs_base_df, users_preference_base_df = Preprocessing.load_data_test(scenario)
run_class_df, run_results_df = ContentBased.run_recommenders(
users_preference_base_df, FrequencyModel.mold(songs_base_df), scenario, run + 1
)
scenario_results_df = pd.concat([scenario_results_df, run_results_df])
scenario_class_df = pd.concat([scenario_class_df, run_class_df])
StatisticalOverview.result_info(scenario_results_df)
StatisticalOverview.graphics(scenario_results_df)
os.system('cls||clear')
StatisticalOverview.comparate(scenario_results_df)
def execute_by_scenarios():
Preprocessing.database_evaluate_graph()
application_class_df = pd.DataFrame()
application_results_df = pd.DataFrame()
for scenario in GlobalVariable.SCENARIO_SIZE_LIST:
gc.collect()
scenario_class_df = pd.DataFrame()
scenario_results_df = | pd.DataFrame() | pandas.DataFrame |
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, f1_score
from itertools import product
from tqdm import tqdm
from functools import partial
import multiprocessing as mp
import numpy as np
import pandas as pd
from pandas.api.types import (
is_numeric_dtype,
is_bool_dtype,
is_object_dtype,
is_categorical_dtype,
is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
)
NOT_SUPPORTED_ANYMORE = "NOT_SUPPORTED_ANYMORE"
TO_BE_CALCULATED = -1
def _calculate_model_cv_score_(
df, target, feature, task, cross_validation, random_seed, **kwargs
):
"Calculates the mean model score based on cross-validation"
# Sources about the used methods:
# https://scikit-learn.org/stable/modules/tree.html
# https://scikit-learn.org/stable/modules/cross_validation.html
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
metric = task["metric_key"]
model = task["model"]
# shuffle the rows - this is important for cross-validation
# because the cross-validation just takes the first n lines
# if there is a strong pattern in the rows eg 0,0,0,0,1,1,1,1
# then this will lead to problems because the first cv sees mostly 0 and the later 1
# this approach might be wrong for timeseries because it might leak information
df = df.sample(frac=1, random_state=random_seed, replace=False)
# preprocess target
if task["type"] == "classification":
label_encoder = preprocessing.LabelEncoder()
df[target] = label_encoder.fit_transform(df[target])
target_series = df[target]
else:
target_series = df[target]
# preprocess feature
if _dtype_represents_categories(df[feature]):
one_hot_encoder = preprocessing.OneHotEncoder()
array = df[feature].__array__()
sparse_matrix = one_hot_encoder.fit_transform(array.reshape(-1, 1))
feature_input = sparse_matrix
else:
# reshaping needed because there is only 1 feature
feature_input = df[feature].values.reshape(-1, 1)
# Cross-validation is stratifiedKFold for classification, KFold for regression
# CV on one core (n_job=1; default) has shown to be fastest
scores = cross_val_score(
model, feature_input, target_series, cv=cross_validation, scoring=metric
)
return scores.mean()
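# Hypothetical call showing how a task dict from VALID_CALCULATIONS (defined below)
# feeds this helper; all names other than the dict keys are made up for illustration:
#   task = VALID_CALCULATIONS["classification"]
#   score = _calculate_model_cv_score_(df, target="y", feature="x", task=task,
#                                      cross_validation=4, random_seed=123)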
def _normalized_mae_score(model_mae, naive_mae):
"Normalizes the model MAE score, given the baseline score"
# # Value range of MAE is [0, infinity), 0 is best
# 10, 5 ==> 0 because worse than naive
# 10, 20 ==> 0.5
# 5, 20 ==> 0.75 = 1 - (mae/base_mae)
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
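# Worked examples mirroring the value pairs in the comments above (illustrative only):
#   _normalized_mae_score(model_mae=10, naive_mae=5)   # -> 0, model worse than baseline
#   _normalized_mae_score(model_mae=10, naive_mae=20)  # -> 0.5
#   _normalized_mae_score(model_mae=5, naive_mae=20)   # -> 0.75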
def _mae_normalizer(df, y, model_score, **kwargs):
"In case of MAE, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"]) # true, pred
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score
def _normalized_f1_score(model_f1, baseline_f1):
"Normalizes the model F1 score, given the baseline score"
# # F1 ranges from 0 to 1
# # 1 is best
# 0.5, 0.7 ==> 0 because model is worse than naive baseline
# 0.75, 0.5 ==> 0.5
#
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1 # eg 0.3
f1_diff = model_f1 - baseline_f1 # eg 0.1
return f1_diff / scale_range # 0.1/0.3 = 0.33
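# Worked examples matching the comments above (illustrative only):
#   _normalized_f1_score(model_f1=0.5, baseline_f1=0.7)   # -> 0, below the naive baseline
#   _normalized_f1_score(model_f1=0.75, baseline_f1=0.5)  # -> 0.5, half of the 0.5 headroom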
def _f1_normalizer(df, y, model_score, random_seed):
"In case of F1, calculates the baseline score for y and derives the PPS."
label_encoder = preprocessing.LabelEncoder()
df["truth"] = label_encoder.fit_transform(df[y])
df["most_common_value"] = df["truth"].value_counts().index[0]
random = df["truth"].sample(frac=1, random_state=random_seed)
baseline_score = max(
f1_score(df["truth"], df["most_common_value"], average="weighted"),
f1_score(df["truth"], random, average="weighted"),
)
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score
VALID_CALCULATIONS = {
"regression": {
"type": "regression",
"is_valid_score": True,
"model_score": TO_BE_CALCULATED,
"baseline_score": TO_BE_CALCULATED,
"ppscore": TO_BE_CALCULATED,
"metric_name": "mean absolute error",
"metric_key": "neg_mean_absolute_error",
"model": tree.DecisionTreeRegressor(),
"score_normalizer": _mae_normalizer,
},
"classification": {
"type": "classification",
"is_valid_score": True,
"model_score": TO_BE_CALCULATED,
"baseline_score": TO_BE_CALCULATED,
"ppscore": TO_BE_CALCULATED,
"metric_name": "weighted F1",
"metric_key": "f1_weighted",
"model": tree.DecisionTreeClassifier(),
"score_normalizer": _f1_normalizer,
},
"predict_itself": {
"type": "predict_itself",
"is_valid_score": True,
"model_score": 1,
"baseline_score": 0,
"ppscore": 1,
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"target_is_constant": {
"type": "target_is_constant",
"is_valid_score": True,
"model_score": 1,
"baseline_score": 1,
"ppscore": 0,
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"target_is_id": {
"type": "target_is_id",
"is_valid_score": True,
"model_score": 0,
"baseline_score": 0,
"ppscore": 0,
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"feature_is_id": {
"type": "feature_is_id",
"is_valid_score": True,
"model_score": 0,
"baseline_score": 0,
"ppscore": 0,
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
}
INVALID_CALCULATIONS = [
"target_is_datetime",
"target_data_type_not_supported",
"empty_dataframe_after_dropping_na",
"unknown_error",
]
def _dtype_represents_categories(series) -> bool:
"Determines if the dtype of the series represents categorical values"
return (
is_bool_dtype(series)
or is_object_dtype(series)
or is_string_dtype(series)
or is_categorical_dtype(series)
)
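# Illustration (not from the original source): bool, object/string and categorical
# Series count as categorical here, e.g. pd.Series(["a", "b"]) -> True, while a float
# Series returns False and is later routed to the regression case.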
def _determine_case_and_prepare_df(df, x, y, sample=5_000, random_seed=123):
"Returns str with the name of the determined case based on the columns x and y"
if x == y:
return df, "predict_itself"
df = df[[x, y]]
# IDEA: log.warning when values have been dropped
df = df.dropna()
if len(df) == 0:
return df, "empty_dataframe_after_dropping_na"
# IDEA: show warning
# raise Exception(
# "After dropping missing values, there are no valid rows left"
# )
df = _maybe_sample(df, sample, random_seed=random_seed)
if _feature_is_id(df, x):
return df, "feature_is_id"
category_count = df[y].value_counts().count()
if category_count == 1:
# it is helpful to separate this case in order to save unnecessary calculation time
return df, "target_is_constant"
if _dtype_represents_categories(df[y]) and (category_count == len(df[y])):
# it is important to separate this case in order to save unnecessary calculation time
return df, "target_is_id"
if _dtype_represents_categories(df[y]):
return df, "classification"
if is_numeric_dtype(df[y]):
# this check needs to be after is_bool_dtype (which is part of _dtype_represents_categories) because bool is considered numeric by pandas
return df, "regression"
if is_datetime64_any_dtype(df[y]) or | is_timedelta64_dtype(df[y]) | pandas.api.types.is_timedelta64_dtype |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import pandas_datareader.data as web
pytestmark = pytest.mark.stable
class TestEcondb(object):
def test_get_cdh_e_fos(self):
# EUROSTAT
# Employed doctorate holders in non managerial and non professional
# occupations by fields of science (%)
df = web.DataReader(
"dataset=CDH_E_FOS&GEO=NO,PL,PT,RU&FOS07=FOS1&Y_GRAD=TOTAL",
"econdb",
start= | pd.Timestamp("2005-01-01") | pandas.Timestamp |
import os
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from mavedbconvert import empiric, constants
from tests import ProgramTestCase
class TestEmpiricInit(ProgramTestCase):
def setUp(self):
super().setUp()
self.path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
def test_offset_inframe(self):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=3)
def test_error_offset_not_inframe(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", offset=1)
def test_error_noncoding(self):
with self.assertRaises(ValueError):
empiric.Empiric(src=self.path, wt_sequence="ATC", is_coding=False)
class TestInferProEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="v", codon_pos=0),
"p.Val1=",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="V", wt_aa="F", codon_pos=0),
"p.Phe1Val",
)
def test_converts_triple_q_to_Xaa(self):
self.assertEqual(
empiric.infer_pro_substitution(mut_aa="?", wt_aa="V", codon_pos=0),
"p.Val1Xaa",
)
class TestInferNTEvent(unittest.TestCase):
def test_infers_equal_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="aaa", mut_codon="AAA", codon_pos=0),
"c.[1=;2=;3=]",
)
def test_infers_sub_event_event(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=0),
"c.[1A>G;2=;3C>A]",
)
def test_adds_codon_pos_multiplied_by_3_to_position(self):
self.assertEqual(
empiric.infer_nt_substitution(wt_codon="ATC", mut_codon="GTA", codon_pos=1),
"c.[4A>G;5=;6C>A]",
)
class TestEmpiric(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_missing_amino_acid(self):
for nan in constants.extra_na:
df = pd.DataFrame({"Position": [0], "Amino Acid": [nan], "row_num": [0]})
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_value_error_codon_doesnt_match_aa_column(self):
with self.assertRaises(ValueError):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "Codon": ["AAT"], "row_num": [0]}
)
self.empiric.validate_columns(df)
self.empiric.parse_row(row=df.iloc[0, :])
def test_error_infer_nt_true_but_missing_codon_value(self):
for nan in constants.extra_na:
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "row_num": [0], "Codon": [nan]}
)
self.empiric.validate_columns(df)
with self.assertRaises(ValueError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_negative_position(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_index_error_out_of_codon_bounds(self):
df = pd.DataFrame(
{"Position": [56], "Amino Acid": ["K"], "row_num": [0], "Codon": ["AAA"]}
)
self.empiric.validate_columns(df)
with self.assertRaises(IndexError):
self.empiric.parse_row(row=df.iloc[0, :])
def test_amino_acid_column_is_case_insensitive(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["v"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_one_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = True
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_infers_hgvs_pro_event_from_zero_based_position(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.wt_sequence = "GTAAAA"
self.empiric.one_based = False
_, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_pro, "p.Lys2Val")
def test_protein_output_is_singular_when_inferring_nt(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
hgvs_nt, hgvs_pro = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1A>G;2A>T;3=]")
self.assertEqual(hgvs_pro, "p.Lys1Val")
def test_hgvs_nt_is_none_when_codon_is_not_in_axes(self):
df = pd.DataFrame({"Position": [0], "Amino Acid": ["V"], "row_num": [0]})
self.empiric.validate_columns(df)
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertIsNone(hgvs_nt)
def test_correctly_infers_hgvs_nt_positions_when_zero_based(self):
df = pd.DataFrame(
{"Position": [1], "Amino Acid": ["V"], "row_num": [0], "Codon": ["GTA"]}
)
self.empiric.validate_columns(df)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAT"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[4A>G;5A>T;6T>A]")
def test_correctly_infers_hgvs_nt_positions_when_one_based(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.empiric.one_based = True
self.empiric.wt_sequence = "GTAAAA"
hgvs_nt, _ = self.empiric.parse_row(row=df.iloc[0, :])
self.assertEqual(hgvs_nt, "c.[1G>A;2T>A;3A>T]")
class TestEmpiricValidateColumns(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input, wt_sequence="AAA", one_based=False
)
def test_error_cannot_find_case_insensitive_aa_column(self):
df = pd.DataFrame({"Position": [1], "aa": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_error_cannot_find_case_insensitive_position_column(self):
df = pd.DataFrame({"pos": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
with self.assertRaises(ValueError):
self.empiric.validate_columns(df)
def test_sets_codon_column_as_none_if_not_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, None)
def test_sets_codon_column_if_present(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.codon_column, "Codon")
def test_sets_position_column(self):
df = pd.DataFrame({"Position": [1], "Amino Acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.position_column, "Position")
def test_sets_aa_column(self):
df = pd.DataFrame({"Position": [1], "amino acid": ["N"], "Codon": ["AAT"]})
self.empiric.validate_columns(df)
self.assertEqual(self.empiric.aa_column, "amino acid")
class TestEmpiricParseScoresInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="scores",
score_column="A",
)
def test_deletes_position_amino_acid_codon_row_num_columns(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1.2]}
)
result = self.empiric.parse_input(df)
self.assertNotIn("Position", result.columns)
self.assertNotIn("Amino Acid", result.columns)
self.assertNotIn("Codon", result.columns)
self.assertNotIn("row_num", result.columns)
def test_keeps_additional_non_score_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertIn("B", result.columns)
def test_renames_score_column_to_score_and_drops_original(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertListEqual(list(df["A"]), list(result["score"]))
self.assertIn("B", result.columns)
self.assertNotIn("A", result.columns)
def test_sets_hgvs_pro_column(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.pro_variant_col].values[0], "p.Lys1Asn")
def test_correctly_infers_hgvs_nt_column_when_codon_column_present(self):
df = pd.DataFrame(
{
"Position": [1],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
self.empiric.one_based = False
self.empiric.wt_sequence = "GGGAAA"
result = self.empiric.parse_input(df)
self.assertEqual(result[constants.nt_variant_col].values[0], "c.[4=;5=;6A>T]")
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index(constants.mavedb_score_column), 2)
def test_removes_null_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"B": [None],
"A": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_drops_nt_when_codon_column_is_not_provided(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "A": [1.2], "B": [2.4]}
)
result = self.empiric.parse_input(df)
self.assertNotIn(constants.nt_variant_col, result.columns)
def test_drops_non_numeric_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": ["a"],
}
)
result = self.empiric.parse_input(df)
self.assertNotIn("B", result.columns)
def test_keeps_int_type_as_int(self):
df = pd.DataFrame(
{"Position": [0], "Amino Acid": ["N"], "Codon": ["AAT"], "A": [1]}
)
result = self.empiric.parse_input(df)
self.assertTrue(
np.issubdtype(
result[constants.mavedb_score_column].values[0], np.signedinteger
)
)
class TestEmpiricParseCountsInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.input = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.empiric = empiric.Empiric(
src=self.input,
wt_sequence="AAA",
one_based=False,
input_type="counts",
score_column="A",
)
def test_orders_columns(self):
df = pd.DataFrame(
{
"Position": [0],
"Amino Acid": ["N"],
"Codon": ["AAT"],
"A": [1.2],
"B": [2.4],
}
)
result = self.empiric.parse_input(df)
self.assertEqual(list(result.columns).index(constants.nt_variant_col), 0)
self.assertEqual(list(result.columns).index(constants.pro_variant_col), 1)
self.assertEqual(list(result.columns).index("A"), 2)
self.assertEqual(list(result.columns).index("B"), 3)
class TestEmpiricLoadInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.excel_path = os.path.join(self.data_dir, "empiric", "empiric.xlsx")
self.excel_header_footer_path = os.path.join(
self.data_dir, "empiric", "empiric_header_footer.xlsx"
)
self.csv_path = os.path.join(self.data_dir, "empiric", "tmp.csv")
self.tsv_path = os.path.join(self.data_dir, "empiric", "tmp.tsv")
self.excel_multisheet_path = os.path.join(
self.data_dir, "empiric", "empiric_multisheet.xlsx"
)
def test_extra_na_load_as_nan(self):
for value in constants.extra_na:
df = pd.read_excel(self.excel_path, engine="openpyxl")
df["A"] = [value] * len(df)
df.to_csv(self.csv_path, index=False)
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
expected = pd.Series([np.NaN] * len(df), index=df.index, name="A")
assert_series_equal(result["A"], expected)
def test_loads_first_sheet_by_default(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="score",
input_type=constants.score_type,
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path, na_values=constants.extra_na, engine="openpyxl"
)
assert_frame_equal(result, expected)
def test_loads_correct_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="Sheet3",
)
result = p.load_input_file()
expected = pd.read_excel(
self.excel_multisheet_path,
na_values=constants.extra_na,
sheet_name="Sheet3",
engine="openpyxl",
)
assert_frame_equal(result, expected)
def test_error_missing_sheet(self):
p = empiric.Empiric(
src=self.excel_multisheet_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
sheet_name="BadSheet",
)
with self.assertRaises(KeyError):
p.load_input_file()
def test_handles_csv(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df.to_csv(self.csv_path, index=False, sep=",")
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
assert_frame_equal(result, df)
def test_loads_with_skipped_rows(self):
p = empiric.Empiric(
src=self.excel_header_footer_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
skip_header_rows=2,
skip_footer_rows=2,
)
result = p.load_input_file()
df = pd.read_excel(self.excel_path, engine="openpyxl")
assert_frame_equal(result, df)
def test_handles_tsv(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df.to_csv(self.tsv_path, index=False, sep="\t")
e = empiric.Empiric(
src=self.tsv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
result = e.load_input_file()
assert_frame_equal(result, df)
def test_error_position_not_in_columns(self):
df = pd.read_excel(self.excel_path, engine="openpyxl")
df = df.drop(columns=["Position"])
df.to_csv(self.csv_path, index=False, sep="\t")
with self.assertRaises(ValueError):
e = empiric.Empiric(
src=self.csv_path,
wt_sequence="TTTTCTTATTGT",
score_column="col_A",
input_type=constants.score_type,
one_based=False,
)
e.load_input_file()
def test_error_amino_acid_not_in_columns(self):
df = | pd.read_excel(self.excel_path, engine="openpyxl") | pandas.read_excel |
# Utility scripts
#
import sys
import os
import logging
from os.path import abspath, dirname, isdir, join, exists
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
from enum import Enum
# Setup log
class colr:
GRN = "\033[92m"
END = "\033[0m"
WARN = "\033[93m"
def setupLogging(LOGNAME):
global log
if "darwin" in sys.platform:
stdoutformat = logging.Formatter(
colr.GRN + "%(asctime)s" + colr.END + ": %(message)s",
datefmt="[%b %d %I:%M %p]",
)
else:
stdoutformat = logging.Formatter(
"%(asctime)s: %(message)s", datefmt="[%I:%M %p]"
)
fileformat = logging.Formatter("%(asctime)s: %(message)s", datefmt="[%x %H:%M:%S]")
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sth = logging.StreamHandler()
sth.setLevel(logging.INFO)
sth.setFormatter(stdoutformat)
log.addHandler(sth)
fhnd = logging.FileHandler(LOGNAME)
fhnd.setLevel(logging.DEBUG)
fhnd.setFormatter(fileformat)
log.addHandler(fhnd)
class TaxonomicLevel(Enum):
kingdom = 1
phylum = 2
_class = 3
order = 4
famliy = 5
genus = 6
species = 7
def which(file):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, file)):
return os.path.join(path, file)
return None
def make_output_dir(dirpath, strict=False):
"""Make an output directory if it doesn't exist
Returns the path to the directory
dirpath -- a string describing the path to the directory
strict -- if True, raise an exception if dir already
exists
"""
dirpath = abspath(dirpath)
# Check if directory already exists
if isdir(dirpath):
if strict:
err_str = "Directory '%s' already exists" % dirpath
raise IOError(err_str)
return dirpath
try:
os.makedirs(dirpath)
except IOError as e:
err_str = (
"Could not create directory '%s'. Are permissions set "
"correctly? Got error: '%s'" % (dirpath, e)
)
raise IOError(err_str)
return dirpath
def read_otutable(fh):
"""Read OTU table file"""
df = pd.read_csv(fh, sep="\t", header=0, index_col=0)
return df
def read_taxatable(fh):
"""Read taxa table from QIIME. Currently only work on Greengene's notation"""
alignment = ["kingdom", "phylum", "class", "order", "family", "genus", "species"]
predf = | pd.read_csv(fh, sep="\t", index_col=0, header=0) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from recipe_app import read_all_files, path # import the web-extracted data reader
from typing import List, Tuple
d_full = read_all_files(path)  # `path` is the directory holding the web-extracted CSV files;
# read_all_files reads every file there and concatenates them into one DataFrame
# Note: write read_files
d_full = d_full[d_full.Image.notna()]
# Cast as a list of values for calculating weights
text_data= d_full.ingredients_list.values.tolist() # The target columns should have text data
# Calculate TF-IDF matrix
def tf_idf(search_keys:str, data:List) -> Tuple:
"""calculate the tf-idf matrices for the vocabulary
and keyword matrix"""
tfidf_vectorizer = TfidfVectorizer()
tfidf_weights_matrix = tfidf_vectorizer.fit_transform(data)
search_query_weights = tfidf_vectorizer.transform([search_keys])
return search_query_weights, tfidf_weights_matrix
# Calculate the cosine similarity between search query and TF-IDF vectors
def cos_similarity(search_query_weights, tfidf_weights_matrix) -> np.ndarray:
"""find the cosine similarity between the vocabulary matrix and the keyword matrix"""
cosine_distance = cosine_similarity(search_query_weights, tfidf_weights_matrix)
similarity_list = cosine_distance[0]
return similarity_list
# Calculate number of relevant vectors
def calculate_num_vectors(cosine_similar):
"""check the number of non-zero vectos,
which has some similarity with keyword"""
num = 0
for i in cosine_similar:
if i != 0.0:
num += 1
return num
# Calculate the most relevant vectors
def most_similar(similarity_list: np.ndarray, N: int):
"""returns the most similar vectors in
descedning order of their similarity"""
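# note: the loop below zeroes out the current maximum of `similarity_list` in place,
# so pass a copy if the original scores are needed afterwards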
most_similar = []
while N > 0:
tmp_index = np.argmax(similarity_list)
most_similar.append(tmp_index)
similarity_list[tmp_index] = 0
N -= 1
return most_similar
# Create weights at specific index for quick retrieval
def create_matrix_dict(cosine_similarity):
matrix_dict = {}
iter = 0
for i in cosine_similarity:
matrix_dict[iter] = i
iter += 1
return matrix_dict
# -----------
# Return the recipes with relevant search term
def return_relevant_recipes(search_term):
# Create local variables
search, matrix = tf_idf(search_term, text_data)
cosine_sim_list = cos_similarity(search, matrix)
num_relevant_vectors = calculate_num_vectors(cosine_sim_list)
#dictionary = create_matrix_dict(cosine_sim_list)
list_of_most_similar = most_similar(cosine_sim_list, num_relevant_vectors)
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = | DataFrame({'x': [1.], 'y': [2.], 'z': [3.]}) | pandas.core.api.DataFrame |
# streamlit4.py
import streamlit as st
import pandas as pd
import numpy as np
import time
# import joblib
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import nltk #Natural language processing tool-kit
from nltk.corpus import stopwords #Stopwords corpus
from nltk.stem import PorterStemmer, WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# Ignore the warnings
import warnings
warnings.filterwarnings("ignore")
@st.cache(suppress_st_warning=True)
def data_clean1(data):
# Ensure that 1st column is text column
txt = data.iloc[:,0].name
data = data.drop_duplicates(subset=[txt], ignore_index=True)
data = data.dropna()
data.iloc[:,1] = data.iloc[:,1].replace({'I': 1, 'II': 2,'III': 3,'IV': 4,'V': 5,'VI':6})
return data
# @st.cache(suppress_st_warning=True)
def nlp_preprocess(sentence, stopwords,lemmer):
import nltk
import string
import re
sentence = sentence.lower() # Converting to lowercase
sentence = re.sub(r'\d+', '', sentence) # Removing numbers
sentence = re.sub(r'[^\w\s]', '', sentence)
#sentence = sentence.translate(string.maketrans('',''), string.punctuation) # Remove punctuations
sentence = ' '.join(sentence.split()) # Remove whitespaces
tokens = nltk.word_tokenize(sentence) # Create tokens
output = [i for i in tokens if not i in stopwords] # Remove stop words
words = [lemmer.lemmatize(word) for word in output] # Lemmatize words
sentence = ' '.join(words)
return sentence
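# Illustrative example (added for clarity, not part of the original app): the sample sentence
# and its expected output below are assumptions showing what nlp_preprocess typically returns.
#   stop_words = set(stopwords.words('english'))
#   lemmer = nltk.stem.WordNetLemmatizer()
#   nlp_preprocess("3 Workers were injured by the falling rocks!", stop_words, lemmer)
#   # -> 'worker injured falling rock'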
# @st.cache(suppress_st_warning=True)
def preprocess_func(train_text1,test_text1):
stop_words = set(stopwords.words('english'))
lemmer = nltk.stem.WordNetLemmatizer()
train_text2 = train_text1.apply(lambda x: nlp_preprocess(x,stop_words,lemmer))
test_text2 = test_text1.apply(lambda x: nlp_preprocess(x,stop_words,lemmer))
return train_text2, test_text2
def result_format(train_y,train_pred, test_y,test_pred):
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
a = accuracy_score(train_y, train_pred) # Train accuracy
b = accuracy_score(test_y, test_pred) # Test accuracy
c = f1_score(test_y, test_pred, average='weighted',zero_division=0) # weighted F1
d = f1_score(test_y, test_pred, average=None, zero_division=0) # list of F1s
d = [ round(elem, 2) for elem in d ]
e = precision_score(test_y, test_pred, average=None, zero_division=0) # Precision scores
e = [ round(elem, 2) for elem in e ]
f = recall_score(test_y, test_pred, average=None, zero_division=0) # Recall scores
f = [ round(elem, 2) for elem in f ]
return [a,b,c,d,e,f]
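# Descriptive note (added for clarity): result_format returns
# [train_accuracy, test_accuracy, weighted_f1, per_class_f1, per_class_precision, per_class_recall];
# each model helper below appends its elapsed training time as a seventh element.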
def get_results(x_train,x_test, train_y, test_y,mod,param):
#def get_results(train_y,train_pred, test_y,test_pred):
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import RandomizedSearchCV
try:
#s_split = StratifiedShuffleSplit(n_splits=3,test_size=0.2,random_state=108)
model = RandomizedSearchCV(mod,param_distributions=param,cv=5, scoring='f1_weighted', random_state=108)
except:
model = mod
model.fit(x_train, train_y)
train_pred = model.predict(x_train)
test_pred = model.predict(x_test)
scores = result_format(train_y,train_pred,test_y,test_pred)
return scores, model
# @st.cache(suppress_st_warning=True)
def r_f(x_train,x_test, y_train, y_test):
from sklearn.ensemble import RandomForestClassifier
runtime = time.time()
model = RandomForestClassifier(max_depth=4,max_features=4)
param = {'class_weight':['balanced'],
'random_state':[108],
'n_estimators':[100,200,300],
'max_depth':[5,7,9,11]
}
#rf.fit(x_train,y_train)
#scores = get_results(y_train,rf.predict(x_train),y_test,rf.predict(x_test))
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def LogR(x_train,x_test, y_train, y_test):
from sklearn.linear_model import LogisticRegression
param = {'penalty':['l1','l2'],
'C':[0.01,0.1,1,10,100],
'solver':['saga']}
model = LogisticRegression(max_iter=1000)
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def GNB(x_train,x_test, y_train, y_test):
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
param = {'var_smoothing':[10**-8,10**-9,10**-10]}
runtime = time.time()
x_train = x_train.toarray()
x_test = x_test.toarray()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def KNC(x_train,x_test, y_train, y_test):
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
param = {'n_neighbors':[5,9,13,17,21,25,29,33,35],
'weights':['distance']}
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def SVC(x_train,x_test, y_train, y_test):
from sklearn.svm import SVC
model = SVC()
param = {'C':range(1,100,10),
'gamma':[0.1, 0.5, 0.9, 1,2,3],
# 'class_weight':['balanced'],
"kernel" : ['rbf']
}
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def DTC(x_train,x_test, y_train, y_test):
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
param = {'criterion':['gini','entropy'],
'max_depth':[5,7,9,11],
'class_weight':['balanced']}
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def GBC(x_train,x_test, y_train, y_test):
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
param = {'learning_rate':[0.01,0.05,0.1,0.2],
'random_state':[108],
'n_estimators':[100,200]
}
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
def XGB(x_train,x_test, y_train, y_test):
    from xgboost import XGBClassifier
    from scipy.stats import uniform  # used in the learning_rate search space below
model = XGBClassifier(random_state=108)
param = { 'objective':['reg:squarederror', 'binary:logistic'],
#logistic regression for binary classification, output probability
'learning_rate': np.linspace(uniform.ppf(0.01), uniform.ppf(0.09), 20), # or `eta` value
'max_depth': [6,10, 20, 30], # avoid 0
'min_child_weight': [2,3],
'subsample': [0.8,0.9,1],
# 'colsample_bytree': [0.8,0.9,1]
}
runtime = time.time()
scores, model = get_results(x_train,x_test, y_train, y_test,model,param)
g = time.time() - runtime
scores.append(g)
return scores, model
# @st.cache(suppress_st_warning=True)
def n_n(x_train,x_test, y_train, y_test):
from tensorflow.keras.layers import Activation, Dense, Dropout, InputLayer, BatchNormalization
from tensorflow.keras import optimizers, initializers, losses
from tensorflow.keras.layers import Embedding, Dense, GlobalMaxPool1D, Bidirectional, LSTM, Dropout, SpatialDropout1D
from tensorflow.keras.models import Sequential
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
from tensorflow.keras.initializers import Constant
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping
runtime = time.time()
n_layers = 5
neurons = 20
act='relu'
bat=True
dout=True
drop=0.2
lr=0.001
in_shape = x_train.toarray().shape[1]
output = np.unique(y_train).shape[0]
#st.write(x_train[0])
clf = Sequential()
clf.add(InputLayer(input_shape=(in_shape,)))
for i in range(n_layers):
clf.add(Dense(neurons))
if bat:
clf.add(BatchNormalization())
clf.add(Activation(act))
if dout:
clf.add(Dropout(drop))
# neurons=neurons-50
clf.add(Dense(output, activation='softmax'))
adam = optimizers.Adam(learning_rate=lr)
    # labels from encode_labels() are integer-encoded, so use the sparse loss and take the
    # argmax of the softmax output before scoring
    clf.compile(optimizer=adam, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    history = clf.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, batch_size=5)
    train_pred = np.argmax(clf.predict(x_train), axis=1)
    test_pred = np.argmax(clf.predict(x_test), axis=1)
    scores = result_format(y_train, train_pred, y_test, test_pred)
g = time.time() - runtime
scores.append(g)
return scores, clf
# @st.cache(suppress_st_warning=True)
def lstm(x_train,x_test, y_train, y_test):
from tensorflow.keras.layers import Activation, Dense, Dropout, InputLayer, BatchNormalization
from tensorflow.keras import optimizers, initializers, losses
from tensorflow.keras.layers import Embedding, Dense, GlobalMaxPool1D, Bidirectional, LSTM, Dropout, SpatialDropout1D
from tensorflow.keras.models import Sequential
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
from tensorflow.keras.initializers import Constant
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping
runtime = time.time()
embed_dim = 128
lstm_out = 150
max_vocab = 10000
inp_len = x_train.toarray().shape[1]
out = np.unique(y_train).shape[0]
model = Sequential()
model.add(Embedding(max_vocab,embed_dim,input_length = inp_len))
model.add(SpatialDropout1D(0.6))
model.add(LSTM(lstm_out, dropout=0.5, recurrent_dropout=0.6))
# model.add(Dropout(0.5))
# model.add(LSTM(lstm_out-30, dropout=0.5, recurrent_dropout=0.5))
model.add(Dense(out,activation='softmax'))
    # integer-encoded labels: use the sparse loss and the argmax of the softmax output
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, batch_size=32)
    train_pred = np.argmax(model.predict(x_train), axis=1)
    test_pred = np.argmax(model.predict(x_test), axis=1)
    scores = result_format(y_train, train_pred, y_test, test_pred)
g = time.time() - runtime
scores.append(g)
return scores, model
def bilstm(x_train,x_test, y_train, y_test):
from tensorflow.keras.layers import Activation, Dense, Dropout, InputLayer, BatchNormalization
from tensorflow.keras import optimizers, initializers, losses
from tensorflow.keras.layers import Embedding, Dense, GlobalMaxPool1D, Bidirectional, LSTM, Dropout, SpatialDropout1D
from tensorflow.keras.models import Sequential
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
from tensorflow.keras.initializers import Constant
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping
runtime = time.time()
inp_len = x_train.toarray().shape[1]
out = np.unique(y_train).shape[0]
max_vocab = 10000
embed_dim = 128
biLSTM = Sequential()
biLSTM.add(Embedding(max_vocab, embed_dim,
input_length= inp_len))
biLSTM.add(Bidirectional(LSTM(units=32, recurrent_dropout = 0.5, dropout = 0.5,
return_sequences = True)))
biLSTM.add(Bidirectional(LSTM(units=16, recurrent_dropout = 0.5, dropout = 0.5)))
biLSTM.add(Dense(out, activation='softmax'))
    # integer-encoded labels: use the sparse loss and the argmax of the softmax output
    biLSTM.compile(optimizer=optimizers.Adam(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    history = biLSTM.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, batch_size=32)
    train_pred = np.argmax(biLSTM.predict(x_train), axis=1)
    test_pred = np.argmax(biLSTM.predict(x_test), axis=1)
    scores = result_format(y_train, train_pred, y_test, test_pred)
g = time.time() - runtime
scores.append(g)
return scores, biLSTM
# @st.cache(suppress_st_warning=True)
def tt_split(data,split):
from sklearn.model_selection import train_test_split
train_text,test_text,train_label,test_label = train_test_split(data.iloc[:,0],data.iloc[:,1],test_size = split/100,
stratify=data.iloc[:,1], random_state=42)
#st.write("\nTrain-test split done:")
return train_text,test_text,train_label,test_label
# @st.cache(suppress_st_warning=True)
def encode_labels(train_label,test_label):
# 5. One hot encoding of labels
from sklearn import preprocessing
lb = preprocessing.LabelEncoder()
#lb = preprocessing.LabelBinarizer()
y_train = lb.fit_transform(train_label)
y_test = lb.transform(test_label)
return y_train,y_test, lb
@st.cache(suppress_st_warning=True,allow_output_mutation=True)
def vectorize(Vect, train_text_,test_text_):
if Vect == 'tf-idf':
Vector = TfidfVectorizer()
elif Vect == 'CountVec':
Vector = CountVectorizer()
else:
        # no glove() helper is defined in this script, so fail with a clear message
        # instead of raising a NameError for unsupported options
        raise ValueError("Unsupported vectorizer option: {}".format(Vect))
x_train = Vector.fit_transform(train_text_)
x_test = Vector.transform(test_text_)
return Vector, x_train, x_test
def preprecess_chatbot(sentence,vector):
stop_words = set(stopwords.words('english'))
lemmer = nltk.stem.WordNetLemmatizer()
sentence1 = nlp_preprocess(sentence,stop_words,lemmer)
output = vector.transform([sentence1])
return output
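# Hedged usage sketch (illustrative only; the variable names below are assumptions): with the
# vectorizer returned by vectorize(), a trained classifier and the label encoder from
# encode_labels(), a single chatbot query could be classified roughly as:
#   x = preprecess_chatbot("worker hit by falling rock", vector)
#   level = lb.inverse_transform(model.predict(x))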
def prepare_trans_df(train_df,text,labels):
file_add = 'https://raw.githubusercontent.com/mhtkmr1/Industrial_safety_chatbot/main/Google_translated_data.csv'
google_translated = pd.read_csv(file_add)
google_translated.drop_duplicates(subset=[text],inplace=True, ignore_index=True) # remove duplicate rows
google_translated.set_index(text,inplace=True)
train_df = train_df.set_index(text)
clms = ['es', 'hi', 'it', 'Id', 'ja', 'he','ga', 'de', 'fr']
dtf = pd.concat([train_df, google_translated.loc[train_df.index,clms]],axis=1)
dtf.reset_index(inplace=True)
dtf1 = dtf[[text,labels]].copy(deep=True)
for lang in clms:
new_df = pd.DataFrame()
new_df[text] = dtf[lang]
new_df[labels] = dtf[labels]
dtf1 = pd.concat([dtf1, new_df],ignore_index=True)
#dtf1.drop_duplicates(subset=[text],inplace=True, ignore_index=True) # remove duplicate rows
# Function output contains some duplicates which can be removed later
return dtf1
def syn_augmentor(text_df,text,labels):
import nlpaug
import nlpaug.augmenter.char as nac
import nlpaug.augmenter.word as naw
import nlpaug.augmenter.sentence as nas
values = text_df[labels].value_counts().values
levels = text_df[labels].value_counts().index
augmented_sen = []
level = []
for i in range(1,len(levels)):
data = text_df[text_df[labels] == levels[i]]
for dt in data[text]:
sent = dt
for k in range(values[0]//values[i]):
if len(sent) < 10:
wrd_aug = naw.SynonymAug(aug_min=3)
sent = wrd_aug.augment(sent)
elif len(sent) > 10 and len(sent) < 25:
wrd_aug = naw.SynonymAug(aug_min=5)
sent = wrd_aug.augment(sent)
else:
wrd_aug = naw.SynonymAug(aug_min=8)
sent = wrd_aug.augment(sent)
augmented_sen.append(sent)
level.append(levels[i])
desc = pd.concat([text_df[text],pd.Series(augmented_sen)])
acc_lvl = pd.concat([text_df[labels], pd.Series(level)])
aug_df = pd.concat([desc,acc_lvl],axis=1)
aug_df.reset_index(drop=True, inplace=True)
aug_df.columns = [text,labels]
return aug_df
# @st.cache(suppress_st_warning=True,allow_output_mutation=True)
#def load_model():
#final_model = joblib.load('final_model.pkl')
#return final_model
#st.write("_" * 30)
# >>>>>>>>>>>>>>>>
def main():
st.set_page_config(layout='wide')
#st.write(st.session_state)
hide_menu_style = """
<style>
#MainMenu {visibility:hidden;}
footer {visibility:hidden;}
</style>
"""
st.markdown(hide_menu_style, unsafe_allow_html = True)
st.title('INDUSTRIAL SAFETY CHATBOT')
#separate into 3 columns
st.session_state.col_size = [1,2,3]
col11, col12, col13 = st.columns(st.session_state.col_size)
with col11:
st.header("STEPS")
st.write("**Step-1: Upload the data file:**")
# Provide button in sidebar to clear all cache and restart the app
#if st.sidebar.button('Press here to restart the app'):
# for key in st.session_state.keys():
# del st.session_state[key]
with col12:
st.header("INPUT")
uploaded_file = st.file_uploader(
label="Select and upload the csv file containing the data from your system:",
type="csv",
accept_multiple_files=False,
key = 'file',
        help='''Upload a csv file that contains your chatbot corpus.
required structure:
one column with text data or accident description
and another column with Accident levels or categories
first row = column headers
''')
uploaded_file1 = st.button('Press to use an example data from repo',key='uploaded_file1')
with col13:
st.header("OUTPUT AND STATUS")
if uploaded_file1 and not uploaded_file:
#st.write('value of uploaded_file1 parameter is: ',uploaded_file1)
file_url='https://raw.githubusercontent.com/mhtkmr1/Industrial_safety_chatbot/main/Data%20Set%20-%20industrial_safety_and_health_database_with_accidents_description.csv'
st.session_state.df = pd.read_csv(file_url)
with col13:
st.write('**NOTE: Currently the example file is being used**')
if uploaded_file:
#if 'df' in st.session_state: del st.session_state[df] # Remove df if already exists, generally in case of new file uploaded
st.session_state.df = pd.read_csv(uploaded_file)
with col13:
st.write('FILENAME: ', uploaded_file.name)
st.write('FILETYPE: ', uploaded_file.type)
if 'df' in st.session_state:
with col13:
st.success('File uploaded successfully')
st.write('**Shape of original data: **',st.session_state.df.shape)
#st.dataframe(st.session_state.df.head())
col11_, col12_ = st.columns([1,5])
with col12_:
st.write('**Preview of the uploaded dataframe**')
st.dataframe(st.session_state.df.head())
st.write("_" * 30)
if 'df' in st.session_state:
col21, col22, col23, col24 = st.columns([1,1,1,3])
columns = st.session_state.df.columns
with col21:
st.write("**Step-2: Select the columns with text and labels:**")
with col22:
text = st.radio('Select column with text data',(columns), key = 'text')
with col23:
labels = st.radio('Select column with labels',(columns), key = 'labels')
with col24:
st.write('Selected columns are:')
st.write('**Text column: **',st.session_state['text'])
st.write('**Label column: **',st.session_state['labels'])
st.write("_" * 30)
if 'text' in st.session_state and 'labels' in st.session_state:
col31, col32, col33 = st.columns(st.session_state.col_size)
with col31:
st.write("**Step-3: Perform Data cleaning:**")
with col32:
cleaning = st.button(label = ' Press here for Data cleaning and pre-processing', key = 'cleaning')
if cleaning:
for keys in ['y_train']: # remove all keys of importance to next step
if keys in st.session_state:
del st.session_state[keys]
st.session_state.df1 = data_clean1(st.session_state.df[[st.session_state.text,st.session_state.labels]]).copy(deep=True) # first column should be the text column
if 'df1' in st.session_state:
with col33:
st.success('Data cleaning is complete')
st.write('**Shape of dataframe after cleaning: **', st.session_state.df1.shape)
st.write('Preview of Dataframe after cleaning and removing duplicates from text column: ')
st.dataframe(st.session_state.df1.head())
st.write("_" * 30)
if 'df1' in st.session_state:
col41, col42, col43,col44 = st.columns([1,2,1.5,1.5])
with col41:
st.write("**Step-4: Train - Test split and encoding of labels**")
with col42:
test_split = st.slider('Select percentage of test data for test-train split?', min_value=5, max_value=50,value=20, step=5, key = 'test_split')
#test_split = st.number_input('Write the percentage of test data for test-train split? (example: write "20" for 20%)')
#if test_split is not None and test_split > 0:
with col43:
st.write("**Percentage of test split you entered: **", st.session_state.test_split,"%")
st.write(' ')
st.write(' ')
st.write('**Label distribution before train-test split:**')
distribution = st.session_state.df1.iloc[:,1].value_counts()
st.dataframe(distribution)
check_error = distribution.index[distribution <= 1].tolist()
with col42:
perform_split = st.button(label = 'Step - 3: Perform train-test split',key = 'perform_split')
if perform_split:
for keys in ['aug_train_text','y_train']: # remove all keys of importance to next step
if keys in st.session_state:
del st.session_state[keys]
if check_error:
                    st.error('Some of the labels have only one value, thus train-test split cannot be performed. Please change the input parameters accordingly')
else:
st.session_state.train_text,st.session_state.test_text,st.session_state.train_label,st.session_state.test_label =tt_split(st.session_state.df1,test_split)
# One hot encoding of labels
st.session_state.y_train, st.session_state.y_test, st.session_state.lb =encode_labels(st.session_state.train_label,st.session_state.test_label)
if 'y_train' in st.session_state:
with col44:
st.success('Train - Test split is complete')
st.write('**Shape of train data after stratified split: **',st.session_state.train_text.shape)
st.write('**Shape of test data after split: **',st.session_state.test_text.shape)
st.write('**Label distribution of test data after split:**')
st.dataframe(st.session_state.test_label.value_counts())
st.write("_" * 30)
if 'y_train' in st.session_state:
col51, col52, col53 = st.columns([1,2,3])
with col51:
st.write("**Step-5: Oversampling/augmentation of train data **")
with col52:
perform_oversampling = st.button(label = 'Step - 4: Perform Oversampling (only for train data)',key = 'perform_oversampling')
if perform_oversampling:
for keys in ['train_text1']: # remove all keys of importance to next step
if keys in st.session_state:
del st.session_state[keys]
# perform the Augmentation and avoid recalculation during program reruns
if 'aug_train_text' not in st.session_state:
with st.spinner('Augmentation in progress, please wait...'):
                        a = prepare_trans_df(pd.concat([st.session_state.train_text, st.session_state.train_label], axis=1),
                                             st.session_state.text, st.session_state.labels)
#!/usr/bin/env python3
"""
Pipeline for PANGAEA data, with custom NETCDF reading.
This script allows for data updates.
@author: giuseppeperonato
"""
import json
import logging
import os
import shutil
import sys
import frictionless
import numpy as np
import pandas as pd
import requests
import utilities
import xarray
from pyproj import CRS
# Constants
logging.basicConfig(level=logging.INFO)
Z = None
DT = 720
SEL = "MON"
EPSG = 4326 # source
# Settings for the query metadata
# these are the fields that are used to construct a query
QUERY_FIELDS = ["scale"] # empty list means all; None means do not use query fields.
# these are parameters that added to those automatically generated by the pipeline
QUERY_PARAMETERS = {
"temporal_granularity": "month",
"is_tiled": False,
"is_raster": True,
}
DB_URL = utilities.DB_URL
def prepareNETCDF(
df: pd.DataFrame,
crs: CRS = CRS.from_epsg(3035),
delete_orig: bool = False,
):
"""
Convert NetCDF into EnerMaps rasters (single band, GeoTiff, EPSG:3035).
Adapted to multi-dimensional NetCDF files as the ones from PANGAEA.
Parameters
----------
df : DataFrame.
Results of API extraction.
crs : pyproj.crs.CRS.
coordinate reference system.
delete_orig : bool, optional.
Set to True to delete original downloaded file (e.g. NetCDF).
Returns
-------
df : DataFrame
Results with schema for EnerMaps data table
"""
dicts = []
for i, row in df.iterrows():
filename_orig = row["value"]
logging.info(filename_orig)
xds = xarray.open_dataset(filename_orig)
if "crs" not in df.columns:
raise ValueError("Missing crs")
if "variable" not in df.columns:
raise ValueError("Missing variable")
xds.rio.write_crs(row["crs"].to_string(), inplace=True)
variable = row.variable
dims = list(xds[variable].dims)
if "lat" in dims:
dims.remove("lat")
if "lon" in dims:
dims.remove("lon")
def np_encoder(object):
"""Source: https://stackoverflow.com/a/65151218."""
if isinstance(object, np.generic):
return object.item()
def prepareFile(tmp_filename, dest_filename, my_dict, filename_orig):
"""Export raster."""
# Change day to 1st of the month, to be consistent across datasets
my_dict["start_at"] = my_dict["start_at"].replace(day=1)
if not os.path.exists(tmp_filename):
reprojected.rio.to_raster(tmp_filename)
dicts.append(my_dict)
# Compress
os.system( # nosec
"gdal_translate {filename} {dest_filename} -of GTIFF --config"
" GDAL_PAM_ENABLED NO -co COMPRESS=DEFLATE -co BIGTIFF=YES".format(
filename=tmp_filename, dest_filename=dest_filename
)
)
os.remove(tmp_filename)
return dicts
if len(dims) == 2:
for d0 in range(xds[variable][dims[0]].shape[0]):
for d1 in range(xds[variable][dims[1]].shape[0]):
my_dict = {}
dest_filename = "{}_{}_{}.tif".format(
filename_orig.split(".")[0], d0, d1
)
tmp_filename = "{}_{}_{}_tmp.tif".format(
filename_orig.split(".")[0], d0, d1
)
my_dict["fid"] = os.path.basename(dest_filename)
my_dict["variable"] = xds[variable][d0][d1].attrs["long_name"]
my_dict["unit"] = xds[variable][d0].attrs.get("units")
# Add extra fields
my_dict["fields"] = {
**xds.attrs, # at the dataset level
**xds[variable][d0][d1].attrs,
} # at the dimension level
for dim in dims:
if dim != "time": # add information about extra dimensions
my_dict["fields"][dim] = str(
xds[variable][d0][d1][dim].values
)
my_dict["fields"] = json.dumps(
my_dict["fields"], default=np_encoder
)
my_dict["israster"] = True
my_dict["start_at"] = pd.to_datetime(
xds[variable][d0][d1].time.values
)
date_future = my_dict["start_at"] + pd.DateOffset(months=1)
my_dict["dt"] = (
date_future - my_dict["start_at"]
).total_seconds() / 3600
# reproj
reprojected = xds[variable][d0][d1].rio.reproject(crs.to_string())
dicts = prepareFile(
tmp_filename, dest_filename, my_dict, filename_orig
)
elif len(dims) == 1:
for d0 in range(xds[variable][dims[0]].shape[0]):
my_dict = {}
dest_filename = "{}_{}.tif".format(filename_orig.split(".")[0], d0)
tmp_filename = "{}_{}_tmp.tif".format(filename_orig.split(".")[0], d0)
my_dict["fid"] = os.path.basename(dest_filename)
my_dict["variable"] = xds[variable][d0].attrs["long_name"]
my_dict["unit"] = xds[variable][d0].attrs.get("units")
# Add extra fields
my_dict["fields"] = {
**xds.attrs, # at the dataset level
**xds[variable][d0].attrs,
} # at the dimension level
my_dict["fields"] = json.dumps(my_dict["fields"], default=np_encoder)
my_dict["israster"] = True
my_dict["start_at"] = pd.to_datetime(xds[variable][d0].time.values)
date_future = my_dict["start_at"] + pd.DateOffset(months=1)
my_dict["dt"] = (
date_future - my_dict["start_at"]
).total_seconds() / 3600
# Reproject
if xds[variable][d0].dtype == "<m8[ns]":
# otherwise an error is thrown
reprojected = (
xds[variable][d0]
.astype(np.float32)
.rio.reproject(crs.to_string())
)
else:
reprojected = xds[variable][d0].rio.reproject(crs.to_string())
dicts = prepareFile(tmp_filename, dest_filename, my_dict, filename_orig)
else:
raise ValueError("Too many dimensions")
if delete_orig:
os.remove(filename_orig)
data = pd.DataFrame(
dicts,
columns=[
"start_at",
"fields",
"variable",
"value",
"ds_id",
"fid",
"dt",
"z",
"unit",
"israster",
],
)
return data
def prepare(dp: frictionless.package.Package):
"""
Prepare data in EnerMaps format.
Parameters
----------
dp : frictionless.package.Package
Valid datapackage
Returns
-------
DataFrame
Data in EnerMaps format.
"""
if not os.path.exists("tmp"):
os.mkdir("tmp")
for resource_idx, resource in enumerate(dp["resources"]):
file_list = resource["path"]
r = requests.get(file_list, stream=True)
lines = [line for line in r.iter_lines()]
skiprows = [ind for ind, i in enumerate(lines) if i.startswith(b"*/")][0]
files = pd.read_csv(file_list, skiprows=skiprows + 1, delimiter="\t")
files = files.loc[files["File name"].str.contains(SEL), :]
# Prepare df containing paths to rasters
rasters = []
for r, row in files.iterrows():
if not os.path.exists(os.path.join("tmp", row["File name"])):
logging.info("Downloading {}".format(row["File name"]))
utilities.download_url(
row["URL file"], os.path.join("tmp", row["File name"])
)
raster = {
"value": os.path.join("tmp", row["File name"]),
"start_at": pd.to_datetime(row["File name"].split("_")[6]),
"z": None,
"unit": None,
"dt": DT,
"crs": CRS.from_epsg(EPSG),
"variable": row["File name"].split("_")[0],
}
rasters.append(raster)
rasters = | pd.DataFrame(rasters) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import FunctionTransformer, StandardScaler, RobustScaler
from sklearn.preprocessing import Imputer, MultiLabelBinarizer
from sklearn.impute import SimpleImputer
from data_science_toolbox.pandas.profiling.data_types import df_binary_columns_list
from functools import reduce
import warnings
###############################################################################################################
# Custom Transformers from PyData Seattle 2017 Talk
###############################################################################################################
# Reference
# http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html
# https://github.com/jem1031/pandas-pipelines-custom-transformers
class DFFunctionTransformer(TransformerMixin):
# FunctionTransformer but for pandas DataFrames
def __init__(self, *args, **kwargs):
self.ft = FunctionTransformer(*args, **kwargs)
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
Xt = self.ft.transform(X)
Xt = pd.DataFrame(Xt, index=X.index, columns=X.columns)
return Xt
class DFFeatureUnion(BaseEstimator, TransformerMixin):
# FeatureUnion but for pandas DataFrames
def __init__(self, transformer_list):
self.transformer_list = transformer_list
def fit(self, X, y=None):
for (name, t) in self.transformer_list:
t.fit(X, y)
return self
def transform(self, X):
# assumes X is a DataFrame
Xts = [t.transform(X) for _, t in self.transformer_list]
        Xunion = reduce(lambda X1, X2: pd.merge(X1, X2, left_index=True, right_index=True), Xts)
        return Xunion
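

# Minimal usage sketch (added for illustration, not part of the original module): it only uses
# the classes defined above and shows that DFFeatureUnion preserves the pandas index/columns.
if __name__ == "__main__":
    X_demo = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
    union = DFFeatureUnion([
        ("identity", DFFunctionTransformer(func=lambda v: v, validate=False)),
    ])
    print(union.fit(X_demo).transform(X_demo))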
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_solvency.py
@time: 2019-01-28 11:33
"""
import gc, six
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
@six.add_metaclass(Singleton)
class FactorSolvency(object):
"""
    Solvency (debt-paying ability)
"""
def __init__(self):
__str__ = 'factor_solvency'
        self.name = '财务指标'  # 'Financial indicators'
        self.factor_type1 = '财务指标'  # 'Financial indicators'
        self.factor_type2 = '偿债能力'  # 'Solvency'
        self.description = '财务指标的二级指标-偿债能力'  # 'Secondary financial indicator - solvency'
@staticmethod
def BondsToAsset(tp_solvency, factor_solvency, dependencies=['bonds_payable', 'total_assets']):
"""
        :name: Bonds payable to total assets ratio
        :desc: Bonds payable (MRQ) / total assets (MRQ) * 100%
"""
management = tp_solvency.loc[:, dependencies]
management['BondsToAsset'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.bonds_payable.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def BookLev(tp_solvency, factor_solvency, dependencies=['total_non_current_liability', 'total_assets']):
"""
        :name: Book leverage
        :desc: Total non-current liabilities / total owners' equity incl. minority interests (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['BookLev'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.total_non_current_liability.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def CurrentRatio(tp_solvency, factor_solvency, dependencies=['total_current_assets', 'total_current_liability']):
"""
        :name: Current ratio
        :desc: Total current assets / total current liabilities (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['CurrentRatio'] = np.where(
CalcTools.is_zero(management.total_current_liability.values), 0,
management.total_current_assets.values / management.total_current_liability.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def DA(tp_solvency, factor_solvency, dependencies=['total_liability', 'total_assets']):
"""
        :name: Debt to total assets ratio
        :desc: Total liabilities (MRQ) / total assets (MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DA'] = np.where(
CalcTools.is_zero(contrarian['total_assets']), 0,
contrarian['total_liability'] / contrarian['total_assets'])
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def DTE(tp_solvency, factor_solvency,
dependencies=['total_liability', 'total_current_liability', 'fixed_assets']):
"""
        :name: Debt to tangible net worth ratio
        :desc: Total liabilities / tangible net worth (MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DTE'] = np.where(
CalcTools.is_zero(contrarian['total_current_liability'] + contrarian['fixed_assets']), 0,
contrarian['total_current_liability'] / (contrarian['total_current_liability'] + contrarian['fixed_assets'])
)
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def EquityRatio(tp_solvency, factor_solvency,
dependencies=['total_liability', 'equities_parent_company_owners']):
"""
        :name: Equity ratio
        :desc: Total liabilities / equity attributable to owners of the parent company (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
management['EquityRatio'] = management.apply(func, axis=1)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(management, factor_solvency, how='outer', on='security_code')
return factor_solvency
@staticmethod
def EquityPCToIBDebt(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable']):
"""
        :name: Equity attributable to parent company shareholders to interest-bearing debt
        :desc: Equity attributable to parent company shareholders / interest-bearing debt (interest-bearing debt = short-term loans + non-current liabilities due within one year + long-term loans + bonds payable + interest payable)
"""
management = tp_solvency.loc[:, dependencies]
management["debt"] = (management.shortterm_loan +
management.non_current_liability_in_one_year +
management.longterm_loan +
management.bonds_payable +
management.interest_payable)
management['EquityPCToIBDebt'] = np.where(
CalcTools.is_zero(management.debt.values), 0,
management.equities_parent_company_owners.values / management.debt.values)
dependencies = dependencies + ['debt']
management = management.drop(dependencies, axis=1)
factor_solvency = | pd.merge(factor_solvency, management, how='outer', on="security_code") | pandas.merge |
# %%
import pandas as pd
from collections import defaultdict
import pickle
from typing import DefaultDict
cmap_data = pickle.load(open("./cmap_transformer.pkl", "rb"))
mm_data = pickle.load(open("./mm_report_transformer.pkl", "rb"))
# %%
def convert_to_metric_first(data):
rows = defaultdict(dict)
for model, metrics in data.items():
for metric, values in metrics.items():
for i, value in enumerate(values):
rows[metric][model + f"_{i}"] = value
return rows
def save_to_csv(data, save_path):
df = | pd.DataFrame(data) | pandas.DataFrame |
# # # # # # # # # # # # # # # # # # # # # # # #
# #
# Module to run real time contingencies #
# By: <NAME> and <NAME> #
# 09-08-2018 #
# Version Alpha-0.1 #
# #
# Module inputs: #
# -> File name #
# # # # # # # # # # # # # # # # # # # # # # # #
import pandapower as pp
import pandas as pd
import json
import copy
import calendar
from time import time
import datetime
from inspyred import ec
import inspyred
import math
from random import Random
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Disconet_Asset(net,Asset_type,Asset_to_disc, Service=False):
net_lf = copy.deepcopy(net)
if Asset_type=='GEN': # Disconnect Generators
index = net_lf.sgen.loc[net_lf.sgen['name'] == Asset_to_disc].index[0]
net_lf.sgen.in_service[index] = Service
elif Asset_type=='TR': # Disconnect Transformers
index = net_lf.trafo.loc[net_lf.trafo['name'] == Asset_to_disc].index[0]
net_lf.trafo.in_service[index] = Service
elif Asset_type=='LN': # Disconnect Lines
index = net_lf.line.loc[net_lf.line['name'] == Asset_to_disc].index[0]
net_lf.line.in_service[index] = Service
elif Asset_type=='SW':
index = net_lf.switch.loc[net.switch['name'] == Asset_to_disc].index[0]
net_lf.switch.closed[index] = not Service
elif Asset_type=='LO':
index = net_lf.load.loc[net.load['name'] == Asset_to_disc].index[0]
net_lf.load.in_service[index] = Service
elif Asset_type=='BUS':
index = net_lf.bus.loc[net.bus['name'] == Asset_to_disc].index[0]
net_lf.bus.in_service[index] = Service
elif Asset_type=='ST':
index = net_lf.storage.loc[net.storage['name'] == Asset_to_disc].index[0]
net_lf.storage.in_service[index] = Service
else:
        print('Asset to disconnect does not exist')
return net_lf
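# Usage sketch (illustrative, not part of the original module; the asset name is an assumption):
#   net_n1 = Disconet_Asset(net, 'LN', 'LN-1')   # take line 'LN-1' out of service on a copy
#   pp.runpp(net_n1)                             # re-run the power flow on the modified copy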
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Network_Reconfiguration(net,strategy):
net_lf = copy.deepcopy(net)
for step in strategy:
l_sequence = strategy[step]
asset_type = l_sequence['Element_Type']
asset_to_disc = l_sequence['Element_Name']
net_lf = Disconet_Asset(net_lf,asset_type,asset_to_disc)
return net_lf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_Contingency_Strategies(File):
with open(File) as json_file:
data = json.load(json_file)
return data
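# Expected file layout (an assumption inferred from Network_Reconfiguration below): the JSON
# maps each strategy to numbered switching steps, e.g.
#   {"Strategy_1": {"1": {"Element_Type": "SW", "Element_Name": "SW-23"},
#                   "2": {"Element_Type": "LN", "Element_Name": "LN-7"}}}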
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Load_AM_Plan(File):
data = Load_Contingency_Strategies(File)
#with open(File) as json_file:
# data = json.load(json_file)
df = pd.DataFrame.from_dict(data, orient='index')
    df['Date'] = pd.to_datetime(df['Date'])
return df
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Function to return the daily load growth
def Load_Growth_By_Day(L_growth):
daily_growth = pow(1+L_growth, 1/365)-1 # Daily growth rate
def f_Load_Daily_Growth(ndays): # Daily growth rate fuction
return pow(1+daily_growth,ndays)
return f_Load_Daily_Growth
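# Worked example (added for clarity): with 2 % annual growth the returned closure compounds
# daily, so after one year it reproduces the annual rate exactly:
#   f_growth = Load_Growth_By_Day(0.02)
#   f_growth(365)   # -> 1.02
#   f_growth(730)   # -> 1.0404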
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Risk assessment
def Power_Risk_assessment(net,secure=1):
assessment = {}
load = net.res_load['p_mw'].fillna(0)*secure
load_base = net.load['p_mw']*net.load.scaling
assessment['Load'] = pd.DataFrame(
{'name':net.load.name,
'ENS':load_base - load,
'ES': load})
assessment['T_ES'] = load.sum()
assessment['T_ENS'] = load_base.sum()-load.sum()
gen_name = pd.concat([net.sgen.name, net.storage.name,net.ext_grid.name], ignore_index=True)
p_gen = pd.concat([net.res_sgen.p_mw, net.res_storage.p_mw,net.res_ext_grid.p_mw], ignore_index=True)
p_gen = p_gen.fillna(0)*secure
net.res_sgen['Type'] = 'D_Gen'
net.res_storage['Type'] = 'Storage'
net.res_ext_grid['Type'] = 'External'
p_source = pd.concat([net.res_sgen.Type, net.res_storage.Type,net.res_ext_grid.Type], ignore_index=True)
assessment['Gen'] = pd.DataFrame(
{'name':gen_name,
'source': p_source,
'gen':p_gen})
assessment['purchased_E'] = secure*net.res_ext_grid['p_mw'].values[0]
# Delta of energy suplied
p_gen_base = pd.concat([net.sgen.p_mw, net.storage.p_mw], ignore_index=True)
p_gen_actual = pd.concat([net.res_sgen.p_mw, net.res_storage.p_mw], ignore_index=True)
assessment['Delta_E'] = secure*(p_gen_base.sum()-p_gen_actual.sum())
return assessment
# Function for get the contingency analysis
def ContingencyAnalysis(Netw):
OverloadLines =[]
OverLoadTrafos =[]
OverVoltageBuses=[]
AllOverloads =[]
# Bring all Buses
for bus in Netw.res_bus.iterrows():
indexb = bus[0]
vm_pu = bus[1].vm_pu
        # From the energized buses, select those whose voltage in p.u. lies outside the 0.9-1.1 band
#if (vm_pu >1.1 or vm_pu < 0.9) and vm_pu !=nan:
if (vm_pu >1.1 or vm_pu < 0.9):
# Generate the list of results in a dictionary
temp_data = {'Name': Netw.bus.name[indexb],
'Type': 'BU',
'Serial': '0',
'Mag': vm_pu}
OverVoltageBuses.append(temp_data)
for trafo in Netw.res_trafo.iterrows():
indext = trafo[0]
loadingt = trafo[1].loading_percent
        # Select the energized transformers whose loading exceeds 100%
if loadingt>100:
# Generate the list of results in a dictionary
temp_data = {'Name': Netw.trafo.name[indext],
'Type': 'TR',
'Serial': '1',
'Mag': loadingt/100}
OverLoadTrafos.append(temp_data)
for line in Netw.res_line.iterrows():
indexl = line[0]
loadingl = line[1].loading_percent
        # Select the energized lines whose loading exceeds 100%
if loadingl>100:
# Generate the list of results in a dictionary
temp_data = {'Name': Netw.line.name[indexl],
'Type': 'LN',
'Serial': '2',
'Mag': loadingl/100}
OverloadLines.append(temp_data)
    # Combine the results of all elements into a single list
AllOverloads=OverVoltageBuses+OverloadLines+OverLoadTrafos
df =pd.DataFrame(AllOverloads)
# Define the order of the variables in the dictionary
if not df.empty:
df =df[['Serial','Name','Type','Mag']]
df.set_index(['Serial'], inplace=True)
return df
def Load_Net_Pandapower(data_file,pp_case=None):
# data_file -> File name which contains network data, if file name is none by default is load the CIGRE model
if data_file==None:
import pandapower.networks as pn
if pp_case==None:
net = pn.create_cigre_network_mv(with_der=False)
elif pp_case == 'case33bw':
net = pn.case33bw()
load_name = []
s_val = []
for index, row in net.load.iterrows():
load_name.append('load_'+str(row.bus))
s_val.append(math.sqrt(row.p_mw**2+row.q_mvar**2))
net.load.name = load_name
            net.load.sn_mva = s_val  # apparent power magnitude per load
line_name = []
for index, row in net.line.iterrows():
line_name.append('line_'+str(row.from_bus)+'_'+str(row.to_bus))
net.line.name = line_name
else:
if pp_case=='json':
net = pp.from_json(data_file)
else:
# Import network data using excel
data = pd.read_excel(open(data_file, 'rb'), sheet_name='DATA')
# Create Network
net = pp.create_empty_network(name = data.loc[0,'Name'],f_hz =data.loc[0,'f'],sn_mva=data.loc[0,'sb_mva'])
# # # # # # # # # # # # # # # # # # Load elements # # # # # # # # # # #
# Buses
net.bus = pd.read_excel(open(data_file, 'rb'), sheet_name='BUS')
# Lines
net.line = pd.read_excel(open(data_file, 'rb'), sheet_name='LINE')
# Load
net.load = pd.read_excel(open(data_file, 'rb'), sheet_name='LOAD')
# External grid
df = pd.read_excel(open(data_file, 'rb'), sheet_name='EXT_GRID')
if not df.empty:
net.ext_grid = df
# Generators
df = pd.read_excel(open(data_file, 'rb'), sheet_name='GEN')
if not df.empty:
net.gen = df
# Static generators
df = pd.read_excel(open(data_file, 'rb'), sheet_name='SGEN')
if not df.empty:
net.sgen = df
# Transformers
df = pd.read_excel(open(data_file, 'rb'), sheet_name='TRAFO')
if not df.empty:
net.trafo = df
# 3 winding transformer
df = pd.read_excel(open(data_file, 'rb'), sheet_name='TRAFO3W')
if not df.empty:
net.trafo3w = df
# SWITCHES
df = pd.read_excel(open(data_file, 'rb'), sheet_name='SWITCH')
if not df.empty:
net.switch = df
# Shunt element
df = pd.read_excel(open(data_file, 'rb'), sheet_name='SHUNT')
if not df.empty:
net.shunt = df
return net # Return network
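# Minimal usage sketch (illustrative; 'my_network.xlsx' is a hypothetical path and
# must contain the sheets read above: DATA, BUS, LINE, LOAD, EXT_GRID, ...):
#   net = Load_Net_Pandapower(None)     # default CIGRE MV network without DER
#   pp.runpp(net)                       # results land in net.res_bus, net.res_line, ...
#   net_xls = Load_Net_Pandapower('my_network.xlsx', pp_case=None)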
# Function to compute the 24-hour load forecast for a given day
def Forecating_Data(net_lf,file,today):
from Load_Historic_Load import Load_Historical_Data
data = pd.read_excel(open(file, 'rb'), sheet_name='LOAD_TAGS') # Sheet with loads tags
data = data.set_index('Name')
load_names = net_lf.load['name']
df_col_name = ['Name','Hour','Val']
hour = list(range(24))
df = pd.DataFrame(columns=df_col_name)
df_by_load = pd.DataFrame()
for loads in load_names: # Forecast model for each load
tag = data.loc[loads]['TAG'] # Tag ID
base = data.loc[loads]['Base'] # Power base
test = Load_Historical_Data(tag,base) # Load historical data
        day_data = test.days[today] # Day to analyze
f_forecast = day_data.Filt.fitt.Load_Forecast_by_Day # Function fitted
# Load forecasting
l_t0 = day_data.i_rms[-1][0] # Initial load, at time 0
load_forec = f_forecast(l_t0,1) # Load forecasting result
# update dataframe
df_by_load['Val'] = list(load_forec) # Assign data frame values
df_by_load['Hour'] = hour
df_by_load['Name'] = loads
df = pd.concat([df,df_by_load],sort=True)
return df
# Function to build load forecasts for every weekday (Fourier fit)
def Fourier_Fit(file):
from Load_Historic_Load import Load_Historical_Data
data = pd.read_excel(open(file, 'rb'), sheet_name='LOAD_TAGS') # Sheet with loads tags
data = data.set_index('Name')
#load_names = net_lf.load['name']
df_col_name = ['Name','Hour','Val','Day']
hour = list(range(24))
df = pd.DataFrame(columns=df_col_name)
df_by_load = pd.DataFrame()
for loads in data.index: # Forecast model for each load
tag = data.loc[loads]['TAG'] # Tag ID
base = data.loc[loads]['Base'] # Power base
hist_data = Load_Historical_Data(tag,base) # Load historical data
for day in list(calendar.day_name): # Eval each week day
            day_data = hist_data.days[day] # Day to analyze
f_forecast = day_data.Filt.fitt.Load_Forecast_by_Day # Function fitted
# Load forecasting
l_t0 = day_data.i_rms[-1][0] # Initial load, at time 0
load_forec = f_forecast(l_t0,1) # Load forecasting result
# update dataframe
df_by_load['Val'] = list(load_forec) # Assign data frame values
df_by_load['Hour'] = hour
df_by_load['Name'] = loads
df_by_load['Day'] = day
df = pd.concat([df,df_by_load],sort=True)
return df
# Function to allocate asset list
def Make_Asset_List(file):
df = pd.read_excel(open(file, 'rb'), sheet_name='ASSETS') # Sheet with loads tags
df = df.set_index('Name')
return df
# Function to load user data by load
def User_Data_List(file,sheet='LOAD_TAGS'):
df = pd.read_excel(open(file, 'rb'), sheet_name=sheet) # Sheet with loads tags
df = df.set_index('Name')
df = df.drop(columns=['TAG', 'Base'])
return df
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
class Real_Time_Contingencies:
# Main file
def __init__(self,data_file,pp_case=None):
try:
# Net data
self.net = Load_Net_Pandapower(data_file['net_file'],pp_case)
self.load_forecast = Fourier_Fit(data_file['load_data'])
self.asset_list = Make_Asset_List(data_file['portfolio_source'])
self.load_user = User_Data_List(data_file['load_data']) # Users data by load
self.gen_data = User_Data_List(data_file['load_data'],sheet='GEN_TAGS') # Generation data
self.Cont_Strategies = Load_Contingency_Strategies(data_file['cont_stra'])
self.AM_Plan = Load_AM_Plan(data_file['AM_Plan'])
self.N_Users = self.load_user['N_Users'].sum()
except:
self.cont_df = pd.DataFrame()
print('Error running contingencies')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Network configuration during the contingency
def Net_Configurarion_during_Contingency(self,net,Asset_id, Service=False):
net_lf = copy.deepcopy(net)
# Contingency strategy
strategy_id = self.asset_list.loc[Asset_id].Strategy
if strategy_id in self.Cont_Strategies.keys():
net_lf = Network_Reconfiguration(net_lf,self.Cont_Strategies[strategy_id])
asset_type = self.asset_list.loc[Asset_id].Disc_Type
asset_to_disc = self.asset_list.loc[Asset_id].Asset_To_Disconet
net_lf = Disconet_Asset(net_lf,asset_type,asset_to_disc)
return net_lf
# Function to filter forecast data frame
def Forecast_Val_By_Day_By_Hour(self,Day,Hour):
DF = self.load_forecast
df = DF[DF.Hour==Hour]
df = df[df.Day==Day]
df = df.drop(columns = ['Hour','Day'])
df = df.set_index('Name')
return df
    # Update network loads with the forecast (optionally applying load shedding)
def Update_Net_With_Load_Forecast(self,Net,DF,*load_shed):
l_net = copy.deepcopy(Net)
load_factor = []
for ind,row in l_net.load.iterrows():
load_name =row['name']
lf = DF.loc[load_name]['Val']
try:
if not load_shed==():
l_shed_factor = load_shed[0][load_name]
else:
l_shed_factor = 1
except:
l_shed_factor = 1
lf = lf*l_shed_factor
load_factor.append(lf)
#print(load_factor)
cond_new = (l_net.load['p_mw']*load_factor).sum()
cond_base = l_net.load['p_mw'].sum()
# Generation load factor
g_f = cond_new/cond_base
l_net.load['p_mw'] = l_net.load['p_mw']*load_factor
l_net.load['q_mvar'] = l_net.load['q_mvar']*load_factor
l_net.gen['p_mw'] = l_net.gen['p_mw']*g_f
return l_net
# Load growth function
def Load_Growth_Update(self,growth_rate):
self.f_growth_rate = Load_Growth_By_Day(growth_rate)
# Run Non-Contingencies case
#-> def Run_Case_Load_growth(self,net,L_growth,date_beg,hour=0,opt_load_sheeding=False):
def Run_Case_Load_growth(self,net,L_growth,hour=0,day=None):
ndays = datetime.timedelta(hours=hour).days
self.Load_Growth_Update(L_growth)
growth_rate = self.f_growth_rate(ndays)
return self.Run_Case_Base(net,growth_rate,day_list=day)
# Run Non-Contingencies case
def Run_Case_Base(self,net,growth_rate=1,opt_load_sheeding=False,day_list=None):
        df_sec = pd.DataFrame() # Dataframe with the security margin results
        df_load = pd.DataFrame() # Dataframe with the load forecast
        cont_assessment = {} # Dictionary with the contingency assessment
        cr_assessment = {} # Dictionary with the criticality assessment
if day_list==None:
day_list = list(calendar.day_name)
for day in day_list: # Loop for each day
cont_assessment_by_hour = {} # Contingency assessment by hour
cr_assessment_by_hour = {} # Criticality assessment by hours
for hour in range(24): # Loop for each hour
df_load_forecast = self.Forecast_Val_By_Day_By_Hour(day,hour) #Load forecast filtered
net_lf = self.Update_Net_With_Load_Forecast(net,df_load_forecast) # Update network with forecast
net_lf.load.scaling = growth_rate
net_lf.gen.scaling = growth_rate
try:
pp.runpp(net_lf) # Run load flow with pandapower
lf_error = False
except:
print('Error running load flow')
lf_error = True
if opt_load_sheeding:
dave = RTC(net_lf)
net_lf = dave.Main_Load_Shedding_Opt()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # Security margins dataframe # # # # # # # #
# Lines data frame
df_temp_0 = pd.DataFrame()
df_temp_0['Loading'] = net_lf.res_line['loading_percent']
df_temp_0['Load'] = (net_lf.res_line['p_from_mw']**2+net_lf.res_line['q_from_mvar']**2).pow(1./2)
df_temp_0['Name'] = net_lf.line['name']
df_temp_0['Type'] = 'LN'
# Transformer data frame
df_temp = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from random import seed
RANDOM_SEED = 54321
seed(RANDOM_SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(RANDOM_SEED)
def load_spambase_data():
# input vars
data_name = 'spambase'
raw_data_file = os.path.join(os.path.dirname(__file__), 'Spambase.csv')
processed_file = os.path.join(os.path.dirname(__file__), 'Spambase-processed.csv')
##### Spambase Data Processing
raw_df = | pd.read_csv(raw_data_file) | pandas.read_csv |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = | offsets.Minute(50) | pandas.tseries.offsets.Minute |
#-*- coding: utf-8 -*-
# Threshold optimization
import numpy as np
import pandas as pd
inputfile = '../data/water_heater.xls' # Input data path; an Excel file is required
n = 4 # Use the average slope of the following four points
threshold = pd.Timedelta(minutes=5) # Expert-defined threshold
data = pd.read_excel(inputfile)
data[u'发生时间'] = pd.to_datetime(data[u'发生时间'], format='%Y%m%d%H%M%S')
data = data[data[u'水流量'] > 0] # Keep only records with a water flow greater than 0
def event_num(ts):
    d = data[u'发生时间'].diff() > ts # Difference adjacent timestamps and check whether each gap exceeds the threshold
    return d.sum() + 1 # Every above-threshold gap starts a new event, so this directly gives the event count
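# Sanity check of the splitting rule on synthetic timestamps (illustrative only,
# not taken from the water-heater data): two gaps longer than 5 minutes => 3 events.
_demo = pd.Series(pd.to_datetime([
    '2015-01-01 08:00:00', '2015-01-01 08:00:10',    # event 1
    '2015-01-01 09:00:00',                           # event 2
    '2015-01-01 12:30:00', '2015-01-01 12:31:00']))  # event 3
assert (_demo.diff() > pd.Timedelta(minutes=5)).sum() + 1 == 3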
dt = [pd.Timedelta(minutes=i) for i in np.arange(1, 9, 0.25)]
h = | pd.DataFrame(dt, columns=[u'阈值']) | pandas.DataFrame |
"""This module is dedicated to helpers for the DeepDAO class"""
import pandas as pd
def unpack_dataframe_of_lists(df_in: pd.DataFrame) -> pd.DataFrame:
"""Unpacks a dataframe where all entries are list of dicts
Parameters
----------
df_in: pd.DataFrame
input DataFrame
Returns
-------
DataFrame
        formatted pandas DataFrame
"""
df_list=[]
for column_name in df_in.columns:
sub_df = df_in[column_name]
tmp_df_list=[]
for entry in sub_df:
if isinstance(entry, list):
tmp_df = pd.DataFrame(entry)
tmp_df_list.append(tmp_df)
reorg_df = pd.concat(tmp_df_list)
reorg_df.reset_index(drop=True, inplace=True) # Reset indexes so there are no repeats
df_list.append(reorg_df)
df_out = | pd.concat(df_list, keys=df_in.columns, axis=1) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = | pd.period_range("1/1/2000", freq="Q", periods=3) | pandas.period_range |
#!/usr/bin/env python3
"""Universal kernel blocks"""
import re
import os
import time
import datetime as dt
import numpy as np
import scipy as ss
import pandas as pd
import requests
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn import metrics
from numba import jit
###################################################################################################
# constants
###################################################################################################
PROJECT_DIR = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
SRC_PATH = PROJECT_DIR + '/src/'
DATASETS_PATH = PROJECT_DIR + '/datasets/'
DATASETS_ORIGINAL_PATH = DATASETS_PATH + 'original/'
DATASETS_DEV_PATH = DATASETS_PATH + 'dev/'
DATASETS_PRED_PATH = DATASETS_PATH + 'predictions/'
###################################################################################################
# resources optimization
###################################################################################################
def reduce_mem_usage(df, verbose=True):
"""
Reduce memory costs of df via changing numeric column types to more efficient ones
Takes a lot of time, try only once
"""
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
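# Usage sketch (illustrative; 'train.csv' is a hypothetical file name):
#   train = pd.read_csv(DATASETS_ORIGINAL_PATH + 'train.csv')
#   train = reduce_mem_usage(train)   # prints the achieved memory reduction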
@jit
def fast_auc(y_true, y_prob):
"""
fast roc_auc computation: https://www.kaggle.com/c/microsoft-malware-prediction/discussion/76013
"""
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
nfalse = 0
auc = 0
n = len(y_true)
for i in range(n):
y_i = y_true[i]
nfalse += (1 - y_i)
auc += y_i * nfalse
auc /= (nfalse * (n - nfalse))
return auc
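# Spot check (illustrative): with y_true=[0, 0, 1, 1] and y_prob=[0.1, 0.4, 0.35, 0.8]
# this returns 0.75, the same value sklearn.metrics.roc_auc_score gives for these inputs.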
def eval_auc(y_true, y_pred):
"""
Fast auc eval function for lgb.
"""
return 'auc', fast_auc(y_true, y_pred), True
def group_mean_log_mae(y_true, y_pred, types, floor=1e-9):
"""
Fast metric computation for this competition: https://www.kaggle.com/c/champs-scalar-coupling
Code is from this kernel: https://www.kaggle.com/uberkinder/efficient-metric
"""
maes = (y_true-y_pred).abs().groupby(types).mean()
return np.log(maes.map(lambda x: max(x, floor))).mean()
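# In words: the MAE is computed separately for every coupling type, floored at
# `floor`, log-transformed and then averaged, so each type contributes equally
# to the score regardless of how many rows it has.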
###################################################################################################
# descibe & visualise
###################################################################################################
def resumetable(df):
"""
Table about table
"""
print(f"Dataset Shape: {df.shape}")
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Name'] = summary['index']
summary = summary[['Name', 'dtypes']]
summary['Missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
summary['First Value'] = df.loc[0].values
summary['Second Value'] = df.loc[1].values
summary['Third Value'] = df.loc[2].values
for name in summary['Name'].value_counts().index:
summary.loc[summary['Name'] == name, 'Entropy'] = \
round(ss.stats.entropy(
df[name].value_counts(normalize=True), base=2), 2)
return summary
###################################################################################################
# preprocessing
###################################################################################################
def my_csv_read(csv_file):
"""Solve function pickle issues
https://stackoverflow.com/questions/8804830/
python-multiprocessing-picklingerror-cant-pickle-type-function
"""
return pd.read_csv(csv_file)
def get_floats_from_string(string_to_parse):
"""finds all float numbers in string"""
res_list = re.findall(r"[-+]?\d*\.\d+|\d+", string_to_parse)
return res_list
def none_or_first(list_to_get):
"""gets first element of list of None"""
if list_to_get:
return list_to_get[0]
else:
return None
def clean_inf_nan(df):
"""nan instead of inf"""
return df.replace([np.inf, -np.inf], np.nan)
def add_datetime_info(df_trans, ts_column='TransactionDT', start_date=dt.datetime(2017, 12, 1)):
"""adds _Weekdays, _Hours, _Days columns to df
Args:
df_trans (DataFrame):
With timestamp column.
ts_column (string):
            Column with seconds (offset from start_date, or a unix timestamp if start_date is None).
start_date (datetime):
Starting point if ts_column has no full timestamp
Returns:
df_trans (DataFrame):
With 4 additional columns
"""
if start_date:
df_trans["_Date"] = df_trans[ts_column].apply(lambda x:
(start_date + dt.timedelta(seconds=x)))
else:
df_trans["_Date"] = df_trans[ts_column].apply(
dt.datetime.fromtimestamp)
df_trans['_Weekdays'] = df_trans['_Date'].dt.dayofweek
df_trans['_Hours'] = df_trans['_Date'].dt.hour
df_trans['_Days'] = df_trans['_Date'].dt.day
df_trans.drop(['_Date'], axis=1, inplace=True)
return df_trans
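# Worked example (synthetic value): 90061 seconds after the default start date of
# 2017-12-01 00:00 is 2017-12-02 01:01:01, so the row gets _Days=2, _Hours=1 and
# _Weekdays=5 (Saturday).
#   demo = add_datetime_info(pd.DataFrame({'TransactionDT': [90061]}))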
def correct_card_id(x):
"""Just replacement of characters"""
x = x.replace('.0', '')
x = x.replace('-999', 'NNNN')
while len(x) < 4:
x += 'N'
return x
def add_card_id(df):
"""Apply correct_card_id to df columns"""
cards_cols = ['card1', 'card2', 'card3', 'card5']
for card in cards_cols:
if '1' in card:
df['Card_ID'] = df[card].map(str)
else:
df['Card_ID'] += ' ' + df[card].map(str)
return df
def drop_columns_nan_null(df_look,
keep_cols,
drop_proportion=0.9):
""" drop columns with lots of nans or without values """
one_value_cols = [
col for col in df_look.columns if df_look[col].nunique() <= 1]
many_null_cols = [col for col in df_look.columns if
df_look[col].isnull().sum() / df_look.shape[0] > drop_proportion]
big_top_value_cols = [col for col in df_look.columns if
df_look[col].value_counts(dropna=False, normalize=True).
values[0] > drop_proportion]
cols_to_drop = list(set(many_null_cols +
big_top_value_cols +
one_value_cols
))
for keep_col in keep_cols:
if keep_col in cols_to_drop:
cols_to_drop.remove(keep_col)
# for col in cols_to_drop:
# if col in df_drop:
# df_drop.drop([col], axis=1, inplace=True)
# print(len(cols_to_drop), ' columns were removed because of nulls and NaNs')
# print(f'dropped ones: {cols_to_drop}')
return cols_to_drop
def drop_columns_corr(df_look,
keep_cols,
drop_threshold=0.98):
"""drop columns with high correlation
"""
# Absolute value correlation matrix
corr_matrix = df_look[df_look['isFraud'].notnull()].corr().abs()
# Getting the upper triangle of correlations
upper = corr_matrix.where(np.array(np.triu(np.ones(corr_matrix.shape), k=1)).astype(np.bool))
# Select columns with correlations above threshold
cols_to_drop = [column for column in upper.columns if any(upper[column] > drop_threshold)]
for keep_col in keep_cols:
if keep_col in cols_to_drop:
cols_to_drop.remove(keep_col)
# df_drop.drop(cols_to_drop, axis=1, inplace=True)
# for col in cols_to_drop:
# if col in df_drop:
# df_drop.drop([col], axis=1, inplace=True)
# print(len(cols_to_drop), ' columns were removed because of high corr')
# print(f'dropped ones: {cols_to_drop}')
return cols_to_drop
###################################################################################################
# training model
###################################################################################################
def train_model_regression(X, X_test, y, params, folds=None, model_type='lgb',
eval_metric='mae', columns=None,
plot_feature_importance=False, model=None,
verbose=10000, early_stopping_rounds=200,
n_estimators=50000, splits=None, n_folds=3):
"""
A function to train a variety of regression models.
Returns dictionary with oof predictions, test predictions,
scores and, if necessary, feature importances.
:params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: y - target
:params: folds - folds to split data
:params: model_type - type of model to use
:params: eval_metric - metric to use
:params: columns - columns to use. If None - use all columns
:params: plot_feature_importance - whether to plot feature importance of LGB
:params: model - sklearn model, works only for "sklearn" model type
"""
columns = X.columns if columns is None else columns
X_test = X_test[columns]
# check for different Kfolds
if str(type(folds)) == "<class 'sklearn.model_selection._split.StratifiedKFold'>":
splits = folds.split(X, y)
elif str(type(folds)) == "<class 'sklearn.model_selection._split.TimeSeriesSplit'>":
splits = folds.split(X) if splits is None else splits
else:
splits = folds.split(X) if splits is None else splits
n_splits = folds.n_splits if splits is None else n_folds
# to set up scoring parameters
metrics_dict = {'mae': {'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'sklearn_scoring_function': metrics.mean_absolute_error},
'group_mae': {'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'scoring_function': group_mean_log_mae},
'mse': {'lgb_metric_name': 'mse',
'catboost_metric_name': 'MSE',
'sklearn_scoring_function': metrics.mean_squared_error}
}
result_dict = {}
# out-of-fold predictions on train data
oof = np.zeros(len(X))
# averaged predictions on train data
prediction = np.zeros(len(X_test))
# list of scores on folds
scores = []
feature_importance = | pd.DataFrame() | pandas.DataFrame |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
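# build the expected MultiIndex by hand: the outer level records which piece
# each row came from (key 0, 1 or 2), the inner level keeps the original timestamps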
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = DataFrame(
{
"A": Series(["a", "b", "c", np.nan], dtype="category"),
"B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_concat_empty_df_object_dtype():
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = Series(SparseArray([0, 1, 2]))
expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
# GH 30668
a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
b = Series([1], dtype=float)
expected = Series(data=[1, None, 1], index=[0, 1, 0])
#!/usr/bin/python3
# coding: utf-8
import sys
import os.path
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# get_ipython().run_line_magic('matplotlib', 'inline')
# plt.close('all')
# dpi = 300
# figsize = (1920 / dpi, 1080 / dpi)
from plotHitMissUnkRate import plotHitMissUnkRate
def getExamplesDf(path):
assert os.path.isfile(path), "file '%s' not found." % path
df = pd.read_csv(filepath_or_buffer=path, header=None)
return df  # assumption: hand the loaded frame back to the caller
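# Minimal usage sketch (an assumption, not part of the original script): load a
# result CSV given on the command line and report its shape. The default file
# name and the print format are illustrative only.
if __name__ == '__main__':
    examplesPath = sys.argv[1] if len(sys.argv) > 1 else 'examples.csv'
    examplesDf = getExamplesDf(examplesPath)
    print('loaded %d examples with %d columns' % examplesDf.shape)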
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
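# one day as a numpy timedelta; used below to check reductions wrapped with to_timedelta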
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
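# each record is one row of a structured array laid out as
# (id, col, idx, some_field1, some_field2): 'col' maps the record to a column of
# the wrapper defined below and 'idx' to a row within it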
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
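# taking every third record and re-concatenating scrambles the column order on
# purpose, giving the is_sorted()/sort() tests an unsorted counterpart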
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
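# four flavours of the same fixture: plain, grouped (columns a/b -> g1, c/d -> g2),
# column-unsorted, and unsorted+grouped; column 'd' intentionally has no records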
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
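# 'mapping' relabels each distinct mapped value as 'test_<value>'; the mp_* variants
# carry that mapping into the value_counts() and stats() tests further down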
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
| pd.Index(['g1', 'g1'], dtype='object') | pandas.Index |
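# --- Illustrative sketch (added, not part of the original test suite) ---
# The fixtures referenced above (example_dt, records_arr) are assumed to look roughly like
# the following; field names and values are inferred from test_records_readable and
# test_indexing above (np is already imported in this module), so treat this as a sketch
# of the conftest, not the real thing.
example_dt_sketch = np.dtype([
    ('id', np.int64), ('col', np.int64), ('idx', np.int64),
    ('some_field1', np.float64), ('some_field2', np.float64)
])
records_arr_sketch = np.array([
    (0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
    (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.),
    (6, 2, 0, 12., 19.), (7, 2, 1, 11., 20.), (8, 2, 2, 10., 21.)
], dtype=example_dt_sketch)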
"""
trees_matplotlib_seaborn.py
An extension of trees.py using the matplotlib and seaborn libraries.
"""
import datetime as dt
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
st.title("SF Trees")
st.write(
"This app analyzes San Francisco's tree data provided by the SF DPW."
)
# Collect the data from the CSV file:
trees_df = pd.read_csv("trees.csv")
# Determine the age of each tree + add to the dataframe:
trees_df['age'] = (pd.to_datetime('today') -
| pd.to_datetime(trees_df['date']) | pandas.to_datetime |
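# --- Hedged continuation sketch (added): the snippet above is truncated at the age column ---
# One plausible way to finish the age calculation and visualize it; only the 'date' and 'age'
# column names come from the code above, the .dt.days conversion and the seaborn histogram
# are assumptions about how the script continues.
trees_df['age'] = (pd.to_datetime('today') - pd.to_datetime(trees_df['date'])).dt.days
fig, ax = plt.subplots()
sns.histplot(trees_df['age'].dropna(), ax=ax)
ax.set_xlabel('Tree age (days)')
st.pyplot(fig)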
from dotmap import DotMap
from model.BLRPRx import *
from calendar import month_abbr
from datetime import timedelta as td
from datetime import datetime as dt
from datetime import datetime
from utils.utils import *
from utils.stats_calculation import *
import numpy as np
import pandas as pd
import os
from sampling.mergeCells import *
from sampling.sampling import *
from fitting import fitting, objectiveFunction
import warnings, yaml
args = yaml.load(open('./config/default.yaml'), Loader=yaml.FullLoader)
args = DotMap(args)
args.sampling.start_time = dt.strptime(args.sampling.start_time, '%Y-%m-%d')
args.sampling.end_time = dt.strptime(args.sampling.end_time, '%Y-%m-%d')
total_sample_sta_prop = []
num_month = | pd.read_csv(args.IO.stats_file_path,index_col=0, header=0) | pandas.read_csv |
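# --- Illustrative sketch (added): the DotMap + strptime pattern used above, demonstrated on an
# inline dict instead of config/default.yaml; the keys mirror the fields parsed above, the
# dates are made up.
_cfg_sketch = DotMap({'sampling': {'start_time': '2000-01-01', 'end_time': '2000-12-31'}})
_cfg_sketch.sampling.start_time = dt.strptime(_cfg_sketch.sampling.start_time, '%Y-%m-%d')
_cfg_sketch.sampling.end_time = dt.strptime(_cfg_sketch.sampling.end_time, '%Y-%m-%d')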
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from statsmodels.tsa.seasonal import STL, seasonal_decompose
# from numpy.typing import ArrayLike
ArrayLike = Union[np.ndarray, Sequence[float]]
Figsize = Tuple[int, int]
def _identity(x: ArrayLike) -> ArrayLike:
return x
class TimeSeriesDecomposition:
"""Model class for Time Series Decomposition.
This class provides utilities to decompose an input time series into trend, seasonal, and residual components.
Pass specific arguments to seasonal_decompose and STL functions via kwargs.
Attributes:
data: the input time series data as `TimeSeriesData`
decomposition: `additive` or `multiplicative` decomposition
method: `STL decomposition` or `seasonal_decompose`
"""
freq: Optional[str] = None
results: Optional[Dict[str, TimeSeriesData]] = None
def __init__(
self,
data: TimeSeriesData,
decomposition: str = "additive",
method: str = "STL",
**kwargs,
) -> None:
if not isinstance(data.value, pd.Series):
msg = f"Only support univariate time series, but got {type(data.value)}."
logging.error(msg)
raise ValueError(msg)
self.data = data
if decomposition in ("additive", "multiplicative"):
self.decomposition = decomposition
else:
logging.info(
"Invalid decomposition setting specified; "
"defaulting to Additive Decomposition."
)
self.decomposition = "additive"
if method == "seasonal_decompose":
self.method = self.__decompose_seasonal
else:
if method != "STL":
logging.info(
f"""Invalid decomposition setting {method} specified.
Possible Values: STL, seasonal_decompose.
Defaulting to STL."""
)
self.method = self.__decompose_STL
## The following are params for the STL Module
self.period = kwargs.get("period", None)
self.seasonal = kwargs.get("seasonal", 7)
self.trend = kwargs.get("trend", None)
self.low_pass = kwargs.get("low_pass", None)
self.seasonal_deg = kwargs.get("seasonal_deg", 1)
self.trend_deg = kwargs.get("trend_deg", 1)
self.low_pass_deg = kwargs.get("low_pass_deg", 1)
self.robust = kwargs.get("robust", False)
self.seasonal_jump = kwargs.get("seasonal_jump", 1)
self.trend_jump = kwargs.get("trend_jump", 1)
self.low_pass_jump = kwargs.get("low_pass_jump", 1)
def __clean_ts(self) -> pd.DataFrame:
"""Internal function to clean the time series.
Internal function to interpolate time series and infer frequency of
time series required for decomposition.
"""
original = pd.DataFrame(
list(self.data.value), index=pd.to_datetime(self.data.time), columns=["y"]
)
if | pd.infer_freq(original.index) | pandas.infer_freq |
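# --- Usage sketch (added, illustrative): how the class above is typically driven.
# The entry-point name (decomposer) is an assumption based on the Kats API and is not shown
# in this excerpt; ts would be a univariate TimeSeriesData.
# decomp = TimeSeriesDecomposition(ts, decomposition="additive", method="STL", period=12)
# results = decomp.decomposer()   # expected to hold trend / seasonal / residual components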
"""Clean, bundle and create API to load KSSL data
The KSSL database is provided as a Microsoft Access database designed
as an OLTP. The purposes of this module are: (i) to export all tables
as independent .csv files to make it platform independent; (ii) to
make it amenable to multi-dimensional analytical queries (OLAP);
(iii) to provide an API for easy loading of the dataset as numpy arrays.
For further information on KSSL database contact:
* https://www.nrcs.usda.gov/wps/portal/nrcs/main/soils/research/
"""
import subprocess
from pathlib import Path
from .base import select_rows, chunk
from spectrai.core import get_kssl_config
import pandas as pd
import numpy as np
import re
import opusFC # Ref.: https://stuart-cls.github.io/python-opusfc-dist/
from tqdm import tqdm
DATA_KSSL, DATA_NORM, DATA_SPECTRA, DB_NAME = get_kssl_config()
def access_to_csv(in_folder=None, out_folder=DATA_NORM, db_name=DB_NAME):
"""Exports KSSL '.accdb' tables to individual '.csv' files.
Linux-like OS only as depends on 'mdbtools'
https://github.com/brianb/mdbtools
Parameters
----------
in_folder: string, optional
Specify the path of the folder containing the '.accdb' KSSL file
out_folder: string, optional
Specify the path of the folder that will contain exported tables
db_name: string, optional
Specify name of the KSSL Microsoft Access database
Returns
-------
None
"""
in_folder = Path(in_folder)
out_folder = Path(out_folder)
if not in_folder.exists():
raise IOError('in_folder not found.')
if not out_folder.exists():
out_folder.mkdir(parents=True)
script_name = Path(__file__).parent / 'scripts/access2csv.sh'
out = subprocess.run([script_name, in_folder / DB_NAME, out_folder])
if out.returncode == 0:
print('KSSL tables exported successfully to .csv files.')
else:
raise OSError('Execution of access2csv.sh failed.')
def _get_layer_analyte_tbl():
"""Returns relevant clean subset of `layer_analyte.csv` KSSL DB table.
Notes
----
Only `master_prep_id` values relevant to MIRS analysis are selected.
`calc_value` is stored as `str` because it can contain non-numeric
entries such as 'slight' or '1:2'; only numeric values are kept.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'layer_analyte.csv', low_memory=False) \
.dropna(subset=['analyte_id', 'calc_value']) \
.pipe(select_rows, {
'master_prep_id': lambda d: d in [18, 19, 27, 28],
'calc_value': lambda d: re.search(r'[a-zA-Z]|:|\s', str(d)) is None}) \
.loc[:, ['lay_id', 'analyte_id', 'calc_value']] \
.astype({'calc_value': float})
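# --- Illustrative check (added) of the calc_value filter used above; the sample values are
# made up. Entries containing letters, ':' or whitespace (e.g. 'slight', '1:2') are dropped,
# plain numerics pass through and can then be cast to float.
_calc_value_examples = ['0.35', 'slight', '1:2', '12']
_kept = [v for v in _calc_value_examples if re.search(r'[a-zA-Z]|:|\s', str(v)) is None]
# _kept == ['0.35', '12']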
def _get_layer_tbl():
"""Returns relevant clean subset of `analyte.csv` KSSL DB table.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'layer.csv', low_memory=False) \
.loc[:, ['lay_id', 'lims_pedon_id', 'lims_site_id', 'lay_depth_to_top']] \
.dropna() \
.astype({'lims_pedon_id': 'int32', 'lims_site_id': 'int32'})
def _get_sample_tbl():
"""Returns relevant clean subset of `sample.csv` KSSL DB table.
Notes
----
Only `smp_id` values > 1000, which are the ones relevant to MIRS analysis, are selected.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'sample.csv', low_memory=False) \
.pipe(select_rows, {'smp_id': lambda d: d > 1000}) \
.loc[:, ['smp_id', 'lay_id']]
def _get_mirs_det_tbl(valid_name=['XN', 'XS']):
"""Returns relevant clean subset of `mir_scan_det_data.csv` KSSL DB table.
Notes
----
Only rows whose `scan_path_name` contains a valid substring (`['XN', 'XS']` by default) are kept.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'mir_scan_det_data.csv', low_memory=False) \
.dropna(subset=['scan_path_name', 'mir_scan_mas_id']) \
.loc[:, ['mir_scan_mas_id', 'scan_path_name']] \
.pipe(select_rows, {
'scan_path_name': lambda d: re.search(r'X.', str(d))[0] in valid_name})
def _get_mirs_mas_tbl():
"""Returns relevant clean subset of `mir_scan_mas_data.csv` KSSL DB table.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'mir_scan_mas_data.csv', low_memory=False) \
.loc[:, ['smp_id', 'mir_scan_mas_id']]
def _get_lookup_smp_id_scan_path():
"""Returns relevant clean subset of `mir_scan_mas_data.csv` KSSL DB table.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.merge(_get_mirs_mas_tbl(), _get_mirs_det_tbl(), on='mir_scan_mas_id', how='inner') \
.loc[:, ['smp_id', 'scan_path_name']] \
.astype({'smp_id': int, 'scan_path_name': 'string'})
def build_analyte_dim_tbl(out_folder=DATA_KSSL):
"""Builds/creates analyte_dim dim table (star schema) for KSSL dataset
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
df = pd.read_csv(DATA_NORM / 'analyte.csv') \
.loc[:, ['analyte_id', 'analyte_name', 'analyte_abbrev', 'uom_abbrev']]
df.to_csv(out_folder / 'analyte_dim_tbl.csv', index=False)
return df
def build_taxonomy_dim_tbl(out_folder=DATA_KSSL):
"""Returns relevant subset of `lims_ped_tax_hist.csv` KSSL DB table
Notes
----
The same `lims_pedon_id` can appear several times (one row per classification version).
Only `taxonomic_classification_type` = `'sampled as'` should be considered.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
df = pd.read_csv(DATA_NORM / 'lims_ped_tax_hist.csv') \
.pipe(select_rows, {'taxonomic_classification_type': lambda d: d == 'sampled as'}) \
.loc[:, ['lims_pedon_id', 'taxonomic_order', 'taxonomic_suborder',
'taxonomic_great_group', 'taxonomic_subgroup']]
df.to_csv(out_folder / 'taxonomy_dim_tbl.csv', index=False)
return df
def build_location_dim_tbl(out_folder=DATA_KSSL):
pass
def build_sample_analysis_fact_tbl(out_folder=DATA_KSSL):
"""Builds/creates sample_analysis fact table (star schema) for KSSL dataset
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
df = pd.merge(
pd.merge(_get_layer_tbl(), _get_sample_tbl(), on='lay_id'),
_get_layer_analyte_tbl(), on='lay_id')
df.to_csv(out_folder / 'sample_analysis_fact_tbl.csv', index=False)
return df
def build_kssl_star_tbl():
"""Builds/creates star schema version of the KSSL DB"""
print('Building analyte_dim_tbl...')
build_analyte_dim_tbl()
print('Building taxonomy_dim_tbl...')
build_taxonomy_dim_tbl()
print('Building spectra_dim_tbl...')
bundle_spectra_dim_tbl()
print('Building sample_analysis_fact_tbl...')
build_sample_analysis_fact_tbl()
print('Success!')
def export_spectra(in_folder=None, out_folder=DATA_KSSL,
nb_decimals=4, max_wavenumber=4000, valid_name=['XN', 'XS'], nb_chunks=1):
"""Exports KSSL MIRS spectra into a series of .csv files
Parameters
----------
in_folder: string, optional
Specify the path of the folder containing the KSSL MIRS spectra
out_folder: string, optional
Specify the path of the folder that will contain exported files
nb_decimals: int, optional
Specify floating point precision (to save memory)
max_wavenumber: int, optional
Specify the max wavenumber to be considered in spectra
valid_name: list of str, optional
Specify valid spectra file names
nb_chunks: int, optional
Specify tne number of chunks/files to be created
Returns
-------
None
"""
in_folder = Path(in_folder)
out_folder = Path(out_folder)
if not in_folder.exists():
raise IOError('in_folder not found.')
if not out_folder.exists():
out_folder.mkdir(parents=True)
columns = None
valid_files = [f for f in in_folder.rglob('*.0')
if re.search(r'X.', f.name)[0] in valid_name]
for (l_bound, u_bound) in list(chunk(len(valid_files), nb_chunks)):
columns = None
rows_list = []
for i, f in enumerate(tqdm(valid_files[l_bound:u_bound])):
dbs = opusFC.listContents(f)
if dbs:
data = opusFC.getOpusData(str(f), dbs[0])
row = [f.name] + list(data.y[data.x <= max_wavenumber])
rows_list.append(row)
if columns is None:
columns = list((data.x[data.x <= max_wavenumber]).astype(int))
df = pd.DataFrame(rows_list, columns=['id'] + list(columns))
df = df.round(nb_decimals)
df.to_csv(out_folder / 'spectra_{}_{}.csv'.format(l_bound, u_bound-1), index=False)
def bundle_spectra_dim_tbl(in_folder=DATA_SPECTRA, out_folder=DATA_KSSL, with_replicates=False):
"""Creates MIRS spectra dimension table of new KSSL star-like schema
Parameters
----------
in_folder: string, optional
Specify the path of the folder containing the KSSL MIRS spectra
out_folder: string, optional
Specify the path of the folder that will contain exported files
with_replicates: boolean, optional
Specify whether to include spectra replicates (averaged otherwise)
Returns
-------
Pandas DataFrame
Spectra dimension table
"""
all_files = list(in_folder.glob('*.csv'))
li = []
columns = None
for filename in tqdm(all_files):
if columns is None:
columns = pd.read_csv(filename).columns
df = pd.read_csv(filename, header=None, skiprows=1)
df.columns = columns
df = _get_lookup_smp_id_scan_path() \
.merge(df, left_on='scan_path_name', right_on='id', how='inner') \
.drop(['id', 'scan_path_name'], axis=1)
if not with_replicates:
df = df.groupby('smp_id').mean()
li.append(df)
df = pd.concat(li)
df = df.reset_index()
print('Writing spectra_dim_tbl.csv...')
df.to_csv(out_folder / 'spectra_dim_tbl.csv', index=False)
return df.reset_index()
def load_spectra(in_folder=DATA_KSSL):
"""Loads Spectra dimension table"""
return pd.read_csv(in_folder / 'spectra_dim_tbl.csv') \
.drop_duplicates(subset='smp_id', keep=False)
def load_taxonomy(in_folder=DATA_KSSL):
"""Loads taxonomy dimension table
Notes
----
'mollisols' order is sometimes misspelled ('mollisol'), so it is fixed here
"""
return pd.read_csv(in_folder / 'taxonomy_dim_tbl.csv') \
.replace({'mollisol': 'mollisols'})
def get_tax_orders_lookup_tbl(order_to_int=True):
"""Returns a lookup table of taxonomic order names and respective ids"""
df = load_taxonomy()
orders = df['taxonomic_order'].unique()
idx = range(len(orders))
key_values = zip(orders, idx)
if not order_to_int:
key_values = zip(idx, orders)
return dict(key_values)
def load_fact_tbl(in_folder=DATA_KSSL):
return pd.read_csv(in_folder / 'sample_analysis_fact_tbl.csv')
def load_analytes(in_folder=DATA_KSSL, like=None):
return | pd.read_csv(in_folder / 'analyte_dim_tbl.csv') | pandas.read_csv |
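# --- Illustrative join over the star schema built above (added sketch) ---
# Toy frames stand in for load_spectra() / load_fact_tbl(), which need the real KSSL files;
# the column names (smp_id, analyte_id, calc_value) come from the code above, the values do not.
_spec_toy = pd.DataFrame({'smp_id': [1, 2], '4000': [0.10, 0.20], '3998': [0.30, 0.40]})
_fact_toy = pd.DataFrame({'smp_id': [1, 2], 'analyte_id': [7, 7], 'calc_value': [1.2, 3.4]})
_joined_toy = _spec_toy.merge(_fact_toy[['smp_id', 'calc_value']], on='smp_id', how='inner')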
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# four representative days, one in each season
winter_day = '01-15'
spring_day = '04-15'
summer_day = '07-15'
fall_day = '10-15'
# define a function to plot household profile and battery storage level
def plot_4days(mode, tmy_code, utility, year, c_cost):
df = pd.read_csv('/Users/jiajiazheng/Box/Suh\'s lab/GSRs/Jiajia/3-Residential Solar-plus-storage/Results/'
'optimized/minCost_%(mode)s_%(year)s_cc_%(c_cost)s/'
'optimal_minCost_%(tmy_code)s_%(utility)s_%(year)s_cc_%(c_cost)s.csv'
% {'year': year, 'mode': mode, 'tmy_code': tmy_code, 'utility': utility, 'c_cost': c_cost},
index_col=0)
df.rename(columns={'Time': 'Hour'}, inplace=True)
s = df['Hour'].str.split()
df['Hour'] = | pd.to_datetime(s.str[0], format="%m/%d") | pandas.to_datetime |
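# --- Hedged continuation sketch (added): the body of plot_4days is truncated above ---
# The four representative days defined at the top of the file are presumably used to slice the
# parsed 'Hour' column; the column plotted and the layout below are assumptions.
# days = {'Winter': winter_day, 'Spring': spring_day, 'Summer': summer_day, 'Fall': fall_day}
# fig, axes = plt.subplots(1, 4, figsize=(16, 3), sharey=True)
# for ax, (season, day) in zip(axes, days.items()):
#     sub = df[df['Hour'].dt.strftime('%m-%d') == day]
#     ax.plot(range(len(sub)), sub.iloc[:, 1])
#     ax.set_title(season)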
from sklearn.metrics import accuracy_score
import pandas as pd
import joblib
from sklearn.tree import DecisionTreeClassifier
import sys
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
def PClassification(name, clf, loadFilename=False):
# Dataset
c = dt.shape[1] - 1
check_data = dt.iloc[:,:c]
check_answ = dt.iloc[:,c]
# Load model
if loadFilename: clf = joblib.load(loadFilename)
# Prediction
preds = pd.Series(clf.predict(check_data), name='preds')
reals = | pd.Series(check_answ, name='reales') | pandas.Series |
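# --- Hedged continuation sketch (added): accuracy_score is imported above but never reached in
# this truncated excerpt; a likely next step inside PClassification would be
# acc = accuracy_score(reals, preds)
# print('%s accuracy: %.3f' % (name, acc))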
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = | Series([True, False, False, True, False], index=s.index) | pandas.Series |
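# --- Illustrative note (added, not part of the pandas test suite): Series.where keeps values
# where the condition holds and fills NaN elsewhere, so dropping the NaNs recovers plain
# boolean indexing. Toy check:
_s_demo = Series([1.0, -2.0, 3.0])
assert_series_equal(_s_demo.where(_s_demo > 0).dropna(), _s_demo[_s_demo > 0])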
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Purpose: Uses the statement data to train a classifier and use the classifier
for the prediction of the alternatives in the bluebook from 1988-2008
Here: standard classifier as RF, MN logitic regression, SVM
Status: Draft
Author: olivergiesecke
"""
###############################################################################
### Import packages
import pandas as pd
import re
import os
from io import StringIO
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
###############################################################################
## Define function to reshape the sentences in the bluebooks
def transform_merged(merge_class_d_e):
for alt in ['a','b','c','d','e']:
merge_class_d_e['C_TREATMENT_SIZE_alt_'+ alt] = ""
merge_class_d_e['Sentences_alt_'+alt] = ""
sentence_columns = []
for sentence_num in range(1, 12):
col_name = 'Sentence_' + str(sentence_num) + "_alt_" + alt
if col_name in merge_class_d_e and type(merge_class_d_e[col_name]=="str"):
sentence_columns.append(col_name)
#merge_class_d_e['Sentences_alt_'+alt] = merge_class_d_e[sentence_columns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
for index,row in merge_class_d_e.iterrows():
sentence_content = []
for sentence_col in sentence_columns:
sentence = row[sentence_col]
pattern = "(alternatives?\s+[^"+alt+"])([^a-z])"
if type(sentence)==str and not re.search(pattern,sentence,re.IGNORECASE):
# and not re.search(pattern,sentence,re.IGNORECASE)
sentence_content.append(sentence)
merge_class_d_e.at[index,'Sentences_alt_'+alt] = ' '.join(sentence_content)
return merge_class_d_e
## Get bluebooks alternatives and statements in one file
def get_merged_data():
### Open the csv with the statements (note: this is the manual statement file)
data= | pd.read_csv("../data/statements_text_extraction_cleaned.csv") | pandas.read_csv |
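# --- Hedged sketch (added) of the pipeline described in the module docstring; the actual
# training code is truncated above. The column name follows transform_merged; the label
# vector y (hand-coded policy alternatives) is not shown in this excerpt.
# vectorizer = TfidfVectorizer(stop_words='english')
# X = vectorizer.fit_transform(merged['Sentences_alt_a'].fillna(''))
# clf = LogisticRegression(multi_class='multinomial', max_iter=1000)
# scores = cross_val_score(clf, X, y, cv=5)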
# fmt: off
import numpy as np
import pandas as pd
import h5py
import scipy.signal
import shutil
import skimage as sk
import os
import pickle
import sys
import h5py_cache
import copy
import pickle as pkl
from parse import compile
from time import sleep
from distributed.client import futures_of
import dask.dataframe as dd
import dask.delayed as delayed
from skimage import filters
from .trcluster import hdf5lock
from .utils import multifov,pandas_hdf5_handler,writedir
from tifffile import imread
class kymograph_cluster:
def __init__(self,headpath="",trenches_per_file=20,paramfile=False,all_channels=[""],trench_len_y=270,padding_y=20,trench_width_x=30,use_median_drift=False,\
invert=False,y_percentile=85,y_min_edge_dist=50,smoothing_kernel_y=(1,9),y_percentile_threshold=0.2,\
top_orientation=0,expected_num_rows=None,alternate_orientation=True,orientation_on_fail=None,x_percentile=85,background_kernel_x=(1,21),\
smoothing_kernel_x=(1,9),otsu_scaling=1.,min_threshold=0,trench_present_thr=0.):
if paramfile:
parampath = headpath + "/kymograph.par"
with open(parampath, 'rb') as infile:
param_dict = pickle.load(infile)
all_channels = param_dict["All Channels"]
trench_len_y = param_dict["Trench Length"]
padding_y = param_dict["Y Padding"]
trench_width_x = param_dict["Trench Width"]
use_median_drift = param_dict['Use Median Drift?']
# t_range = param_dict["Time Range"]
invert = param_dict["Invert"]
y_percentile = param_dict["Y Percentile"]
y_min_edge_dist = param_dict["Minimum Trench Length"]
smoothing_kernel_y = (1,param_dict["Y Smoothing Kernel"])
y_percentile_threshold = param_dict['Y Percentile Threshold']
top_orientation = param_dict["Orientation Detection Method"]
expected_num_rows = param_dict["Expected Number of Rows (Manual Orientation Detection)"]
alternate_orientation = param_dict['Alternate Orientation']
orientation_on_fail = param_dict["Top Orientation when Row Drifts Out (Manual Orientation Detection)"]
x_percentile = param_dict["X Percentile"]
background_kernel_x = (1,param_dict["X Background Kernel"])
smoothing_kernel_x = (1,param_dict["X Smoothing Kernel"])
otsu_scaling = param_dict["Otsu Threshold Scaling"]
min_threshold= param_dict['Minimum X Threshold']
trench_present_thr = param_dict["Trench Presence Threshold"]
self.headpath = headpath
self.kymographpath = self.headpath + "/kymograph"
self.hdf5path = self.headpath + "/hdf5"
self.all_channels = all_channels
self.seg_channel = self.all_channels[0]
self.metapath = self.headpath + "/metadata.hdf5"
self.meta_handle = pandas_hdf5_handler(self.metapath)
self.trenches_per_file = trenches_per_file
# self.t_range = t_range
self.invert = invert
#### important paramaters to set
self.trench_len_y = trench_len_y
self.padding_y = padding_y
ttl_len_y = trench_len_y+padding_y
self.ttl_len_y = ttl_len_y
self.trench_width_x = trench_width_x
self.use_median_drift = use_median_drift
#### params for y
## parameter for reducing signal to one dim
self.y_percentile = y_percentile
self.y_min_edge_dist = y_min_edge_dist
## parameters for threshold finding
self.smoothing_kernel_y = smoothing_kernel_y
self.y_percentile_threshold = y_percentile_threshold
###
self.top_orientation = top_orientation
self.expected_num_rows = expected_num_rows
self.alternate_orientation = alternate_orientation
self.orientation_on_fail = orientation_on_fail
#### params for x
## parameter for reducing signal to one dim
self.x_percentile = x_percentile
## parameters for midpoint finding
self.background_kernel_x = background_kernel_x
self.smoothing_kernel_x = smoothing_kernel_x
## parameters for threshold finding
self.otsu_scaling = otsu_scaling
self.min_threshold = min_threshold
## New
self.trench_present_thr = trench_present_thr
self.output_chunk_shape = (1,1,self.ttl_len_y,(self.trench_width_x//2)*2)
self.output_chunk_bytes = (2*np.multiply.accumulate(np.array(self.output_chunk_shape))[-1])
self.output_chunk_cache_mem_size = 2*self.output_chunk_bytes
self.kymograph_params = {"trench_len_y":trench_len_y,"padding_y":padding_y,"ttl_len_y":ttl_len_y,\
"trench_width_x":trench_width_x,"y_percentile":y_percentile,"invert":invert,\
"y_min_edge_dist":y_min_edge_dist,"smoothing_kernel_y":smoothing_kernel_y,\
"y_percentile_threshold":y_percentile_threshold,\
"top_orientation":top_orientation,"expected_num_rows":expected_num_rows,"alternate_orientation":alternate_orientation,\
"orientation_on_fail":orientation_on_fail,"x_percentile":x_percentile,\
"background_kernel_x":background_kernel_x,"smoothing_kernel_x":smoothing_kernel_x,\
"otsu_scaling":otsu_scaling,"min_x_threshold":min_threshold,"trench_present_thr":trench_present_thr}
def median_filter_2d(self,array,smoothing_kernel):
"""Two-dimensional median filter, with average smoothing at the signal
edges in the second dimension (the non-time dimension).
Args:
array (array): 2-dimensional signal of shape (t,y) to be smoothed.
smoothing_kernel (tuple): A tuple of ints specifying the kernel under which
the median will be taken.
Returns:
array: Median-filtered 2 dimensional signal.
"""
kernel = np.array(smoothing_kernel) #1,9
kernel_pad = kernel//2 + 1 #1,5
med_filter = scipy.signal.medfilt(array,kernel_size=kernel)
start_edge = np.mean(med_filter[:,kernel_pad[1]:kernel[1]])
end_edge = np.mean(med_filter[:,-kernel[1]:-kernel_pad[1]])
med_filter[:,:kernel_pad[1]] = start_edge
med_filter[:,-kernel_pad[1]:] = end_edge
return med_filter
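# Illustrative note (added): for the default (1, 9) kernel this behaves like a running median
# over each row, except that the first and last kernel//2 + 1 columns are replaced by nearby
# means so the zero-padding used by scipy.signal.medfilt does not drag the edges toward zero.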
def get_smoothed_y_percentiles(self,file_idx,y_percentile,smoothing_kernel_y):
"""For each imported array, computes the percentile along the x-axis of
the segmentation channel, generating a (y,t) array. Then performs
median filtering of this array for smoothing.
Args:
imported_hdf5_handle (h5py.File): Hdf5 file handle corresponding to the input hdf5 dataset
"data" of shape (channel,y,x,t).
y_percentile (int): Percentile to apply along the x-axis.
smoothing_kernel_y (tuple): Kernel to use for median filtering.
Returns:
h5py.File: Hdf5 file handle corresponding to the output hdf5 dataset "data", a smoothed
percentile array of shape (y,t).
"""
with h5py_cache.File(self.hdf5path+"/hdf5_"+str(file_idx)+".hdf5","r",chunk_cache_mem_size=self.metadata["chunk_cache_mem_size"]) as imported_hdf5_handle:
img_arr = imported_hdf5_handle[self.seg_channel][:] #t x y
if self.invert:
img_arr = sk.util.invert(img_arr)
perc_arr = np.percentile(img_arr,y_percentile,axis=2,interpolation='lower')
y_percentiles_smoothed = self.median_filter_2d(perc_arr,smoothing_kernel_y)
min_qth_percentile = y_percentiles_smoothed.min(axis=1)[:, np.newaxis]
max_qth_percentile = y_percentiles_smoothed.max(axis=1)[:, np.newaxis]
y_percentiles_smoothed = (y_percentiles_smoothed - min_qth_percentile)/(max_qth_percentile - min_qth_percentile)
return y_percentiles_smoothed
def get_edges_from_mask(self,mask):
"""Finds edges from a boolean mask of shape (t,y). Filters out rows of
length smaller than y_min_edge_dist.
Args:
mask (array): Boolean of shape (y,t) resulting from triangle thresholding.
y_min_edge_dist (int): Minimum row length necessary for detection.
Returns:
list: List containing arrays of edges for each timepoint, filtered for rows that are too small.
"""
edges_list = []
start_above_list = []
end_above_list = []
for t in range(mask.shape[0]):
edge_mask = (mask[t,1:] != mask[t,:-1])
start_above,end_above = (mask[t,0]==True,mask[t,-1]==True)
edges = np.where(edge_mask)[0]
edges_list.append(edges)
start_above_list.append(start_above)
end_above_list.append(end_above)
return edges_list,start_above_list,end_above_list
def get_trench_edges_y(self,y_percentiles_smoothed_array,y_percentile_threshold,y_min_edge_dist):
"""Detects edges in the shape (t,y) smoothed percentile arrays for each
input array.
Args:
y_percentiles_smoothed_array (array): A shape (y,t) smoothed percentile array.
triangle_nbins (int): Number of bins to be used to construct the thresholding histogram.
triangle_scaling (float): Factor by which to scale the threshold.
y_min_edge_dist (int): Minimum row length necessary for detection.
Returns:
list: List containing arrays of edges for each timepoint, filtered for rows that are too small.
"""
trench_mask_y = y_percentiles_smoothed_array>y_percentile_threshold
edges_list,start_above_list,end_above_list = self.get_edges_from_mask(trench_mask_y)
return edges_list,start_above_list,end_above_list
def repair_out_of_frame(self,trench_edges_y,start_above,end_above):
if start_above:
trench_edges_y = np.array([0] + trench_edges_y.tolist())
if end_above:
trench_edges_y = np.array(trench_edges_y.tolist() + [int(self.metadata['height'])])
return trench_edges_y
def remove_small_rows(self,edges,min_edge_dist):
"""Filters out small rows when performing automated row detection.
Args:
edges (array): Array of edges along y-axis.
min_edge_dist (int): Minimum row length necessary for detection.
Returns:
array: Array of edges, filtered for rows that are too small.
"""
grouped_edges = edges.reshape(-1,2)
row_lens = np.diff(grouped_edges,axis=1)
row_mask = (row_lens>min_edge_dist).flatten()
filtered_edges = grouped_edges[row_mask]
return filtered_edges.flatten()
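# Illustrative example (added): with edges = np.array([5, 40, 40, 42]) and min_edge_dist = 10,
# the (40, 42) pair is shorter than the minimum and is dropped, leaving array([ 5, 40]).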
def remove_out_of_frame(self,orientations,repaired_trench_edges_y,start_above,end_above):
"""Takes an array of trench row edges and removes the first/last edge,
if that edge does not have a proper partner (i.e. trench row mask takes
value True at boundaries of image).
Args:
orientations (list): Orientation value for each detected trench row.
repaired_trench_edges_y (array): Array of paired row edges along the y-axis.
start_above (bool): True if the trench row mask takes value True at the
starting edge of the mask.
end_above (bool): True if the trench row mask takes value True at the
ending edge of the mask.
Returns:
tuple: (orientations, drop_first_row, drop_last_row, repaired_trench_edges_y),
with rows that run out of frame removed.
"""
drop_first_row,drop_last_row = (False,False)
if start_above and orientations[0] == 0: #if the top is facing down and is cut
drop_first_row = True
orientations = orientations[1:]
repaired_trench_edges_y = repaired_trench_edges_y[2:]
if end_above and orientations[-1] == 1: #if the bottom is facing up and is cut
drop_last_row = True
orientations = orientations[:-1]
repaired_trench_edges_y = repaired_trench_edges_y[:-2]
return orientations,drop_first_row,drop_last_row,repaired_trench_edges_y
def get_manual_orientations(self,trench_edges_y_list,start_above_list,end_above_list,expected_num_rows,alternate_orientation,top_orientation,orientation_on_fail,y_min_edge_dist):
trench_edges_y = trench_edges_y_list[0]
start_above = start_above_list[0]
end_above = end_above_list[0]
orientations = []
repaired_trench_edges_y = self.repair_out_of_frame(trench_edges_y,start_above,end_above)
repaired_trench_edges_y = self.remove_small_rows(repaired_trench_edges_y,y_min_edge_dist)
if repaired_trench_edges_y.shape[0]//2 == expected_num_rows:
orientation = top_orientation
for row in range(repaired_trench_edges_y.shape[0]//2):
orientations.append(orientation)
if alternate_orientation:
orientation = (orientation+1)%2
orientations,drop_first_row,drop_last_row,repaired_trench_edges_y = self.remove_out_of_frame(orientations,repaired_trench_edges_y,start_above,end_above)
elif (repaired_trench_edges_y.shape[0]//2 < expected_num_rows) and orientation_on_fail is not None:
orientation = orientation_on_fail
for row in range(repaired_trench_edges_y.shape[0]//2):
orientations.append(orientation)
if alternate_orientation:
orientation = (orientation+1)%2
orientations,drop_first_row,drop_last_row,repaired_trench_edges_y = self.remove_out_of_frame(orientations,repaired_trench_edges_y,start_above,end_above)
else:
print("Start frame does not have expected number of rows!")
return orientations,drop_first_row,drop_last_row
def get_trench_ends(self,trench_edges_y_list,start_above_list,end_above_list,orientations,drop_first_row,drop_last_row,y_min_edge_dist):
top_orientation = orientations[0]
y_ends_list = []
for t,trench_edges_y in enumerate(trench_edges_y_list):
start_above = start_above_list[t]
end_above = end_above_list[t]
repaired_trench_edges_y = self.repair_out_of_frame(trench_edges_y,start_above,end_above)
repaired_trench_edges_y = self.remove_small_rows(repaired_trench_edges_y,y_min_edge_dist)
if (repaired_trench_edges_y.shape[0]//2 > len(orientations)) and drop_first_row:
repaired_trench_edges_y = repaired_trench_edges_y[2:]
if (repaired_trench_edges_y.shape[0]//2 > len(orientations)) and drop_last_row:
repaired_trench_edges_y = repaired_trench_edges_y[:-2]
grouped_edges = repaired_trench_edges_y.reshape(-1,2) # or,2
y_ends = []
for edges,orientation in enumerate(orientations):
y_ends.append(grouped_edges[edges,orientation])
y_ends = np.array(y_ends)
y_ends_list.append(y_ends)
return y_ends_list
def get_y_drift(self,y_ends_list):
"""Given a list of midpoints, computes the average drift in y for every
timepoint.
Args:
y_midpoints_list (list): A list containing, for each fov, a list of the form [time_list,[midpoint_array]]
containing the trench row midpoints.
Returns:
list: A nested list of the form [time_list,[y_drift_int]] for fov i.
"""
y_drift = []
for t in range(len(y_ends_list)-1):
diff_mat = np.subtract.outer(y_ends_list[t+1],y_ends_list[t])
if len(diff_mat) > 0:
min_dist_idx = np.argmin(abs(diff_mat),axis=0)
min_dists = []
for row in range(diff_mat.shape[0]):
min_dists.append(diff_mat[row,min_dist_idx[row]])
min_dists = np.array(min_dists)
median_translation = np.median(min_dists)
else:
median_translation = 0
y_drift.append(median_translation)
net_y_drift = np.append(np.array([0]),np.add.accumulate(y_drift)).astype(int)
return net_y_drift
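    # Sketch of the drift accumulation (hypothetical per-step medians): if the
    # median per-timepoint translations are [2, 3, -1], the net drift becomes
    #   np.append(np.array([0]), np.add.accumulate([2, 3, -1]))   # -> [0, 2, 5, 4]
    # i.e. an absolute offset relative to t=0 for every timepoint.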
def keep_in_frame_kernels(self,y_ends_list,y_drift,orientations,padding_y,trench_len_y):
"""Removes those kernels which drift out of the image during any timepoint.
Args:
trench_edges_y_lists (list): A list containing, for each fov, a time-ordered list of trench edge arrays.
y_drift_list (list): A list containing, for each fov, a nested list of the form [time_list,[y_drift_int]].
imported_array_list (int): A numpy array containing the hdf5 file image data.
padding_y (int): Y-dimensional padding for cropping.
Returns:
list: Time-ordered list of trench edge arrays, filtered for images which
stay in frame for all timepoints, for fov i.
"""
init_y_ends = y_ends_list[0]
max_y_dim = self.metadata['height']
max_drift,min_drift = np.max(y_drift),np.min(y_drift)
valid_y_ends_list = []
valid_orientations = []
for j,orientation in enumerate(orientations):
y_end = init_y_ends[j]
if orientation == 0:
bottom_edge = y_end+trench_len_y+max_drift
top_edge = y_end-padding_y+min_drift
edge_under_max = bottom_edge<max_y_dim
edge_over_min = top_edge >= 0
else:
bottom_edge = y_end+padding_y+max_drift
top_edge = y_end-trench_len_y+min_drift
edge_under_max = bottom_edge<max_y_dim
edge_over_min = top_edge >= 0
edge_in_bounds = edge_under_max*edge_over_min
if edge_in_bounds:
valid_y_ends_list.append([y_end[j] for y_end in y_ends_list])
valid_orientations.append(orientation)
valid_y_ends = np.array(valid_y_ends_list).T # t,edge
return valid_y_ends,valid_orientations
def get_ends_and_orientations(self,fov_idx,edges_futures,expected_num_rows,alternate_orientation,top_orientation,orientation_on_fail,y_min_edge_dist,padding_y,trench_len_y):
fovdf = self.meta_handle.read_df("global",read_metadata=False)
# fovdf = fovdf.loc[(slice(None), slice(self.t_range[0],self.t_range[1])),:]
working_fovdf = fovdf.loc[fov_idx]
trench_edges_y_list = []
start_above_list = []
end_above_list = []
for j,file_idx in enumerate(working_fovdf["File Index"].unique().tolist()):
working_filedf = working_fovdf[working_fovdf["File Index"]==file_idx]
img_indices = working_filedf["Image Index"].unique()
first_idx,last_idx = (img_indices[0],img_indices[-1])
trench_edges_y_list += edges_futures[j][0][first_idx:last_idx+1]
start_above_list += edges_futures[j][1][first_idx:last_idx+1]
end_above_list += edges_futures[j][2][first_idx:last_idx+1]
orientations,drop_first_row,drop_last_row = self.get_manual_orientations(trench_edges_y_list,start_above_list,end_above_list,expected_num_rows,alternate_orientation,top_orientation,orientation_on_fail,y_min_edge_dist)
y_ends_list = self.get_trench_ends(trench_edges_y_list,start_above_list,end_above_list,orientations,drop_first_row,drop_last_row,y_min_edge_dist)
y_drift = self.get_y_drift(y_ends_list)
valid_y_ends,valid_orientations = self.keep_in_frame_kernels(y_ends_list,y_drift,orientations,padding_y,trench_len_y)
return y_drift,valid_orientations,valid_y_ends
def get_median_y_drift(self,drift_orientation_and_initend_futures):
y_drift_list = [item[0] for item in drift_orientation_and_initend_futures]
median_drift = np.round(np.median(np.array(y_drift_list),axis=0)).astype(int)
return median_drift
def update_y_drift_futures(self,new_y_drift,drift_orientation_and_initend_future):
drift_orientation_and_initend_future = tuple((new_y_drift,drift_orientation_and_initend_future[1],drift_orientation_and_initend_future[2]))
return drift_orientation_and_initend_future
def crop_y(self,file_idx,drift_orientation_and_initend_future,padding_y,trench_len_y):
"""Performs cropping of the images in the y-dimension.
Args:
i (int): Specifies the current fov index.
trench_edges_y_list (list): List containing, for each fov entry, a list of time-sorted edge arrays.
row_num_list (list): List containing The number of trench rows detected in each fov.
imported_array_list (list): A list containing numpy arrays containing the hdf5 file image
data of shape (channel,y,x,t).
padding_y (int): Padding to be used when cropping in the y-dimension.
trench_len_y (int): Length from the end of the tenches to be used when cropping in the
y-dimension.
top_orientation (int, optional): The orientation of the top-most row where 0 corresponds to a trench with
a downward-oriented trench opening and 1 corresponds to a trench with an upward-oriented trench opening.
Returns:
array: A y-cropped array of shape (rows,channels,x,y,t).
"""
fovdf = self.meta_handle.read_df("global",read_metadata=False)
# fovdf = fovdf.loc[(slice(None), slice(self.t_range[0],self.t_range[1])),:]
filedf = fovdf.reset_index(inplace=False)
filedf = filedf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
filedf = filedf.sort_index()
working_filedf = filedf.loc[file_idx]
timepoint_indices = working_filedf["timepoints"].unique().tolist()
image_indices = working_filedf.index.get_level_values("Image Index").unique().tolist()
# first_idx,last_idx = (timepoint_indices[0]-self.t_range[0],timepoint_indices[-1]-self.t_range[0])
first_idx,last_idx = (timepoint_indices[0],timepoint_indices[-1])
y_drift = drift_orientation_and_initend_future[0][first_idx:last_idx+1]
valid_orientations,valid_y_ends = drift_orientation_and_initend_future[1:]
drift_corrected_edges = np.add.outer(y_drift,valid_y_ends[0])
channel_arr_list = []
for c,channel in enumerate(self.all_channels):
with h5py_cache.File(self.hdf5path+"/hdf5_"+str(file_idx)+".hdf5","r",chunk_cache_mem_size=self.metadata["chunk_cache_mem_size"]) as imported_hdf5_handle:
img_arr = imported_hdf5_handle[channel][image_indices[0]:image_indices[-1]+1]
time_list = []
lane_y_coords_list = []
for t in range(img_arr.shape[0]):
trench_ends_y = drift_corrected_edges[t]
row_list = []
lane_y_coords = []
for r,orientation in enumerate(valid_orientations):
trench_end = trench_ends_y[r]
if orientation == 0:
upper = max(trench_end-padding_y,0)
lower = min(trench_end+trench_len_y,img_arr.shape[1])
else:
upper = max(trench_end-trench_len_y,0)
lower = min(trench_end+padding_y,img_arr.shape[1])
lane_y_coords.append(upper)
output_array = img_arr[t,upper:lower,:]
row_list.append(output_array)
time_list.append(row_list)
lane_y_coords_list.append(lane_y_coords)
cropped_in_y = np.array(time_list) # t x row x y x x
if len(cropped_in_y.shape) != 4:
print("Error in crop_y")
raise
else:
channel_arr_list.append(cropped_in_y)
return channel_arr_list,lane_y_coords_list
def get_smoothed_x_percentiles(self,file_idx,drift_orientation_and_initend_future,padding_y,trench_len_y,x_percentile,background_kernel_x,smoothing_kernel_x):
"""Summary.
Args:
array_tuple (tuple): A singleton tuple containing the y-cropped hdf5 array of shape (rows,x,y,t).
background_kernel_x (tuple): Two-entry tuple specifying a kernel size for performing background subtraction
on xt signal when cropping in the x-dimension. Dim_1 (time) should be set to 1.
smoothing_kernel_x (tuple): Two-entry tuple specifying a kernel size for performing smoothing
on xt signal when cropping in the x-dimension. Dim_1 (time) should be set to 1.
Returns:
array: A smoothed and background subtracted percentile array of shape (rows,x,t)
"""
channel_arr_list,_ = self.crop_y(file_idx,drift_orientation_and_initend_future,padding_y,trench_len_y)
cropped_in_y = channel_arr_list[0]
if self.invert:
cropped_in_y = sk.util.invert(cropped_in_y)
# cropped_in_y = y_crop_future[0][0] # t x row x y x x # (24, 1, 330, 2048)
x_percentiles_smoothed = []
for row_num in range(cropped_in_y.shape[1]):
cropped_in_y_seg = cropped_in_y[:,row_num] # t x y x x
x_percentiles = np.percentile(cropped_in_y_seg,x_percentile,axis=1) # t x x
x_background_filtered = x_percentiles - self.median_filter_2d(x_percentiles,background_kernel_x)
x_smooth_filtered = self.median_filter_2d(x_background_filtered,smoothing_kernel_x)
x_smooth_filtered[x_smooth_filtered<0.] = 0.
x_percentiles_smoothed.append(x_smooth_filtered)
x_percentiles_smoothed=np.array(x_percentiles_smoothed) # row x t x x
return x_percentiles_smoothed
def get_midpoints_from_mask(self,mask):
"""Using a boolean x mask, computes the positions of trench midpoints.
Args:
mask (array): x boolean array, specifying where trenches are present.
Returns:
array: array of trench midpoint x positions.
"""
transitions = mask[:-1].astype(int) - mask[1:].astype(int)
trans_up = np.where((transitions==-1))[0]
trans_dn = np.where((transitions==1))[0]
if len(np.where(trans_dn>trans_up[0])[0])>0:
first_dn = np.where(trans_dn>trans_up[0])[0][0]
trans_dn = trans_dn[first_dn:]
trans_up = trans_up[:len(trans_dn)]
midpoints = (trans_dn + trans_up)//2
else:
midpoints = []
return midpoints
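    # Sketch of midpoint extraction on a toy x mask (hypothetical values):
    #   x_mask = np.array([0, 0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=bool)
    #   transitions = x_mask[:-1].astype(int) - x_mask[1:].astype(int)
    #   trans_up = np.where(transitions == -1)[0]   # rising edges  -> [1, 6]
    #   trans_dn = np.where(transitions == 1)[0]    # falling edges -> [4, 8]
    #   (trans_dn + trans_up) // 2                  # midpoints     -> [2, 7]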
def get_x_row_midpoints(self,x_percentiles_t,otsu_scaling,min_threshold):
"""Given an array of signal in x, determines the position of trench
midpoints.
Args:
x_percentiles_t (array): array of trench intensities in x, at time t.
otsu_nbins (int): Number of bins to use when applying Otsu's method to x-dimension signal.
otsu_scaling (float): Threshold scaling factor for Otsu's method thresholding.
Returns:
array: array of trench midpoint x positions.
"""
otsu_threshold = sk.filters.threshold_otsu(x_percentiles_t[:,np.newaxis],nbins=50)*otsu_scaling
modified_otsu_threshold = max(otsu_threshold,min_threshold)
x_mask = x_percentiles_t>modified_otsu_threshold
midpoints = self.get_midpoints_from_mask(x_mask)
return midpoints
def get_x_midpoints(self,x_percentiles_smoothed,otsu_scaling,min_threshold):
"""Given an x percentile array of shape (rows,t,x), determines the
trench midpoints of each row array at each time t.
Args:
x_percentiles_smoothed_array (array): A smoothed and background subtracted percentile array of shape (rows,x,t)
otsu_nbins (int): Number of bins to use when applying Otsu's method to x-dimension signal.
otsu_scaling (float): Threshold scaling factor for Otsu's method thresholding.
Returns:
list: A nested list of the form [row_list,[time_list,[midpoint_array]]].
"""
all_midpoints_list = []
for row in range(x_percentiles_smoothed.shape[0]):
row_x_percentiles = x_percentiles_smoothed[row]
all_midpoints = []
midpoints = self.get_x_row_midpoints(row_x_percentiles[0],otsu_scaling,min_threshold)
if len(midpoints) == 0:
return None
all_midpoints.append(midpoints)
for t in range(1,row_x_percentiles.shape[0]):
midpoints = self.get_x_row_midpoints(row_x_percentiles[t],otsu_scaling,min_threshold)
if len(midpoints)/(len(all_midpoints[-1])+1) < 0.5:
all_midpoints.append(all_midpoints[-1])
else:
all_midpoints.append(midpoints)
all_midpoints_list.append(all_midpoints)
return all_midpoints_list
def compile_midpoint_futures(self,midpoint_futures):
num_rows = len(midpoint_futures[0])
all_midpoints_list = []
for row in range(num_rows):
row_midpoints_list = []
for midpoint_future in midpoint_futures:
row_midpoints_list += midpoint_future[row]
all_midpoints_list.append(row_midpoints_list)
return all_midpoints_list
def get_x_drift(self,midpoint_futures):
"""Given a list of midpoints, computes the average drift in x for every
timepoint.
Args:
all_midpoints_list (list): A nested list of the form [row_list,[time_list,[midpoint_array]]] containing
the trench midpoints.
Returns:
list: A nested list of the form [row_list,[time_list,[x_drift_int]]].
"""
all_midpoints_list = self.compile_midpoint_futures(midpoint_futures)
x_drift_list = []
for all_midpoints in all_midpoints_list:
x_drift = []
for t in range(len(all_midpoints)-1):
diff_mat = np.subtract.outer(all_midpoints[t+1],all_midpoints[t])
min_dist_idx = np.argmin(abs(diff_mat),axis=0)
min_dists = diff_mat[min_dist_idx]
median_translation = int(np.median(min_dists))
x_drift.append(median_translation)
net_x_drift = np.append(np.array([0]),np.add.accumulate(x_drift))
x_drift_list.append(net_x_drift)
return x_drift_list
def get_median_x_drift(self,x_drift_futures):
uppacked_x_drift_futures = [row for fov in x_drift_futures for row in fov]
median_drift = np.round(np.median(np.array(uppacked_x_drift_futures),axis=0)).astype(int)
return median_drift
def update_x_drift_futures(self,new_x_drift,x_drift_future):
x_drift_future = [copy.copy(new_x_drift) for row in x_drift_future]
return x_drift_future
def filter_midpoints(self,all_midpoints,x_drift,trench_width_x,trench_present_thr):
drift_corrected_midpoints = []
for t in range(len(x_drift)):
drift_corrected_t = all_midpoints[t]-x_drift[t]
drift_corrected_midpoints.append(drift_corrected_t)
midpoints_up,midpoints_dn = (all_midpoints[0]-trench_width_x//2,\
all_midpoints[0]+trench_width_x//2+1)
trench_present_t = []
for t in range(len(drift_corrected_midpoints)):
above_mask = np.greater.outer(drift_corrected_midpoints[t],midpoints_up)
below_mask = np.less.outer(drift_corrected_midpoints[t],midpoints_dn)
in_bound_mask = (above_mask*below_mask)
trench_present = np.any(in_bound_mask,axis=0)
trench_present_t.append(trench_present)
trench_present_t = np.array(trench_present_t)
trench_present_perc = np.sum(trench_present_t,axis=0)/trench_present_t.shape[0]
presence_filter_mask = trench_present_perc>=trench_present_thr
midpoint_seeds = all_midpoints[0][presence_filter_mask]
return midpoint_seeds
def get_in_bounds(self,all_midpoints,x_drift,trench_width_x,trench_present_thr):
"""Produces and writes a trench mask of shape (y_dim,t_dim,x_dim). This
will be used to mask out trenches from the reshaped "cropped_in_y"
array at a later step.
Args:
cropped_in_y (array): A y-cropped hdf5 array of shape (rows,y,x,t) containing y-cropped image data.
all_midpoints (list): A list containing, for each time t, an array of trench midpoints.
x_drift (list): A list containing, for each time t, an int corresponding to the drift of the midpoints in x.
trench_width_x (int): Width to be used when cropping in the x-dimension.
Returns:
h5py.File: Hdf5 file handle corresponding to the trench mask hdf5 dataset
"data" of shape (y_dim,t_dim,x_dim).
int: Total number of trenches detected in the image.
"""
midpoint_seeds = self.filter_midpoints(all_midpoints,x_drift,trench_width_x,trench_present_thr)
corrected_midpoints = x_drift[:,np.newaxis]+midpoint_seeds[np.newaxis,:]
midpoints_up,midpoints_dn = (corrected_midpoints-trench_width_x//2,\
corrected_midpoints+trench_width_x//2+1)
stays_in_frame = np.all(midpoints_up>=0,axis=0)*np.all(midpoints_dn<=self.metadata["width"],axis=0) #filters out midpoints that stay in the frame for the whole time...
# no_overlap = np.append(np.array([True]),(corrected_midpoints[0,1:]-corrected_midpoints[0,:-1])>=(trench_width_x+1)) #corrects for overlap
# if np.sum(no_overlap)/len(no_overlap)<0.9:
# print("Trench overlap issue!!!")
# valid_mask = stays_in_frame*no_overlap
in_bounds = np.array([midpoints_up[:,stays_in_frame],\
midpoints_dn[:,stays_in_frame]])
k_tot = in_bounds.shape[2]
x_coords = in_bounds[0].T
return in_bounds,x_coords,k_tot
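    # Shape sketch (hypothetical sizes): with 3 surviving trenches over 10 timepoints,
    # in_bounds has shape (2, 10, 3) holding the left/right x bounds, x_coords has
    # shape (3, 10) with the per-trench left edges, and k_tot == 3.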
def get_all_in_bounds(self,midpoint_futures,x_drift_future,trench_width_x,trench_present_thr):
"""Generates complete kymograph arrays for all trenches in the fov in
every channel listed in 'self.all_channels'. Writes hdf5 files
containing datasets of shape (trench_num,y_dim,x_dim,t_dim) for each
row,channel combination. Dataset keys follow the convention.
["[row_number]/[channel_name]"].
Args:
cropped_in_y_handle (h5py.File): Hdf5 file handle corresponding to the y-cropped hdf5 dataset
"data" of shape (rows,channels,x,y,t).
all_midpoints_list (list): A nested list of the form [row_list,[time_list,[midpoint_array]]] containing
the trench midpoints.
x_drift_list (list): A nested list of the form [row_list,[time_list,[x_drift_int]]] containing the computed
drift in the x dimension.
trench_width_x (int): Width to be used when cropping in the x-dimension.
"""
all_midpoints_list = self.compile_midpoint_futures(midpoint_futures)
in_bounds_list = []
x_coords_list = []
k_tot_list = []
for row_num,all_midpoints in enumerate(all_midpoints_list):
x_drift = x_drift_future[row_num]
in_bounds,x_coords,k_tot = self.get_in_bounds(all_midpoints,x_drift,trench_width_x,trench_present_thr)
in_bounds_list.append(in_bounds)
x_coords_list.append(x_coords)
k_tot_list.append(k_tot)
return in_bounds_list,x_coords_list,k_tot_list
def crop_with_bounds(self,output_kymograph,cropped_in_y_list,working_in_bounds,k_tot,row_num):
"""Generates and writes kymographs of a single row from the already
y-cropped image data, using a pregenerated kymograph mask of shape
(y_dim,t_dim,x_dim).
Args:
cropped_in_y_handle (h5py.File): Hdf5 file handle corresponding to the y-cropped hdf5 dataset
"data" of shape (rows,channels,x,y,t).
k_mask_handle (h5py.File): Hdf5 file handle corresponding to the trench mask hdf5 dataset
"data" of shape (y_dim,t_dim,x_dim).
row_num (int): The row number to crop kymographs from.
k_tot (int): Int specifying the total number of detected trenches in the fov.
"""
for c,channel in enumerate(self.all_channels):
dataset_name = str(row_num) + "/" + str(channel)
cropped_in_y = cropped_in_y_list[c][:,row_num] # t,y,x
k_len,t_len,y_len,x_len = (working_in_bounds.shape[2],working_in_bounds.shape[1],cropped_in_y.shape[1],working_in_bounds[1,0,0]-working_in_bounds[0,0,0])
kymo_out = np.zeros((k_len,t_len,y_len,x_len),dtype="uint16")
for t in range(working_in_bounds.shape[1]):
for k in range(working_in_bounds.shape[2]):
bounds = working_in_bounds[:,t,k]
kymo_out[k,t] = cropped_in_y[t,:,bounds[0]:bounds[1]]
# kymo_out = self.apply_kymo_mask(kymo_mask,cropped_in_y,k_tot) # k x t x y x x
hdf5_dataset = output_kymograph.create_dataset(dataset_name,data=kymo_out,chunks=self.output_chunk_shape, dtype='uint16')
def crop_x(self,file_idx,drift_orientation_and_initend_future,in_bounds_future,padding_y,trench_len_y):
"""Generates complete kymograph arrays for all trenches in the fov in
every channel listed in 'self.all_channels'. Writes hdf5 files
containing datasets of shape (trench_num,y_dim,x_dim,t_dim) for each
row,channel combination. Dataset keys follow the convention.
["[row_number]/[channel_name]"].
Args:
cropped_in_y_handle (h5py.File): Hdf5 file handle corresponding to the y-cropped hdf5 dataset
"data" of shape (rows,channels,x,y,t).
all_midpoints_list (list): A nested list of the form [row_list,[time_list,[midpoint_array]]] containing
the trench midpoints.
x_drift_list (list): A nested list of the form [row_list,[time_list,[x_drift_int]]] containing the computed
drift in the x dimension.
trench_width_x (int): Width to be used when cropping in the x-dimension.
"""
fovdf = self.meta_handle.read_df("global",read_metadata=False)
# fovdf = fovdf.loc[(slice(None), slice(self.t_range[0],self.t_range[1])),:]
filedf = fovdf.reset_index(inplace=False)
filedf = filedf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
filedf = filedf.sort_index()
working_filedf = filedf.loc[file_idx]
timepoint_indices = working_filedf["timepoints"].unique().tolist()
image_indices = working_filedf.index.get_level_values("Image Index").unique().tolist()
# first_idx,last_idx = (timepoint_indices[0]-self.t_range[0],timepoint_indices[-1]-self.t_range[0]) #CHANGED
first_idx,last_idx = (timepoint_indices[0],timepoint_indices[-1]) #CHANGED
channel_arr_list,lane_y_coords_list = self.crop_y(file_idx,drift_orientation_and_initend_future,padding_y,trench_len_y)
num_rows = channel_arr_list[0].shape[1]
in_bounds_list,x_coords_list,k_tot_list = in_bounds_future
# counting_arr = self.init_counting_arr(self.metadata["width"])
with h5py_cache.File(self.kymographpath+"/kymograph_processed_"+str(file_idx)+".hdf5","w",chunk_cache_mem_size=self.output_chunk_cache_mem_size) as output_kymograph:
for row_num in range(num_rows):
in_bounds,k_tot = (in_bounds_list[row_num],k_tot_list[row_num])
working_in_bounds = in_bounds[:,first_idx:last_idx+1]
# kymo_mask = self.get_trench_mask(in_bounds[:,first_idx:last_idx+1],counting_arr)
self.crop_with_bounds(output_kymograph,channel_arr_list,working_in_bounds,k_tot,row_num)
return lane_y_coords_list
def save_coords(self,fov_idx,x_crop_futures,in_bounds_future,drift_orientation_and_initend_future):
fovdf = self.meta_handle.read_df("global",read_metadata=False)
# fovdf = fovdf.loc[(slice(None), slice(self.t_range[0],self.t_range[1])),:]
fovdf = fovdf.loc[fov_idx]
x_coords_list = in_bounds_future[1]
orientations = drift_orientation_and_initend_future[1]
y_coords_list = []
for j,file_idx in enumerate(fovdf["File Index"].unique().tolist()):
working_filedf = fovdf[fovdf["File Index"]==file_idx]
img_indices = working_filedf["Image Index"].unique()
y_coords_list += x_crop_futures[j] # t x row list
pixel_microns = self.metadata['pixel_microns']
y_coords = np.array(y_coords_list) # t x row array
scaled_y_coords = y_coords*pixel_microns
t_len = scaled_y_coords.shape[0]
fs = np.repeat([fov_idx],t_len)
orit_dict = {0:"top",1:"bottom"}
tpts = np.array(range(t_len))
missing_metadata = ('x' not in fovdf.columns)
if not missing_metadata:
global_x,global_y,ts,file_indices,img_indices = (fovdf["x"].values,fovdf["y"].values,fovdf["t"].values,fovdf["File Index"].values,fovdf["Image Index"].values)
else:
file_indices,img_indices = (fovdf["File Index"].values,fovdf["Image Index"].values)
pd_output = []
for l,x_coord in enumerate(x_coords_list):
scaled_x_coord = x_coord*pixel_microns
yt = scaled_y_coords[:,l]
orit = np.repeat([orit_dict[orientations[l]]],t_len)
if not missing_metadata:
global_yt = yt+global_y
ls = np.repeat([l],t_len)
for k in range(scaled_x_coord.shape[0]):
xt = scaled_x_coord[k]
if not missing_metadata:
global_xt = xt+global_x
ks = np.repeat([k],t_len)
if not missing_metadata:
pd_output.append(np.array([fs,ls,ks,tpts,file_indices,img_indices,ts,orit,yt,xt,global_yt,global_xt]).T)
else:
pd_output.append(np.array([fs,ls,ks,tpts,file_indices,img_indices,orit,yt,xt]).T)
pd_output = np.concatenate(pd_output,axis=0)
if not missing_metadata:
df = pd.DataFrame(pd_output,columns=["fov","row","trench","timepoints","File Index","Image Index","time (s)","lane orientation","y (local)","x (local)","y (global)","x (global)"])
df = df.astype({"fov":int,"row":int,"trench":int,"timepoints":int,"File Index":int,"Image Index":int,"time (s)":float,"lane orientation":str,"y (local)":float,"x (local)":float,\
"y (global)":float,"x (global)":float})
else:
df = | pd.DataFrame(pd_output,columns=["fov","row","trench","timepoints","File Index","Image Index","lane orientation","y (local)","x (local)"]) | pandas.DataFrame |
import sys
import os.path
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import numpy as np
import pandas as pd
from urllib.parse import quote
import os
from utils.scraping_utils import get_soup_for_url, get_postcode_prefix, identify_postcode, strip_text
from utils.geo import all_region_postcodes
import re
import ast
sleep = .1
def map_to_companies_house_name(company_name, postcode=None,):
if company_name.lower() == "myworkwear":
return "J.M. WORTHINGTON & CO. LIMITED"
return search_companies_house(company_name, postcode, postcode is not None)[0]
def search_companies_house(company_name, postcode, check_postcode=True):
company_name = company_name#.lower()
# replace +
company_name = company_name.replace("+", "plus")
postcode_prefix = get_postcode_prefix(postcode)
print ("searching companies house for company:", company_name,
"using postcode", postcode)
# company_name_url = "+".join(company_name.split(" "))
company_name_url = quote(company_name)
url = f"https://find-and-update.company-information.service.gov.uk/search?q={company_name_url}"
soup, status_code = get_soup_for_url(url, sleep=sleep)
assert status_code == 200
# postcode_prefix = get_postcode_prefix(postcode)
results = soup.find_all("li", {"class": 'type-company'})
if results is None:
return None
for result in results:
anchor = result.find("a", {"title": 'View company'})
link = anchor["href"]
link_company_name = strip_text(anchor.text)
print ("COMPANIES HOUSE NAME", link_company_name)
# check for post code
address = result.find("p", {"class": None}).get_text(strip=True)
if not check_postcode:
return link_company_name, address, link
if address is None or address == "":
continue
result_postcode = identify_postcode(address)
if result_postcode is None:
continue
result_postcode_prefix = get_postcode_prefix(result_postcode)
if result_postcode_prefix is None:
continue
if postcode_prefix == result_postcode_prefix:
print ("POSTCODE MATCH", postcode, result_postcode,
get_postcode_prefix(postcode), get_postcode_prefix(result_postcode))
return link_company_name, address, link
elif result_postcode_prefix in all_region_postcodes["midlands"] \
or result_postcode_prefix in all_region_postcodes["yorkshire"]:
print ("postcode region match", address, link)
return link_company_name, address, link
# raise Exception
return None, None, None
def scrape(link):
# pass
base_url = "https://find-and-update.company-information.service.gov.uk" + link
overview_url = base_url
soup, status_code = get_soup_for_url(overview_url, sleep=sleep)
assert status_code == 200
# company status
status_element = soup.find("dd", {"id": "company-status"})
if status_element is not None:
# remove non alphabetic characters
company_status = re.sub(r"[^A-Za-z]", "", status_element.text.rstrip())
else:
company_status = None
print ("company status", company_status)
# get SIC codes
sic_elements = soup.find_all(id=re.compile('^sic[0-9]+'))
sic_codes = []
for sic_element in sic_elements:
sic_codes.append(strip_text(sic_element.text))
return company_status, sic_codes
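# Example usage sketch (hypothetical company name and postcode; requires network access):
#   name, address, link = search_companies_house("ACME WIDGETS", "B1 1AA")
#   if link is not None:
#       status, sic_codes = scrape(link)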
def get_companies_house_name_for_members():
'''
    THIS SEARCHES COMPANIES HOUSE FOR THE BEST MATCHING NAME
BUT THE RESULTS WILL HAVE TO BE MANUALLY CHECKED
'''
for membership_level in (
"Patron",
"Platinum",
"Gold",
"Silver",
"Bronze",
"Digital",
"Freemium",
):
summary_filename = os.path.join("data_for_graph", "members", f"{membership_level}_members.csv")
member_summaries = pd.read_csv(summary_filename, index_col=0)
member_summaries["companies_house_name"] = member_summaries.apply(
lambda row: map_to_companies_house_name(
company_name=row["member_name"],
# postcode=row["postcode"]
),
axis=1
)
member_summaries.to_csv(summary_filename)
def scrape_companies_house_for_members():
output_dir = os.path.join("data_for_graph", "members")
for membership_level in (
"Patron",
"Platinum",
"Gold",
"Silver",
"Bronze",
"Digital",
"Freemium",
):
filename = f"{membership_level}_members"
companies = pd.read_csv(
os.path.join(output_dir, f"{filename}.csv"),
index_col=0,
)
company_name_col = "companies_house_name"
assert company_name_col in companies.columns
companies = companies.drop_duplicates(company_name_col)
output_filename = os.path.join(output_dir, f"{filename}_companies_house")
output_filename_csv = f"{output_filename}.csv"
if os.path.exists(output_filename_csv):
full_company_info = pd.read_csv(output_filename_csv, index_col=0)
existing_companies = set(full_company_info.index)
else:
full_company_info = pd.DataFrame()
existing_companies = set()
for i, company in companies.iterrows():
company_name = company[company_name_col]
if | pd.isnull(company_name) | pandas.isnull |
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
import sympy as sp
from sympy import Matrix, Symbol
from sympy.utilities.lambdify import lambdify
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
# Seaborn, useful for graphics
import seaborn as sns
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
# Functions
def calc_V_bcc(a):
return a**3
def calc_V_hcp(a,c):
return (np.sqrt(3)/2)*a**2*c
def calc_dV_bcc(a,da):
return 3*a**2*da
def calc_dV_hcp(a,c,da,dc):
return np.sqrt( (np.sqrt(3)*a*c*da)**2 + ((np.sqrt(3)/2)*a**2*dc)**2 )
# Numeric Vinet EOS, used for everything except calculating dP
def VinetEOS(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * np.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
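# Quick numeric sanity check of the Vinet EOS above (hypothetical Fe-like parameters,
# kept as a comment so the script body is unchanged):
#   VinetEOS(V=20.0, V0=22.0, K0=160.0, Kprime0=5.5)
# compresses to V/V0 ~ 0.91 and returns roughly 20 (same units as K0, i.e. GPa if K0
# is in GPa), while VinetEOS(V0, V0, K0, Kprime0) returns exactly 0 at the reference volume.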
# Symbolic Vinet EOS, needed to calculate dP
def VinetEOS_sym(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * sp.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Create a covariance matrix from EOS_df with V0, K0, and K0prime; used to get dP
def getCov3(EOS_df, phase):
    # note: np.float was removed from NumPy, so use the builtin float here
    dV0 = float(EOS_df[EOS_df['Phase'] == phase]['dV0'])
    dK0 = float(EOS_df[EOS_df['Phase'] == phase]['dK0'])
    dKprime0 = float(EOS_df[EOS_df['Phase'] == phase]['dKprime0'])
    V0K0_corr = float(EOS_df[EOS_df['Phase'] == phase]['V0K0 corr'])
    V0Kprime0_corr = float(EOS_df[EOS_df['Phase'] == phase]['V0Kprime0 corr'])
    K0Kprime0_corr = float(EOS_df[EOS_df['Phase'] == phase]['K0Kprime0 corr'])
corr_matrix = np.eye(3)
corr_matrix[0,1] = V0K0_corr
corr_matrix[1,0] = V0K0_corr
corr_matrix[0,2] = V0Kprime0_corr
corr_matrix[2,0] = V0Kprime0_corr
corr_matrix[1,2] = K0Kprime0_corr
corr_matrix[2,1] = K0Kprime0_corr
sigmas = np.array([[dV0,dK0,dKprime0]])
cov = (sigmas.T@sigmas)*corr_matrix
return cov
# Create a covariance matrix with V, V0, K0, and K0prime; used to get dP
def getVinetCov(dV, EOS_df, phase):
cov3 = getCov3(EOS_df, phase)
cov = np.eye(4)
cov[1:4,1:4] = cov3
cov[0,0] = dV**2
return cov
def calc_dP_VinetEOS(V, dV, EOS_df, phase):
# Create function for Jacobian of Vinet EOS
a,b,c,d = Symbol('a'),Symbol('b'),Symbol('c'),Symbol('d') # Symbolic variables V, V0, K0, K'0
Vinet_matrix = Matrix([VinetEOS_sym(a,b,c,d)]) # Create a symbolic Vinet EOS matrix
param_matrix = Matrix([a,b,c,d]) # Create a matrix of symbolic variables
# Symbolically take the Jacobian of the Vinet EOS and turn into a column matrix
J_sym = Vinet_matrix.jacobian(param_matrix).T
# Create a numpy function for the above expression
# (easier to work with numerically)
J_Vinet = lambdify((a,b,c,d), J_sym, 'numpy')
J = J_Vinet(V,*getEOSparams(EOS_df, phase)) # Calculate Jacobian
cov = getVinetCov(dV, EOS_df, phase) # Calculate covariance matrix
dP = (J.T@cov@J).item() # Calculate uncertainty and convert to a scalar
return dP
def getEOSparams(EOS_df, phase):
    V0 = float(EOS_df[EOS_df['Phase'] == phase]['V0'])
    K0 = float(EOS_df[EOS_df['Phase'] == phase]['K0'])
    Kprime0 = float(EOS_df[EOS_df['Phase'] == phase]['Kprime0'])
return V0, K0, Kprime0
def calc_rho(V,dV,M):
# Convert from cubic angstroms to cm^3/mol
V_ccpermol = (V/2)*constants.N_A/(10**24)
rho = M/V_ccpermol
drho = (M*2*10**24/constants.N_A)*(dV/(V**2))
return rho, drho
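# Usage sketch tying the pieces together (hypothetical phase label 'bcc Fe' and
# volumes; the real phase names come from FeAlloyEOS.csv):
#   V0, K0, Kprime0 = getEOSparams(EOS_df, 'bcc Fe')
#   P = VinetEOS(V, V0, K0, Kprime0)
#   dP = calc_dP_VinetEOS(V, dV, EOS_df, 'bcc Fe')
#   rho, drho = calc_rho(V, dV, M=55.845)   # M in g/mol for pure Fe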
# Import EOS information
EOS_df = pd.read_csv('FeAlloyEOS.csv')
# Find the filepath of all .xy XRD pattern files
patternfilepath_list = [filepath for filepath in glob.glob('*/*/*.xy')]
allresults_df = pd.DataFrame()
bccFe_df = pd.DataFrame()
bccFeNi_df = pd.DataFrame()
bccFeNiSi_df = | pd.DataFrame() | pandas.DataFrame |
from matplotlib.dates import DateFormatter, WeekdayLocator, \
DayLocator, MONDAY
import pandas as pd
import numpy as np
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
#from matplotlib.finance import candlestick_ohlc
from mpl_finance import candlestick_ochl as candlestick
from utilities import log
def pandas_candlestick_ohlc(dataframe, stick="day", otherseries=None):
"""
:param dataframe: pandas DataFrame object with datetime64 index, and float columns "Open", "High", "Low", and "Close", likely created via DataReader from "yahoo"
:param stick: A string or number indicating the period of time covered by a single candlestick. Valid string inputs include "day", "week", "month", and "year", ("day" default), and any numeric input indicates the number of trading days included in a period
:param otherseries: An iterable that will be coerced into a list, containing the columns of dataframe that hold other series to be plotted as lines
This will show a Japanese candlestick plot for stock data stored in dataframe, also plotting other series if passed.
"""
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
dayFormatter = DateFormatter('%d') # e.g., 12
# Create a new DataFrame which includes OHLC data for each period specified by stick input
transdat = dataframe.loc[:, ["Open", "High", "Low", "Close"]]
if (type(stick) == str):
if stick == "day":
plotdat = transdat
stick = 1 # Used for plotting
elif stick in ["week", "month", "year"]:
if stick == "week":
transdat["week"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[1]) # Identify weeks
elif stick == "month":
transdat["month"] = pd.to_datetime(transdat.index).map(lambda x: x.month) # Identify months
transdat["year"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[0]) # Identify years
grouped = transdat.groupby(list(set(["year", stick]))) # Group by year and other appropriate variable
plotdat = pd.DataFrame({"Open": [], "High": [], "Low": [],
"Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0, 0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1, 3]},
index=[group.index[0]]))
if stick == "week":
stick = 5
elif stick == "month":
stick = 30
elif stick == "year":
stick = 365
elif (type(stick) == int and stick >= 1):
transdat["stick"] = [np.floor(i / stick) for i in range(len(transdat.index))]
grouped = transdat.groupby("stick")
plotdat = pd.DataFrame(
{"Open": [], "High": [], "Low": [], "Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0, 0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1, 3]},
index=[group.index[0]]))
else:
raise ValueError(
'Valid inputs to argument "stick" include the strings "day", "week", "month", "year", or a positive integer')
# Set plot parameters, including the axis object ax used for plotting
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
if plotdat.index[-1] - plotdat.index[0] < | pd.Timedelta('730 days') | pandas.Timedelta |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/11/4 17:39
describe: A-share strong (momentum) stock sensor
"""
import os
import os.path
import traceback
import inspect
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from collections import Counter
from tqdm import tqdm
from typing import Callable
from czsc.objects import Event
from czsc.utils import io
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, max_draw_down, turn_over_rate
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def selected_filter_by_index(dc: TsDataCache, dfg: pd.DataFrame, index_code=None):
"""使用指数成分过滤
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:param index_code: 指数代码
:return: 过滤后的选股结果
"""
if not index_code or dfg.empty:
return dfg
assert dfg['trade_date'].nunique() == 1
trade_date = dfg['trade_date'].max()
index_members = dc.index_weight(index_code, trade_date)
ts_codes = list(index_members['con_code'].unique())
return dfg[dfg.ts_code.isin(ts_codes)]
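# Usage sketch (hypothetical objects and index code; dc is a TsDataCache and dfg a
# one-day selection result produced upstream):
#   dfg = selected_filter_by_index(dc, dfg, index_code='000905.SH')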
def selected_filter_by_concepts(dc, dfg, top_n=20, min_n=3, method='v1'):
"""使用板块效应过滤
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:param top_n: 选取前 n 个密集概念
:param min_n: 单股票至少要有 n 个概念在 top_n 中
:param method: 打分计算方法
v1 直接取板块中的强势股数量作为分数
v2 板块内强势股数 / 板块内股数
:return: 过滤后的选股结果
"""
if dfg.empty or not top_n or not min_n:
return dfg, []
ths_members = dc.get_all_ths_members(exchange="A", type_="N")
ths_members = ths_members[~ths_members['概念名称'].isin([
'MSCI概念', '沪股通', '深股通', '融资融券', '上证180成份股', '央企国资改革',
'标普道琼斯A股', '中证500成份股', '上证380成份股', '沪深300样本股',
])]
ths_concepts = ths_members[ths_members.code.isin(dfg.ts_code)]
if method == 'v1':
key_concepts = [k for k, v in Counter(ths_concepts['概念名称'].to_list()).most_common(top_n)]
elif method == 'v2':
all_count = Counter(ths_members['概念名称'].to_list())
sel_count = Counter(ths_concepts['概念名称'].to_list())
df_scores = pd.DataFrame([{"concept": k, 'score': sel_count[k] / all_count[k]}
for k in sel_count.keys()])
key_concepts = df_scores.sort_values('score', ascending=False).head(top_n)['concept'].to_list()
else:
raise ValueError(f"method value error")
sel = ths_concepts[ths_concepts['概念名称'].isin(key_concepts)]
ts_codes = [k for k, v in Counter(sel.code).most_common() if v >= min_n]
dfg = dfg[dfg.ts_code.isin(ts_codes)]
dfg.loc[:, '概念板块'] = dfg.ts_code.apply(lambda x: ths_concepts[ths_concepts.code == x]['概念名称'].to_list())
dfg.loc[:, '概念数量'] = dfg['概念板块'].apply(len)
return dfg, key_concepts
def selected_filter_by_market_value(dfg, min_total_mv=None):
"""使用总市值过滤
:param dfg: 单个交易日的强势股选股结果
:param min_total_mv: 最小总市值,单位为万元,1e6万元 = 100亿
:return: 过滤后的选股结果
"""
if dfg.empty or not min_total_mv:
return dfg
return dfg[dfg['total_mv'] >= min_total_mv]
def selected_filter_by_rps(dfg, n=21, v_range=(0.2, 0.8), max_count=-1):
"""使用b20b过滤,b20b 表示前20个交易日的涨跌幅
:param dfg: 单个交易日的强势股选股结果
:param n: RPS的计算区间
:param v_range: RPS值按从大到小排序后的可选区间
默认为 0.2 ~ 0.8,表示可选区间为排序位置在 20% ~ 80% 区间的股票
:param max_count: 最多保留结果数量
:return: 过滤后的选股结果
"""
if dfg.empty or (not max_count) or len(dfg) < max_count:
return dfg
rps_col = f"b{n}b"
# dfg = dfg.sort_values(rps_col, ascending=True)
# dfg = dfg.reset_index(drop=True)
# dfg = dfg.iloc[int(len(dfg) * v_range[0]): int(len(dfg) * v_range[1])]
# return dfg.tail(max_count)
split = v_range[1]
dfg = dfg.sort_values(rps_col, ascending=True)
head_i = int((len(dfg) - max_count) * split) + 1
tail_i = len(dfg) - int((len(dfg) - max_count) * (1 - split))
return dfg.iloc[head_i: tail_i]
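# --- Illustrative usage sketch (added for clarity, not part of the original czsc module) ---
# Shows how the band selection above behaves: with ten candidates, max_count=3 and
# v_range=(0.2, 0.8), the rows kept sit roughly around the 80% position of the sorted
# "b21b" column. The column name follows rps_col = f"b{n}b"; the sample values are made up.
def _demo_selected_filter_by_rps():
    demo = pd.DataFrame({
        "ts_code": [f"00000{i}.SZ" for i in range(10)],
        "b21b": [i / 10 for i in range(10)],
    })
    # keeps three rows whose b21b values sit near the upper end of the ranking
    return selected_filter_by_rps(demo, n=21, v_range=(0.2, 0.8), max_count=3)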
def create_next_positions(dc: TsDataCache, dfg: pd.DataFrame):
"""构建某天选股结果对应的下一交易日持仓明细
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:return: 下一交易日持仓明细
"""
if dfg.empty:
return dfg
trade_cal = dc.trade_cal()
trade_cal = trade_cal[trade_cal.is_open == 1]
trade_dates = trade_cal.cal_date.to_list()
trade_date = dfg['trade_date'].iloc[0]
hold = dfg.copy()
hold['成分日期'] = trade_dates[trade_dates.index(trade_date.strftime("%Y%m%d")) + 1]
hold['持仓权重'] = 0.98 / len(dfg)
hold.rename({'ts_code': "证券代码", "close": "交易价格"}, inplace=True, axis=1)
hold = hold[['证券代码', '持仓权重', '交易价格', '成分日期']]
hold['成分日期'] = pd.to_datetime(hold['成分日期']).apply(lambda x: x.strftime("%Y/%m/%d"))
return hold
def plot_alpha_v1(beta_name, df_alpha, file_png) -> None:
"""用三个并列线图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
plt.close()
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(9, 5*3))
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
for i, col in enumerate(['alpha_curve', 'selector_curve', f"beta_curve:{beta_name}"], 0):
ax = axes[i]
sns.lineplot(x='date', y=col, data=df_alpha, ax=ax)
ax.text(x=df_alpha['date'].iloc[0], y=int(df_alpha[col].mean()),
s=f"{col}:{int(df_alpha[col].iloc[-1])}", fontsize=12)
ax.set_title(f"{col}", loc='center')
ax.set_xlabel("")
plt.savefig(file_png, bbox_inches='tight', dpi=100)
plt.close()
def plot_alpha_v2(beta_name, df_alpha, file_png) -> None:
"""用线图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
sns.lineplot(x='date', y='alpha_curve', data=df_alpha)
sns.lineplot(x='date', y='selector_curve', data=df_alpha)
sns.lineplot(x='date', y=f"beta_curve:{beta_name}", data=df_alpha)
plt.legend(labels=['超额', '选股', f"基准{beta_name}"])
plt.savefig(file_png, bbox_inches='tight', dpi=100)
def plot_alpha_v3(beta_name, df_alpha, file_png) -> None:
"""用类似MACD图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha'] = df_alpha['selector'] - df_alpha['beta']
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
x = df_alpha['date']
plt.bar(x, height=df_alpha['alpha'], width=0.01, color='blue', label='alpha')
plt.plot(x, df_alpha['alpha_curve'], label='alpha_curve')
plt.plot(x, df_alpha['selector_curve'], label='selector_curve')
plt.plot(x, df_alpha[f"beta_curve:{beta_name}"], label=f"beta_curve:{beta_name}")
plt.legend()
plt.savefig(file_png, bbox_inches='tight', dpi=100)
class StocksDaySensor:
"""以日线为基础周期的强势股票感应器
输入:市场个股全部行情、概念板块成分信息
输出:强势个股列表以及概念板块分布
"""
def __init__(self,
experiment_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
strategy: Callable,
signals_n: int = 0,
):
self.name = self.__class__.__name__
self.version = "V20220404"
self.experiment_path = experiment_path
self.results_path = os.path.join(experiment_path, f"{strategy()[1]().name}_{sdt}_{edt}")
self.signals_path = os.path.join(experiment_path, 'signals')
os.makedirs(self.experiment_path, exist_ok=True)
os.makedirs(self.results_path, exist_ok=True)
os.makedirs(self.signals_path, exist_ok=True)
self.sdt = sdt
self.edt = edt
self.verbose = os.environ.get('verbose', False)
self.strategy = strategy
self.signals_n = signals_n
self.get_signals, self.get_event = strategy()
self.event: Event = self.get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
self.file_docx = os.path.join(self.results_path, f'{self.event.name}_{sdt}_{edt}.docx')
writer = WordWriter(self.file_docx)
if not os.path.exists(self.file_docx):
writer.add_title("股票选股强度验证")
writer.add_page_break()
writer.add_heading(f"{datetime.now().strftime('%Y-%m-%d %H:%M')} {self.event.name}", level=1)
writer.add_heading("参数配置", level=2)
writer.add_paragraph(f"测试方法描述:{self.event.name}")
writer.add_paragraph(f"测试起止日期:{sdt} ~ {edt}")
writer.add_paragraph(f"信号计算函数:\n{inspect.getsource(self.get_signals)}")
writer.add_paragraph(f"事件具体描述:\n{inspect.getsource(self.get_event)}")
writer.save()
with open(os.path.join(self.results_path, f"{strategy.__name__}.txt"), mode='w') as f:
f.write(inspect.getsource(strategy))
self.writer = writer
self.dc = dc
self.betas = ['000905.SH', '000300.SH', '399006.SZ']
get_index_beta(dc, sdt, edt, freq='D', indices=self.betas,
file_xlsx=os.path.join(self.results_path, 'betas.xlsx'))
file_dfm = os.path.join(self.results_path, f'df_event_matched_{sdt}_{edt}.pkl')
file_dfb = os.path.join(self.experiment_path, f'df_all_bars_{sdt}_{edt}.pkl')
if os.path.exists(file_dfm):
self.dfm = io.read_pkl(file_dfm)
self.dfb = io.read_pkl(file_dfb)
else:
self.dfm, self.dfb = self.get_stock_strong_days()
io.save_pkl(self.dfm, file_dfm)
io.save_pkl(self.dfb, file_dfb)
self.nb_cols = [x for x in self.dfb.columns if x[0] == 'n' and x[-1] == 'b']
def get_share_strong_days(self, ts_code: str, name: str):
"""获取单个标的全部强势信号日期"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
file_signals = os.path.join(self.signals_path, f"{ts_code}.pkl")
if os.path.exists(file_signals):
signals, n_bars = io.read_pkl(file_signals)
if self.verbose:
print(f"get_share_strong_days: load signals from {file_signals}")
else:
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals,
signals_n=self.signals_n)
io.save_pkl([signals, n_bars], file_signals)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
event_matched = []
for s in signals:
m, f = event.is_match(s)
if m:
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
r = {'name': name, 'event_match': True, 'factor_match': f}
if nb_info:
r.update(nb_info)
event_matched.append(r)
dfs = pd.DataFrame(event_matched)
if event_matched:
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
dfs = dfs.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
dfs = dfs[pd.to_datetime(sdt) <= dfs['trade_date']]
dfs = dfs[dfs['trade_date'] <= pd.to_datetime(edt)]
print(f"{ts_code} - {name}: {len(dfs)}")
return dfs, n_bars
def get_stock_strong_days(self):
"""获取全部股票的强势日期"""
stocks = self.dc.stock_basic()
all_matched = []
all_bars = []
for row in tqdm(stocks.to_dict('records'), desc="get_stock_strong_days"):
ts_code = row['ts_code']
name = row['name']
try:
dfs, n_bars = self.get_share_strong_days(ts_code, name)
all_matched.append(dfs)
all_bars.append(n_bars)
except:
print(f"get_share_strong_days error: {ts_code}, {name}")
traceback.print_exc()
dfm = pd.concat(all_matched, ignore_index=True)
dfb = pd.concat(all_bars, ignore_index=True)
return dfm, dfb
def get_selected(self,
index_code=None,
fc_top_n=None,
fc_min_n=None,
min_total_mv=None,
max_count=None):
"""验证传感器在一组过滤参数下的表现
:param index_code: 指数成分过滤
:param fc_top_n: 板块效应过滤参数1
:param fc_min_n: 板块效应过滤参数2
:param min_total_mv: 市值效应过滤参数
:param max_count: 控制最大选出数量
:return:
"""
dc = self.dc
df = self.dfm
selected_dfg = {}
for trade_date, dfg in df.groupby('trade_date'):
try:
if dfg.empty:
print(f"{trade_date} 选股结果为空")
continue
dfg, key_concepts = selected_filter_by_concepts(dc, dfg, top_n=fc_top_n, min_n=fc_min_n)
dfg = selected_filter_by_market_value(dfg, min_total_mv)
dfg = selected_filter_by_index(dc, dfg, index_code)
dfg = selected_filter_by_rps(dfg, n=21, v_range=(0.1, 0.8), max_count=max_count)
selected_dfg[trade_date] = {'dfg': dfg, 'key_concepts': key_concepts}
except:
traceback.print_exc()
return selected_dfg
def get_agg_selected(self, selected_dfg, window_size: int = 1):
"""滑动窗口聚合选股结果
:param selected_dfg: get_selected 输出的结果
:param window_size: 聚合窗口大小
:return:
"""
dc = self.dc
dfb = self.dfb
assert window_size > 0
if window_size > 1:
trade_cal = dc.trade_cal()
trade_cal = trade_cal[trade_cal['is_open'] == 1].cal_date.to_list()
trade_cal = [pd.to_datetime(x) for x in trade_cal]
selected_agg = {}
for td, dfg in tqdm(selected_dfg.items(), desc='agg_by_window'):
i = trade_cal.index(td)
windows = trade_cal[i-window_size+1: i+1]
res = []
for td_ in windows:
dfg = selected_dfg.get(td_, None)
if dfg:
df = dfg['dfg']
df['trade_date'] = td
res.append(dfg['dfg'])
dfd = pd.concat(res, ignore_index=True)
dfd_cols = ['name', 'event_match', 'factor_match', 'ts_code', 'trade_date',
'total_mv', '概念板块', '概念数量']
dfd = dfd.drop_duplicates('ts_code', ignore_index=True)[dfd_cols]
dfd = dfd.merge(dfb, on=['ts_code', 'trade_date'], how='left')
selected_agg[td] = dfd
else:
selected_agg = {dt: x['dfg'] for dt, x in selected_dfg.items()}
return selected_agg
def validate_performance(self,
index_code=None,
fc_top_n=None,
fc_min_n=None,
min_total_mv=None,
max_count=None,
window_size=1,
save: bool = False,
):
"""验证传感器在一组过滤参数下的表现
:param index_code: 指数成分过滤
:param fc_top_n: 板块效应过滤参数1
:param fc_min_n: 板块效应过滤参数2
:param min_total_mv: 市值效应过滤参数
:param max_count: b20b过滤参数,控制最大选出数量
:param window_size: 按 window_size 聚合多天的结果到一天
:param save: 是否保存结果到本地
:return:
"""
dc = self.dc
sdt = self.sdt
edt = self.edt
selected_dfg = self.get_selected(index_code, fc_top_n, fc_min_n, min_total_mv, max_count)
selected_agg = self.get_agg_selected(selected_dfg, window_size)
# analyse the aggregated selections
performances = []
for td, df in selected_agg.items():
p = {'trade_date': td, "number": len(df)}
if df.empty:
p.update({x: 0 for x in self.nb_cols})
else:
p.update(df[self.nb_cols].mean().to_dict())
performances.append(p)
df_p = pd.DataFrame(performances)
df_detail = pd.concat([v for k, v in selected_agg.items()])
df_holds = pd.concat([create_next_positions(dc, v) for k, v in selected_agg.items()], ignore_index=True)
df_turns, tor = turn_over_rate(df_holds)
beta = get_index_beta(dc, sdt, edt, freq='D', file_xlsx=None, indices=self.betas)
df_n1b = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, LabelEncoder
from sklearn.compose import ColumnTransformer
from collections import defaultdict
def load_kropt():
# Read input dataset
dataset = os.path.join('datasets', 'kropt.arff')
data = loadarff(dataset)
df_data = pd.DataFrame(data[0])
df_describe = df_data.describe().reset_index()
feat = list(df_data.columns)
feat.remove('game')
#feat.remove('Class')
#print(feat)
# Pre-Processing
# Convert label features into numerical with Label Encoder
le = LabelEncoder()
label_encoder = np.zeros((df_data.shape[0], df_data.shape[1] - 1))
for i in range(len(feat)):
le.fit(df_data[feat[i]])
label_encoder[:, i] = le.transform(df_data[feat[i]])
y = le.fit_transform(df_data['game'])
#df_data_pp_prev = pd.DataFrame(label_encoder, columns=feat)
#df_data_pp_prev = df_data_pp_prev.describe().reset_index()
scaler = MinMaxScaler()
scaler.fit(label_encoder)
data_trans_scaled = scaler.transform(label_encoder)
df_data_pp = pd.DataFrame(data_trans_scaled, columns=feat)
return df_data_pp, y
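# --- Illustrative sketch (added; not part of the original loader module) ---
# The same label-encode-then-min-max-scale pattern used in load_kropt(), applied to a
# tiny made-up frame so the intermediate array shapes are easy to inspect.
def _demo_label_encode_then_scale():
    toy = pd.DataFrame({"col": ["a", "b", "c", "a"], "row": ["1", "2", "2", "3"]})
    encoded = np.zeros(toy.shape)
    le = LabelEncoder()
    for i, feat in enumerate(toy.columns):
        encoded[:, i] = le.fit_transform(toy[feat])
    return MinMaxScaler().fit_transform(encoded)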
def load_satimage():
path = os.path.join('datasets', 'satimage.arff')
data_satimage, meta_satimage = loadarff(path)
df_satimage = pd.DataFrame(data_satimage)
# Drop NaN values
df_satimage.dropna(inplace=True)
df_satimage.reset_index(drop=True,inplace=True)
# Save cluster columns for accuracy
cluster_satimage = pd.DataFrame(df_satimage["clase"])
cluster_satimage = cluster_satimage.astype(int) - 1
# Delete cluster columns
del df_satimage["clase"]
return df_satimage, cluster_satimage
def load_credita():
path = os.path.join('datasets', 'credit-a.arff')
raw_data = loadarff(path)
df = pd.DataFrame(raw_data[0])
y = df.pop('class')
X = df
y_label_encoder = LabelEncoder()
y = y_label_encoder.fit_transform(y)
# fill missing numerical values
X.fillna(X.mean(), inplace=True)
# fill missing categorical values
categ_cols = X.select_dtypes(include=['category', object]).columns
for col in categ_cols:
X[col].replace(b'?', X[col].mode()[0], inplace=True)
# standarize numerical features
num_cols = X.select_dtypes(include=['number']).columns
mm_scaler = MinMaxScaler()
X[num_cols] = mm_scaler.fit_transform(X[num_cols])
# use one transformer per feature to preserve its name in the generated features
# since new feature names are based on the transformer's name
transformers = [(col, OneHotEncoder(drop='first'), [col]) for col in categ_cols]
col_transformer = ColumnTransformer(transformers, remainder='passthrough')
X_arr = col_transformer.fit_transform(X)
X = pd.DataFrame(X_arr, columns=col_transformer.get_feature_names())
return X, y
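# --- Illustrative sketch (added; not part of the original loader module) ---
# Minimal example of the "one transformer per categorical column" trick used in
# load_credita(): naming each transformer after its column keeps the column name inside
# the generated one-hot feature names. get_feature_names() and the sparse flag follow
# the older scikit-learn API already assumed by the code above; the toy data is made up.
def _demo_per_column_onehot():
    toy = pd.DataFrame({"colour": ["red", "blue", "green"], "size": [1.0, 2.0, 3.0]})
    ct = ColumnTransformer([("colour", OneHotEncoder(drop='first', sparse=False), ["colour"])])
    return pd.DataFrame(ct.fit_transform(toy), columns=ct.get_feature_names())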
def load_waveform():
raw_data = loadarff('datasets/waveform.arff')
df = | pd.DataFrame(raw_data[0]) | pandas.DataFrame |
## Making the code corpus
## This involves:
##   - hitting every directory and reading every supported file
##   - forming a corpus of words without special symbols
##   - tokenizing CamelCase and Hungarian notation to split out new words
##   - dropping any word shorter than 3 letters
import os
import pandas as pd
import sys
import pickle
import configparser
from CodeComb_Core.embeddings import *
from CodeComb_Core.utils import *
from CodeComb_Core.env import *
## Read the file and pack it into file_info dict
def prepare_file(filename, location):
with open(os.path.join(location,filename), "r", encoding = 'latin-1') as fp:
file_data = fp.read()
#file_data = process_text(file_data)
file_info = dict()
file_info["body"] = file_data
file_info["name"] = filename
file_info["location"] = location
file_info["ext"] = filename[filename.rfind('.')+1:]
return file_info
# From file metas read all file content of supported formats
def read_all_supported_files(file_metas):
print ("READING FILES")
print ("Total files-{}".format(len(file_metas)))
file_contents = []
for i, info in enumerate(file_metas):
path_to_file = info['location']
file_name = info['name']
if ((i % 100) == 0):
print ("Processed {} files".format(i-1))
content = prepare_file(file_name, path_to_file)
file_contents.append(content)
return file_contents
def test_read_all_supported_files():
path = os.getcwd()
print ("TEST READ ALL CPP FILES")
files = get_supported_files_name_location(path)
file_contents = read_all_supported_files(files)
print (file_contents)
def test_prepare_file():
location = os.getcwd()
filename = "sample_cpp.cpp"
file_info = prepare_file(filename, location)
print ("TEST PREPARE FILE")
print (file_info)
return
## Load Formats from the config file
def load_format():
config = configparser.ConfigParser()
config.read(os.path.join(os.path.expanduser("~"), "codecomb_config.ini"))
fmts = list(config['FORMAT'].values())
formats_list = ["."+fmt for fmt in fmts]
return formats_list
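# Illustrative sketch (assumption, not part of the original source): load_format() expects
# ~/codecomb_config.ini to carry a [FORMAT] section whose values are bare extensions, e.g.
#
#   [FORMAT]
#   cpp = cpp
#   header = h
#   python = py
#
# which the function returns as ['.cpp', '.h', '.py'].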
# Get Metas
def get_supported_files_name_location(path):
files = []
# r=root, d=directories, f = files
for r, _, f in os.walk(path):
for file in f:
formats = load_format()
for fmt in formats:
if file.endswith(fmt):
info = {}
info['name'] = file
info['location'] = r
files.append(info)
return files
def test_get_supported_files_name_location():
print ("TEST GET SUPPORTED FILES NAME LOCATION")
path = "."
file_infos = get_supported_files_name_location(path)
print (file_infos)
print (len(file_infos))
## Recursively reads all supported file names from current locations
## And forms a DF
def get_all_files_currentdir(pickle_file):
PATH = os.getcwd()
files = get_supported_files_name_location(PATH)
file_contents = read_all_supported_files(files)
df_corpus = | pd.DataFrame(file_contents) | pandas.DataFrame |
from pathlib import Path
import nibabel as nib
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel
import tqdm
from nipype.interfaces import fsl
from utils.parcellation import (
parcellation_labels,
parcellation_fname,
)
def get_available_parcellations(mother_dir: Path):
parcellations = []
for f in mother_dir.rglob("registrations/*_FS/*_native.nii.gz"):
parcellations.append("_".join(f.name.split("_")[:-1]))
return list(set(parcellations))
def generate_parcellations_dict(
parcellations_dir: Path, avalilable_parcellations: list
):
parcellations_dict = {}
for parcellation in sorted(avalilable_parcellations):
parcellations_dict[parcellation] = {}
if "MMP" in parcellation:
atlas_parcels = pd.read_csv(
parcellations_dir / "MMP" / "MMP_parcels.csv", index_col=0
)
atlas_img = nib.load(
parcellations_dir / "MMP" / "MMP_in_MNI_corr.nii.gz"
)
elif "Brainnetome" in parcellation:
atlas_parcels = pd.read_csv(
parcellations_dir / "BNA_with_cerebellum.csv", index_col=0
)
atlas_parcels.index = atlas_parcels.Label
atlas_img = nib.load(
parcellations_dir / "BN_Atlas_274_combined.nii.gz"
)
elif "Schaefer" in parcellation:
n_networks, n_parcels = parcellation.split("_")[1:]
atlas_parcels = pd.DataFrame(
pd.read_csv(
parcellations_dir
/ f"Schaefer2018_{n_parcels}_{n_networks}_order.txt",
index_col=0,
sep="\t",
header=None,
).iloc[:, 0]
)
atlas_parcels.columns = ["ROIname"]
atlas_parcels["Label"] = atlas_parcels.index
atlas_img = nib.load(
parcellations_dir
/ f"Schaefer2018_{n_parcels}_{n_networks}_order_FSLMNI152_1mm.nii.gz"
)
parcellations_dict[parcellation]["atlas_parcels"] = atlas_parcels
parcellations_dict[parcellation]["atlas_img"] = atlas_img
return parcellations_dict
def read_atlas_parcels(atlas_parcels: Path) -> pd.DataFrame:
df = pd.read_csv(atlas_parcels, sep=";", index_col=0)
return df.iloc[:, :-1]
def read_parcels(atlas_parcels: Path) -> pd.DataFrame:
return pd.read_csv(atlas_parcels, index_col=0)
def read_bna(atlas_parcels: Path) -> pd.DataFrame:
df = pd.read_csv(atlas_parcels, sep=" ", header=None, index_col=0)
df = pd.DataFrame(
df.iloc[:, 0].values, columns=["ROIname"], index=df.index.values
)
return df
def read_aal_parcels(atlas_txt: Path) -> pd.DataFrame:
df = pd.read_csv(atlas_txt, sep=" ", header=None)
df.columns = ["Label", "ROIname", "2"]
return df.iloc[:, :-1]
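# --- Illustrative sketch (added; not part of the original script) ---
# Shows the three space-separated columns read_aal_parcels() expects (label, ROI name,
# colour code). pandas also accepts an in-memory buffer in place of the Path annotation,
# which keeps the example self-contained; the two rows below are only sample data.
def _demo_read_aal_parcels():
    import io
    atlas_txt = io.StringIO("2001 Precentral_L 1\n2002 Precentral_R 2\n")
    return read_aal_parcels(atlas_txt)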
def gather_subject_data(
derivatives_dir: Path, subj: str, atlas_name: str, norm_method: str = "CAT"
) -> dict:
fmriprep_dir = derivatives_dir.parent / "fmriprep" / subj
if not fmriprep_dir.exists():
return None, None
sessions = [f.name for f in fmriprep_dir.glob("ses-*")]
if len(sessions) > 1:
native_parcels = fmriprep_dir / "anat" / f"{atlas_name}_native.nii.gz"
gm_probseg = fmriprep_dir / "anat" / f"{subj}_label-GM_probseg.nii.gz"
else:
native_parcels = (
fmriprep_dir / sessions[0] / "anat" / f"{atlas_name}_native.nii.gz"
)
gm_probseg = (
fmriprep_dir
/ sessions[0]
/ "anat"
/ f"{subj}_{sessions[0]}_label-GM_probseg.nii.gz"
)
return native_parcels, gm_probseg
def crop_to_gm(native_parcels: Path, gm_probseg: Path):
cropped_parcels = (
native_parcels.parent
/ f"{native_parcels.name.split('.')[0]}_GM.nii.gz"
)
if not cropped_parcels.exists():
gm_mask = nib.load(gm_probseg).get_fdata().astype(bool)
orig_img = nib.load(native_parcels)
gm_parcels = orig_img.get_fdata()
gm_parcels[~gm_mask] = np.nan
gm_img = nib.Nifti1Image(gm_parcels, orig_img.affine)
nib.save(gm_img, cropped_parcels)
return cropped_parcels
def calc_average(parcel, atlas_data: np.ndarray, subj_data: np.ndarray, temp):
# temp = []
# for parcel in parcels:
mask = atlas_data == parcel
temp[parcel] = np.nanmean(subj_data[mask.astype(bool)])
return temp
# @jit(parallel=True)
def average_parcels(
atlas_data: np.ndarray, subj_img: Path, temp_df: pd.DataFrame
):
subj_data = nib.load(subj_img).get_fdata()
temp = np.zeros(temp_df.index.shape)
for i, parcel in enumerate(temp_df.index):
# print(temp_df.ROIname[parcel])
roi = temp_df.loc[parcel, "Label"]
mask = atlas_data == roi
temp[i] = np.nanmean(subj_data[mask.astype(bool)])
return temp
def parcellate_subjects_data(
derivatives_dir: Path,
atlas_parcels: pd.DataFrame,
norm_method: str = "CAT",
coreg_dirname: str = "coregistered",
):
subjects_dict = {}
for subj in sorted(derivatives_dir.glob("sub-*")):
print(subj.name)
try:
subj_data = {}
native_parcels_full, gm_mask = gather_subject_data(
derivatives_dir, subj.name, atlas_name, norm_method
)
if not native_parcels_full:
continue
native_parcels = crop_to_gm(native_parcels_full, gm_mask)
atlas_data = nib.load(native_parcels).get_fdata()
except FileNotFoundError:
print(
f"No {atlas_name} native parcellation found for {subj.name}!"
)
continue
for session in subj.glob("ses-*"):
print(session.name)
session_df = atlas_parcels.copy()
tensor_dir = session / "tensors_parameters" / coreg_dirname
tensor_dir.mkdir(exist_ok=True)
out_fname = tensor_dir / f"{atlas_name}_parcels.csv"
subj_data[session.name] = out_fname
# if not out_fname.exists():
params = [p for p in tensor_dir.glob("*.nii.gz")]
if not params or out_fname.exists():
continue
for param in tqdm.tqdm(params):
# print(param.name.split(".")[0])
session_df[param.name.split(".")[0]] = average_parcels(
atlas_data, param, session_df
)
session_df.to_csv(out_fname)
subjects_dict[subj.name] = subj_data
return (
subjects_dict,
[f.name.split(".")[0] for f in tensor_dir.glob("*.nii.gz")],
)
def generate_statistics(
subjects: dict,
out_dir: Path,
parameters: list,
atlas_parcels: pd.DataFrame,
):
out_dict = {}
for param in tqdm.tqdm(parameters):
out_dict[param] = {}
param_dir = out_dir / param
param_dir.mkdir(exist_ok=True)
param_df = pd.DataFrame(
index=subjects.keys(), columns=atlas_parcels.ROIname.values
)
for session in ["ses-1", "ses-2"]:
out_fname = param_dir / f"{session}.csv"
# print(out_fname)
out_dict[param][session] = out_fname
# if out_fname.exists():
# continue
session_df = param_df.copy()
for subj, subj_data in subjects.items():
# print(subj)
session_df.loc[subj] = pd.read_csv(
subj_data.get(session), index_col=0
)[param].values
session_df.to_csv(out_fname)
return out_dict
def calculate_statistics(out_dir: Path, inputs: dict):
for param, sessions in inputs.items():
# print(Path(out_dir / param).exists())
out_fname = out_dir / param / "statistics.csv"
# if out_fname.exists():
# continue
before, after = [
pd.read_csv(session, index_col=0) for session in sessions.values()
]
t, p = ttest_rel(before, after, axis=0, nan_policy="omit")
df = | pd.DataFrame(columns=["t", "p"], index=before.columns) | pandas.DataFrame |
import sys
sys.path.append('../')
def WriteAriesScenarioToDB(scenarioName, ForecastName, ForecastYear, start_date, end_date, User, Area, GFO = False, CorpID = ['ALL']):
from Model import ImportUtility as i
from Model import BPXDatabase as bpxdb
from Model import ModelLayer as m
import datetime as dt
Success = True
Messages = []
try:
#Query the Aries database using import methods
scenario_results, Success, Messages = i.ImportAriesByScenario(scenarioName, start_date, end_date, Area)
#Create NF columns for oil and gas (replace nan with 0)
scenario_results['OilNF'] = scenario_results['C754'] / scenario_results['GasProduction']
scenario_results['GasNF'] = scenario_results['C753'] / scenario_results['OilProduction']
scenario_results = scenario_results.fillna(0)
#Obtain list from scenario query results
CorpID_list = scenario_results['CorpID'].to_list()
CorpID_list = list(set(CorpID_list))
config = m.GetConfig()
DBObj = bpxdb.BPXDatabase(config['server'], config['database'], config['UID'])
#Linearly regress the data
#Two segments: previous month's mid average and next month's mid average - regress to both to get the values.
count = 1
for corpID in CorpID_list:
#Get the subset of results that match this wellflac
corpid_scenario_df = scenario_results.query('CorpID == @corpID')
corpid_scenario_df = corpid_scenario_df.sort_values(by = ['Date'], ascending = True)
if corpid_scenario_df.shape[0] > 1:
df_previous_row = (0, corpid_scenario_df.iloc[1])
wellflac_count = 1
header_corpID = ''
for df_row in corpid_scenario_df.iterrows():
if wellflac_count == 1:
df_next_row = corpid_scenario_df.iloc[wellflac_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
Success, Message = WriteInterpolatedForecastToDB(df_row[1]['WellName'], corpID, ForecastName, ForecastYear, scenarioName, GFO, User, results)
if not Success:
Messages.append(Message)
break
df_previous_row = df_row
wellflac_count = wellflac_count + 1
callprogressbar(count, len(CorpID_list))
count = count + 1
except Exception as ex:
Success = False
Messages.append('Failed to write the results from chosen scenario in Aries database. ' + str(ex))
return Success, Messages
def SOHA_WriteGFOToDB_2019Database(ForecastName, ForecastYear, User, start_date, end_date, WellFlac = ['ALL'], GFO = False):
#Part of a set of soon-to-be-deprecated methods to convert SoHa internal GFO data to the standard format
from Model import BPXDatabase as bpxdb
from Model import QueryFile as qf
from Model import ImportUtility as imp
from Model import ModelLayer as m
import datetime as dt
import numpy as np
Success = True
Messages = []
try:
config = m.GetConfig()
#Create DB Object
return_df, Success, Message = imp.ImportGFOFromDB2019(start_date, end_date, WellFlac)
if not Success:
Messages.append(Message)
Production_Column_Name = '2019Zmcfd'
Success, Message = WriteInternalForecasttoDB(return_df, ForecastName, ForecastYear, Production_Column_Name, User, GFO)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error writing GFO to DB. ' + str(ex))
return Success, Messages
def SOHA_WriteGFOToDB_2018Database(ForecastName, ForecastYear, User, start_date, end_date, WellFlac = ['ALL'], GFO = False):
#Part of a set of soon-to-be-deprecated methods to convert SoHa internal GFO data to the standard format
from Model import BPXDatabase as bpxdb
from Model import QueryFile as qf
from Model import ImportUtility as imp
from Model import ModelLayer as m
import datetime as dt
import numpy as np
Success = True
Messages = []
try:
config = m.GetConfig()
#Create DB Object
return_df, Success, Message = imp.ImportGFOFromDB2019(start_date, end_date, WellFlac)
if not Success:
Messages.append(Message)
Production_Column_Name = '2018Zmcfd'
Success, Message = WriteInternalForecasttoDB(return_df, ForecastName, ForecastYear, Production_Column_Name, User, GFO)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error writing GFO to DB. ' + str(ex))
return Success, Messages
def SOHA_WriteInternalForecasttoDB(df,ForecastName, ForecastYear, Production_Column_Name, User, GFO=True):
#Part of a set of soon-to-be-deprecated methods to convert SoHa internal GFO data to the standard format
from Model import BPXDatabase as bpx
from Model import ModelLayer as m
import datetime as dt
from Model import QueryFile as qf
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
EDWObj = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE')
wellname_list = df['WellName'].unique()
wellname_list = list(wellname_list)
if '' in wellname_list:
wellname_list.remove('')
count = 1
for name in wellname_list:
monthly_df = df.query('WellName == @name')
monthly_df = monthly_df.sort_values(by = ['Date'], ascending = True)
df_previous_row = (0, monthly_df.iloc[1])
nettingFactor = monthly_df['NettingFactor'].values[0]
well_count = 1
header_corpid = ''
for df_row in monthly_df.iterrows():
if well_count == 1:
df_next_row = monthly_df.iloc[well_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row, GasRateField=Production_Column_Name)
elif well_count != monthly_df.shape[0] and well_count != 1:
df_next_row = monthly_df.iloc[well_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row[1], GasRateField=Production_Column_Name)
elif well_count == monthly_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1], GasRateField=Production_Column_Name)
for row in results.iterrows():
corpid_query = qf.EDWKeyQueryFromWellName([name])
corpid_results = EDWObj.Query(corpid_query)
if not corpid_results[1].empty:
CorpID = corpid_results[1].at[0,'CorpID']
else:
CorpID = name
WellName = name
Update_Date = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
Update_User = User
if header_corpid != CorpID:
#Create Header entry
header_corpid = CorpID
ForecastHeaderObj = m.ForecastHeaderRow(WellName, CorpID, ForecastName, ForecastYear, '', [], GFO, DBObj)
Success, Message = ForecastHeaderObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
Date_Key = row[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = row[1]['GasProduction']
GasNF = row[1]['GasNF']
if Gas_Production >= 0 and Date_Key:
ForecastDataObj = m.ForecastDataRow(ForecastName, CorpID, Date_Key, Gas_Production, 0, 0, GasNF, 0, 0, DBObj)
Success, Message = ForecastDataObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
df_previous_row = df_row
well_count = well_count + 1
callprogressbar(count, len(wellname_list))
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error writing Forecast to Database. ' + str(ex))
return Success, Messages
def SOHA_WriteGasNettingFactorsFromDB(Update_User, Update_Date, wellnames = []):
from Model import BPXDatabase as bpx
from Model import QueryFile as qf
from Model import ModelLayer as m
import datetime as datetime
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
TeamOpsObj = bpx.GetDBEnvironment('OnPrem', 'OVERRIDE')
EDWObj = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE')
#Get Well List of required netting values from data that is already in database.
query = qf.GetNettingFactorsfromDB(wellnames)
res, res_df = TeamOpsObj.Query(query)
count = 1
for idx, item in res_df.iterrows():
wellquery = qf.EDWKeyQueryFromWellName([item['WellName']])
res, well_row = EDWObj.Query(wellquery)
if not well_row.empty:
corpID = well_row['CorpID'].values[0]
NettingObj = m.GasNettingRow(item['WellName'], corpID, item['NF'], item['FirstSalesDateInput'], DBObj)
Success, Message = NettingObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
callprogressbar(count, res_df.shape[0])
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error during write of netting factors to DB. ' + str(ex))
return Success, Messages
def WriteDefaultMultipliers(LE_Name, DefaultValue, Update_User, Update_Date, SuppressMessages):
import datetime as datetime
from Model import BPXDatabase as bpx
from Model import ModelLayer as m
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
#Query the LE results
LE_query = 'select * from [LEForecastDatabase].[dbo].[LE_Data] where HeaderName = \'' + LE_Name + '\''
res, df = DBObj.Query(LE_query)
count = 1
for idx, row in df.iterrows():
FracHitObj = m.FracHitMultipliersRow(row['HeaderName'], row['CorpID'], row['Date_Key'], str(DefaultValue), DBObj)
Success, Message = FracHitObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
if not SuppressMessages:
callprogressbar(count, df.shape[0])
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error during write of default frac hit multipliers. ' + str(ex))
return Success, Messages
def WriteLEFromExcel(LEName, LE_Date,filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, InterpolationMethod, Phase, Update_User, Update_Date, IDs = ['ALL'] ):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Messages = []
Success = True
try:
all_data_df, Success, Message = i.ImportForecastFromExcel(filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, Phase, '', '', ['ALL'])
if Success:
if corpID_col:
IDCol = 'CorpID'
else:
IDCol = 'WellName'
Success, Message = WriteLEFromTemplate(all_data_df, InterpolationMethod, LEName, LE_Date, Update_User, IDCol)
if not Success:
Messages.append(Message)
else:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error during write of LE data from Excel sheet. ' + str(ex))
return Success, Messages
def WriteForecastFromExcel(ForecastName, ForecastYear,scenarioName, GFO, filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, InterpolationMethod, Phase, Update_User, Update_Date, IDs = ['ALL'] ):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Messages = []
Success = True
try:
all_data_df, Success, Message = i.ImportForecastFromExcel(filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, Phase, '', '', ['ALL'])
if Success:
if corpID_col:
IDCol = 'CorpID'
else:
IDCol = 'WellName'
Success, Message = WriteForecastFromTemplate(all_data_df, InterpolationMethod, ForecastName, ForecastYear, scenarioName, GFO, Update_User, IDCol)
if not Success:
Messages.append(Message)
else:
Messages.append(Message)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error during the write of Forecast from Excel sheet. ' + str(ex))
return Success, Messages
def WriteForecastFromTemplate(all_data_df, InterpolationMethod, ForecastName, ForecastYear, scenarioName, GFO, Update_User, IDCol='WellName'):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Success = True
Messages = []
results = []
try:
#The DataFrame must have the same structure as the output of the 'Read From Excel' function:
#'CorpID', 'WellName', 'Wedge', 'Date', 'Gas', 'Oil', 'Water', 'OilNF', 'GasNF'
wellname = ''
if not Success:
Messages.append(Message)
if IDCol == 'CorpID':
corpid_list = list(all_data_df['CorpID'].unique())
corpid_query = qf.EDWKeyQueryFromCorpID(corpid_list)
corpid_results, corpid_df = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE').Query(corpid_query)
well_list = list(corpid_df['WellName'].unique())
well_query = 'CorpID == @corpid'
else:
well_list = list(all_data_df['WellName'].unique())
well_query = 'WellName == @wellname'
well_list = [i for i in well_list if i]
for wellname in well_list:
wellname, corpid = i.GetWellandCorpID(wellname, '')
if not corpid:
corpid = wellname
data_df = all_data_df.query(well_query)
row_count = 1
if not data_df.empty:
df_previous_row = (0, data_df.iloc[1])
for idx, df_row in data_df.iterrows():
if InterpolationMethod == 'MonthlyRates':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, GasProduction='Gas', OilProduction='Oil')
elif row_count != data_df.shape[0] and row_count != 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif row_count == data_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif InterpolationMethod == 'MonthlyVolume':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
elif InterpolationMethod == 'None':
results = ConvertNonInterpolatedResults(df_row)
Success, Message = WriteInterpolatedForecastToDB(wellname, corpid, ForecastName, ForecastYear, scenarioName, GFO, Update_User, results)
if not Success:
Messages.append(Message)
df_previous_row = df_row
row_count = row_count + 1
except Exception as ex:
Success = False
Messages.append('Error during the writing of the forecast from template. ' + str(ex))
return Success, Messages
def WriteLEFromTemplate(all_data_df, InterpolationMethod, LEName, LE_Date, Update_User, IDCol = 'WellName'):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Success = True
Messages = []
results = []
try:
#The DataFrame must have the same structure as the output of the 'Read From Excel' function:
#'CorpID', 'WellName', 'Wedge', 'Date', 'Gas', 'Oil', 'Water', 'OilNF', 'GasNF'
wellname = ''
if not Success:
Messages.append(Message)
if IDCol == 'CorpID':
corpid_list = list(all_data_df['CorpID'].unique())
corpid_query = qf.EDWKeyQueryFromCorpID(corpid_list)
corpid_results, corpid_df = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE').Query(corpid_query)
well_list = list(corpid_df['WellName'].unique())
well_query = 'CorpID == @corpid'
else:
well_list = list(all_data_df['WellName'].unique())
well_query = 'WellName == @wellname'
well_list = [i for i in well_list if i]
for wellname in well_list:
wellname, corpid = i.GetWellandCorpID(wellname, '')
if not corpid:
corpid = wellname
data_df = all_data_df.query(well_query)
row_count = 1
if not data_df.empty:
df_previous_row = (0, data_df.iloc[1])
for idx, df_row in data_df.iterrows():
if InterpolationMethod == 'MonthlyRates':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, GasProduction='Gas', OilProduction='Oil')
elif row_count != data_df.shape[0] and row_count != 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif row_count == data_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif InterpolationMethod == 'MonthlyVolume':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
elif InterpolationMethod == 'None':
results = ConvertNonInterpolatedResults(df_row)
Wedge, Message = i.GetWedgeData(corpid, True)
Success, Message = WriteInterpolatedLEToDB(LEName, wellname, corpid, '', Wedge, LE_Date, Update_User, results)
if not Success:
Messages.append(Message)
df_previous_row = df_row
row_count = row_count + 1
except Exception as ex:
Success = False
Messages.append('Error during the writing of the LE from template. ' + str(ex))
return Success, Messages
def WriteInterpolatedForecastToDB(WellName, corpID, ForecastName, ForecastYear, scenarioName, GFO, UserName, results):
import datetime as dt
import pandas as pd
from Model import ModelLayer as m
header_corpID = ''
Messages = []
for item in results.iterrows():
idx = item[0]
UpdateDate = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if header_corpID != corpID:
ForecastHeaderObj = m.ForecastHeaderRow(WellName, corpID, ForecastName, ForecastYear, scenarioName, [], GFO, '')
Success, Message = ForecastHeaderObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
else:
header_corpID = corpID
Date_Key = item[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = item[1]['GasProduction']
Oil_Production = item[1]['OilProduction']
GasNF = item[1]['GasNF']
OilNF = item[1]['OilNF']
ForecastDataObj = m.ForecastDataRow(ForecastName, corpID, Date_Key, Gas_Production, Oil_Production, 0, GasNF, OilNF, 0, '')
Success, Message = ForecastDataObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
return Success, Messages
def WriteInterpolatedLEToDB(LEName, WellName, CorpID, ForecastGeneratedFrom, Wedge, LE_Date, UserName, results):
import datetime as dt
import pandas as pd
from Model import ModelLayer as m
header_corpID = ''
Messages = []
for item in results.iterrows():
idx = item[0]
UpdateDate = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if header_corpID != CorpID:
LEHeaderObj = m.LEHeaderRow(LEName, WellName, CorpID, ForecastGeneratedFrom, Wedge, LE_Date, '')
Success, Message = LEHeaderObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
else:
header_corpID = CorpID
Date_Key = item[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = item[1]['GasProduction']
Oil_Production = item[1]['OilProduction']
LEDataObj = m.LEDataRow(LEName, CorpID, Date_Key, Gas_Production, Oil_Production, 0, '')
Success, Message = LEDataObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
return Success, Messages
def callprogressbar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
def InterpolateDailyRatesFromMonthlyVolumes(**kwargs):
#Take in the monthly cumulative volumes that are assigned at the 'end' of the month in Aries
#Assign daily production and return the results
from datetime import timedelta
import pandas as pd
import math
previous_month_val = ''
current_month_val = ''
next_month_val = ''
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
previous_month_bool = False
current_month_bool = False
next_month_bool = False
for key, value in kwargs.items():
if key=='PreviousMonthVal':
previous_month_val = value
previous_month_bool = True
elif key=='CurrentMonthVal':
current_month_val = value
current_month_bool = True
elif key == 'NextMonthVal':
next_month_val = value
next_month_bool = True
if previous_month_bool and current_month_bool and not next_month_bool:
#Get number of days between previous val and current val (should be roughly 1 month)
previous_date = previous_month_val['Date']
current_month_date = current_month_val['Date']
diff = current_month_date - previous_date
days = diff.days
normal_days = 30.42
#Get slope between the two values
previous_gas_volume = previous_month_val['GasProduction']
current_gas_volume = current_month_val['GasProduction']
gas_slope = (current_gas_volume - previous_gas_volume) / normal_days #Average days in a month
previous_oil_volume = previous_month_val['OilProduction']
current_oil_volume = current_month_val['OilProduction']
oil_slope = (current_oil_volume - previous_oil_volume) / normal_days
if current_gas_volume > 0:
gasnettingFactor = current_month_val['GasNF']
else:
gasnettingFactor = 0
if current_oil_volume > 0:
oilnettingFactor = current_month_val['OilNF']
else:
oilnettingFactor = 0
return_row = {}
for day in range(days):
#Add an entry to the return data frame
gas_production = previous_gas_volume + (day + 1) * gas_slope
if gas_production > 0:
return_row['GasProduction'] = gas_production / normal_days
else:
return_row['GasProduction'] = 0
oil_production = previous_oil_volume + (day + 1) * oil_slope
if oil_production > 0:
return_row['OilProduction'] = oil_production / normal_days
else:
return_row['OilProduction'] = 0
return_row['Date'] = previous_date + timedelta(days = (day+1))
return_row['GasNF'] = gasnettingFactor
return_row['OilNF'] = oilnettingFactor
return_df = return_df.append(return_row, ignore_index = True)
elif current_month_bool and next_month_bool and not previous_month_bool:
current_month_date = current_month_val['Date']
next_month_date = next_month_val['Date']
diff = next_month_date - current_month_date
days = current_month_date.day
normal_days = 30.42
current_gas_volume = current_month_val['GasProduction']
next_gas_volume = next_month_val['GasProduction']
gas_slope = (next_gas_volume - current_gas_volume) / normal_days
current_oil_volume = current_month_val['OilProduction']
next_oil_volume = next_month_val['OilProduction']
oil_slope = (next_oil_volume - current_oil_volume) / normal_days
if current_gas_volume > 0:
gasnettingFactor = current_month_val['GasNF']
else:
gasnettingFactor = 0
if current_oil_volume > 0:
oilnettingFactor = current_month_val['OilNF']
else:
oilnettingFactor = 0
return_row = {}
for day in range(days):
gas_production = current_gas_volume - day * gas_slope
if gas_production > 0:
return_row['GasProduction'] = gas_production / normal_days
else:
return_row['GasProduction'] = 0
oil_production = current_oil_volume - day * oil_slope
if oil_production > 0:
return_row['OilProduction'] = oil_production / normal_days
else:
return_row['OilProduction'] = 0
return_row['Date'] = current_month_date - timedelta(days = day)
return_row['GasNF'] = gasnettingFactor
return_row['OilNF'] = oilnettingFactor
return_df = return_df.append(return_row, ignore_index = True)
return return_df
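# --- Illustrative usage sketch (added; not part of the original module) ---
# Feeds two consecutive monthly rows into the interpolator above. The field names mirror
# the keys the function reads; the dates and volumes are made up.
def _demo_interpolate_monthly_volumes():
    import pandas as pd
    previous_row = pd.Series({'Date': pd.Timestamp('2020-01-31'), 'GasProduction': 3000.0,
                              'OilProduction': 300.0, 'GasNF': 0.9, 'OilNF': 0.85})
    current_row = pd.Series({'Date': pd.Timestamp('2020-02-29'), 'GasProduction': 2800.0,
                             'OilProduction': 280.0, 'GasNF': 0.9, 'OilNF': 0.85})
    # returns one row per day between the two dates with average daily rates
    return InterpolateDailyRatesFromMonthlyVolumes(PreviousMonthVal=previous_row,
                                                   CurrentMonthVal=current_row)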
def ConvertNonInterpolatedResults(df_row):
from datetime import datetime, timedelta
import pandas as pd
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
return_row = {}
return_row['GasProduction'] = df_row['Gas']
return_row['OilProduction'] = df_row['Oil']
return_row['GasNF'] = df_row['GasNF']
return_row['OilNF'] = df_row['OilNF']
return_row['Date'] = df_row['Date']
return_df = return_df.append(return_row, ignore_index = True)
return return_df
def InterpolateDailyRatesFromMonthlyRates(**kwargs):
#Take in the monthly average rates that are assigned at the 'beginning' of the month in internal databases
#Assign daily production and return the results
from datetime import datetime, timedelta
import pandas as pd
import math
previous_month_val = ''
current_month_val = ''
next_month_val = ''
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
previous_month_bool = False
current_month_bool = False
next_month_bool = False
for key, value in kwargs.items():
if key=='PreviousMonthVal':
previous_month_val = value
previous_month_bool = True
elif key=='CurrentMonthVal':
current_month_val = value
current_month_bool = True
elif key == 'NextMonthVal':
next_month_val = value
next_month_bool = True
elif key == 'GasProduction':
gas_rate = value
elif key == 'OilProduction':
oil_rate = value
if previous_month_bool and current_month_bool and not next_month_bool:
#Scenario for the end of the analysis when no next month's data exists
current_month_date = current_month_val['Date']
previous_date = previous_month_val['Date']
current_gas_rate = current_month_val[gas_rate]
previous_gas_rate = previous_month_val[gas_rate]
current_oil_rate = current_month_val[oil_rate]
previous_oil_rate = previous_month_val[oil_rate]
days_in_month = pd.to_datetime(current_month_date + | pd.tseries.offsets.MonthEnd(1) | pandas.tseries.offsets.MonthEnd |
import os
import pyproj
import pandas as pd
import numpy as np
ancpth = os.path.join(os.path.dirname(__file__), 'ancillary')
shppth = os.path.join(os.path.dirname(__file__), 'shp')
lcc_wkt = \
"""PROJCS["North_America_Lambert_Conformal_Conic",
GEOGCS["GCS_North_American_1983",
DATUM["North_American_Datum_1983",
SPHEROID["GRS_1980",6378137,298.257222101]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]],
PROJECTION["Lambert_Conformal_Conic_2SP"],
PARAMETER["False_Easting",5632642.22547],
PARAMETER["False_Northing",4612545.65137],
PARAMETER["Central_Meridian",-107],
PARAMETER["Standard_Parallel_1",50],
PARAMETER["Standard_Parallel_2",50],
PARAMETER["Latitude_Of_Origin",50],
UNIT["Meter",1],
AUTHORITY["EPSG","102009"]]"""
nad83_wkt = \
"""GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]]"""
def fill(data, invalid=None):
"""
Taken from https://stackoverflow.com/a/9262129/698809
Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell
Input:
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'.
data values are replaced where invalid is True
If None (default), use: invalid = np.isnan(data)
Output:
Return a filled array.
"""
from scipy import ndimage as nd
if invalid is None: invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
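# Illustrative example (added; not part of the original module): nearest-neighbour filling
# of a tiny grid with two missing cells; each NaN takes the value of its closest valid cell.
def _demo_fill():
    grid = np.array([[1.0, np.nan],
                     [np.nan, 4.0]])
    return fill(grid)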
class Pixels(object):
"""
:param object:
:return:
"""
def __init__(self):
self.nrow, self.ncol = 407, 474
self._pixel_list = self.load_pixels()
# Add projected coordinates
nad83 = pyproj.Proj(nad83_wkt)
lcc = pyproj.Proj(lcc_wkt)
# transformer = pyproj.Transformer.from_crs(nad83, lcc)
# _x, _y = transformer.transform(self._pixel_list.longitude.values,
# self._pixel_list.latitude.values)
_x, _y = pyproj.transform(nad83,
lcc,
self._pixel_list.latitude.values,
self._pixel_list.longitude.values
)
self.proj = lcc
self._pixel_list.loc[:, 'x'] = _x
self._pixel_list.loc[:, 'y'] = _y
# Reorder pixels based on NEXRAD sequencing
self._pixel_list = self._pixel_list.loc[self.calc_nrpix_sequence_number()]
# Add row/column information
i = np.zeros((self.nrow, self.ncol), dtype=int)
for ii in range(self.nrow):
i[ii, :] += ii
j = np.zeros((self.nrow, self.ncol), dtype=int)
for jj in range(self.ncol):
j[:, jj] += jj
self._pixel_list.loc[:, 'i'] = i.ravel()
self._pixel_list.loc[:, 'j'] = j.ravel()
# Add sequence numbers
self._pixel_list.loc[:, 'sequence_number'] = self.calc_sequence_number()
self._pixel_list.loc[:, 'nrpix_sequence_number'] = self.calc_nrpix_sequence_number()
self._pixel_list.loc[:, 'fortran_sequence_number'] = self.calc_fortran_sequence_number()
@staticmethod
def load_pixels():
fname = os.path.join(ancpth, 'pixels.txt')
pix_df = pd.read_csv(fname, index_col=['pixel'], usecols=['pixel', 'latitude', 'longitude'])
return pix_df
def calc_sequence_number(self):
"""
Pixel numbering starts at upper-left and increases with row-major order.
"""
return np.array(range(1, (self.nrow * self.ncol) + 1))
def calc_nrpix_sequence_number(self):
"""
Pixel numbering starts at lower-left and increases with row-major order.
"""
seq = np.array(range(1, (self.nrow * self.ncol) + 1))
seq = seq.reshape(self.nrow, self.ncol)
return seq[::-1, :].ravel()
def calc_fortran_sequence_number(self):
"""
Pixel numbering starts at lower-right and increases with column-major order.
"""
seq = np.array(range(1, (self.nrow * self.ncol) + 1))[::-1]
seq = seq.reshape(self.nrow, self.ncol, order='f')
return seq.ravel()
@property
def i(self):
return self._pixel_list.loc[:, 'i'].copy()
@property
def j(self):
return self._pixel_list.loc[:, 'j'].copy()
@property
def x(self):
return self._pixel_list.loc[:, 'x'].copy()
@property
def y(self):
return self._pixel_list.loc[:, 'y'].copy()
@property
def latitude(self):
return self._pixel_list.loc[:, 'latitude'].copy()
@property
def longitude(self):
return self._pixel_list.loc[:, 'longitude'].copy()
@property
def data(self):
return self._pixel_list
class GoesAsciiFile(object):
"""
A thin wrapper around the Pandas DataFrame class to help read
and manipulate legacy ASCII files.
Attributes
----------
Methods
-------
Examples
--------
>>> import goeset
>>> etfile = goeset.GoesAsciiFile('Florida_2017.txt')
>>> eto = etfile.get_array('RET')
>>> import goeset
>>> etfile = goeset.GoesAsciiFile('Florida_2017.txt')
>>> df = etfile.get_dataframe(nrows=500)
"""
def __init__(self, fpth):
s = f'Could not locate input file {fpth}.'
assert os.path.isfile(fpth), s
self.fpth = fpth
self.nodata_value = -9999.9
# self.aea_proj4 = '+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-84 +x_' \
# '0=0 +y_0=0 +datum=NAD83 +units=ft +no_defs '
self.pixels = Pixels()
self.reference_table = self.get_pixel_reference_table()
self.nrow, self.ncol = 407, 474
self._df = None
self._dates = None
# self._x, self._y = self._get_xy_arrays()
# self._latitude, self._longitude = self._get_latlon_arrays()
self._oldfmt, self._header = self.get_header()
@property
def df(self):
if self._df is None:
self._df = self.get_dataframe()
return self._df
@property
def dates(self):
if self._df is None:
self._df = self.get_dataframe()
return self._dates
@property
def header(self):
return self._header
@property
def latitude(self):
return self.pixels.data.latitude.values.reshape(self.nrow, self.ncol)
@property
def longitude(self):
return self.pixels.data.longitude.values.reshape(self.nrow, self.ncol)
@staticmethod
def get_pixel_reference_table():
"""
Load the list of pixels.
:return:
"""
dtype = {'NRpix': int, 'fips_county': str}
fname = os.path.join(ancpth, 'pixel_reference.csv')
tbl = pd.read_csv(fname, index_col=['NRpix'], dtype=dtype)
tbl = tbl.sort_index()
return tbl
# @staticmethod
# def get_pixels():
# """
# Load the list of pixels.
# :return:
# """
# fname = os.path.join(ancpth, 'pixels.txt')
# return pd.read_csv(fname, index_col=['pixel'])
def get_header(self):
with open(self.fpth, 'r') as f:
header = f.readline().strip().split()
if 'YYYYMMDD' in header:
header = ['YYYYMMDD', 'Lat', 'Lon', 'NRpix', 'PET', 'ETo',
'Solar', 'Albedo', 'RHmax', 'RHmin', 'Tmax', 'Tmin',
'ws2m']
oldfmt = False
else:
oldfmt = True
header = ['YYYYMMDD', 'Lat', 'Lon', 'NRpix', 'PET', 'ETo',
'Solar', 'RHmax', 'RHmin', 'Tmax', 'Tmin', 'ws2m']
return oldfmt, header
def get_dataframe(self, flush=True, **kwargs):
"""
Parameters
----------
flush : bool
If true, reload fresh copy of entire pandas.DataFrame
"""
if self._df is None or flush:
self._df = self._read_file(**kwargs)
self._dates = [ | pd.Timestamp(t) | pandas.Timestamp |
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
def clean_data(data, features_to_clean):
for feature in features_to_clean:
data.drop(feature, axis=1, inplace=True)
def fulfill_missing_values(data, metadata=None):
if metadata:
age_median = metadata['age_median']
fare_median = metadata['fare_median']
embarked_value = metadata['embarked_value']
else:
age_median = data['Age'].median()
fare_median = data['Fare'].median()
embarked_value = data['Embarked'].mode()[0]
data['Age'].fillna(age_median, inplace=True)
data['Fare'].fillna(fare_median, inplace=True)
data['Embarked'].fillna(embarked_value, inplace=True)
return {
'age_median': age_median,
'fare_median': fare_median,
'embarked_value': embarked_value
}
def one_hot_encoding(data, features, metadata={}):
assert len(features) > 0
for feature in features:
label_binarizer = LabelBinarizer()
if feature in metadata:
label_binarizer.classes_ = np.array(metadata[feature])
labeled_features = label_binarizer.transform(data[feature])
else:
labeled_features = label_binarizer.fit_transform(data[feature])
column_names_for_labeled_features = ['{}_{}'.format(feature, cls) for cls in label_binarizer.classes_] if len(
label_binarizer.classes_) >= 3 else ['{}_{}'.format(feature, label_binarizer.classes_[0])]
data = data.join(pd.DataFrame(labeled_features,
columns=column_names_for_labeled_features,
index=data.index))
data.drop(feature, axis=1, inplace=True)
metadata[feature] = label_binarizer.classes_.tolist()
return data, metadata
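# --- Illustrative sketch (added; not part of the original script) ---
# The metadata returned by one_hot_encoding() lets a held-out split reuse the category
# order learned on the training split, so both frames end up with identical one-hot
# columns. The toy frames below are made up.
def _demo_one_hot_metadata_reuse():
    toy_train = pd.DataFrame({"Embarked": ["S", "C", "Q", "S"]})
    toy_test = pd.DataFrame({"Embarked": ["Q", "S"]})
    encoded_train, meta = one_hot_encoding(toy_train, ["Embarked"])
    encoded_test, _ = one_hot_encoding(toy_test, ["Embarked"], metadata=meta)
    return encoded_train, encoded_test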
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-train-data-path', type=str, help='an integer for the accumulator')
parser.add_argument('--input-test-data-path', type=str, help='an integer for the accumulator')
parser.add_argument('--output-train-data-path', type=str, help='an integer for the accumulator')
parser.add_argument('--output-test-data-path', type=str, help='an integer for the accumulator')
args = parser.parse_args()
Path(args.output_train_data_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_test_data_path).parent.mkdir(parents=True, exist_ok=True)
train = pd.read_csv(args.input_train_data_path)
test = | pd.read_csv(args.input_test_data_path) | pandas.read_csv |